/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley. The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)nfs_subs.c	7.19 (Berkeley) 03/05/90
 */

/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */
#include "param.h"
#include "user.h"
#include "proc.h"
#include "systm.h"
#include "kernel.h"
#include "mount.h"
#include "file.h"
#include "vnode.h"
#include "mbuf.h"
#include "errno.h"
#include "map.h"
#include "rpcv2.h"
#include "nfsv2.h"
#include "nfsnode.h"
#include "nfs.h"
#include "nfsiom.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"

#define TRUE	1
#define FALSE	0

/*
 * Data items converted to xdr at startup, since they are constant
 * This is kinda hokey, but may save a little time doing byte swaps
 */
u_long nfs_procids[NFS_NPROCS];
u_long nfs_xdrneg1;
u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied,
    rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
u_long nfs_vers, nfs_prog, nfs_true, nfs_false;
/* And other global data */
static u_long *rpc_uidp = (u_long *)0;
static u_long nfs_xid = 1;
static char *rpc_unixauth;
extern long hostid;
extern enum vtype v_type[NFLNK+1];
extern struct proc *nfs_iodwant[MAX_ASYNCDAEMON];
extern struct map nfsmap[NFS_MSIZ];

/* Function ret types */
static char *nfs_unixauth();

/*
 * Maximum number of groups passed through to NFS server.
 * For release 3.X systems, the maximum value is 8.
 * For release 4.X systems, the maximum value is 10.
 */
int numgrps = 8;
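
/*
 * For reference, the request header assembled by nfsm_reqh() below is the
 * usual Sun RPC call header: xid, CALL, rpc version, program, version and
 * procedure number (the RPC_SIZ bytes filled in first), followed by the
 * AUTH_UNIX credentials and NULL verifier obtained from nfs_unixauth().
 */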

/*
 * Create the header for an rpc request packet
 * The function nfs_unixauth() creates a unix style authorization string
 * and returns a ptr to it.
 * The hsiz is the size of the rest of the nfs request header.
 * (just used to decide if a cluster is a good idea)
 * nb: Note that the prog, vers and procid args are already in xdr byte order
 */
struct mbuf *nfsm_reqh(prog, vers, procid, cred, hsiz, bpos, mb, retxid)
    u_long prog;
    u_long vers;
    u_long procid;
    struct ucred *cred;
    int hsiz;
    caddr_t *bpos;
    struct mbuf **mb;
    u_long *retxid;
{
    register struct mbuf *mreq, *m;
    register u_long *p;
    struct mbuf *m1;
    char *ap;
    int asiz, siz;

    NFSMGETHDR(mreq);
    asiz = (((cred->cr_ngroups > numgrps) ? numgrps : cred->cr_ngroups)<<2);
#ifdef FILLINHOST
    asiz += nfsm_rndup(hostnamelen)+(9*NFSX_UNSIGNED);
#else
    asiz += 9*NFSX_UNSIGNED;
#endif

    /* If we need a lot, alloc a cluster ?? */
    if ((asiz+hsiz+RPC_SIZ) > MHLEN)
        NFSMCLGET(mreq, M_WAIT);
    mreq->m_len = NFSMSIZ(mreq);
    siz = mreq->m_len;
    m1 = mreq;
    /*
     * Alloc enough mbufs
     * We do it now to avoid all sleeps after the call to nfs_unixauth()
     */
    while ((asiz+RPC_SIZ) > siz) {
        MGET(m, M_WAIT, MT_DATA);
        m1->m_next = m;
        m->m_len = MLEN;
        siz += MLEN;
        m1 = m;
    }
    p = mtod(mreq, u_long *);
    *p++ = *retxid = txdr_unsigned(++nfs_xid);
    *p++ = rpc_call;
    *p++ = rpc_vers;
    *p++ = prog;
    *p++ = vers;
    *p++ = procid;

    /* Now we can call nfs_unixauth() and copy it in */
    ap = nfs_unixauth(cred);
    m = mreq;
    siz = m->m_len-RPC_SIZ;
    if (asiz <= siz) {
        bcopy(ap, (caddr_t)p, asiz);
        m->m_len = asiz+RPC_SIZ;
    } else {
        bcopy(ap, (caddr_t)p, siz);
        ap += siz;
        asiz -= siz;
        while (asiz > 0) {
            siz = (asiz > MLEN) ? MLEN : asiz;
            m = m->m_next;
            bcopy(ap, mtod(m, caddr_t), siz);
            m->m_len = siz;
            asiz -= siz;
            ap += siz;
        }
    }

    /* Finally, return values */
    *mb = m;
    *bpos = mtod(m, caddr_t)+m->m_len;
    return (mreq);
}

/*
 * copies mbuf chain to the uio scatter/gather list
 */
nfsm_mbuftouio(mrep, uiop, siz, dpos)
    struct mbuf **mrep;
    struct uio *uiop;
    int siz;
    caddr_t *dpos;
{
    register int xfer, left, len;
    register struct mbuf *mp;
    register char *mbufcp, *uiocp;
    long uiosiz, rem;

    mp = *mrep;
    mbufcp = *dpos;
    len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
    rem = nfsm_rndup(siz)-siz;
    while (siz > 0) {
        if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
            return (EFBIG);
        left = uiop->uio_iov->iov_len;
        uiocp = uiop->uio_iov->iov_base;
        if (left > siz)
            left = siz;
        uiosiz = left;
        while (left > 0) {
            while (len == 0) {
                mp = mp->m_next;
                if (mp == NULL)
                    return (EBADRPC);
                mbufcp = mtod(mp, caddr_t);
                len = mp->m_len;
            }
            xfer = (left > len) ? len : left;
#ifdef notdef
            /* Not Yet.. */
            if (uiop->uio_iov->iov_op != NULL)
                (*(uiop->uio_iov->iov_op))
                    (mbufcp, uiocp, xfer);
            else
#endif
            if (uiop->uio_segflg == UIO_SYSSPACE)
                bcopy(mbufcp, uiocp, xfer);
            else
                copyout(mbufcp, uiocp, xfer);
            left -= xfer;
            len -= xfer;
            mbufcp += xfer;
            uiocp += xfer;
            uiop->uio_offset += xfer;
            uiop->uio_resid -= xfer;
        }
        if (uiop->uio_iov->iov_len <= siz) {
            uiop->uio_iovcnt--;
            uiop->uio_iov++;
        } else {
            uiop->uio_iov->iov_base += uiosiz;
            uiop->uio_iov->iov_len -= uiosiz;
        }
        siz -= uiosiz;
    }
    if (rem > 0)
        mbufcp += rem;
    *dpos = mbufcp;
    *mrep = mp;
    return (0);
}
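
/*
 * Note on the nfsm_rndup() calls above and below: xdr carries data in
 * multiples of 4 bytes, so nfsm_rndup(siz)-siz is the number of fill bytes
 * at the end of an odd sized item (e.g. a 5 byte name is followed by 3
 * bytes of fill on the wire).
 */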

/*
 * copies a uio scatter/gather list to an mbuf chain...
 */
nfsm_uiotombuf(uiop, mq, siz, bpos)
    register struct uio *uiop;
    struct mbuf **mq;
    int siz;
    caddr_t *bpos;
{
    register struct mbuf *mp;
    struct mbuf *mp2;
    long xfer, left, uiosiz;
    int clflg;
    int rem, len;
    char *cp, *uiocp;

    if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
        clflg = 1;
    else
        clflg = 0;
    rem = nfsm_rndup(siz)-siz;
    mp2 = *mq;
    while (siz > 0) {
        if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
            return (EINVAL);
        left = uiop->uio_iov->iov_len;
        uiocp = uiop->uio_iov->iov_base;
        if (left > siz)
            left = siz;
        uiosiz = left;
        while (left > 0) {
            MGET(mp, M_WAIT, MT_DATA);
            if (clflg)
                NFSMCLGET(mp, M_WAIT);
            mp->m_len = NFSMSIZ(mp);
            mp2->m_next = mp;
            mp2 = mp;
            xfer = (left > mp->m_len) ? mp->m_len : left;
#ifdef notdef
            /* Not Yet.. */
            if (uiop->uio_iov->iov_op != NULL)
                (*(uiop->uio_iov->iov_op))
                    (uiocp, mtod(mp, caddr_t), xfer);
            else
#endif
            if (uiop->uio_segflg == UIO_SYSSPACE)
                bcopy(uiocp, mtod(mp, caddr_t), xfer);
            else
                copyin(uiocp, mtod(mp, caddr_t), xfer);
            len = mp->m_len;
            mp->m_len = xfer;
            left -= xfer;
            uiocp += xfer;
            uiop->uio_offset += xfer;
            uiop->uio_resid -= xfer;
        }
        if (uiop->uio_iov->iov_len <= siz) {
            uiop->uio_iovcnt--;
            uiop->uio_iov++;
        } else {
            uiop->uio_iov->iov_base += uiosiz;
            uiop->uio_iov->iov_len -= uiosiz;
        }
        siz -= uiosiz;
    }
    if (rem > 0) {
        if (rem > (len-mp->m_len)) {
            MGET(mp, M_WAIT, MT_DATA);
            mp->m_len = 0;
            mp2->m_next = mp;
        }
        cp = mtod(mp, caddr_t)+mp->m_len;
        for (left = 0; left < rem; left++)
            *cp++ = '\0';
        mp->m_len += rem;
        *bpos = cp;
    } else
        *bpos = mtod(mp, caddr_t)+mp->m_len;
    *mq = mp;
    return (0);
}

/*
 * Help break down an mbuf chain by setting the first siz bytes contiguous
 * pointed to by returned val.
 * If Updateflg == True we can overwrite the first part of the mbuf data
 * This is used by the macros nfsm_disect and nfsm_disecton for tough
 * cases. (The macros use the vars. dpos and dpos2)
 */
nfsm_disct(mdp, dposp, siz, left, updateflg, cp2)
    struct mbuf **mdp;
    caddr_t *dposp;
    int siz;
    int left;
    int updateflg;
    caddr_t *cp2;
{
    register struct mbuf *mp, *mp2;
    register int siz2, xfer;
    register caddr_t p;

    mp = *mdp;
    while (left == 0) {
        *mdp = mp = mp->m_next;
        if (mp == NULL)
            return (EBADRPC);
        left = mp->m_len;
        *dposp = mtod(mp, caddr_t);
    }
    if (left >= siz) {
        *cp2 = *dposp;
        *dposp += siz;
        return (0);
    } else if (mp->m_next == NULL) {
        return (EBADRPC);
    } else if (siz > MCLBYTES) {
        panic("nfs S too big");
    } else {
        /* Iff update, you can overwrite, else must alloc new mbuf */
        if (updateflg) {
            NFSMINOFF(mp);
        } else {
            MGET(mp2, M_WAIT, MT_DATA);
            mp2->m_next = mp->m_next;
            mp->m_next = mp2;
            mp->m_len -= left;
            mp = mp2;
        }
        /* Alloc cluster iff we need it */
        if (!M_HASCL(mp) && siz > NFSMSIZ(mp)) {
            NFSMCLGET(mp, M_WAIT);
            if (!M_HASCL(mp))
                return (ENOBUFS);
        }
        *cp2 = p = mtod(mp, caddr_t);
        bcopy(*dposp, p, left);		/* Copy what was left */
        siz2 = siz-left;
        p += left;
        mp2 = mp->m_next;
        /* Loop around copying up the siz2 bytes */
        while (siz2 > 0) {
            if (mp2 == NULL)
                return (EBADRPC);
            xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
            bcopy(mtod(mp2, caddr_t), p, xfer);
            NFSMADV(mp2, xfer);
            mp2->m_len -= xfer;
            siz2 -= xfer;
            if (siz2 > 0)
                mp2 = mp2->m_next;
        }
        mp->m_len = siz;
        *mdp = mp2;
        *dposp = mtod(mp2, caddr_t);
    }
    return (0);
}

/*
 * Advance the position in the mbuf chain with/without freeing mbufs
 */
nfs_adv(mdp, dposp, offs, left)
    struct mbuf **mdp;
    caddr_t *dposp;
    int offs;
    int left;
{
    register struct mbuf *m;
    register int s;

    m = *mdp;
    s = left;
    while (s < offs) {
        offs -= s;
        m = m->m_next;
        if (m == NULL)
            return (EBADRPC);
        s = m->m_len;
    }
    *mdp = m;
    *dposp = mtod(m, caddr_t)+offs;
    return (0);
}
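
/*
 * A string goes out as a 32 bit length followed by the bytes themselves,
 * null filled to the next 4 byte boundary.  nfsm_strtmbuf() below builds
 * that representation for the cases where the string will not fit in what
 * is left of the current mbuf.
 */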

/*
 * Copy a string into mbufs for the hard cases...
 */
nfsm_strtmbuf(mb, bpos, cp, siz)
    struct mbuf **mb;
    char **bpos;
    char *cp;
    long siz;
{
    register struct mbuf *m1, *m2;
    long left, xfer, len, tlen;
    u_long *p;
    int putsize;

    putsize = 1;
    m2 = *mb;
    left = NFSMSIZ(m2)-m2->m_len;
    if (left > 0) {
        p = ((u_long *)(*bpos));
        *p++ = txdr_unsigned(siz);
        putsize = 0;
        left -= NFSX_UNSIGNED;
        m2->m_len += NFSX_UNSIGNED;
        if (left > 0) {
            bcopy(cp, (caddr_t) p, left);
            siz -= left;
            cp += left;
            m2->m_len += left;
            left = 0;
        }
    }
    /* Loop around adding mbufs */
    while (siz > 0) {
        MGET(m1, M_WAIT, MT_DATA);
        if (siz > MLEN)
            NFSMCLGET(m1, M_WAIT);
        m1->m_len = NFSMSIZ(m1);
        m2->m_next = m1;
        m2 = m1;
        p = mtod(m1, u_long *);
        tlen = 0;
        if (putsize) {
            *p++ = txdr_unsigned(siz);
            m1->m_len -= NFSX_UNSIGNED;
            tlen = NFSX_UNSIGNED;
            putsize = 0;
        }
        if (siz < m1->m_len) {
            len = nfsm_rndup(siz);
            xfer = siz;
            if (xfer < len)
                *(p+(xfer>>2)) = 0;
        } else {
            xfer = len = m1->m_len;
        }
        bcopy(cp, (caddr_t) p, xfer);
        m1->m_len = len+tlen;
        siz -= xfer;
        cp += xfer;
    }
    *mb = m1;
    *bpos = mtod(m1, caddr_t)+m1->m_len;
    return (0);
}

/*
 * Called once to initialize data structures...
 */
nfs_init()
{
    register int i;

    rpc_vers = txdr_unsigned(RPC_VER2);
    rpc_call = txdr_unsigned(RPC_CALL);
    rpc_reply = txdr_unsigned(RPC_REPLY);
    rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
    rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
    rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
    rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
    nfs_vers = txdr_unsigned(NFS_VER2);
    nfs_prog = txdr_unsigned(NFS_PROG);
    nfs_true = txdr_unsigned(TRUE);
    nfs_false = txdr_unsigned(FALSE);
    /* Loop thru nfs procids */
    for (i = 0; i < NFS_NPROCS; i++)
        nfs_procids[i] = txdr_unsigned(i);
    /* Ensure async daemons disabled */
    for (i = 0; i < MAX_ASYNCDAEMON; i++)
        nfs_iodwant[i] = (struct proc *)0;
    v_type[0] = VNON;
    v_type[1] = VREG;
    v_type[2] = VDIR;
    v_type[3] = VBLK;
    v_type[4] = VCHR;
    v_type[5] = VLNK;
    nfs_xdrneg1 = txdr_unsigned(-1);
    nfs_nhinit();		/* Init the nfsnode table */
    nfsrv_initcache();		/* Init the server request cache */
    rminit(nfsmap, (long)NFS_MAPREG, (long)1, "nfs mapreg", NFS_MSIZ);
    /* And start timer */
    nfs_timer();
}
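
/*
 * For reference, the credentials built by nfs_unixauth() are the standard
 * AUTH_UNIX body: stamp (hostid here), machine name (or a zero length),
 * uid, gid and the group list, followed by an AUTH_NULL verifier (the two
 * trailing zero longs).  The constant prefix is built just once in
 * rpc_unixauth and rpc_uidp remembers where the per-call uid/gid part
 * begins.
 */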

/*
 * Fill in the rest of the rpc_unixauth and return it
 */
static char *nfs_unixauth(cr)
    register struct ucred *cr;
{
    register u_long *p;
    register int i;
    int ngr;

    /* Maybe someday there should be a cache of AUTH_SHORT's */
    if ((p = rpc_uidp) == NULL) {
#ifdef FILLINHOST
        i = nfsm_rndup(hostnamelen)+(19*NFSX_UNSIGNED);
#else
        i = 19*NFSX_UNSIGNED;
#endif
        MALLOC(p, u_long *, i, M_TEMP, M_WAITOK);
        bzero((caddr_t)p, i);
        rpc_unixauth = (caddr_t)p;
        *p++ = txdr_unsigned(RPCAUTH_UNIX);
        p++;		/* Fill in size later */
        *p++ = hostid;
#ifdef FILLINHOST
        *p++ = txdr_unsigned(hostnamelen);
        i = nfsm_rndup(hostnamelen);
        bcopy(hostname, (caddr_t)p, hostnamelen);
        p += (i>>2);
#else
        *p++ = 0;
#endif
        rpc_uidp = p;
    }
    *p++ = txdr_unsigned(cr->cr_uid);
    *p++ = txdr_unsigned(cr->cr_groups[0]);
    ngr = (cr->cr_ngroups > numgrps) ? numgrps : cr->cr_ngroups;
    *p++ = txdr_unsigned(ngr);
    for (i = 0; i < ngr; i++)
        *p++ = txdr_unsigned(cr->cr_groups[i]);
    /* And add the AUTH_NULL */
    *p++ = 0;
    *p = 0;
    i = (((caddr_t)p)-rpc_unixauth)-12;
    p = (u_long *)(rpc_unixauth+4);
    *p = txdr_unsigned(i);
    return (rpc_unixauth);
}

/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and
 * Iff vap not NULL
 * copy the attributes to *vaper
 */
nfs_loadattrcache(vpp, mdp, dposp, vaper)
    struct vnode **vpp;
    struct mbuf **mdp;
    caddr_t *dposp;
    struct vattr *vaper;
{
    register struct vnode *vp = *vpp;
    register struct vattr *vap;
    register struct nfsv2_fattr *fp;
    extern struct vnodeops spec_nfsv2nodeops;
    register struct nfsnode *np;
    register long t1;
    caddr_t dpos, cp2;
    int error = 0;
    struct mbuf *md;
    enum vtype type;
    dev_t rdev;
    struct timeval mtime;
    struct vnode *nvp;

    md = *mdp;
    dpos = *dposp;
    t1 = (mtod(md, caddr_t)+md->m_len)-dpos;
    if (error = nfsm_disct(&md, &dpos, NFSX_FATTR, t1, TRUE, &cp2))
        return (error);
    fp = (struct nfsv2_fattr *)cp2;
    type = nfstov_type(fp->fa_type);
    rdev = fxdr_unsigned(dev_t, fp->fa_rdev);
    fxdr_time(&fp->fa_mtime, &mtime);
    /*
     * If v_type == VNON it is a new node, so fill in the v_type,
     * n_mtime fields. Check to see if it represents a special
     * device, and if so, check for a possible alias. Once the
     * correct vnode has been obtained, fill in the rest of the
     * information.
     */
    np = VTONFS(vp);
    if (vp->v_type == VNON) {
        vp->v_type = type;
        if (vp->v_type == VFIFO) {
#ifdef FIFO
            extern struct vnodeops fifo_nfsv2nodeops;
            vp->v_op = &fifo_nfsv2nodeops;
#else
            return (EOPNOTSUPP);
#endif /* FIFO */
        }
        if (vp->v_type == VCHR || vp->v_type == VBLK) {
            vp->v_op = &spec_nfsv2nodeops;
            if (nvp = checkalias(vp, rdev, vp->v_mount)) {
                /*
                 * Reinitialize aliased node.
                 */
                np = VTONFS(nvp);
                np->n_vnode = nvp;
                np->n_flag = 0;
                nfs_lock(nvp);
                bcopy((caddr_t)&VTONFS(vp)->n_fh,
                    (caddr_t)&np->n_fh, NFSX_FH);
                insque(np, nfs_hash(&np->n_fh));
                np->n_attrstamp = 0;
                np->n_sillyrename = (struct sillyrename *)0;
                /*
                 * Discard unneeded vnode and update actual one
                 */
                vput(vp);
                *vpp = vp = nvp;
            }
        }
        np->n_mtime = mtime.tv_sec;
    }
    vap = &np->n_vattr;
    vap->va_type = type;
    vap->va_mode = nfstov_mode(fp->fa_mode);
    vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
    vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
    vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
    vap->va_size = fxdr_unsigned(u_long, fp->fa_size);
    if ((np->n_flag & NMODIFIED) == 0 || vap->va_size > np->n_size)
        np->n_size = vap->va_size;
    vap->va_size1 = 0;		/* OR -1 ?? */
    vap->va_blocksize = fxdr_unsigned(long, fp->fa_blocksize);
    vap->va_rdev = rdev;
    vap->va_bytes = fxdr_unsigned(long, fp->fa_blocks) * vap->va_blocksize;
    vap->va_bytes1 = 0;
    vap->va_fsid = vp->v_mount->m_fsid.val[0];
    vap->va_fileid = fxdr_unsigned(long, fp->fa_fileid);
    vap->va_atime.tv_sec = fxdr_unsigned(long, fp->fa_atime.tv_sec);
    vap->va_atime.tv_usec = 0;
    vap->va_flags = fxdr_unsigned(u_long, fp->fa_atime.tv_usec);
    vap->va_mtime = mtime;
    vap->va_ctime.tv_sec = fxdr_unsigned(long, fp->fa_ctime.tv_sec);
    vap->va_ctime.tv_usec = 0;
    vap->va_gen = fxdr_unsigned(u_long, fp->fa_ctime.tv_usec);
    np->n_attrstamp = time.tv_sec;
    *dposp = dpos;
    *mdp = md;
    if (vaper != NULL) {
        bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
        if ((np->n_flag & NMODIFIED) && (np->n_size > vap->va_size))
            vaper->va_size = np->n_size;
    }
    return (0);
}

/*
 * Check the time stamp
 * If the cache is valid, copy contents to *vap and return 0
 * otherwise return an error
 */
nfs_getattrcache(vp, vap)
    register struct vnode *vp;
    struct vattr *vap;
{
    register struct nfsnode *np;

    np = VTONFS(vp);
    if ((time.tv_sec-np->n_attrstamp) < NFS_ATTRTIMEO) {
        nfsstats.attrcache_hits++;
        bcopy((caddr_t)&np->n_vattr, (caddr_t)vap, sizeof(struct vattr));
        if ((np->n_flag & NMODIFIED) == 0)
            np->n_size = vap->va_size;
        else if (np->n_size > vap->va_size)
            vap->va_size = np->n_size;
        return (0);
    } else {
        nfsstats.attrcache_misses++;
        return (ENOENT);
    }
}
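
/*
 * Server side lookups start from a file handle rather than a pathname:
 * the handle carries the file system id plus an opaque fid, which
 * nfsrv_fhtovp() (at the end of this file) turns back into a vnode.
 */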

/*
 * nfs_namei - a little like namei(), but for one element only
 * essentially look up file handle, fill in ndp and call VOP_LOOKUP()
 */
nfs_namei(ndp, fhp, len, mdp, dposp)
    register struct nameidata *ndp;
    fhandle_t *fhp;
    int len;
    struct mbuf **mdp;
    caddr_t *dposp;
{
    register int i, rem;
    register struct mbuf *md;
    register char *cp;
    struct vnode *dp = (struct vnode *)0;
    int flag;
    int docache;
    int wantparent;
    int lockparent;
    int error = 0;

    ndp->ni_vp = ndp->ni_dvp = (struct vnode *)0;
    flag = ndp->ni_nameiop & OPFLAG;
    wantparent = ndp->ni_nameiop & (LOCKPARENT | WANTPARENT);
    lockparent = ndp->ni_nameiop & LOCKPARENT;
    docache = (ndp->ni_nameiop & NOCACHE) ^ NOCACHE;
    if (flag == DELETE || wantparent)
        docache = 0;

    /* Fill in the nameidata and call lookup */
    cp = *dposp;
    md = *mdp;
    rem = mtod(md, caddr_t)+md->m_len-cp;
    ndp->ni_hash = 0;
    for (i = 0; i < len;) {
        if (rem == 0) {
            md = md->m_next;
            if (md == NULL)
                return (EBADRPC);
            cp = mtod(md, caddr_t);
            rem = md->m_len;
        }
        if (*cp == '\0' || *cp == '/')
            return (EINVAL);
        if (*cp & 0200)
            if ((*cp&0377) == ('/'|0200) || flag != DELETE)
                return (EINVAL);
        ndp->ni_dent.d_name[i++] = *cp;
        ndp->ni_hash += (unsigned char)*cp * i;
        cp++;
        rem--;
    }
    *mdp = md;
    len = nfsm_rndup(len)-len;
    if (len > 0)
        *dposp = cp+len;
    else
        *dposp = cp;
    ndp->ni_namelen = i;
    ndp->ni_dent.d_namlen = i;
    ndp->ni_dent.d_name[i] = '\0';
    ndp->ni_pathlen = 1;
    ndp->ni_dirp = ndp->ni_ptr = &ndp->ni_dent.d_name[0];
    ndp->ni_next = &ndp->ni_dent.d_name[i];
    ndp->ni_loopcnt = 0;	/* Not actually used for now */
    ndp->ni_endoff = 0;
    if (docache)
        ndp->ni_makeentry = 1;
    else
        ndp->ni_makeentry = 0;
    ndp->ni_isdotdot = (i == 2 &&
        ndp->ni_dent.d_name[1] == '.' && ndp->ni_dent.d_name[0] == '.');

    if (error = nfsrv_fhtovp(fhp, TRUE, &dp, ndp->ni_cred))
        return (error);
    if (dp->v_type != VDIR) {
        vput(dp);
        return (ENOTDIR);
    }
    /*
     * Must set current directory here to avoid confusion in namei()
     * called from rename()
     */
    ndp->ni_cdir = dp;
    ndp->ni_rdir = (struct vnode *)0;

    /*
     * Handle "..":
     * If this vnode is the root of the mounted
     * file system, then ignore it so can't get out
     */
    if (ndp->ni_isdotdot && (dp->v_flag & VROOT)) {
        ndp->ni_dvp = dp;
        ndp->ni_vp = dp;
        VREF(dp);
        goto nextname;
    }

    /*
     * We now have a segment name to search for, and a directory to search.
     */
    if (error = VOP_LOOKUP(dp, ndp)) {
        if (ndp->ni_vp != NULL)
            panic("leaf should be empty");
        /*
         * If creating and at end of pathname, then can consider
         * allowing file to be created.
         */
        if (ndp->ni_dvp->v_mount->m_flag & (M_RDONLY | M_EXRDONLY))
            error = EROFS;
        if (flag == LOOKUP || flag == DELETE || error != ENOENT)
            goto bad;
        /*
         * We return with ni_vp NULL to indicate that the entry
         * doesn't currently exist, leaving a pointer to the
         * (possibly locked) directory inode in ndp->ni_dvp.
         */
        return (0);		/* should this be ENOENT? */
    }

    dp = ndp->ni_vp;

nextname:
    ndp->ni_ptr = ndp->ni_next;
    /*
     * Check for read-only file systems
     */
    if (flag == DELETE || flag == RENAME) {
        /*
         * Disallow directory write attempts on read-only
         * file systems.
         */
        if ((dp->v_mount->m_flag & (M_RDONLY|M_EXRDONLY)) ||
            (wantparent && (ndp->ni_dvp->v_mount->m_flag & (M_RDONLY|M_EXRDONLY)))) {
            error = EROFS;
            goto bad2;
        }
    }

    if (!wantparent)
        vrele(ndp->ni_dvp);

    if ((ndp->ni_nameiop & LOCKLEAF) == 0)
        VOP_UNLOCK(dp);
    return (0);

bad2:
    if (lockparent)
        VOP_UNLOCK(ndp->ni_dvp);
    vrele(ndp->ni_dvp);
bad:
    vput(dp);
    ndp->ni_vp = NULL;
    return (error);
}

/*
 * A fiddled version of m_adj() that ensures null fill to a long
 * boundary and only trims off the back end
 */
nfsm_adj(mp, len, nul)
    struct mbuf *mp;
    register int len;
    int nul;
{
    register struct mbuf *m;
    register int count, i;
    register char *cp;

    /*
     * Trim from tail.  Scan the mbuf chain,
     * calculating its length and finding the last mbuf.
     * If the adjustment only affects this mbuf, then just
     * adjust and return.  Otherwise, rescan and truncate
     * after the remaining size.
     */
    count = 0;
    m = mp;
    for (;;) {
        count += m->m_len;
        if (m->m_next == (struct mbuf *)0)
            break;
        m = m->m_next;
    }
    if (m->m_len > len) {
        m->m_len -= len;
        if (nul > 0) {
            cp = mtod(m, caddr_t)+m->m_len-nul;
            for (i = 0; i < nul; i++)
                *cp++ = '\0';
        }
        return;
    }
    count -= len;
    if (count < 0)
        count = 0;
    /*
     * Correct length for chain is "count".
     * Find the mbuf with last data, adjust its length,
     * and toss data from remaining mbufs on chain.
     */
    for (m = mp; m; m = m->m_next) {
        if (m->m_len >= count) {
            m->m_len = count;
            if (nul > 0) {
                cp = mtod(m, caddr_t)+m->m_len-nul;
                for (i = 0; i < nul; i++)
                    *cp++ = '\0';
            }
            break;
        }
        count -= m->m_len;
    }
    while (m = m->m_next)
        m->m_len = 0;
}

/*
 * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 *	- look up fsid in mount list (if not found ret error)
 *	- check that it is exported
 *	- get vp by calling VFS_FHTOVP() macro
 *	- if not lockflag unlock it with VOP_UNLOCK()
 *	- if cred->cr_uid == 0 set it to m_exroot
 */
nfsrv_fhtovp(fhp, lockflag, vpp, cred)
    fhandle_t *fhp;
    int lockflag;
    struct vnode **vpp;
    struct ucred *cred;
{
    register struct mount *mp;

    if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
        return (ESTALE);
    if ((mp->m_flag & M_EXPORTED) == 0)
        return (EACCES);
    if (VFS_FHTOVP(mp, &fhp->fh_fid, vpp))
        return (ESTALE);
    if (cred->cr_uid == 0)
        cred->cr_uid = mp->m_exroot;
    if (!lockflag)
        VOP_UNLOCK(*vpp);
    return (0);
}