/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_subs.c	7.36 (Berkeley) 02/21/91
 */

/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */
#include "param.h"
#include "user.h"
#include "proc.h"
#include "filedesc.h"
#include "systm.h"
#include "kernel.h"
#include "mount.h"
#include "file.h"
#include "vnode.h"
#include "mbuf.h"
#include "errno.h"
#include "map.h"
#include "rpcv2.h"
#include "nfsv2.h"
#include "nfsnode.h"
#include "nfs.h"
#include "nfsiom.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
#include "nfscompress.h"

#define TRUE	1
#define FALSE	0

/*
 * Data items converted to xdr at startup, since they are constant
 * This is kinda hokey, but may save a little time doing byte swaps
 */
u_long nfs_procids[NFS_NPROCS];
u_long nfs_xdrneg1;
u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied,
	rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
u_long nfs_vers, nfs_prog, nfs_true, nfs_false;
/* And other global data */
static u_long *rpc_uidp = (u_long *)0;
static u_long nfs_xid = 1;
static char *rpc_unixauth;
extern long hostid;
enum vtype ntov_type[7] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON };
extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
extern struct map nfsmap[NFS_MSIZ];
extern struct nfsreq nfsreqh;

/* Function ret types */
static char *nfs_unixauth();

/*
 * Maximum number of groups passed through to NFS server.
 * According to RFC1057 it should be 16.
 * For release 3.X systems, the maximum value is 8.
 * For some other servers, the maximum value is 10.
 */
int numgrps = 8;

/*
 * Create the header for an rpc request packet
 * The function nfs_unixauth() creates a unix style authorization string
 * and returns a ptr to it.
 * The hsiz is the size of the rest of the nfs request header.
 * (just used to decide if a cluster is a good idea)
 * nb: Note that the prog, vers and procid args are already in xdr byte order
 */
struct mbuf *nfsm_reqh(prog, vers, procid, cred, hsiz, bpos, mb, retxid)
	u_long prog;
	u_long vers;
	u_long procid;
	struct ucred *cred;
	int hsiz;
	caddr_t *bpos;
	struct mbuf **mb;
	u_long *retxid;
{
	register struct mbuf *mreq, *m;
	register u_long *p;
	struct mbuf *m1;
	char *ap;
	int asiz, siz;

	NFSMGETHDR(mreq);
	asiz = ((((cred->cr_ngroups - 1) > numgrps) ? numgrps :
		(cred->cr_ngroups - 1)) << 2);
#ifdef FILLINHOST
	asiz += nfsm_rndup(hostnamelen)+(9*NFSX_UNSIGNED);
#else
	asiz += 9*NFSX_UNSIGNED;
#endif

	/* If we need a lot, alloc a cluster ?? */
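	/*
	 * (asiz is the xdr'd credential size computed above and hsiz the
	 * caller's guess at the rest of the request header; RPC_SIZ
	 * presumably covers the fixed call header words written just
	 * below, so together they decide whether a cluster is needed.)
	 */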
	if ((asiz+hsiz+RPC_SIZ) > MHLEN)
		MCLGET(mreq, M_WAIT);
	mreq->m_len = NFSMSIZ(mreq);
	siz = mreq->m_len;
	m1 = mreq;
	/*
	 * Alloc enough mbufs
	 * We do it now to avoid all sleeps after the call to nfs_unixauth()
	 */
	while ((asiz+RPC_SIZ) > siz) {
		MGET(m, M_WAIT, MT_DATA);
		m1->m_next = m;
		m->m_len = MLEN;
		siz += MLEN;
		m1 = m;
	}
	p = mtod(mreq, u_long *);
	*p++ = *retxid = txdr_unsigned(++nfs_xid);
	*p++ = rpc_call;
	*p++ = rpc_vers;
	*p++ = prog;
	*p++ = vers;
	*p++ = procid;

	/* Now we can call nfs_unixauth() and copy it in */
	ap = nfs_unixauth(cred);
	m = mreq;
	siz = m->m_len-RPC_SIZ;
	if (asiz <= siz) {
		bcopy(ap, (caddr_t)p, asiz);
		m->m_len = asiz+RPC_SIZ;
	} else {
		bcopy(ap, (caddr_t)p, siz);
		ap += siz;
		asiz -= siz;
		while (asiz > 0) {
			siz = (asiz > MLEN) ? MLEN : asiz;
			m = m->m_next;
			bcopy(ap, mtod(m, caddr_t), siz);
			m->m_len = siz;
			asiz -= siz;
			ap += siz;
		}
	}

	/* Finally, return values */
	*mb = m;
	*bpos = mtod(m, caddr_t)+m->m_len;
	return (mreq);
}

/*
 * copies mbuf chain to the uio scatter/gather list
 */
nfsm_mbuftouio(mrep, uiop, siz, dpos)
	struct mbuf **mrep;
	register struct uio *uiop;
	int siz;
	caddr_t *dpos;
{
	register char *mbufcp, *uiocp;
	register int xfer, left, len;
	register struct mbuf *mp;
	long uiosiz, rem;
	int error = 0;

	mp = *mrep;
	mbufcp = *dpos;
	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
	rem = nfsm_rndup(siz)-siz;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EFBIG);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			while (len == 0) {
				mp = mp->m_next;
				if (mp == NULL)
					return (EBADRPC);
				mbufcp = mtod(mp, caddr_t);
				len = mp->m_len;
			}
			xfer = (left > len) ? len : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
					(mbufcp, uiocp, xfer);
			else
#endif
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(mbufcp, uiocp, xfer);
			else
				copyout(mbufcp, uiocp, xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	*dpos = mbufcp;
	*mrep = mp;
	if (rem > 0) {
		if (len < rem)
			error = nfs_adv(mrep, dpos, rem, len);
		else
			*dpos += rem;
	}
	return (error);
}

/*
 * copies a uio scatter/gather list to an mbuf chain...
 */
nfsm_uiotombuf(uiop, mq, siz, bpos)
	register struct uio *uiop;
	struct mbuf **mq;
	int siz;
	caddr_t *bpos;
{
	register char *uiocp;
	register struct mbuf *mp, *mp2;
	register int xfer, left, len;
	int uiosiz, clflg, rem;
	char *cp;

	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
		clflg = 1;
	else
		clflg = 0;
	rem = nfsm_rndup(siz)-siz;
	mp2 = *mq;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return (EINVAL);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			MGET(mp, M_WAIT, MT_DATA);
			if (clflg)
				MCLGET(mp, M_WAIT);
			mp->m_len = NFSMSIZ(mp);
			mp2->m_next = mp;
			mp2 = mp;
			xfer = (left > mp->m_len) ? mp->m_len : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
					(uiocp, mtod(mp, caddr_t), xfer);
			else
#endif
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(uiocp, mtod(mp, caddr_t), xfer);
			else
				copyin(uiocp, mtod(mp, caddr_t), xfer);
			len = mp->m_len;
			mp->m_len = xfer;
			left -= xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	if (rem > 0) {
		if (rem > (len-mp->m_len)) {
			MGET(mp, M_WAIT, MT_DATA);
			mp->m_len = 0;
			mp2->m_next = mp;
		}
		cp = mtod(mp, caddr_t)+mp->m_len;
		for (left = 0; left < rem; left++)
			*cp++ = '\0';
		mp->m_len += rem;
		*bpos = cp;
	} else
		*bpos = mtod(mp, caddr_t)+mp->m_len;
	*mq = mp;
	return (0);
}

/*
 * Help break down an mbuf chain by setting the first siz bytes contiguous
 * pointed to by returned val.
 * If Updateflg == True we can overwrite the first part of the mbuf data
 * This is used by the macros nfsm_disect and nfsm_disecton for tough
 * cases. (The macros use the vars. dpos and dpos2)
 */
nfsm_disct(mdp, dposp, siz, left, updateflg, cp2)
	struct mbuf **mdp;
	caddr_t *dposp;
	int siz;
	int left;
	int updateflg;
	caddr_t *cp2;
{
	register struct mbuf *mp, *mp2;
	register int siz2, xfer;
	register caddr_t p;

	mp = *mdp;
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			return (EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
	if (left >= siz) {
		*cp2 = *dposp;
		*dposp += siz;
	} else if (mp->m_next == NULL) {
		return (EBADRPC);
	} else if (siz > MHLEN) {
		panic("nfs S too big");
	} else {
		/* Iff update, you can overwrite, else must alloc new mbuf */
		if (updateflg) {
			NFSMINOFF(mp);
		} else {
			MGET(mp2, M_WAIT, MT_DATA);
			mp2->m_next = mp->m_next;
			mp->m_next = mp2;
			mp->m_len -= left;
			mp = mp2;
		}
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);		/* Copy what was left */
		siz2 = siz-left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				return (EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			if (xfer > 0) {
				bcopy(mtod(mp2, caddr_t), p, xfer);
				NFSMADV(mp2, xfer);
				mp2->m_len -= xfer;
				p += xfer;
				siz2 -= xfer;
			}
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	return (0);
}

/*
 * Advance the position in the mbuf chain.
 */
nfs_adv(mdp, dposp, offs, left)
	struct mbuf **mdp;
	caddr_t *dposp;
	int offs;
	int left;
{
	register struct mbuf *m;
	register int s;

	m = *mdp;
	s = left;
	while (s < offs) {
		offs -= s;
		m = m->m_next;
		if (m == NULL)
			return (EBADRPC);
		s = m->m_len;
	}
	*mdp = m;
	*dposp = mtod(m, caddr_t)+offs;
	return (0);
}

/*
 * Copy a string into mbufs for the hard cases...
 */
nfsm_strtmbuf(mb, bpos, cp, siz)
	struct mbuf **mb;
	char **bpos;
	char *cp;
	long siz;
{
	register struct mbuf *m1, *m2;
	long left, xfer, len, tlen;
	u_long *p;
	int putsize;

	putsize = 1;
	m2 = *mb;
	left = NFSMSIZ(m2)-m2->m_len;
	if (left > 0) {
		p = ((u_long *)(*bpos));
		*p++ = txdr_unsigned(siz);
		putsize = 0;
		left -= NFSX_UNSIGNED;
		m2->m_len += NFSX_UNSIGNED;
		if (left > 0) {
			bcopy(cp, (caddr_t) p, left);
			siz -= left;
			cp += left;
			m2->m_len += left;
			left = 0;
		}
	}
	/* Loop around adding mbufs */
	while (siz > 0) {
		MGET(m1, M_WAIT, MT_DATA);
		if (siz > MLEN)
			MCLGET(m1, M_WAIT);
		m1->m_len = NFSMSIZ(m1);
		m2->m_next = m1;
		m2 = m1;
		p = mtod(m1, u_long *);
		tlen = 0;
		if (putsize) {
			*p++ = txdr_unsigned(siz);
			m1->m_len -= NFSX_UNSIGNED;
			tlen = NFSX_UNSIGNED;
			putsize = 0;
		}
		if (siz < m1->m_len) {
			len = nfsm_rndup(siz);
			xfer = siz;
			if (xfer < len)
				*(p+(xfer>>2)) = 0;
		} else {
			xfer = len = m1->m_len;
		}
		bcopy(cp, (caddr_t) p, xfer);
		m1->m_len = len+tlen;
		siz -= xfer;
		cp += xfer;
	}
	*mb = m1;
	*bpos = mtod(m1, caddr_t)+m1->m_len;
	return (0);
}
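/*
 * Rough sketch of what nfsm_strtmbuf() above puts on the wire (the usual
 * xdr counted string layout): a four byte length in xdr (network) byte
 * order, the string bytes themselves, then zero fill out to the next
 * multiple of four bytes.  For example, the 5 byte name "hello" would
 * occupy 4 + 8 bytes:
 *
 *	00 00 00 05  'h' 'e' 'l' 'l'  'o' 00 00 00
 */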
/*
 * Called once to initialize data structures...
 */
nfs_init()
{
	register int i;

	rpc_vers = txdr_unsigned(RPC_VER2);
	rpc_call = txdr_unsigned(RPC_CALL);
	rpc_reply = txdr_unsigned(RPC_REPLY);
	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
	nfs_vers = txdr_unsigned(NFS_VER2);
	nfs_prog = txdr_unsigned(NFS_PROG);
	nfs_true = txdr_unsigned(TRUE);
	nfs_false = txdr_unsigned(FALSE);
	/* Loop thru nfs procids */
	for (i = 0; i < NFS_NPROCS; i++)
		nfs_procids[i] = txdr_unsigned(i);
	/* Ensure async daemons disabled */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		nfs_iodwant[i] = (struct proc *)0;
	nfs_xdrneg1 = txdr_unsigned(-1);
	nfs_nhinit();			/* Init the nfsnode table */
	nfsrv_initcache();		/* Init the server request cache */
	rminit(nfsmap, (long)NFS_MAPREG, (long)1, "nfs mapreg", NFS_MSIZ);

	/*
	 * Initialize reply list and start timer
	 */
	nfsreqh.r_prev = nfsreqh.r_next = &nfsreqh;
	nfs_timer();
}

/*
 * Fill in the rest of the rpc_unixauth and return it
 */
static char *nfs_unixauth(cr)
	register struct ucred *cr;
{
	register u_long *p;
	register int i;
	int ngr;

	/* Maybe someday there should be a cache of AUTH_SHORT's */
	if ((p = rpc_uidp) == NULL) {
#ifdef FILLINHOST
		i = nfsm_rndup(hostnamelen)+(25*NFSX_UNSIGNED);
#else
		i = 25*NFSX_UNSIGNED;
#endif
		MALLOC(p, u_long *, i, M_TEMP, M_WAITOK);
		bzero((caddr_t)p, i);
		rpc_unixauth = (caddr_t)p;
		*p++ = txdr_unsigned(RPCAUTH_UNIX);
		p++;	/* Fill in size later */
		*p++ = hostid;
#ifdef FILLINHOST
		*p++ = txdr_unsigned(hostnamelen);
		i = nfsm_rndup(hostnamelen);
		bcopy(hostname, (caddr_t)p, hostnamelen);
		p += (i>>2);
#else
		*p++ = 0;
#endif
		rpc_uidp = p;
	}
	*p++ = txdr_unsigned(cr->cr_uid);
	*p++ = txdr_unsigned(cr->cr_groups[0]);
	ngr = ((cr->cr_ngroups - 1) > numgrps) ? numgrps : (cr->cr_ngroups - 1);
	*p++ = txdr_unsigned(ngr);
	for (i = 1; i <= ngr; i++)
		*p++ = txdr_unsigned(cr->cr_groups[i]);
	/* And add the AUTH_NULL */
	*p++ = 0;
	*p = 0;
	i = (((caddr_t)p)-rpc_unixauth)-12;
	p = (u_long *)(rpc_unixauth+4);
	*p = txdr_unsigned(i);
	return (rpc_unixauth);
}
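/*
 * For reference, the credential nfs_unixauth() above builds is the RFC1057
 * AUTH_UNIX flavor followed by an AUTH_NULL verifier, all in four byte xdr
 * units, roughly:
 *
 *	AUTH_UNIX, len, stamp (hostid), hostname len (0 unless FILLINHOST),
 *	uid, gid, #gids, up to numgrps additional gids, AUTH_NULL (0),
 *	zero length verifier
 *
 * The len word is back patched at the end since the number of groups
 * varies from credential to credential.
 */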
/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and
 * Iff vap not NULL
 * copy the attributes to *vaper
 */
nfs_loadattrcache(vpp, mdp, dposp, vaper)
	struct vnode **vpp;
	struct mbuf **mdp;
	caddr_t *dposp;
	struct vattr *vaper;
{
	register struct vnode *vp = *vpp;
	register struct vattr *vap;
	register struct nfsv2_fattr *fp;
	extern struct vnodeops spec_nfsv2nodeops;
	register struct nfsnode *np;
	register long t1;
	caddr_t dpos, cp2;
	int error = 0;
	struct mbuf *md;
	enum vtype type;
	long rdev;
	struct timeval mtime;
	struct vnode *nvp;

	md = *mdp;
	dpos = *dposp;
	t1 = (mtod(md, caddr_t)+md->m_len)-dpos;
	if (error = nfsm_disct(&md, &dpos, NFSX_FATTR, t1, TRUE, &cp2))
		return (error);
	fp = (struct nfsv2_fattr *)cp2;
	type = nfstov_type(fp->fa_type);
	rdev = fxdr_unsigned(long, fp->fa_rdev);
	fxdr_time(&fp->fa_mtime, &mtime);
	/*
	 * If v_type == VNON it is a new node, so fill in the v_type,
	 * n_mtime fields. Check to see if it represents a special
	 * device, and if so, check for a possible alias. Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	if (vp->v_type == VNON) {
		if (type == VCHR && rdev == 0xffffffff)
			vp->v_type = type = VFIFO;
		else
			vp->v_type = type;
		if (vp->v_type == VFIFO) {
#ifdef FIFO
			extern struct vnodeops fifo_nfsv2nodeops;
			vp->v_op = &fifo_nfsv2nodeops;
#else
			return (EOPNOTSUPP);
#endif /* FIFO */
		}
		if (vp->v_type == VCHR || vp->v_type == VBLK) {
			vp->v_op = &spec_nfsv2nodeops;
			if (nvp = checkalias(vp, (dev_t)rdev, vp->v_mount)) {
				/*
				 * Reinitialize aliased node.
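				 * (checkalias() found another vnode for this
				 * device, so the file handle and hash chain
				 * linkage are moved over to it below and the
				 * vnode we came in with is released.)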
				 */
				np = VTONFS(nvp);
				np->n_vnode = nvp;
				np->n_flag = 0;
				nfs_lock(nvp);
				bcopy((caddr_t)&VTONFS(vp)->n_fh,
					(caddr_t)&np->n_fh, NFSX_FH);
				insque(np, nfs_hash(&np->n_fh));
				np->n_attrstamp = 0;
				np->n_sillyrename = (struct sillyrename *)0;
				/*
				 * Discard unneeded vnode and update actual one
				 */
				vput(vp);
				*vpp = nvp;
			}
		}
		np->n_mtime = mtime.tv_sec;
	}
	vap = &np->n_vattr;
	vap->va_type = type;
	vap->va_mode = nfstov_mode(fp->fa_mode);
	vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
	vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
	vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
	vap->va_size = fxdr_unsigned(u_long, fp->fa_size);
	if ((np->n_flag & NMODIFIED) == 0 || vap->va_size > np->n_size) {
		np->n_size = vap->va_size;
#ifdef NVM
		vnode_pager_setsize(vp, np->n_size);
#endif
	}
	vap->va_size_rsv = 0;
	vap->va_blocksize = fxdr_unsigned(long, fp->fa_blocksize);
	vap->va_rdev = (dev_t)rdev;
	vap->va_bytes = fxdr_unsigned(long, fp->fa_blocks) * NFS_FABLKSIZE;
	vap->va_bytes_rsv = 0;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = fxdr_unsigned(long, fp->fa_fileid);
	vap->va_atime.tv_sec = fxdr_unsigned(long, fp->fa_atime.tv_sec);
	vap->va_atime.tv_usec = 0;
	vap->va_flags = fxdr_unsigned(u_long, fp->fa_atime.tv_usec);
	vap->va_mtime = mtime;
	vap->va_ctime.tv_sec = fxdr_unsigned(long, fp->fa_ctime.tv_sec);
	vap->va_ctime.tv_usec = 0;
	vap->va_gen = fxdr_unsigned(u_long, fp->fa_ctime.tv_usec);
	np->n_attrstamp = time.tv_sec;
	*dposp = dpos;
	*mdp = md;
	if (vaper != NULL) {
		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
		if ((np->n_flag & NMODIFIED) && (np->n_size > vap->va_size))
			vaper->va_size = np->n_size;
	}
	return (0);
}

/*
 * Check the time stamp
 * If the cache is valid, copy contents to *vap and return 0
 * otherwise return an error
 */
nfs_getattrcache(vp, vap)
	register struct vnode *vp;
	struct vattr *vap;
{
	register struct nfsnode *np;

	np = VTONFS(vp);
	if ((time.tv_sec-np->n_attrstamp) < NFS_ATTRTIMEO) {
		nfsstats.attrcache_hits++;
		bcopy((caddr_t)&np->n_vattr,(caddr_t)vap,sizeof(struct vattr));
		if ((np->n_flag & NMODIFIED) == 0) {
			np->n_size = vap->va_size;
#ifdef NVM
			vnode_pager_setsize(vp, np->n_size);
#endif
		} else if (np->n_size > vap->va_size)
			vap->va_size = np->n_size;
		return (0);
	} else {
		nfsstats.attrcache_misses++;
		return (ENOENT);
	}
}

/*
 * Set up nameidata for a namei() call and do it
 */
nfs_namei(ndp, fhp, len, mdp, dposp)
	register struct nameidata *ndp;
	fhandle_t *fhp;
	int len;
	struct mbuf **mdp;
	caddr_t *dposp;
{
	register int i, rem;
	register struct mbuf *md;
	register char *cp;
	struct vnode *dp;
	int flag;
	int error;

	if ((ndp->ni_nameiop & HASBUF) == 0) {
		flag = ndp->ni_nameiop & OPMASK;
		/*
		 * Copy the name from the mbuf list to the d_name field of ndp
		 * and set the various ndp fields appropriately.
		 */
		cp = *dposp;
		md = *mdp;
		rem = mtod(md, caddr_t)+md->m_len-cp;
		ndp->ni_hash = 0;
		for (i = 0; i < len;) {
			while (rem == 0) {
				md = md->m_next;
				if (md == NULL)
					return (EBADRPC);
				cp = mtod(md, caddr_t);
				rem = md->m_len;
			}
			if (*cp == '\0' || *cp == '/')
				return (EINVAL);
			if (*cp & 0200)
				if ((*cp&0377) == ('/'|0200) || flag != DELETE)
					return (EINVAL);
			ndp->ni_dent.d_name[i++] = *cp;
			ndp->ni_hash += (unsigned char)*cp * i;
			cp++;
			rem--;
		}
		*mdp = md;
		*dposp = cp;
		len = nfsm_rndup(len)-len;
		if (len > 0) {
			if (rem < len) {
				if (error = nfs_adv(mdp, dposp, len, rem))
					return (error);
			} else
				*dposp += len;
		}
	} else
		i = len;
	ndp->ni_namelen = i;
	ndp->ni_dent.d_namlen = i;
	ndp->ni_dent.d_name[i] = '\0';
	ndp->ni_segflg = UIO_SYSSPACE;
	ndp->ni_pathlen = 1;
	ndp->ni_pnbuf = ndp->ni_dirp = ndp->ni_ptr = &ndp->ni_dent.d_name[0];
	ndp->ni_next = &ndp->ni_dent.d_name[i];
	ndp->ni_nameiop |= (NOCROSSMOUNT | REMOTE | HASBUF | STARTDIR);
	/*
	 * Extract and set starting directory.
	 */
	if (error = nfsrv_fhtovp(fhp, FALSE, &dp, ndp->ni_cred))
		return (error);
	if (dp->v_type != VDIR) {
		vrele(dp);
		return (ENOTDIR);
	}
	ndp->ni_startdir = dp;
	/*
	 * And call namei() to do the real work
	 */
	error = namei(ndp);
	if (error || (ndp->ni_nameiop & SAVESTARTDIR) == 0)
		vrele(dp);
	return (error);
}

/*
 * A fiddled version of m_adj() that ensures null fill to a long
 * boundary and only trims off the back end
 */
nfsm_adj(mp, len, nul)
	struct mbuf *mp;
	register int len;
	int nul;
{
	register struct mbuf *m;
	register int count, i;
	register char *cp;

	/*
	 * Trim from tail. Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return. Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	count = 0;
	m = mp;
	for (;;) {
		count += m->m_len;
		if (m->m_next == (struct mbuf *)0)
			break;
		m = m->m_next;
	}
	if (m->m_len > len) {
		m->m_len -= len;
		if (nul > 0) {
			cp = mtod(m, caddr_t)+m->m_len-nul;
			for (i = 0; i < nul; i++)
				*cp++ = '\0';
		}
		return;
	}
	count -= len;
	if (count < 0)
		count = 0;
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = m->m_next) {
		if (m->m_len >= count) {
			m->m_len = count;
			if (nul > 0) {
				cp = mtod(m, caddr_t)+m->m_len-nul;
				for (i = 0; i < nul; i++)
					*cp++ = '\0';
			}
			break;
		}
		count -= m->m_len;
	}
	while (m = m->m_next)
		m->m_len = 0;
}

/*
 * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 *	- look up fsid in mount list (if not found ret error)
 *	- check that it is exported
 *	- get vp by calling VFS_FHTOVP() macro
 *	- if not lockflag unlock it with VOP_UNLOCK()
 *	- if cred->cr_uid == 0 set it to m_exroot
 */
nfsrv_fhtovp(fhp, lockflag, vpp, cred)
	fhandle_t *fhp;
	int lockflag;
	struct vnode **vpp;
	struct ucred *cred;
{
	register struct mount *mp;

	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
		return (ESTALE);
	if ((mp->mnt_flag & MNT_EXPORTED) == 0)
		return (EACCES);
	if (VFS_FHTOVP(mp, &fhp->fh_fid, vpp))
		return (ESTALE);
	if (cred->cr_uid == 0)
		cred->cr_uid = mp->mnt_exroot;
	if (!lockflag)
		VOP_UNLOCK(*vpp);
	return (0);
}

/*
 * These two functions implement nfs rpc compression.
 * The algorithm is a trivial run length encoding of '\0' bytes. The high
 * order nibble of hex "e" is or'd with the number of zeroes - 2 in four
 * bits. (2 - 17 zeros) Any data byte with a high order nibble of hex "e"
 * is byte stuffed.
 * The compressed data is padded with 0x0 bytes to an even multiple of
 * 4 bytes in length to avoid any weird long pointer alignments.
 * If compression/uncompression is unsuccessful, the original mbuf list
 * is returned.
 * The first four bytes (the XID) are left uncompressed and the fifth
 * byte is set to 0x1 for request and 0x2 for reply.
 * An uncompressed RPC will always have the fifth byte == 0x0.
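 * As a rough illustration of the encoding: a run of five zero bytes is
 * emitted as the single byte 0xe3 (0xe0 or'd with 5 - 2), while a literal
 * data byte whose high order nibble is already 0xe is emitted twice (byte
 * stuffed) so the uncompress side can tell it apart from a run length code.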
 */
struct mbuf *
nfs_compress(m0)
	struct mbuf *m0;
{
	register u_char ch, nextch;
	register int i, rlelast;
	register u_char *ip, *op;
	register int ileft, oleft, noteof;
	register struct mbuf *m, *om;
	struct mbuf **mp, *retm;
	int olen, clget;

	i = rlelast = 0;
	noteof = 1;
	m = m0;
	if (m->m_len < 12)
		return (m0);
	if (m->m_pkthdr.len >= MINCLSIZE)
		clget = 1;
	else
		clget = 0;
	ileft = m->m_len - 9;
	ip = mtod(m, u_char *);
	MGETHDR(om, M_WAIT, MT_DATA);
	if (clget)
		MCLGET(om, M_WAIT);
	retm = om;
	mp = &om->m_next;
	olen = om->m_len = 5;
	oleft = M_TRAILINGSPACE(om);
	op = mtod(om, u_char *);
	*((u_long *)op) = *((u_long *)ip);
	ip += 7;
	op += 4;
	*op++ = *ip++ + 1;
	nextch = *ip++;
	while (noteof) {
		ch = nextch;
		if (ileft == 0) {
			do {
				m = m->m_next;
			} while (m && m->m_len == 0);
			if (m) {
				ileft = m->m_len;
				ip = mtod(m, u_char *);
			} else {
				noteof = 0;
				nextch = 0x1;
				goto doit;
			}
		}
		nextch = *ip++;
		ileft--;
doit:
		if (ch == '\0') {
			if (++i == NFSC_MAX || nextch != '\0') {
				if (i < 2) {
					nfscput('\0');
				} else {
					if (rlelast == i) {
						nfscput('\0');
						i--;
					}
					if (NFSCRLE(i) == (nextch & 0xff)) {
						i--;
						if (i < 2) {
							nfscput('\0');
						} else {
							nfscput(NFSCRLE(i));
						}
						nfscput('\0');
						rlelast = 0;
					} else {
						nfscput(NFSCRLE(i));
						rlelast = i;
					}
				}
				i = 0;
			}
		} else {
			if ((ch & NFSCRL) == NFSCRL) {
				nfscput(ch);
			}
			nfscput(ch);
			i = rlelast = 0;
		}
	}
	if (olen < m0->m_pkthdr.len) {
		m_freem(m0);
		if (i = (olen & 0x3)) {
			i = 4 - i;
			while (i-- > 0) {
				nfscput('\0');
			}
		}
		retm->m_pkthdr.len = olen;
		retm->m_pkthdr.rcvif = (struct ifnet *)0;
		return (retm);
	} else {
		m_freem(retm);
		return (m0);
	}
}

struct mbuf *
nfs_uncompress(m0)
	struct mbuf *m0;
{
	register u_char cp, nextcp, *ip, *op;
	register struct mbuf *m, *om;
	struct mbuf *retm, **mp;
	int i, j, noteof, clget, ileft, oleft, olen;

	m = m0;
	i = 0;
	while (m && i < MINCLSIZE) {
		i += m->m_len;
		m = m->m_next;
	}
	if (i < 6)
		return (m0);
	if (i >= MINCLSIZE)
		clget = 1;
	else
		clget = 0;
	m = m0;
	MGET(om, M_WAIT, MT_DATA);
	if (clget)
		MCLGET(om, M_WAIT);
	olen = om->m_len = 8;
	oleft = M_TRAILINGSPACE(om);
	op = mtod(om, u_char *);
	retm = om;
	mp = &om->m_next;
	if (m->m_len >= 6) {
		ileft = m->m_len - 6;
		ip = mtod(m, u_char *);
		*((u_long *)op) = *((u_long *)ip);
		bzero(op + 4, 3);
		ip += 4;
		op += 7;
		if (*ip == '\0') {
			m_freem(om);
			return (m0);
		}
		*op++ = *ip++ - 1;
		cp = *ip++;
	} else {
		ileft = m->m_len;
		ip = mtod(m, u_char *);
		nfscget(*op++);
		nfscget(*op++);
		nfscget(*op++);
		nfscget(*op++);
		bzero(op, 3);
		op += 3;
		nfscget(*op);
		if (*op == '\0') {
			m_freem(om);
			return (m0);
		}
		(*op)--;
		op++;
		nfscget(cp);
	}
	noteof = 1;
	while (noteof) {
		if ((cp & NFSCRL) == NFSCRL) {
			nfscget(nextcp);
			if (cp == nextcp) {
				nfscput(cp);
				goto readit;
			} else {
				i = (cp & 0xf) + 2;
				for (j = 0; j < i; j++) {
					nfscput('\0');
				}
				cp = nextcp;
			}
		} else {
			nfscput(cp);
readit:
			nfscget(cp);
		}
	}
	m_freem(m0);
	if (i = (olen & 0x3))
		om->m_len -= i;
	return (retm);
}