1 /* $NetBSD: nfs_subs.c,v 1.89 2000/11/27 08:39:49 chs Exp $ */ 2 3 /* 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * Rick Macklem at The University of Guelph. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)nfs_subs.c 8.8 (Berkeley) 5/22/95 39 */ 40 41 /* 42 * Copyright 2000 Wasabi Systems, Inc. 43 * All rights reserved. 44 * 45 * Written by Frank van der Linden for Wasabi Systems, Inc. 46 * 47 * Redistribution and use in source and binary forms, with or without 48 * modification, are permitted provided that the following conditions 49 * are met: 50 * 1. Redistributions of source code must retain the above copyright 51 * notice, this list of conditions and the following disclaimer. 52 * 2. Redistributions in binary form must reproduce the above copyright 53 * notice, this list of conditions and the following disclaimer in the 54 * documentation and/or other materials provided with the distribution. 55 * 3. All advertising materials mentioning features or use of this software 56 * must display the following acknowledgement: 57 * This product includes software developed for the NetBSD Project by 58 * Wasabi Systems, Inc. 59 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 60 * or promote products derived from this software without specific prior 61 * written permission. 62 * 63 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 66 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL WASABI SYSTEMS, INC 67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 73 * POSSIBILITY OF SUCH DAMAGE. 74 */ 75 76 #include "fs_nfs.h" 77 #include "opt_nfs.h" 78 #include "opt_nfsserver.h" 79 #include "opt_iso.h" 80 #include "opt_inet.h" 81 82 /* 83 * These functions support the macros and help fiddle mbuf chains for 84 * the nfs op functions. They do things like create the rpc header and 85 * copy data between mbuf chains and uio lists. 86 */ 87 #include <sys/param.h> 88 #include <sys/proc.h> 89 #include <sys/systm.h> 90 #include <sys/kernel.h> 91 #include <sys/mount.h> 92 #include <sys/vnode.h> 93 #include <sys/namei.h> 94 #include <sys/mbuf.h> 95 #include <sys/socket.h> 96 #include <sys/stat.h> 97 #include <sys/malloc.h> 98 #include <sys/time.h> 99 #include <sys/dirent.h> 100 101 #include <uvm/uvm_extern.h> 102 103 #include <nfs/rpcv2.h> 104 #include <nfs/nfsproto.h> 105 #include <nfs/nfsnode.h> 106 #include <nfs/nfs.h> 107 #include <nfs/xdr_subs.h> 108 #include <nfs/nfsm_subs.h> 109 #include <nfs/nfsmount.h> 110 #include <nfs/nqnfs.h> 111 #include <nfs/nfsrtt.h> 112 #include <nfs/nfs_var.h> 113 114 #include <miscfs/specfs/specdev.h> 115 116 #include <netinet/in.h> 117 #ifdef ISO 118 #include <netiso/iso.h> 119 #endif 120 121 /* 122 * Data items converted to xdr at startup, since they are constant 123 * This is kinda hokey, but may save a little time doing byte swaps 124 */ 125 u_int32_t nfs_xdrneg1; 126 u_int32_t rpc_call, rpc_vers, rpc_reply, rpc_msgdenied, rpc_autherr, 127 rpc_mismatch, rpc_auth_unix, rpc_msgaccepted, 128 rpc_auth_kerb; 129 u_int32_t nfs_prog, nqnfs_prog, nfs_true, nfs_false; 130 131 /* And other global data */ 132 static u_int32_t nfs_xid = 0; 133 nfstype nfsv2_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFNON, 134 NFCHR, NFNON }; 135 nfstype nfsv3_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFSOCK, 136 NFFIFO, NFNON }; 137 enum vtype nv2tov_type[8] = { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON }; 138 enum vtype nv3tov_type[8]={ VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO }; 139 int nfs_ticks; 140 141 /* NFS client/server stats. */ 142 struct nfsstats nfsstats; 143 144 /* 145 * Mapping of old NFS Version 2 RPC numbers to generic numbers. 
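 * For example, the Version 2 STATFS procedure (number 17) maps to the
 * generic NFSPROC_FSSTAT slot, while obsolete Version 2 procedures such
 * as ROOT (3) and WRITECACHE (7) have no generic counterpart and map to
 * NFSPROC_NOOP.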
146 */ 147 int nfsv3_procid[NFS_NPROCS] = { 148 NFSPROC_NULL, 149 NFSPROC_GETATTR, 150 NFSPROC_SETATTR, 151 NFSPROC_NOOP, 152 NFSPROC_LOOKUP, 153 NFSPROC_READLINK, 154 NFSPROC_READ, 155 NFSPROC_NOOP, 156 NFSPROC_WRITE, 157 NFSPROC_CREATE, 158 NFSPROC_REMOVE, 159 NFSPROC_RENAME, 160 NFSPROC_LINK, 161 NFSPROC_SYMLINK, 162 NFSPROC_MKDIR, 163 NFSPROC_RMDIR, 164 NFSPROC_READDIR, 165 NFSPROC_FSSTAT, 166 NFSPROC_NOOP, 167 NFSPROC_NOOP, 168 NFSPROC_NOOP, 169 NFSPROC_NOOP, 170 NFSPROC_NOOP, 171 NFSPROC_NOOP, 172 NFSPROC_NOOP, 173 NFSPROC_NOOP 174 }; 175 176 /* 177 * and the reverse mapping from generic to Version 2 procedure numbers 178 */ 179 int nfsv2_procid[NFS_NPROCS] = { 180 NFSV2PROC_NULL, 181 NFSV2PROC_GETATTR, 182 NFSV2PROC_SETATTR, 183 NFSV2PROC_LOOKUP, 184 NFSV2PROC_NOOP, 185 NFSV2PROC_READLINK, 186 NFSV2PROC_READ, 187 NFSV2PROC_WRITE, 188 NFSV2PROC_CREATE, 189 NFSV2PROC_MKDIR, 190 NFSV2PROC_SYMLINK, 191 NFSV2PROC_CREATE, 192 NFSV2PROC_REMOVE, 193 NFSV2PROC_RMDIR, 194 NFSV2PROC_RENAME, 195 NFSV2PROC_LINK, 196 NFSV2PROC_READDIR, 197 NFSV2PROC_NOOP, 198 NFSV2PROC_STATFS, 199 NFSV2PROC_NOOP, 200 NFSV2PROC_NOOP, 201 NFSV2PROC_NOOP, 202 NFSV2PROC_NOOP, 203 NFSV2PROC_NOOP, 204 NFSV2PROC_NOOP, 205 NFSV2PROC_NOOP, 206 }; 207 208 /* 209 * Maps errno values to nfs error numbers. 210 * Use NFSERR_IO as the catch all for ones not specifically defined in 211 * RFC 1094. 212 */ 213 static u_char nfsrv_v2errmap[ELAST] = { 214 NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO, 215 NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, 216 NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO, 217 NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR, 218 NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, 219 NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS, 220 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, 221 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, 222 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, 223 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, 224 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, 225 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, 226 NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO, 227 NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE, 228 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, 229 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, 230 NFSERR_IO, NFSERR_IO, 231 }; 232 233 /* 234 * Maps errno values to nfs error numbers. 235 * Although it is not obvious whether or not NFS clients really care if 236 * a returned error value is in the specified list for the procedure, the 237 * safest thing to do is filter them appropriately. For Version 2, the 238 * X/Open XNFS document is the only specification that defines error values 239 * for each RPC (The RFC simply lists all possible error values for all RPCs), 240 * so I have decided to not do this for Version 2. 241 * The first entry is the default error return and the rest are the valid 242 * errors for that RPC in increasing numeric order. 
243 */ 244 static short nfsv3err_null[] = { 245 0, 246 0, 247 }; 248 249 static short nfsv3err_getattr[] = { 250 NFSERR_IO, 251 NFSERR_IO, 252 NFSERR_STALE, 253 NFSERR_BADHANDLE, 254 NFSERR_SERVERFAULT, 255 0, 256 }; 257 258 static short nfsv3err_setattr[] = { 259 NFSERR_IO, 260 NFSERR_PERM, 261 NFSERR_IO, 262 NFSERR_ACCES, 263 NFSERR_INVAL, 264 NFSERR_NOSPC, 265 NFSERR_ROFS, 266 NFSERR_DQUOT, 267 NFSERR_STALE, 268 NFSERR_BADHANDLE, 269 NFSERR_NOT_SYNC, 270 NFSERR_SERVERFAULT, 271 0, 272 }; 273 274 static short nfsv3err_lookup[] = { 275 NFSERR_IO, 276 NFSERR_NOENT, 277 NFSERR_IO, 278 NFSERR_ACCES, 279 NFSERR_NOTDIR, 280 NFSERR_NAMETOL, 281 NFSERR_STALE, 282 NFSERR_BADHANDLE, 283 NFSERR_SERVERFAULT, 284 0, 285 }; 286 287 static short nfsv3err_access[] = { 288 NFSERR_IO, 289 NFSERR_IO, 290 NFSERR_STALE, 291 NFSERR_BADHANDLE, 292 NFSERR_SERVERFAULT, 293 0, 294 }; 295 296 static short nfsv3err_readlink[] = { 297 NFSERR_IO, 298 NFSERR_IO, 299 NFSERR_ACCES, 300 NFSERR_INVAL, 301 NFSERR_STALE, 302 NFSERR_BADHANDLE, 303 NFSERR_NOTSUPP, 304 NFSERR_SERVERFAULT, 305 0, 306 }; 307 308 static short nfsv3err_read[] = { 309 NFSERR_IO, 310 NFSERR_IO, 311 NFSERR_NXIO, 312 NFSERR_ACCES, 313 NFSERR_INVAL, 314 NFSERR_STALE, 315 NFSERR_BADHANDLE, 316 NFSERR_SERVERFAULT, 317 NFSERR_JUKEBOX, 318 0, 319 }; 320 321 static short nfsv3err_write[] = { 322 NFSERR_IO, 323 NFSERR_IO, 324 NFSERR_ACCES, 325 NFSERR_INVAL, 326 NFSERR_FBIG, 327 NFSERR_NOSPC, 328 NFSERR_ROFS, 329 NFSERR_DQUOT, 330 NFSERR_STALE, 331 NFSERR_BADHANDLE, 332 NFSERR_SERVERFAULT, 333 NFSERR_JUKEBOX, 334 0, 335 }; 336 337 static short nfsv3err_create[] = { 338 NFSERR_IO, 339 NFSERR_IO, 340 NFSERR_ACCES, 341 NFSERR_EXIST, 342 NFSERR_NOTDIR, 343 NFSERR_NOSPC, 344 NFSERR_ROFS, 345 NFSERR_NAMETOL, 346 NFSERR_DQUOT, 347 NFSERR_STALE, 348 NFSERR_BADHANDLE, 349 NFSERR_NOTSUPP, 350 NFSERR_SERVERFAULT, 351 0, 352 }; 353 354 static short nfsv3err_mkdir[] = { 355 NFSERR_IO, 356 NFSERR_IO, 357 NFSERR_ACCES, 358 NFSERR_EXIST, 359 NFSERR_NOTDIR, 360 NFSERR_NOSPC, 361 NFSERR_ROFS, 362 NFSERR_NAMETOL, 363 NFSERR_DQUOT, 364 NFSERR_STALE, 365 NFSERR_BADHANDLE, 366 NFSERR_NOTSUPP, 367 NFSERR_SERVERFAULT, 368 0, 369 }; 370 371 static short nfsv3err_symlink[] = { 372 NFSERR_IO, 373 NFSERR_IO, 374 NFSERR_ACCES, 375 NFSERR_EXIST, 376 NFSERR_NOTDIR, 377 NFSERR_NOSPC, 378 NFSERR_ROFS, 379 NFSERR_NAMETOL, 380 NFSERR_DQUOT, 381 NFSERR_STALE, 382 NFSERR_BADHANDLE, 383 NFSERR_NOTSUPP, 384 NFSERR_SERVERFAULT, 385 0, 386 }; 387 388 static short nfsv3err_mknod[] = { 389 NFSERR_IO, 390 NFSERR_IO, 391 NFSERR_ACCES, 392 NFSERR_EXIST, 393 NFSERR_NOTDIR, 394 NFSERR_NOSPC, 395 NFSERR_ROFS, 396 NFSERR_NAMETOL, 397 NFSERR_DQUOT, 398 NFSERR_STALE, 399 NFSERR_BADHANDLE, 400 NFSERR_NOTSUPP, 401 NFSERR_SERVERFAULT, 402 NFSERR_BADTYPE, 403 0, 404 }; 405 406 static short nfsv3err_remove[] = { 407 NFSERR_IO, 408 NFSERR_NOENT, 409 NFSERR_IO, 410 NFSERR_ACCES, 411 NFSERR_NOTDIR, 412 NFSERR_ROFS, 413 NFSERR_NAMETOL, 414 NFSERR_STALE, 415 NFSERR_BADHANDLE, 416 NFSERR_SERVERFAULT, 417 0, 418 }; 419 420 static short nfsv3err_rmdir[] = { 421 NFSERR_IO, 422 NFSERR_NOENT, 423 NFSERR_IO, 424 NFSERR_ACCES, 425 NFSERR_EXIST, 426 NFSERR_NOTDIR, 427 NFSERR_INVAL, 428 NFSERR_ROFS, 429 NFSERR_NAMETOL, 430 NFSERR_NOTEMPTY, 431 NFSERR_STALE, 432 NFSERR_BADHANDLE, 433 NFSERR_NOTSUPP, 434 NFSERR_SERVERFAULT, 435 0, 436 }; 437 438 static short nfsv3err_rename[] = { 439 NFSERR_IO, 440 NFSERR_NOENT, 441 NFSERR_IO, 442 NFSERR_ACCES, 443 NFSERR_EXIST, 444 NFSERR_XDEV, 445 NFSERR_NOTDIR, 446 NFSERR_ISDIR, 447 
NFSERR_INVAL, 448 NFSERR_NOSPC, 449 NFSERR_ROFS, 450 NFSERR_MLINK, 451 NFSERR_NAMETOL, 452 NFSERR_NOTEMPTY, 453 NFSERR_DQUOT, 454 NFSERR_STALE, 455 NFSERR_BADHANDLE, 456 NFSERR_NOTSUPP, 457 NFSERR_SERVERFAULT, 458 0, 459 }; 460 461 static short nfsv3err_link[] = { 462 NFSERR_IO, 463 NFSERR_IO, 464 NFSERR_ACCES, 465 NFSERR_EXIST, 466 NFSERR_XDEV, 467 NFSERR_NOTDIR, 468 NFSERR_INVAL, 469 NFSERR_NOSPC, 470 NFSERR_ROFS, 471 NFSERR_MLINK, 472 NFSERR_NAMETOL, 473 NFSERR_DQUOT, 474 NFSERR_STALE, 475 NFSERR_BADHANDLE, 476 NFSERR_NOTSUPP, 477 NFSERR_SERVERFAULT, 478 0, 479 }; 480 481 static short nfsv3err_readdir[] = { 482 NFSERR_IO, 483 NFSERR_IO, 484 NFSERR_ACCES, 485 NFSERR_NOTDIR, 486 NFSERR_STALE, 487 NFSERR_BADHANDLE, 488 NFSERR_BAD_COOKIE, 489 NFSERR_TOOSMALL, 490 NFSERR_SERVERFAULT, 491 0, 492 }; 493 494 static short nfsv3err_readdirplus[] = { 495 NFSERR_IO, 496 NFSERR_IO, 497 NFSERR_ACCES, 498 NFSERR_NOTDIR, 499 NFSERR_STALE, 500 NFSERR_BADHANDLE, 501 NFSERR_BAD_COOKIE, 502 NFSERR_NOTSUPP, 503 NFSERR_TOOSMALL, 504 NFSERR_SERVERFAULT, 505 0, 506 }; 507 508 static short nfsv3err_fsstat[] = { 509 NFSERR_IO, 510 NFSERR_IO, 511 NFSERR_STALE, 512 NFSERR_BADHANDLE, 513 NFSERR_SERVERFAULT, 514 0, 515 }; 516 517 static short nfsv3err_fsinfo[] = { 518 NFSERR_STALE, 519 NFSERR_STALE, 520 NFSERR_BADHANDLE, 521 NFSERR_SERVERFAULT, 522 0, 523 }; 524 525 static short nfsv3err_pathconf[] = { 526 NFSERR_STALE, 527 NFSERR_STALE, 528 NFSERR_BADHANDLE, 529 NFSERR_SERVERFAULT, 530 0, 531 }; 532 533 static short nfsv3err_commit[] = { 534 NFSERR_IO, 535 NFSERR_IO, 536 NFSERR_STALE, 537 NFSERR_BADHANDLE, 538 NFSERR_SERVERFAULT, 539 0, 540 }; 541 542 static short *nfsrv_v3errmap[] = { 543 nfsv3err_null, 544 nfsv3err_getattr, 545 nfsv3err_setattr, 546 nfsv3err_lookup, 547 nfsv3err_access, 548 nfsv3err_readlink, 549 nfsv3err_read, 550 nfsv3err_write, 551 nfsv3err_create, 552 nfsv3err_mkdir, 553 nfsv3err_symlink, 554 nfsv3err_mknod, 555 nfsv3err_remove, 556 nfsv3err_rmdir, 557 nfsv3err_rename, 558 nfsv3err_link, 559 nfsv3err_readdir, 560 nfsv3err_readdirplus, 561 nfsv3err_fsstat, 562 nfsv3err_fsinfo, 563 nfsv3err_pathconf, 564 nfsv3err_commit, 565 }; 566 567 extern struct nfsrtt nfsrtt; 568 extern time_t nqnfsstarttime; 569 extern int nqsrv_clockskew; 570 extern int nqsrv_writeslack; 571 extern int nqsrv_maxlease; 572 extern int nqnfs_piggy[NFS_NPROCS]; 573 extern struct nfsnodehashhead *nfsnodehashtbl; 574 extern u_long nfsnodehash; 575 576 LIST_HEAD(nfsnodehashhead, nfsnode); 577 u_long nfsdirhashmask; 578 579 int nfs_webnamei __P((struct nameidata *, struct vnode *, struct proc *)); 580 581 /* 582 * Create the header for an rpc request packet 583 * The hsiz is the size of the rest of the nfs request header. 584 * (just used to decide if a cluster is a good idea) 585 */ 586 struct mbuf * 587 nfsm_reqh(vp, procid, hsiz, bposp) 588 struct vnode *vp; 589 u_long procid; 590 int hsiz; 591 caddr_t *bposp; 592 { 593 struct mbuf *mb; 594 caddr_t bpos; 595 struct nfsmount *nmp; 596 #ifndef NFS_V2_ONLY 597 u_int32_t *tl; 598 struct mbuf *mb2; 599 int nqflag; 600 #endif 601 602 MGET(mb, M_WAIT, MT_DATA); 603 if (hsiz >= MINCLSIZE) 604 MCLGET(mb, M_WAIT); 605 mb->m_len = 0; 606 bpos = mtod(mb, caddr_t); 607 608 /* 609 * For NQNFS, add lease request. 
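 * When a lease is wanted, two XDR words are prepended to the request:
 * the lease type returned by NQNFS_NEEDLEASE() and the mount's
 * nm_leaseterm; when no lease is needed a single zero word is sent
 * instead.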
610 */ 611 if (vp) { 612 nmp = VFSTONFS(vp->v_mount); 613 #ifndef NFS_V2_ONLY 614 if (nmp->nm_flag & NFSMNT_NQNFS) { 615 nqflag = NQNFS_NEEDLEASE(vp, procid); 616 if (nqflag) { 617 nfsm_build(tl, u_int32_t *, 2*NFSX_UNSIGNED); 618 *tl++ = txdr_unsigned(nqflag); 619 *tl = txdr_unsigned(nmp->nm_leaseterm); 620 } else { 621 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 622 *tl = 0; 623 } 624 } 625 #endif 626 } 627 /* Finally, return values */ 628 *bposp = bpos; 629 return (mb); 630 } 631 632 /* 633 * Build the RPC header and fill in the authorization info. 634 * The authorization string argument is only used when the credentials 635 * come from outside of the kernel. 636 * Returns the head of the mbuf list. 637 */ 638 struct mbuf * 639 nfsm_rpchead(cr, nmflag, procid, auth_type, auth_len, auth_str, verf_len, 640 verf_str, mrest, mrest_len, mbp, xidp) 641 struct ucred *cr; 642 int nmflag; 643 int procid; 644 int auth_type; 645 int auth_len; 646 char *auth_str; 647 int verf_len; 648 char *verf_str; 649 struct mbuf *mrest; 650 int mrest_len; 651 struct mbuf **mbp; 652 u_int32_t *xidp; 653 { 654 struct mbuf *mb; 655 u_int32_t *tl; 656 caddr_t bpos; 657 int i; 658 struct mbuf *mreq, *mb2; 659 int siz, grpsiz, authsiz; 660 struct timeval tv; 661 static u_int32_t base; 662 663 authsiz = nfsm_rndup(auth_len); 664 MGETHDR(mb, M_WAIT, MT_DATA); 665 if ((authsiz + 10 * NFSX_UNSIGNED) >= MINCLSIZE) { 666 MCLGET(mb, M_WAIT); 667 } else if ((authsiz + 10 * NFSX_UNSIGNED) < MHLEN) { 668 MH_ALIGN(mb, authsiz + 10 * NFSX_UNSIGNED); 669 } else { 670 MH_ALIGN(mb, 8 * NFSX_UNSIGNED); 671 } 672 mb->m_len = 0; 673 mreq = mb; 674 bpos = mtod(mb, caddr_t); 675 676 /* 677 * First the RPC header. 678 */ 679 nfsm_build(tl, u_int32_t *, 8 * NFSX_UNSIGNED); 680 681 /* 682 * derive initial xid from system time 683 * XXX time is invalid if root not yet mounted 684 */ 685 if (!base && (rootvp)) { 686 microtime(&tv); 687 base = tv.tv_sec << 12; 688 nfs_xid = base; 689 } 690 /* 691 * Skip zero xid if it should ever happen. 692 */ 693 if (++nfs_xid == 0) 694 nfs_xid++; 695 696 *tl++ = *xidp = txdr_unsigned(nfs_xid); 697 *tl++ = rpc_call; 698 *tl++ = rpc_vers; 699 if (nmflag & NFSMNT_NQNFS) { 700 *tl++ = txdr_unsigned(NQNFS_PROG); 701 *tl++ = txdr_unsigned(NQNFS_VER3); 702 } else { 703 *tl++ = txdr_unsigned(NFS_PROG); 704 if (nmflag & NFSMNT_NFSV3) 705 *tl++ = txdr_unsigned(NFS_VER3); 706 else 707 *tl++ = txdr_unsigned(NFS_VER2); 708 } 709 if (nmflag & NFSMNT_NFSV3) 710 *tl++ = txdr_unsigned(procid); 711 else 712 *tl++ = txdr_unsigned(nfsv2_procid[procid]); 713 714 /* 715 * And then the authorization cred. 716 */ 717 *tl++ = txdr_unsigned(auth_type); 718 *tl = txdr_unsigned(authsiz); 719 switch (auth_type) { 720 case RPCAUTH_UNIX: 721 nfsm_build(tl, u_int32_t *, auth_len); 722 *tl++ = 0; /* stamp ?? 
*/ 723 *tl++ = 0; /* NULL hostname */ 724 *tl++ = txdr_unsigned(cr->cr_uid); 725 *tl++ = txdr_unsigned(cr->cr_gid); 726 grpsiz = (auth_len >> 2) - 5; 727 *tl++ = txdr_unsigned(grpsiz); 728 for (i = 0; i < grpsiz; i++) 729 *tl++ = txdr_unsigned(cr->cr_groups[i]); 730 break; 731 case RPCAUTH_KERB4: 732 siz = auth_len; 733 while (siz > 0) { 734 if (M_TRAILINGSPACE(mb) == 0) { 735 MGET(mb2, M_WAIT, MT_DATA); 736 if (siz >= MINCLSIZE) 737 MCLGET(mb2, M_WAIT); 738 mb->m_next = mb2; 739 mb = mb2; 740 mb->m_len = 0; 741 bpos = mtod(mb, caddr_t); 742 } 743 i = min(siz, M_TRAILINGSPACE(mb)); 744 memcpy(bpos, auth_str, i); 745 mb->m_len += i; 746 auth_str += i; 747 bpos += i; 748 siz -= i; 749 } 750 if ((siz = (nfsm_rndup(auth_len) - auth_len)) > 0) { 751 for (i = 0; i < siz; i++) 752 *bpos++ = '\0'; 753 mb->m_len += siz; 754 } 755 break; 756 }; 757 758 /* 759 * And the verifier... 760 */ 761 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 762 if (verf_str) { 763 *tl++ = txdr_unsigned(RPCAUTH_KERB4); 764 *tl = txdr_unsigned(verf_len); 765 siz = verf_len; 766 while (siz > 0) { 767 if (M_TRAILINGSPACE(mb) == 0) { 768 MGET(mb2, M_WAIT, MT_DATA); 769 if (siz >= MINCLSIZE) 770 MCLGET(mb2, M_WAIT); 771 mb->m_next = mb2; 772 mb = mb2; 773 mb->m_len = 0; 774 bpos = mtod(mb, caddr_t); 775 } 776 i = min(siz, M_TRAILINGSPACE(mb)); 777 memcpy(bpos, verf_str, i); 778 mb->m_len += i; 779 verf_str += i; 780 bpos += i; 781 siz -= i; 782 } 783 if ((siz = (nfsm_rndup(verf_len) - verf_len)) > 0) { 784 for (i = 0; i < siz; i++) 785 *bpos++ = '\0'; 786 mb->m_len += siz; 787 } 788 } else { 789 *tl++ = txdr_unsigned(RPCAUTH_NULL); 790 *tl = 0; 791 } 792 mb->m_next = mrest; 793 mreq->m_pkthdr.len = authsiz + 10 * NFSX_UNSIGNED + mrest_len; 794 mreq->m_pkthdr.rcvif = (struct ifnet *)0; 795 *mbp = mb; 796 return (mreq); 797 } 798 799 /* 800 * copies mbuf chain to the uio scatter/gather list 801 */ 802 int 803 nfsm_mbuftouio(mrep, uiop, siz, dpos) 804 struct mbuf **mrep; 805 struct uio *uiop; 806 int siz; 807 caddr_t *dpos; 808 { 809 char *mbufcp, *uiocp; 810 int xfer, left, len; 811 struct mbuf *mp; 812 long uiosiz, rem; 813 int error = 0; 814 815 mp = *mrep; 816 mbufcp = *dpos; 817 len = mtod(mp, caddr_t)+mp->m_len-mbufcp; 818 rem = nfsm_rndup(siz)-siz; 819 while (siz > 0) { 820 if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL) 821 return (EFBIG); 822 left = uiop->uio_iov->iov_len; 823 uiocp = uiop->uio_iov->iov_base; 824 if (left > siz) 825 left = siz; 826 uiosiz = left; 827 while (left > 0) { 828 while (len == 0) { 829 mp = mp->m_next; 830 if (mp == NULL) 831 return (EBADRPC); 832 mbufcp = mtod(mp, caddr_t); 833 len = mp->m_len; 834 } 835 xfer = (left > len) ? len : left; 836 #ifdef notdef 837 /* Not Yet.. 
			 */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(mbufcp, uiocp, xfer);
			else
#endif
			if (uiop->uio_segflg == UIO_SYSSPACE)
				memcpy(uiocp, mbufcp, xfer);
			else
				copyout(mbufcp, uiocp, xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			(caddr_t)uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	*dpos = mbufcp;
	*mrep = mp;
	if (rem > 0) {
		if (len < rem)
			error = nfs_adv(mrep, dpos, rem, len);
		else
			*dpos += rem;
	}
	return (error);
}

/*
 * copies a uio scatter/gather list to an mbuf chain.
 * NOTE: can only handle iovcnt == 1
 */
int
nfsm_uiotombuf(uiop, mq, siz, bpos)
	struct uio *uiop;
	struct mbuf **mq;
	int siz;
	caddr_t *bpos;
{
	char *uiocp;
	struct mbuf *mp, *mp2;
	int xfer, left, mlen;
	int uiosiz, clflg, rem;
	char *cp;

#ifdef DIAGNOSTIC
	if (uiop->uio_iovcnt != 1)
		panic("nfsm_uiotombuf: iovcnt != 1");
#endif

	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
		clflg = 1;
	else
		clflg = 0;
	rem = nfsm_rndup(siz)-siz;
	mp = mp2 = *mq;
	while (siz > 0) {
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			mlen = M_TRAILINGSPACE(mp);
			if (mlen == 0) {
				MGET(mp, M_WAIT, MT_DATA);
				if (clflg)
					MCLGET(mp, M_WAIT);
				mp->m_len = 0;
				mp2->m_next = mp;
				mp2 = mp;
				mlen = M_TRAILINGSPACE(mp);
			}
			xfer = (left > mlen) ? mlen : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			else
#endif
			if (uiop->uio_segflg == UIO_SYSSPACE)
				memcpy(mtod(mp, caddr_t)+mp->m_len, uiocp, xfer);
			else
				copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			mp->m_len += xfer;
			left -= xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		(caddr_t)uiop->uio_iov->iov_base += uiosiz;
		uiop->uio_iov->iov_len -= uiosiz;
		siz -= uiosiz;
	}
	if (rem > 0) {
		if (rem > M_TRAILINGSPACE(mp)) {
			MGET(mp, M_WAIT, MT_DATA);
			mp->m_len = 0;
			mp2->m_next = mp;
		}
		cp = mtod(mp, caddr_t)+mp->m_len;
		for (left = 0; left < rem; left++)
			*cp++ = '\0';
		mp->m_len += rem;
		*bpos = cp;
	} else
		*bpos = mtod(mp, caddr_t)+mp->m_len;
	*mq = mp;
	return (0);
}

/*
 * Get at least "siz" bytes of correctly aligned data.
 * When called the mbuf pointers are not necessarily correct,
 * dposp points to what ought to be in m_data and left contains
 * what ought to be in m_len.
 * This is used by the macros nfsm_dissect and nfsm_dissecton for tough
 * cases. (The macros use the vars. dpos and dpos2)
 */
int
nfsm_disct(mdp, dposp, siz, left, cp2)
	struct mbuf **mdp;
	caddr_t *dposp;
	int siz;
	int left;
	caddr_t *cp2;
{
	struct mbuf *m1, *m2;
	struct mbuf *havebuf = NULL;
	caddr_t src = *dposp;
	caddr_t dst;
	int len;

#ifdef DEBUG
	if (left < 0)
		panic("nfsm_disct: left < 0");
#endif
	m1 = *mdp;
	/*
	 * Skip through the mbuf chain looking for an mbuf with
	 * some data.
	 * If the first mbuf found has enough data
	 * and it is correctly aligned, return it.
	 */
	while (left == 0) {
		havebuf = m1;
		*mdp = m1 = m1->m_next;
		if (m1 == NULL)
			return (EBADRPC);
		src = mtod(m1, caddr_t);
		left = m1->m_len;
		/*
		 * If we start a new mbuf and it is big enough
		 * and correctly aligned just return it, don't
		 * do any pull up.
		 */
		if (left >= siz && nfsm_aligned(src)) {
			*cp2 = src;
			*dposp = src + siz;
			return (0);
		}
	}
	if (m1->m_flags & M_EXT) {
		if (havebuf) {
			/*
			 * If the first mbuf with data has external data
			 * and there is a previous empty mbuf use it
			 * to move the data into.
			 */
			m2 = m1;
			*mdp = m1 = havebuf;
			if (m1->m_flags & M_EXT) {
				MEXTREMOVE(m1);
			}
		} else {
			/*
			 * If the first mbuf has external data
			 * and there is no previous empty mbuf
			 * allocate a new mbuf and move the external
			 * data to the new mbuf. Also make the first
			 * mbuf look empty.
			 */
			m2 = m_get(M_WAIT, MT_DATA);
			m2->m_ext = m1->m_ext;
			m2->m_data = src;
			m2->m_len = left;
			MCLADDREFERENCE(m1, m2);
			MEXTREMOVE(m1);
			m2->m_next = m1->m_next;
			m1->m_next = m2;
		}
		m1->m_len = 0;
		dst = m1->m_dat;
	} else {
		/*
		 * If the first mbuf has no external data
		 * move the data to the front of the mbuf.
		 */
		if ((dst = m1->m_dat) != src)
			memmove(dst, src, left);
		dst += left;
		m1->m_len = left;
		m2 = m1->m_next;
	}
	m1->m_flags &= ~M_PKTHDR;
	*cp2 = m1->m_data = m1->m_dat;	/* data is at beginning of buffer */
	*dposp = mtod(m1, caddr_t) + siz;
	/*
	 * Loop through mbufs pulling data up into first mbuf until
	 * the first mbuf is full or there is no more data to
	 * pullup.
	 */
	while ((len = (MLEN - m1->m_len)) != 0 && m2) {
		if ((len = min(len, m2->m_len)) != 0)
			memcpy(dst, m2->m_data, len);
		m1->m_len += len;
		dst += len;
		m2->m_data += len;
		m2->m_len -= len;
		m2 = m2->m_next;
	}
	if (m1->m_len < siz)
		return (EBADRPC);
	return (0);
}

/*
 * Advance the position in the mbuf chain.
 */
int
nfs_adv(mdp, dposp, offs, left)
	struct mbuf **mdp;
	caddr_t *dposp;
	int offs;
	int left;
{
	struct mbuf *m;
	int s;

	m = *mdp;
	s = left;
	while (s < offs) {
		offs -= s;
		m = m->m_next;
		if (m == NULL)
			return (EBADRPC);
		s = m->m_len;
	}
	*mdp = m;
	*dposp = mtod(m, caddr_t)+offs;
	return (0);
}

/*
 * Copy a string into mbufs for the hard cases...
 */
int
nfsm_strtmbuf(mb, bpos, cp, siz)
	struct mbuf **mb;
	char **bpos;
	const char *cp;
	long siz;
{
	struct mbuf *m1 = NULL, *m2;
	long left, xfer, len, tlen;
	u_int32_t *tl;
	int putsize;

	putsize = 1;
	m2 = *mb;
	left = M_TRAILINGSPACE(m2);
	if (left > 0) {
		tl = ((u_int32_t *)(*bpos));
		*tl++ = txdr_unsigned(siz);
		putsize = 0;
		left -= NFSX_UNSIGNED;
		m2->m_len += NFSX_UNSIGNED;
		if (left > 0) {
			memcpy((caddr_t) tl, cp, left);
			siz -= left;
			cp += left;
			m2->m_len += left;
			left = 0;
		}
	}
	/* Loop around adding mbufs */
	while (siz > 0) {
		MGET(m1, M_WAIT, MT_DATA);
		if (siz > MLEN)
			MCLGET(m1, M_WAIT);
		m1->m_len = NFSMSIZ(m1);
		m2->m_next = m1;
		m2 = m1;
		tl = mtod(m1, u_int32_t *);
		tlen = 0;
		if (putsize) {
			*tl++ = txdr_unsigned(siz);
			m1->m_len -= NFSX_UNSIGNED;
			tlen = NFSX_UNSIGNED;
			putsize = 0;
		}
		if (siz < m1->m_len) {
			len = nfsm_rndup(siz);
			xfer = siz;
			if (xfer < len)
				*(tl+(xfer>>2)) = 0;
		} else {
			xfer = len = m1->m_len;
		}
		memcpy((caddr_t) tl, cp, xfer);
		m1->m_len = len+tlen;
		siz -= xfer;
		cp += xfer;
	}
	*mb = m1;
	*bpos = mtod(m1, caddr_t)+m1->m_len;
	return (0);
}

/*
 * Directory caching routines. They work as follows:
 * - a cache is maintained per VDIR nfsnode.
 * - for each offset cookie that is exported to userspace, and can
 *   thus be thrown back at us as an offset to VOP_READDIR, store
 *   information in the cache.
 * - cached are:
 *	- cookie itself
 *	- blocknumber (essentially just a search key in the buffer cache)
 *	- entry number in block.
 *	- offset cookie of block in which this entry is stored
 *	- 32 bit cookie if NFSMNT_XLATECOOKIE is used.
 * - entries are looked up in a hash table
 * - also maintained is an LRU list of entries, used to determine
 *   which ones to delete if the cache grows too large.
 * - if 32 <-> 64 translation mode is requested for a filesystem,
 *   the cache also functions as a translation table
 * - in the translation case, invalidating the cache does not mean
 *   flushing it, but just marking entries as invalid, except for
 *   the <64bit cookie, 32bit cookie> pair which is still valid, to
 *   still be able to use the cache as a translation table.
 * - 32 bit cookies are uniquely created by combining the hash table
 *   entry value, and one generation count per hash table entry,
 *   incremented each time an entry is appended to the chain.
 * - the cache is invalidated each time a directory is modified
 * - sanity checks are also done; if an entry in a block turns
 *   out not to have a matching cookie, the cache is invalidated
 *   and a new block starting from the wanted offset is fetched from
 *   the server.
 * - directory entries as read from the server are extended to contain
 *   the 64bit and, optionally, the 32bit cookies, for sanity checking
 *   the cache and exporting them to userspace through the cookie
 *   argument to VOP_READDIR.
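 * - as an illustration of the 32 bit cookie layout used below: the hash
 *   chain index is put in the top 8 bits and the per-chain generation
 *   count in the low 24 bits, so hash chain 5, generation 3 yields the
 *   32 bit cookie 0x05000003.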
1197 */ 1198 1199 u_long 1200 nfs_dirhash(off) 1201 off_t off; 1202 { 1203 int i; 1204 char *cp = (char *)&off; 1205 u_long sum = 0L; 1206 1207 for (i = 0 ; i < sizeof (off); i++) 1208 sum += *cp++; 1209 1210 return sum; 1211 } 1212 1213 void 1214 nfs_initdircache(vp) 1215 struct vnode *vp; 1216 { 1217 struct nfsnode *np = VTONFS(vp); 1218 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1219 1220 np->n_dircachesize = 0; 1221 np->n_dblkno = 1; 1222 np->n_dircache = hashinit(NFS_DIRHASHSIZ, HASH_LIST, M_NFSDIROFF, 1223 M_WAITOK, &nfsdirhashmask); 1224 TAILQ_INIT(&np->n_dirchain); 1225 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) { 1226 MALLOC(np->n_dirgens, unsigned *, 1227 NFS_DIRHASHSIZ * sizeof (unsigned), M_NFSDIROFF, 1228 M_WAITOK); 1229 memset((caddr_t)np->n_dirgens, 0, 1230 NFS_DIRHASHSIZ * sizeof (unsigned)); 1231 } 1232 } 1233 1234 static struct nfsdircache dzero = {0, 0, {0, 0}, {0, 0}, 0, 0, 0}; 1235 1236 struct nfsdircache * 1237 nfs_searchdircache(vp, off, do32, hashent) 1238 struct vnode *vp; 1239 off_t off; 1240 int do32; 1241 int *hashent; 1242 { 1243 struct nfsdirhashhead *ndhp; 1244 struct nfsdircache *ndp = NULL; 1245 struct nfsnode *np = VTONFS(vp); 1246 unsigned ent; 1247 1248 /* 1249 * Zero is always a valid cookie. 1250 */ 1251 if (off == 0) 1252 return &dzero; 1253 1254 /* 1255 * We use a 32bit cookie as search key, directly reconstruct 1256 * the hashentry. Else use the hashfunction. 1257 */ 1258 if (do32) { 1259 ent = (u_int32_t)off >> 24; 1260 if (ent >= NFS_DIRHASHSIZ) 1261 return NULL; 1262 ndhp = &np->n_dircache[ent]; 1263 } else { 1264 ndhp = NFSDIRHASH(np, off); 1265 } 1266 1267 if (hashent) 1268 *hashent = (int)(ndhp - np->n_dircache); 1269 if (do32) { 1270 for (ndp = ndhp->lh_first; ndp; ndp = ndp->dc_hash.le_next) { 1271 if (ndp->dc_cookie32 == (u_int32_t)off) { 1272 /* 1273 * An invalidated entry will become the 1274 * start of a new block fetched from 1275 * the server. 1276 */ 1277 if (ndp->dc_blkno == -1) { 1278 ndp->dc_blkcookie = ndp->dc_cookie; 1279 ndp->dc_blkno = np->n_dblkno++; 1280 ndp->dc_entry = 0; 1281 } 1282 break; 1283 } 1284 } 1285 } else { 1286 for (ndp = ndhp->lh_first; ndp; ndp = ndp->dc_hash.le_next) 1287 if (ndp->dc_cookie == off) 1288 break; 1289 } 1290 return ndp; 1291 } 1292 1293 1294 struct nfsdircache * 1295 nfs_enterdircache(vp, off, blkoff, en, blkno) 1296 struct vnode *vp; 1297 off_t off, blkoff; 1298 daddr_t blkno; 1299 int en; 1300 { 1301 struct nfsnode *np = VTONFS(vp); 1302 struct nfsdirhashhead *ndhp; 1303 struct nfsdircache *ndp = NULL, *first; 1304 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1305 int hashent, gen, overwrite; 1306 1307 if (!np->n_dircache) 1308 /* 1309 * XXX would like to do this in nfs_nget but vtype 1310 * isn't known at that time. 1311 */ 1312 nfs_initdircache(vp); 1313 1314 /* 1315 * XXX refuse entries for offset 0. amd(8) erroneously sets 1316 * cookie 0 for the '.' entry, making this necessary. This 1317 * isn't so bad, as 0 is a special case anyway. 1318 */ 1319 if (off == 0) 1320 return &dzero; 1321 1322 ndp = nfs_searchdircache(vp, off, 0, &hashent); 1323 1324 if (ndp && ndp->dc_blkno != -1) { 1325 /* 1326 * Overwriting an old entry. Check if it's the same. 1327 * If so, just return. If not, remove the old entry. 
1328 */ 1329 if (ndp->dc_blkcookie == blkoff && ndp->dc_entry == en) 1330 return ndp; 1331 TAILQ_REMOVE(&np->n_dirchain, ndp, dc_chain); 1332 LIST_REMOVE(ndp, dc_hash); 1333 FREE(ndp, M_NFSDIROFF); 1334 ndp = 0; 1335 } 1336 1337 ndhp = &np->n_dircache[hashent]; 1338 1339 if (!ndp) { 1340 MALLOC(ndp, struct nfsdircache *, sizeof (*ndp), M_NFSDIROFF, 1341 M_WAITOK); 1342 overwrite = 0; 1343 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) { 1344 /* 1345 * We're allocating a new entry, so bump the 1346 * generation number. 1347 */ 1348 gen = ++np->n_dirgens[hashent]; 1349 if (gen == 0) { 1350 np->n_dirgens[hashent]++; 1351 gen++; 1352 } 1353 ndp->dc_cookie32 = (hashent << 24) | (gen & 0xffffff); 1354 } 1355 } else 1356 overwrite = 1; 1357 1358 /* 1359 * If the entry number is 0, we are at the start of a new block, so 1360 * allocate a new blocknumber. 1361 */ 1362 if (en == 0) 1363 ndp->dc_blkno = np->n_dblkno++; 1364 else 1365 ndp->dc_blkno = blkno; 1366 1367 ndp->dc_cookie = off; 1368 ndp->dc_blkcookie = blkoff; 1369 ndp->dc_entry = en; 1370 1371 if (overwrite) 1372 return ndp; 1373 1374 /* 1375 * If the maximum directory cookie cache size has been reached 1376 * for this node, take one off the front. The idea is that 1377 * directories are typically read front-to-back once, so that 1378 * the oldest entries can be thrown away without much performance 1379 * loss. 1380 */ 1381 if (np->n_dircachesize == NFS_MAXDIRCACHE) { 1382 first = np->n_dirchain.tqh_first; 1383 TAILQ_REMOVE(&np->n_dirchain, first, dc_chain); 1384 LIST_REMOVE(first, dc_hash); 1385 FREE(first, M_NFSDIROFF); 1386 } else 1387 np->n_dircachesize++; 1388 1389 LIST_INSERT_HEAD(ndhp, ndp, dc_hash); 1390 TAILQ_INSERT_TAIL(&np->n_dirchain, ndp, dc_chain); 1391 return ndp; 1392 } 1393 1394 void 1395 nfs_invaldircache(vp, forcefree) 1396 struct vnode *vp; 1397 int forcefree; 1398 { 1399 struct nfsnode *np = VTONFS(vp); 1400 struct nfsdircache *ndp = NULL; 1401 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1402 1403 #ifdef DIAGNOSTIC 1404 if (vp->v_type != VDIR) 1405 panic("nfs: invaldircache: not dir"); 1406 #endif 1407 1408 if (!np->n_dircache) 1409 return; 1410 1411 if (!(nmp->nm_flag & NFSMNT_XLATECOOKIE) || forcefree) { 1412 while ((ndp = np->n_dirchain.tqh_first)) { 1413 TAILQ_REMOVE(&np->n_dirchain, ndp, dc_chain); 1414 LIST_REMOVE(ndp, dc_hash); 1415 FREE(ndp, M_NFSDIROFF); 1416 } 1417 np->n_dircachesize = 0; 1418 if (forcefree && np->n_dirgens) { 1419 FREE(np->n_dirgens, M_NFSDIROFF); 1420 } 1421 } else { 1422 for (ndp = np->n_dirchain.tqh_first; ndp; 1423 ndp = ndp->dc_chain.tqe_next) 1424 ndp->dc_blkno = -1; 1425 } 1426 1427 np->n_dblkno = 1; 1428 } 1429 1430 /* 1431 * Called once before VFS init to initialize shared and 1432 * server-specific data structures. 
1433 */ 1434 void 1435 nfs_init() 1436 { 1437 nfsrtt.pos = 0; 1438 rpc_vers = txdr_unsigned(RPC_VER2); 1439 rpc_call = txdr_unsigned(RPC_CALL); 1440 rpc_reply = txdr_unsigned(RPC_REPLY); 1441 rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED); 1442 rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED); 1443 rpc_mismatch = txdr_unsigned(RPC_MISMATCH); 1444 rpc_autherr = txdr_unsigned(RPC_AUTHERR); 1445 rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX); 1446 rpc_auth_kerb = txdr_unsigned(RPCAUTH_KERB4); 1447 nfs_prog = txdr_unsigned(NFS_PROG); 1448 nqnfs_prog = txdr_unsigned(NQNFS_PROG); 1449 nfs_true = txdr_unsigned(TRUE); 1450 nfs_false = txdr_unsigned(FALSE); 1451 nfs_xdrneg1 = txdr_unsigned(-1); 1452 nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000; 1453 if (nfs_ticks < 1) 1454 nfs_ticks = 1; 1455 #ifdef NFSSERVER 1456 nfsrv_init(0); /* Init server data structures */ 1457 nfsrv_initcache(); /* Init the server request cache */ 1458 #endif /* NFSSERVER */ 1459 1460 #if defined(NFSSERVER) || !defined(NFS_V2_ONLY) 1461 /* 1462 * Initialize the nqnfs data structures. 1463 */ 1464 if (nqnfsstarttime == 0) { 1465 nqnfsstarttime = boottime.tv_sec + nqsrv_maxlease 1466 + nqsrv_clockskew + nqsrv_writeslack; 1467 NQLOADNOVRAM(nqnfsstarttime); 1468 CIRCLEQ_INIT(&nqtimerhead); 1469 nqfhhashtbl = hashinit(NQLCHSZ, HASH_LIST, M_NQLEASE, 1470 M_WAITOK, &nqfhhash); 1471 } 1472 #endif 1473 1474 /* 1475 * Initialize reply list and start timer 1476 */ 1477 TAILQ_INIT(&nfs_reqq); 1478 nfs_timer(NULL); 1479 } 1480 1481 #ifdef NFS 1482 /* 1483 * Called once at VFS init to initialize client-specific data structures. 1484 */ 1485 void 1486 nfs_vfs_init() 1487 { 1488 int i; 1489 1490 /* Ensure async daemons disabled */ 1491 for (i = 0; i < NFS_MAXASYNCDAEMON; i++) { 1492 nfs_iodwant[i] = (struct proc *)0; 1493 nfs_iodmount[i] = (struct nfsmount *)0; 1494 } 1495 nfs_nhinit(); /* Init the nfsnode table */ 1496 } 1497 1498 void 1499 nfs_vfs_done() 1500 { 1501 nfs_nhdone(); 1502 } 1503 1504 /* 1505 * Attribute cache routines. 
1506 * nfs_loadattrcache() - loads or updates the cache contents from attributes 1507 * that are on the mbuf list 1508 * nfs_getattrcache() - returns valid attributes if found in cache, returns 1509 * error otherwise 1510 */ 1511 1512 /* 1513 * Load the attribute cache (that lives in the nfsnode entry) with 1514 * the values on the mbuf list and 1515 * Iff vap not NULL 1516 * copy the attributes to *vaper 1517 */ 1518 int 1519 nfsm_loadattrcache(vpp, mdp, dposp, vaper) 1520 struct vnode **vpp; 1521 struct mbuf **mdp; 1522 caddr_t *dposp; 1523 struct vattr *vaper; 1524 { 1525 int32_t t1; 1526 caddr_t cp2; 1527 int error = 0; 1528 struct mbuf *md; 1529 int v3 = NFS_ISV3(*vpp); 1530 1531 md = *mdp; 1532 t1 = (mtod(md, caddr_t) + md->m_len) - *dposp; 1533 error = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), t1, &cp2); 1534 if (error) 1535 return (error); 1536 return nfs_loadattrcache(vpp, (struct nfs_fattr *)cp2, vaper); 1537 } 1538 1539 int 1540 nfs_loadattrcache(vpp, fp, vaper) 1541 struct vnode **vpp; 1542 struct nfs_fattr *fp; 1543 struct vattr *vaper; 1544 { 1545 struct vnode *vp = *vpp; 1546 struct vattr *vap; 1547 int v3 = NFS_ISV3(vp); 1548 enum vtype vtyp; 1549 u_short vmode; 1550 struct timespec mtime; 1551 struct vnode *nvp; 1552 int32_t rdev; 1553 struct nfsnode *np; 1554 extern int (**spec_nfsv2nodeop_p) __P((void *)); 1555 1556 if (v3) { 1557 vtyp = nfsv3tov_type(fp->fa_type); 1558 vmode = fxdr_unsigned(u_short, fp->fa_mode); 1559 rdev = makedev(fxdr_unsigned(u_int32_t, fp->fa3_rdev.specdata1), 1560 fxdr_unsigned(u_int32_t, fp->fa3_rdev.specdata2)); 1561 fxdr_nfsv3time(&fp->fa3_mtime, &mtime); 1562 } else { 1563 vtyp = nfsv2tov_type(fp->fa_type); 1564 vmode = fxdr_unsigned(u_short, fp->fa_mode); 1565 if (vtyp == VNON || vtyp == VREG) 1566 vtyp = IFTOVT(vmode); 1567 rdev = fxdr_unsigned(int32_t, fp->fa2_rdev); 1568 fxdr_nfsv2time(&fp->fa2_mtime, &mtime); 1569 1570 /* 1571 * Really ugly NFSv2 kludge. 1572 */ 1573 if (vtyp == VCHR && rdev == 0xffffffff) 1574 vtyp = VFIFO; 1575 } 1576 1577 /* 1578 * If v_type == VNON it is a new node, so fill in the v_type, 1579 * n_mtime fields. Check to see if it represents a special 1580 * device, and if so, check for a possible alias. Once the 1581 * correct vnode has been obtained, fill in the rest of the 1582 * information. 1583 */ 1584 np = VTONFS(vp); 1585 if (vp->v_type == VNON) { 1586 vp->v_type = vtyp; 1587 if (vp->v_type == VFIFO) { 1588 extern int (**fifo_nfsv2nodeop_p) __P((void *)); 1589 vp->v_op = fifo_nfsv2nodeop_p; 1590 } 1591 if (vp->v_type == VCHR || vp->v_type == VBLK) { 1592 vp->v_op = spec_nfsv2nodeop_p; 1593 nvp = checkalias(vp, (dev_t)rdev, vp->v_mount); 1594 if (nvp) { 1595 /* 1596 * Discard unneeded vnode, but save its nfsnode. 1597 * Since the nfsnode does not have a lock, its 1598 * vnode lock has to be carried over. 1599 */ 1600 nvp->v_data = vp->v_data; 1601 vp->v_data = NULL; 1602 vp->v_op = spec_vnodeop_p; 1603 vput(vp); 1604 vgone(vp); 1605 /* 1606 * XXX When nfs starts locking, we need to 1607 * lock the new node here. 1608 */ 1609 /* 1610 * Reinitialize aliased node. 
1611 */ 1612 np->n_vnode = nvp; 1613 *vpp = vp = nvp; 1614 } 1615 } 1616 np->n_mtime = mtime.tv_sec; 1617 } 1618 vap = np->n_vattr; 1619 vap->va_type = vtyp; 1620 vap->va_mode = vmode & ALLPERMS; 1621 vap->va_rdev = (dev_t)rdev; 1622 vap->va_mtime = mtime; 1623 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; 1624 switch (vtyp) { 1625 case VDIR: 1626 vap->va_blocksize = NFS_DIRFRAGSIZ; 1627 break; 1628 case VBLK: 1629 vap->va_blocksize = BLKDEV_IOSIZE; 1630 break; 1631 case VCHR: 1632 vap->va_blocksize = MAXBSIZE; 1633 break; 1634 default: 1635 vap->va_blocksize = v3 ? vp->v_mount->mnt_stat.f_iosize : 1636 fxdr_unsigned(int32_t, fp->fa2_blocksize); 1637 break; 1638 } 1639 if (v3) { 1640 vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink); 1641 vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid); 1642 vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid); 1643 vap->va_size = fxdr_hyper(&fp->fa3_size); 1644 vap->va_bytes = fxdr_hyper(&fp->fa3_used); 1645 vap->va_fileid = fxdr_unsigned(int32_t, 1646 fp->fa3_fileid.nfsuquad[1]); 1647 fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime); 1648 fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime); 1649 vap->va_flags = 0; 1650 vap->va_filerev = 0; 1651 } else { 1652 vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink); 1653 vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid); 1654 vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid); 1655 vap->va_size = fxdr_unsigned(u_int32_t, fp->fa2_size); 1656 vap->va_bytes = fxdr_unsigned(int32_t, fp->fa2_blocks) 1657 * NFS_FABLKSIZE; 1658 vap->va_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid); 1659 fxdr_nfsv2time(&fp->fa2_atime, &vap->va_atime); 1660 vap->va_flags = 0; 1661 vap->va_ctime.tv_sec = fxdr_unsigned(u_int32_t, 1662 fp->fa2_ctime.nfsv2_sec); 1663 vap->va_ctime.tv_nsec = 0; 1664 vap->va_gen = fxdr_unsigned(u_int32_t,fp->fa2_ctime.nfsv2_usec); 1665 vap->va_filerev = 0; 1666 } 1667 if (vap->va_size != np->n_size) { 1668 if ((np->n_flag & NMODIFIED) && vap->va_size < np->n_size) { 1669 vap->va_size = np->n_size; 1670 } else { 1671 np->n_size = vap->va_size; 1672 if (vap->va_type == VREG) { 1673 uvm_vnp_setsize(vp, np->n_size); 1674 } 1675 } 1676 } 1677 np->n_attrstamp = time.tv_sec; 1678 if (vaper != NULL) { 1679 memcpy((caddr_t)vaper, (caddr_t)vap, sizeof(*vap)); 1680 if (np->n_flag & NCHG) { 1681 if (np->n_flag & NACC) 1682 vaper->va_atime = np->n_atim; 1683 if (np->n_flag & NUPD) 1684 vaper->va_mtime = np->n_mtim; 1685 } 1686 } 1687 return (0); 1688 } 1689 1690 /* 1691 * Check the time stamp 1692 * If the cache is valid, copy contents to *vap and return 0 1693 * otherwise return an error 1694 */ 1695 int 1696 nfs_getattrcache(vp, vaper) 1697 struct vnode *vp; 1698 struct vattr *vaper; 1699 { 1700 struct nfsnode *np = VTONFS(vp); 1701 struct vattr *vap; 1702 1703 if ((time.tv_sec - np->n_attrstamp) >= NFS_ATTRTIMEO(np)) { 1704 nfsstats.attrcache_misses++; 1705 return (ENOENT); 1706 } 1707 nfsstats.attrcache_hits++; 1708 vap = np->n_vattr; 1709 if (vap->va_size != np->n_size) { 1710 if (vap->va_type == VREG) { 1711 if (np->n_flag & NMODIFIED) { 1712 if (vap->va_size < np->n_size) 1713 vap->va_size = np->n_size; 1714 else 1715 np->n_size = vap->va_size; 1716 } else 1717 np->n_size = vap->va_size; 1718 uvm_vnp_setsize(vp, np->n_size); 1719 } else 1720 np->n_size = vap->va_size; 1721 } 1722 memcpy((caddr_t)vaper, (caddr_t)vap, sizeof(struct vattr)); 1723 if (np->n_flag & NCHG) { 1724 if (np->n_flag & NACC) 1725 vaper->va_atime = np->n_atim; 1726 if (np->n_flag & NUPD) 1727 vaper->va_mtime = np->n_mtim; 1728 } 1729 return (0); 1730 } 

/*
 * Heuristic to see if the server XDR encodes directory cookies or not.
 * It is not supposed to, but a lot of servers may do this. Also, since
 * most/all servers will implement V2 as well, it is expected that they
 * may return just 32 bits worth of cookie information, so we need to
 * find out in which 32 bits this information is available. We do this
 * to avoid trouble with emulated binaries that can't handle 64 bit
 * directory offsets.
 */

void
nfs_cookieheuristic(vp, flagp, p, cred)
	struct vnode *vp;
	int *flagp;
	struct proc *p;
	struct ucred *cred;
{
	struct uio auio;
	struct iovec aiov;
	caddr_t buf, cp;
	struct dirent *dp;
	off_t *cookies = NULL, *cop;
	int error, eof, nc, len;

	MALLOC(buf, caddr_t, NFS_DIRFRAGSIZ, M_TEMP, M_WAITOK);

	aiov.iov_base = buf;
	aiov.iov_len = NFS_DIRFRAGSIZ;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_procp = p;
	auio.uio_resid = NFS_DIRFRAGSIZ;
	auio.uio_offset = 0;

	error = VOP_READDIR(vp, &auio, cred, &eof, &cookies, &nc);

	len = NFS_DIRFRAGSIZ - auio.uio_resid;
	if (error || len == 0) {
		FREE(buf, M_TEMP);
		if (cookies)
			free(cookies, M_TEMP);
		return;
	}

	/*
	 * Find the first valid entry and look at its offset cookie.
	 */

	cp = buf;
	for (cop = cookies; len > 0; len -= dp->d_reclen) {
		dp = (struct dirent *)cp;
		if (dp->d_fileno != 0 && len >= dp->d_reclen) {
			if ((*cop >> 32) != 0 && (*cop & 0xffffffffLL) == 0) {
				*flagp |= NFSMNT_SWAPCOOKIE;
				nfs_invaldircache(vp, 0);
				nfs_vinvalbuf(vp, 0, cred, p, 1);
			}
			break;
		}
		cop++;
		cp += dp->d_reclen;
	}

	FREE(buf, M_TEMP);
	free(cookies, M_TEMP);
}
#endif /* NFS */

/*
 * Set up nameidata for a lookup() call and do it.
 *
 * If pubflag is set, this call is done for a lookup operation on the
 * public filehandle. In that case we allow crossing mountpoints and
 * absolute pathnames. However, the caller is expected to check that
 * the lookup result is within the public fs, and deny access if
 * it is not.
 */
int
nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag)
	struct nameidata *ndp;
	fhandle_t *fhp;
	int len;
	struct nfssvc_sock *slp;
	struct mbuf *nam;
	struct mbuf **mdp;
	caddr_t *dposp;
	struct vnode **retdirp;
	struct proc *p;
	int kerbflag, pubflag;
{
	int i, rem;
	struct mbuf *md;
	char *fromcp, *tocp, *cp;
	struct iovec aiov;
	struct uio auio;
	struct vnode *dp;
	int error, rdonly, linklen;
	struct componentname *cnp = &ndp->ni_cnd;

	*retdirp = (struct vnode *)0;

	if ((len + 1) > MAXPATHLEN)
		return (ENAMETOOLONG);
	cnp->cn_pnbuf = PNBUF_GET();

	/*
	 * Copy the name from the mbuf list to ndp->ni_pnbuf
	 * and set the various ndp fields appropriately.
1842 */ 1843 fromcp = *dposp; 1844 tocp = cnp->cn_pnbuf; 1845 md = *mdp; 1846 rem = mtod(md, caddr_t) + md->m_len - fromcp; 1847 for (i = 0; i < len; i++) { 1848 while (rem == 0) { 1849 md = md->m_next; 1850 if (md == NULL) { 1851 error = EBADRPC; 1852 goto out; 1853 } 1854 fromcp = mtod(md, caddr_t); 1855 rem = md->m_len; 1856 } 1857 if (*fromcp == '\0' || (!pubflag && *fromcp == '/')) { 1858 error = EACCES; 1859 goto out; 1860 } 1861 *tocp++ = *fromcp++; 1862 rem--; 1863 } 1864 *tocp = '\0'; 1865 *mdp = md; 1866 *dposp = fromcp; 1867 len = nfsm_rndup(len)-len; 1868 if (len > 0) { 1869 if (rem >= len) 1870 *dposp += len; 1871 else if ((error = nfs_adv(mdp, dposp, len, rem)) != 0) 1872 goto out; 1873 } 1874 1875 /* 1876 * Extract and set starting directory. 1877 */ 1878 error = nfsrv_fhtovp(fhp, FALSE, &dp, ndp->ni_cnd.cn_cred, slp, 1879 nam, &rdonly, kerbflag, pubflag); 1880 if (error) 1881 goto out; 1882 if (dp->v_type != VDIR) { 1883 vrele(dp); 1884 error = ENOTDIR; 1885 goto out; 1886 } 1887 1888 if (rdonly) 1889 cnp->cn_flags |= RDONLY; 1890 1891 *retdirp = dp; 1892 1893 if (pubflag) { 1894 /* 1895 * Oh joy. For WebNFS, handle those pesky '%' escapes, 1896 * and the 'native path' indicator. 1897 */ 1898 cp = PNBUF_GET(); 1899 fromcp = cnp->cn_pnbuf; 1900 tocp = cp; 1901 if ((unsigned char)*fromcp >= WEBNFS_SPECCHAR_START) { 1902 switch ((unsigned char)*fromcp) { 1903 case WEBNFS_NATIVE_CHAR: 1904 /* 1905 * 'Native' path for us is the same 1906 * as a path according to the NFS spec, 1907 * just skip the escape char. 1908 */ 1909 fromcp++; 1910 break; 1911 /* 1912 * More may be added in the future, range 0x80-0xff 1913 */ 1914 default: 1915 error = EIO; 1916 FREE(cp, M_NAMEI); 1917 goto out; 1918 } 1919 } 1920 /* 1921 * Translate the '%' escapes, URL-style. 
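 * For example, an escaped space arrives as "%20"; HEXSTRTOI() below turns
 * the two hex digits following the escape character back into the byte
 * 0x20.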
1922 */ 1923 while (*fromcp != '\0') { 1924 if (*fromcp == WEBNFS_ESC_CHAR) { 1925 if (fromcp[1] != '\0' && fromcp[2] != '\0') { 1926 fromcp++; 1927 *tocp++ = HEXSTRTOI(fromcp); 1928 fromcp += 2; 1929 continue; 1930 } else { 1931 error = ENOENT; 1932 FREE(cp, M_NAMEI); 1933 goto out; 1934 } 1935 } else 1936 *tocp++ = *fromcp++; 1937 } 1938 *tocp = '\0'; 1939 PNBUF_PUT(cnp->cn_pnbuf); 1940 cnp->cn_pnbuf = cp; 1941 } 1942 1943 ndp->ni_pathlen = (tocp - cnp->cn_pnbuf) + 1; 1944 ndp->ni_segflg = UIO_SYSSPACE; 1945 1946 if (pubflag) { 1947 ndp->ni_rootdir = rootvnode; 1948 ndp->ni_loopcnt = 0; 1949 if (cnp->cn_pnbuf[0] == '/') 1950 dp = rootvnode; 1951 } else { 1952 cnp->cn_flags |= NOCROSSMOUNT; 1953 } 1954 1955 cnp->cn_proc = p; 1956 VREF(dp); 1957 1958 for (;;) { 1959 cnp->cn_nameptr = cnp->cn_pnbuf; 1960 ndp->ni_startdir = dp; 1961 /* 1962 * And call lookup() to do the real work 1963 */ 1964 error = lookup(ndp); 1965 if (error) 1966 break; 1967 /* 1968 * Check for encountering a symbolic link 1969 */ 1970 if ((cnp->cn_flags & ISSYMLINK) == 0) { 1971 if (cnp->cn_flags & (SAVENAME | SAVESTART)) { 1972 cnp->cn_flags |= HASBUF; 1973 return (0); 1974 } 1975 break; 1976 } else { 1977 if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1) 1978 VOP_UNLOCK(ndp->ni_dvp, 0); 1979 if (!pubflag) { 1980 vrele(ndp->ni_dvp); 1981 vput(ndp->ni_vp); 1982 ndp->ni_vp = NULL; 1983 error = EINVAL; 1984 break; 1985 } 1986 1987 if (ndp->ni_loopcnt++ >= MAXSYMLINKS) { 1988 error = ELOOP; 1989 break; 1990 } 1991 if (ndp->ni_pathlen > 1) 1992 cp = PNBUF_GET(); 1993 else 1994 cp = cnp->cn_pnbuf; 1995 aiov.iov_base = cp; 1996 aiov.iov_len = MAXPATHLEN; 1997 auio.uio_iov = &aiov; 1998 auio.uio_iovcnt = 1; 1999 auio.uio_offset = 0; 2000 auio.uio_rw = UIO_READ; 2001 auio.uio_segflg = UIO_SYSSPACE; 2002 auio.uio_procp = (struct proc *)0; 2003 auio.uio_resid = MAXPATHLEN; 2004 error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred); 2005 if (error) { 2006 badlink: 2007 if (ndp->ni_pathlen > 1) 2008 PNBUF_PUT(cp); 2009 break; 2010 } 2011 linklen = MAXPATHLEN - auio.uio_resid; 2012 if (linklen == 0) { 2013 error = ENOENT; 2014 goto badlink; 2015 } 2016 if (linklen + ndp->ni_pathlen >= MAXPATHLEN) { 2017 error = ENAMETOOLONG; 2018 goto badlink; 2019 } 2020 if (ndp->ni_pathlen > 1) { 2021 memcpy(cp + linklen, ndp->ni_next, ndp->ni_pathlen); 2022 PNBUF_PUT(cnp->cn_pnbuf); 2023 cnp->cn_pnbuf = cp; 2024 } else 2025 cnp->cn_pnbuf[linklen] = '\0'; 2026 ndp->ni_pathlen += linklen; 2027 vput(ndp->ni_vp); 2028 dp = ndp->ni_dvp; 2029 /* 2030 * Check if root directory should replace current directory. 2031 */ 2032 if (cnp->cn_pnbuf[0] == '/') { 2033 vrele(dp); 2034 dp = ndp->ni_rootdir; 2035 VREF(dp); 2036 } 2037 } 2038 } 2039 out: 2040 PNBUF_PUT(cnp->cn_pnbuf); 2041 return (error); 2042 } 2043 2044 /* 2045 * A fiddled version of m_adj() that ensures null fill to a long 2046 * boundary and only trims off the back end 2047 */ 2048 void 2049 nfsm_adj(mp, len, nul) 2050 struct mbuf *mp; 2051 int len; 2052 int nul; 2053 { 2054 struct mbuf *m; 2055 int count, i; 2056 char *cp; 2057 2058 /* 2059 * Trim from tail. Scan the mbuf chain, 2060 * calculating its length and finding the last mbuf. 2061 * If the adjustment only affects this mbuf, then just 2062 * adjust and return. Otherwise, rescan and truncate 2063 * after the remaining size. 
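 * For example, a call with len == 3 and nul == 4 drops the last 3 bytes
 * of the chain and then zero-fills the 4 bytes that precede the new end.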
2064 */ 2065 count = 0; 2066 m = mp; 2067 for (;;) { 2068 count += m->m_len; 2069 if (m->m_next == (struct mbuf *)0) 2070 break; 2071 m = m->m_next; 2072 } 2073 if (m->m_len > len) { 2074 m->m_len -= len; 2075 if (nul > 0) { 2076 cp = mtod(m, caddr_t)+m->m_len-nul; 2077 for (i = 0; i < nul; i++) 2078 *cp++ = '\0'; 2079 } 2080 return; 2081 } 2082 count -= len; 2083 if (count < 0) 2084 count = 0; 2085 /* 2086 * Correct length for chain is "count". 2087 * Find the mbuf with last data, adjust its length, 2088 * and toss data from remaining mbufs on chain. 2089 */ 2090 for (m = mp; m; m = m->m_next) { 2091 if (m->m_len >= count) { 2092 m->m_len = count; 2093 if (nul > 0) { 2094 cp = mtod(m, caddr_t)+m->m_len-nul; 2095 for (i = 0; i < nul; i++) 2096 *cp++ = '\0'; 2097 } 2098 break; 2099 } 2100 count -= m->m_len; 2101 } 2102 for (m = m->m_next;m;m = m->m_next) 2103 m->m_len = 0; 2104 } 2105 2106 /* 2107 * Make these functions instead of macros, so that the kernel text size 2108 * doesn't get too big... 2109 */ 2110 void 2111 nfsm_srvwcc(nfsd, before_ret, before_vap, after_ret, after_vap, mbp, bposp) 2112 struct nfsrv_descript *nfsd; 2113 int before_ret; 2114 struct vattr *before_vap; 2115 int after_ret; 2116 struct vattr *after_vap; 2117 struct mbuf **mbp; 2118 char **bposp; 2119 { 2120 struct mbuf *mb = *mbp, *mb2; 2121 char *bpos = *bposp; 2122 u_int32_t *tl; 2123 2124 if (before_ret) { 2125 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 2126 *tl = nfs_false; 2127 } else { 2128 nfsm_build(tl, u_int32_t *, 7 * NFSX_UNSIGNED); 2129 *tl++ = nfs_true; 2130 txdr_hyper(before_vap->va_size, tl); 2131 tl += 2; 2132 txdr_nfsv3time(&(before_vap->va_mtime), tl); 2133 tl += 2; 2134 txdr_nfsv3time(&(before_vap->va_ctime), tl); 2135 } 2136 *bposp = bpos; 2137 *mbp = mb; 2138 nfsm_srvpostopattr(nfsd, after_ret, after_vap, mbp, bposp); 2139 } 2140 2141 void 2142 nfsm_srvpostopattr(nfsd, after_ret, after_vap, mbp, bposp) 2143 struct nfsrv_descript *nfsd; 2144 int after_ret; 2145 struct vattr *after_vap; 2146 struct mbuf **mbp; 2147 char **bposp; 2148 { 2149 struct mbuf *mb = *mbp, *mb2; 2150 char *bpos = *bposp; 2151 u_int32_t *tl; 2152 struct nfs_fattr *fp; 2153 2154 if (after_ret) { 2155 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 2156 *tl = nfs_false; 2157 } else { 2158 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED + NFSX_V3FATTR); 2159 *tl++ = nfs_true; 2160 fp = (struct nfs_fattr *)tl; 2161 nfsm_srvfattr(nfsd, after_vap, fp); 2162 } 2163 *mbp = mb; 2164 *bposp = bpos; 2165 } 2166 2167 void 2168 nfsm_srvfattr(nfsd, vap, fp) 2169 struct nfsrv_descript *nfsd; 2170 struct vattr *vap; 2171 struct nfs_fattr *fp; 2172 { 2173 2174 fp->fa_nlink = txdr_unsigned(vap->va_nlink); 2175 fp->fa_uid = txdr_unsigned(vap->va_uid); 2176 fp->fa_gid = txdr_unsigned(vap->va_gid); 2177 if (nfsd->nd_flag & ND_NFSV3) { 2178 fp->fa_type = vtonfsv3_type(vap->va_type); 2179 fp->fa_mode = vtonfsv3_mode(vap->va_mode); 2180 txdr_hyper(vap->va_size, &fp->fa3_size); 2181 txdr_hyper(vap->va_bytes, &fp->fa3_used); 2182 fp->fa3_rdev.specdata1 = txdr_unsigned(major(vap->va_rdev)); 2183 fp->fa3_rdev.specdata2 = txdr_unsigned(minor(vap->va_rdev)); 2184 fp->fa3_fsid.nfsuquad[0] = 0; 2185 fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid); 2186 fp->fa3_fileid.nfsuquad[0] = 0; 2187 fp->fa3_fileid.nfsuquad[1] = txdr_unsigned(vap->va_fileid); 2188 txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime); 2189 txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime); 2190 txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime); 2191 } else { 2192 fp->fa_type = 

void
nfsm_srvfattr(nfsd, vap, fp)
	struct nfsrv_descript *nfsd;
	struct vattr *vap;
	struct nfs_fattr *fp;
{

	fp->fa_nlink = txdr_unsigned(vap->va_nlink);
	fp->fa_uid = txdr_unsigned(vap->va_uid);
	fp->fa_gid = txdr_unsigned(vap->va_gid);
	if (nfsd->nd_flag & ND_NFSV3) {
		fp->fa_type = vtonfsv3_type(vap->va_type);
		fp->fa_mode = vtonfsv3_mode(vap->va_mode);
		txdr_hyper(vap->va_size, &fp->fa3_size);
		txdr_hyper(vap->va_bytes, &fp->fa3_used);
		fp->fa3_rdev.specdata1 = txdr_unsigned(major(vap->va_rdev));
		fp->fa3_rdev.specdata2 = txdr_unsigned(minor(vap->va_rdev));
		fp->fa3_fsid.nfsuquad[0] = 0;
		fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid);
		fp->fa3_fileid.nfsuquad[0] = 0;
		fp->fa3_fileid.nfsuquad[1] = txdr_unsigned(vap->va_fileid);
		txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime);
		txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime);
		txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime);
	} else {
		fp->fa_type = vtonfsv2_type(vap->va_type);
		fp->fa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		fp->fa2_size = txdr_unsigned(vap->va_size);
		fp->fa2_blocksize = txdr_unsigned(vap->va_blocksize);
		if (vap->va_type == VFIFO)
			fp->fa2_rdev = 0xffffffff;
		else
			fp->fa2_rdev = txdr_unsigned(vap->va_rdev);
		fp->fa2_blocks = txdr_unsigned(vap->va_bytes / NFS_FABLKSIZE);
		fp->fa2_fsid = txdr_unsigned(vap->va_fsid);
		fp->fa2_fileid = txdr_unsigned(vap->va_fileid);
		txdr_nfsv2time(&vap->va_atime, &fp->fa2_atime);
		txdr_nfsv2time(&vap->va_mtime, &fp->fa2_mtime);
		txdr_nfsv2time(&vap->va_ctime, &fp->fa2_ctime);
	}
}

/*
 * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 *	- look up fsid in mount list (if not found, return error)
 *	- get vp and export rights by calling VFS_FHTOVP()
 *	- if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
 *	- if not lockflag unlock it with VOP_UNLOCK()
 */
int
nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp, kerbflag, pubflag)
	fhandle_t *fhp;
	int lockflag;
	struct vnode **vpp;
	struct ucred *cred;
	struct nfssvc_sock *slp;
	struct mbuf *nam;
	int *rdonlyp;
	int kerbflag;
	int pubflag;
{
	struct mount *mp;
	int i;
	struct ucred *credanon;
	int error, exflags;
	struct sockaddr_in *saddr;

	*vpp = (struct vnode *)0;

	if (nfs_ispublicfh(fhp)) {
		if (!pubflag || !nfs_pub.np_valid)
			return (ESTALE);
		fhp = &nfs_pub.np_handle;
	}

	mp = vfs_getvfs(&fhp->fh_fsid);
	if (!mp)
		return (ESTALE);
	error = VFS_CHECKEXP(mp, nam, &exflags, &credanon);
	if (error)
		return (error);
	error = VFS_FHTOVP(mp, &fhp->fh_fid, vpp);
	if (error)
		return (error);

	if (!(exflags & (MNT_EXNORESPORT|MNT_EXPUBLIC))) {
		saddr = mtod(nam, struct sockaddr_in *);
		if ((saddr->sin_family == AF_INET) &&
		    ntohs(saddr->sin_port) >= IPPORT_RESERVED) {
			vput(*vpp);
			return (NFSERR_AUTHERR | AUTH_TOOWEAK);
		}
#ifdef INET6
		if ((saddr->sin_family == AF_INET6) &&
		    ntohs(saddr->sin_port) >= IPV6PORT_RESERVED) {
			vput(*vpp);
			return (NFSERR_AUTHERR | AUTH_TOOWEAK);
		}
#endif
	}
	/*
	 * Check/setup credentials.
	 */
	if (exflags & MNT_EXKERB) {
		if (!kerbflag) {
			vput(*vpp);
			return (NFSERR_AUTHERR | AUTH_TOOWEAK);
		}
	} else if (kerbflag) {
		vput(*vpp);
		return (NFSERR_AUTHERR | AUTH_TOOWEAK);
	} else if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
		cred->cr_uid = credanon->cr_uid;
		cred->cr_gid = credanon->cr_gid;
		for (i = 0; i < credanon->cr_ngroups && i < NGROUPS; i++)
			cred->cr_groups[i] = credanon->cr_groups[i];
		cred->cr_ngroups = i;
	}
	if (exflags & MNT_EXRDONLY)
		*rdonlyp = 1;
	else
		*rdonlyp = 0;
	if (!lockflag)
		VOP_UNLOCK(*vpp, 0);
	return (0);
}
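
/*
 * Unless the export sets MNT_EXNORESPORT or MNT_EXPUBLIC, nfsrv_fhtovp()
 * above insists that the client's request came from a reserved source port,
 * which only the superuser can bind on traditional UNIX systems.  The
 * standalone check below illustrates the same test; the function name is
 * made up for the example, and IPPORT_RESERVED is 1024 on the systems this
 * sketch assumes.
 */
#if 0	/* illustrative sketch only; not compiled into the kernel */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/*
 * Return 1 if the peer's source port is in the reserved range (below
 * IPPORT_RESERVED), 0 otherwise.  nfsrv_fhtovp() answers
 * NFSERR_AUTHERR | AUTH_TOOWEAK when this check fails.
 */
static int
from_reserved_port(const struct sockaddr_in *sin)
{
	return (sin->sin_family == AF_INET &&
	    ntohs(sin->sin_port) < IPPORT_RESERVED);
}

int
main(void)
{
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(1023);
	printf("port 1023:  %s\n", from_reserved_port(&sin) ? "ok" : "too weak");
	sin.sin_port = htons(40000);
	printf("port 40000: %s\n", from_reserved_port(&sin) ? "ok" : "too weak");
	return (0);
}
#endif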

/*
 * WebNFS: check if a filehandle is a public filehandle. For v3, this
 * means a length of 0, for v2 it means all zeroes. nfsm_srvmtofh has
 * transformed this to all zeroes in both cases, so check for it.
 */
int
nfs_ispublicfh(fhp)
	fhandle_t *fhp;
{
	char *cp = (char *)fhp;
	int i;

	for (i = 0; i < NFSX_V3FH; i++)
		if (*cp++ != 0)
			return (FALSE);
	return (TRUE);
}

/*
 * This function compares two net addresses by family and returns TRUE
 * if they are the same host.
 * If there is any doubt, return FALSE.
 * The AF_INET family is handled as a special case so that address mbufs
 * don't need to be saved to store "struct in_addr", which is only 4 bytes.
 */
int
netaddr_match(family, haddr, nam)
	int family;
	union nethostaddr *haddr;
	struct mbuf *nam;
{
	struct sockaddr_in *inetaddr;

	switch (family) {
	case AF_INET:
		inetaddr = mtod(nam, struct sockaddr_in *);
		if (inetaddr->sin_family == AF_INET &&
		    inetaddr->sin_addr.s_addr == haddr->had_inetaddr)
			return (1);
		break;
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 *sin6_1, *sin6_2;

		sin6_1 = mtod(nam, struct sockaddr_in6 *);
		sin6_2 = mtod(haddr->had_nam, struct sockaddr_in6 *);
		if (sin6_1->sin6_family == AF_INET6 &&
		    IN6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr, &sin6_2->sin6_addr))
			return (1);
		break;
	    }
#endif
#ifdef ISO
	case AF_ISO:
	    {
		struct sockaddr_iso *isoaddr1, *isoaddr2;

		isoaddr1 = mtod(nam, struct sockaddr_iso *);
		isoaddr2 = mtod(haddr->had_nam, struct sockaddr_iso *);
		if (isoaddr1->siso_family == AF_ISO &&
		    isoaddr1->siso_nlen > 0 &&
		    isoaddr1->siso_nlen == isoaddr2->siso_nlen &&
		    SAME_ISOADDR(isoaddr1, isoaddr2))
			return (1);
		break;
	    }
#endif /* ISO */
	default:
		break;
	}
	return (0);
}
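
/*
 * nfs_ispublicfh() above treats an all-zero handle as the WebNFS public
 * filehandle.  A common alternative to the byte loop is comparing against
 * a static block of zeroes with memcmp(), as sketched below; FH_SIZE is a
 * stand-in for NFSX_V3FH chosen only for this example.
 */
#if 0	/* illustrative sketch only; not compiled into the kernel */
#include <stdio.h>
#include <string.h>

#define FH_SIZE	64	/* stands in for NFSX_V3FH in this sketch */

/* Same test as the loop above: TRUE only if every byte is zero. */
static int
is_public_fh(const unsigned char *fh)
{
	static const unsigned char zero[FH_SIZE];

	return (memcmp(fh, zero, FH_SIZE) == 0);
}

int
main(void)
{
	unsigned char fh[FH_SIZE];

	memset(fh, 0, sizeof(fh));
	printf("all zero: %d\n", is_public_fh(fh));	/* 1 */
	fh[10] = 0x5a;
	printf("non-zero: %d\n", is_public_fh(fh));	/* 0 */
	return (0);
}
#endif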

/*
 * The write verifier has changed (probably due to a server reboot), so all
 * B_NEEDCOMMIT blocks will have to be written again. Since they are on the
 * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
 * flag. Once done the new write verifier can be set for the mount point.
 */
void
nfs_clearcommit(mp)
	struct mount *mp;
{
	struct vnode *vp;
	struct nfsnode *np;
	struct vm_page *pg;
	int s;

	s = splbio();
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		KASSERT(vp->v_mount == mp);
		if (vp->v_type == VNON)
			continue;
		np = VTONFS(vp);
		np->n_pushlo = np->n_pushhi = np->n_pushedlo =
		    np->n_pushedhi = 0;
		np->n_commitflags &=
		    ~(NFS_COMMIT_PUSH_VALID | NFS_COMMIT_PUSHED_VALID);
		simple_lock(&vp->v_uvm.u_obj.vmobjlock);
		TAILQ_FOREACH(pg, &vp->v_uvm.u_obj.memq, listq) {
			pg->flags &= ~PG_NEEDCOMMIT;
		}
		simple_unlock(&vp->v_uvm.u_obj.vmobjlock);
	}
	splx(s);
}

void
nfs_merge_commit_ranges(vp)
	struct vnode *vp;
{
	struct nfsnode *np = VTONFS(vp);

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID)) {
		np->n_pushedlo = np->n_pushlo;
		np->n_pushedhi = np->n_pushhi;
		np->n_commitflags |= NFS_COMMIT_PUSHED_VALID;
	} else {
		if (np->n_pushlo < np->n_pushedlo)
			np->n_pushedlo = np->n_pushlo;
		if (np->n_pushhi > np->n_pushedhi)
			np->n_pushedhi = np->n_pushhi;
	}

	np->n_pushlo = np->n_pushhi = 0;
	np->n_commitflags &= ~NFS_COMMIT_PUSH_VALID;

#ifdef fvdl_debug
	printf("merge: committed: %u - %u\n", (unsigned)np->n_pushedlo,
	    (unsigned)np->n_pushedhi);
#endif
}

int
nfs_in_committed_range(vp, off, len)
	struct vnode *vp;
	off_t off, len;
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID))
		return 0;
	lo = off;
	hi = lo + len;

	return (lo >= np->n_pushedlo && hi <= np->n_pushedhi);
}

int
nfs_in_tobecommitted_range(vp, off, len)
	struct vnode *vp;
	off_t off, len;
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID))
		return 0;
	lo = off;
	hi = lo + len;

	return (lo >= np->n_pushlo && hi <= np->n_pushhi);
}

void
nfs_add_committed_range(vp, off, len)
	struct vnode *vp;
	off_t off, len;
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	lo = off;
	hi = lo + len;

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID)) {
		np->n_pushedlo = lo;
		np->n_pushedhi = hi;
		np->n_commitflags |= NFS_COMMIT_PUSHED_VALID;
	} else {
		if (hi > np->n_pushedhi)
			np->n_pushedhi = hi;
		if (lo < np->n_pushedlo)
			np->n_pushedlo = lo;
	}
#ifdef fvdl_debug
	printf("add: committed: %u - %u\n", (unsigned)np->n_pushedlo,
	    (unsigned)np->n_pushedhi);
#endif
}
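
/*
 * The commit bookkeeping above tracks, per nfsnode, a single byte range
 * [lo, hi) plus a valid flag, widening it on add and answering containment
 * queries.  The self-contained sketch below shows the same single-interval
 * approach with a made-up struct name; it is not the kernel data structure,
 * and it shares the "only one range" limitation noted in the XXX comments
 * in the deletion routines that follow.
 */
#if 0	/* illustrative sketch only; not compiled into the kernel */
#include <stdio.h>

/* One interval [lo, hi) plus a "have we stored anything yet" flag. */
struct range {
	long long lo, hi;
	int valid;
};

/* Like nfs_add_committed_range(): widen the interval to cover [off, off+len). */
static void
range_add(struct range *r, long long off, long long len)
{
	long long lo = off, hi = off + len;

	if (!r->valid) {
		r->lo = lo;
		r->hi = hi;
		r->valid = 1;
		return;
	}
	if (lo < r->lo)
		r->lo = lo;
	if (hi > r->hi)
		r->hi = hi;
}

/* Like nfs_in_committed_range(): is [off, off+len) fully inside the interval? */
static int
range_contains(const struct range *r, long long off, long long len)
{
	return (r->valid && off >= r->lo && off + len <= r->hi);
}

int
main(void)
{
	struct range r = { 0, 0, 0 };

	range_add(&r, 0, 4096);		/* [0, 4096) */
	range_add(&r, 4096, 4096);	/* widens to [0, 8192) */
	printf("[0,8192) covered:     %d\n", range_contains(&r, 0, 8192));	/* 1 */
	printf("[8192,12288) covered: %d\n", range_contains(&r, 8192, 4096));	/* 0 */
	return (0);
}
#endif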

void
nfs_del_committed_range(vp, off, len)
	struct vnode *vp;
	off_t off, len;
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSHED_VALID))
		return;

	lo = off;
	hi = lo + len;

	if (lo > np->n_pushedhi || hi < np->n_pushedlo)
		return;
	if (lo <= np->n_pushedlo)
		np->n_pushedlo = hi;
	else if (hi >= np->n_pushedhi)
		np->n_pushedhi = lo;
	else {
		/*
		 * XXX There's only one range. If the deleted range
		 * is in the middle, pick the largest of the
		 * contiguous ranges that it leaves.
		 */
		if ((lo - np->n_pushedlo) > (np->n_pushedhi - hi))
			np->n_pushedhi = lo;
		else
			np->n_pushedlo = hi;
	}
#ifdef fvdl_debug
	printf("del: committed: %u - %u\n", (unsigned)np->n_pushedlo,
	    (unsigned)np->n_pushedhi);
#endif
}

void
nfs_add_tobecommitted_range(vp, off, len)
	struct vnode *vp;
	off_t off, len;
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	lo = off;
	hi = lo + len;

	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID)) {
		np->n_pushlo = lo;
		np->n_pushhi = hi;
		np->n_commitflags |= NFS_COMMIT_PUSH_VALID;
	} else {
		if (lo < np->n_pushlo)
			np->n_pushlo = lo;
		if (hi > np->n_pushhi)
			np->n_pushhi = hi;
	}
#ifdef fvdl_debug
	printf("add: tobecommitted: %u - %u\n", (unsigned)np->n_pushlo,
	    (unsigned)np->n_pushhi);
#endif
}

void
nfs_del_tobecommitted_range(vp, off, len)
	struct vnode *vp;
	off_t off, len;
{
	struct nfsnode *np = VTONFS(vp);
	off_t lo, hi;

	if (!(np->n_commitflags & NFS_COMMIT_PUSH_VALID))
		return;

	lo = off;
	hi = lo + len;

	if (lo > np->n_pushhi || hi < np->n_pushlo)
		return;

	if (lo <= np->n_pushlo)
		np->n_pushlo = hi;
	else if (hi >= np->n_pushhi)
		np->n_pushhi = lo;
	else {
		/*
		 * XXX There's only one range. If the deleted range
		 * is in the middle, pick the largest of the
		 * contiguous ranges that it leaves.
		 */
		if ((lo - np->n_pushlo) > (np->n_pushhi - hi))
			np->n_pushhi = lo;
		else
			np->n_pushlo = hi;
	}
#ifdef fvdl_debug
	printf("del: tobecommitted: %u - %u\n", (unsigned)np->n_pushlo,
	    (unsigned)np->n_pushhi);
#endif
}

/*
 * Map errnos to NFS error numbers. For Version 3 also filter out error
 * numbers not specified for the associated procedure.
 */
int
nfsrv_errmap(nd, err)
	struct nfsrv_descript *nd;
	int err;
{
	short *defaulterrp, *errp;

	if (nd->nd_flag & ND_NFSV3) {
		if (nd->nd_procnum <= NFSPROC_COMMIT) {
			errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum];
			while (*++errp) {
				if (*errp == err)
					return (err);
				else if (*errp > err)
					break;
			}
			return ((int)*defaulterrp);
		} else
			return (err & 0xffff);
	}
	if (err <= ELAST)
		return ((int)nfsrv_v2errmap[err - 1]);
	return (NFSERR_IO);
}

/*
 * Sort the group list in increasing numerical order.
 * (Insertion sort by Chris Torek, who was grossed out by the bubble sort
 * that used to be here.)
 */
void
nfsrvw_sort(list, num)
	gid_t *list;
	int num;
{
	int i, j;
	gid_t v;

	/* Insertion sort. */
	for (i = 1; i < num; i++) {
		v = list[i];
		/* find correct slot for value v, moving others up */
		for (j = i; --j >= 0 && v < list[j];)
			list[j + 1] = list[j];
		list[j + 1] = v;
	}
}
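
/*
 * nfsrv_errmap() above relies on each nfsrv_v3errmap[] row starting with
 * the procedure's default error, followed by a sorted, zero-terminated
 * list of the errnos that procedure is allowed to return.  The sketch
 * below runs the same lookup against a made-up row to show how the
 * default is chosen; the table contents here are invented purely for
 * illustration and are not the kernel's tables.
 */
#if 0	/* illustrative sketch only; not compiled into the kernel */
#include <stdio.h>
#include <errno.h>

/*
 * A made-up error-map row in the nfsrv_v3errmap[] style: entry 0 is the
 * default error, the rest is a sorted, zero-terminated list of allowed
 * errnos.
 */
static const short demo_errmap[] = { EIO, ENOENT, EACCES, ESTALE, 0 };

/* Same walk as the ND_NFSV3 branch of nfsrv_errmap(). */
static int
map_error(const short *row, int err)
{
	const short *defaulterrp = row, *errp = row;

	while (*++errp) {
		if (*errp == err)
			return (err);		/* allowed: pass through */
		else if (*errp > err)
			break;			/* sorted list: no match */
	}
	return ((int)*defaulterrp);		/* not allowed: use default */
}

int
main(void)
{
	printf("ENOENT -> %d (passes through)\n", map_error(demo_errmap, ENOENT));
	printf("EPERM  -> %d (mapped to the default, EIO)\n",
	    map_error(demo_errmap, EPERM));
	return (0);
}
#endif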

/*
 * Copy credentials, making sure that the result can be compared with
 * memcmp().
 */
void
nfsrv_setcred(incred, outcred)
	struct ucred *incred, *outcred;
{
	int i;

	memset((caddr_t)outcred, 0, sizeof (struct ucred));
	outcred->cr_ref = 1;
	outcred->cr_uid = incred->cr_uid;
	outcred->cr_gid = incred->cr_gid;
	outcred->cr_ngroups = incred->cr_ngroups;
	for (i = 0; i < incred->cr_ngroups; i++)
		outcred->cr_groups[i] = incred->cr_groups[i];
	nfsrvw_sort(outcred->cr_groups, outcred->cr_ngroups);
}
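
/*
 * nfsrv_setcred() above canonicalizes a credential: memset() clears any
 * padding and unused group slots, and nfsrvw_sort() puts the groups in a
 * fixed order, so two credentials describing the same identity compare
 * equal with memcmp().  The standalone sketch below demonstrates that idea
 * with a toy struct; the type, MAXGROUPS, and the qsort() comparison are
 * illustrative stand-ins, not kernel code.
 */
#if 0	/* illustrative sketch only; not compiled into the kernel */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAXGROUPS	16	/* stands in for NGROUPS in this sketch */

struct toy_cred {
	unsigned int uid, gid;
	int ngroups;
	unsigned int groups[MAXGROUPS];
};

static int
cmp_uint(const void *a, const void *b)
{
	unsigned int ua = *(const unsigned int *)a;
	unsigned int ub = *(const unsigned int *)b;

	return (ua < ub) ? -1 : (ua > ub);
}

/*
 * Canonical copy: zero everything first (unused slots and padding become
 * 0), copy the fields, then sort the group list, just as nfsrv_setcred()
 * does with nfsrvw_sort().
 */
static void
setcred(const struct toy_cred *in, struct toy_cred *out)
{
	int i;

	memset(out, 0, sizeof(*out));
	out->uid = in->uid;
	out->gid = in->gid;
	out->ngroups = (in->ngroups < MAXGROUPS) ? in->ngroups : MAXGROUPS;
	for (i = 0; i < out->ngroups; i++)
		out->groups[i] = in->groups[i];
	qsort(out->groups, out->ngroups, sizeof(out->groups[0]), cmp_uint);
}

int
main(void)
{
	struct toy_cred a = { 100, 100, 3, { 5, 20, 10 } };
	struct toy_cred b = { 100, 100, 3, { 10, 5, 20 } };
	struct toy_cred ca, cb;

	setcred(&a, &ca);
	setcred(&b, &cb);
	printf("same identity: %s\n",
	    memcmp(&ca, &cb, sizeof(ca)) == 0 ? "yes" : "no");
	return (0);
}
#endif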