/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/errno.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/uio.h>
#include <sys/file.h>
#include <sys/pathname.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/rwstlock.h>
#include <sys/fem.h>
#include <sys/stat.h>
#include <sys/mode.h>
#include <sys/conf.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <c2/audit.h>
#include <sys/acl.h>
#include <sys/nbmlock.h>
#include <sys/fcntl.h>
#include <fs/fs_subr.h>
#include <sys/taskq.h>
#include <fs/fs_reparse.h>

/* Determine if this vnode is a file that is read-only */
#define ISROFILE(vp) \
        ((vp)->v_type != VCHR && (vp)->v_type != VBLK && \
        (vp)->v_type != VFIFO && vn_is_readonly(vp))

/* Tunable via /etc/system; used only by admin/install */
int nfs_global_client_only;

/*
 * Array of vopstats_t for per-FS-type vopstats.  This array has the same
 * number of entries as and parallel to the vfssw table.  (Arguably, it could
 * be part of the vfssw table.)  Once it's initialized, it's accessed using
 * the same fstype index that is used to index into the vfssw table.
 */
vopstats_t **vopstats_fstype;

/* vopstats initialization template used for fast initialization via bcopy() */
static vopstats_t *vs_templatep;

/* Kmem cache handle for vsk_anchor_t allocations */
kmem_cache_t *vsk_anchor_cache;

/* file events cleanup routine */
extern void free_fopdata(vnode_t *);

/*
 * Root of AVL tree for the kstats associated with vopstats.  Lock protects
 * updates to vskstat_tree.
 */
avl_tree_t vskstat_tree;
kmutex_t vskstat_tree_lock;

/* Global variable which enables/disables the vopstats collection */
int vopstats_enabled = 1;

/*
 * forward declarations for internal vnode specific data (vsd)
 */
static void *vsd_realloc(void *, size_t, size_t);

/*
 * forward declarations for reparse point functions
 */
static int fs_reparse_mark(char *target, vattr_t *vap, xvattr_t *xvattr);

/*
 * VSD -- VNODE SPECIFIC DATA
 * The v_data pointer is typically used by a file system to store a
 * pointer to the file system's private node (e.g. ufs inode, nfs rnode).
 * However, there are times when additional project private data needs
 * to be stored separately from the data (node) pointed to by v_data.
 * This additional data could be stored by the file system itself or
 * by a completely different kernel entity.  VSD provides a way for
 * callers to obtain a key and store a pointer to private data associated
 * with a vnode.
 *
 * Callers are responsible for protecting the vsd by holding v_vsd_lock
 * for calls to vsd_set() and vsd_get().
 */
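
/*
 * Illustrative sketch of the calling pattern described above; the key,
 * destructor and payload names are hypothetical, and the vsd_create()/
 * vsd_set()/vsd_get() interfaces are assumed to follow their usual
 * TSD-style contract:
 *
 *	static uint_t mykey;
 *
 *	vsd_create(&mykey, my_destructor);	(once, e.g. at module init)
 *
 *	mutex_enter(&vp->v_vsd_lock);
 *	(void) vsd_set(vp, mykey, my_data);
 *	my_data = vsd_get(vp, mykey);
 *	mutex_exit(&vp->v_vsd_lock);
 */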

/*
 * vsd_lock protects:
 *   vsd_nkeys - creation and deletion of vsd keys
 *   vsd_list - insertion and deletion of vsd_node in the vsd_list
 *   vsd_destructor - adding and removing destructors to the list
 */
static kmutex_t vsd_lock;
static uint_t vsd_nkeys;	/* size of destructor array */
/* list of vsd_node's */
static list_t *vsd_list = NULL;
/* per-key destructor funcs */
static void (**vsd_destructor)(void *);

/*
 * The following is the common set of actions needed to update the
 * vopstats structure from a vnode op.  Both VOPSTATS_UPDATE() and
 * VOPSTATS_UPDATE_IO() do almost the same thing, except for the
 * recording of the bytes transferred.  Since the code is similar
 * but small, it is nearly a duplicate.  Consequently any changes
 * to one may need to be reflected in the other.
 * Rundown of the variables:
 * vp - Pointer to the vnode
 * counter - Partial name structure member to update in vopstats for counts
 * bytecounter - Partial name structure member to update in vopstats for bytes
 * bytesval - Value to update in vopstats for bytes
 * fstype - Index into vsanchor_fstype[], same as index into vfssw[]
 * vsp - Pointer to vopstats structure (either in vfs or vsanchor_fstype[i])
 */

#define VOPSTATS_UPDATE(vp, counter) { \
        vfs_t *vfsp = (vp)->v_vfsp; \
        if (vfsp && vfsp->vfs_implp && \
            (vfsp->vfs_flag & VFS_STATS) && (vp)->v_type != VBAD) { \
                vopstats_t *vsp = &vfsp->vfs_vopstats; \
                uint64_t *stataddr = &(vsp->n##counter.value.ui64); \
                extern void __dtrace_probe___fsinfo_##counter(vnode_t *, \
                    size_t, uint64_t *); \
                __dtrace_probe___fsinfo_##counter(vp, 0, stataddr); \
                (*stataddr)++; \
                if ((vsp = vfsp->vfs_fstypevsp) != NULL) { \
                        vsp->n##counter.value.ui64++; \
                } \
        } \
}

#define VOPSTATS_UPDATE_IO(vp, counter, bytecounter, bytesval) { \
        vfs_t *vfsp = (vp)->v_vfsp; \
        if (vfsp && vfsp->vfs_implp && \
            (vfsp->vfs_flag & VFS_STATS) && (vp)->v_type != VBAD) { \
                vopstats_t *vsp = &vfsp->vfs_vopstats; \
                uint64_t *stataddr = &(vsp->n##counter.value.ui64); \
                extern void __dtrace_probe___fsinfo_##counter(vnode_t *, \
                    size_t, uint64_t *); \
                __dtrace_probe___fsinfo_##counter(vp, bytesval, stataddr); \
                (*stataddr)++; \
                vsp->bytecounter.value.ui64 += bytesval; \
                if ((vsp = vfsp->vfs_fstypevsp) != NULL) { \
                        vsp->n##counter.value.ui64++; \
                        vsp->bytecounter.value.ui64 += bytesval; \
                } \
        } \
}

/*
 * If the filesystem does not support XIDs, map the credential.
 * If the vfsp is NULL, perhaps we should also map?
 */
#define VOPXID_MAP_CR(vp, cr) { \
        vfs_t *vfsp = (vp)->v_vfsp; \
        if (vfsp != NULL && (vfsp->vfs_flag & VFS_XID) == 0) \
                cr = crgetmapped(cr); \
}
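
/*
 * Illustrative sketch of how the macros above are meant to be used from a
 * fop_xxx()-style wrapper; the names below are schematic, not the actual
 * wrapper code:
 *
 *	VOPXID_MAP_CR(vp, cr);		(map cred if the FS lacks XID support)
 *	error = ...call the underlying vop_read()...;
 *	VOPSTATS_UPDATE_IO(vp, read, read_bytes,
 *	    resid_start - uiop->uio_resid);
 *
 * A non-I/O op would use VOPSTATS_UPDATE(vp, open) and so on, where the
 * second argument is the partial kstat name ("open" -> nopen).
 */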

/*
 * Convert stat(2) formats to vnode types and vice versa.  (Knows about
 * numerical order of S_IFMT and vnode types.)
 */
enum vtype iftovt_tab[] = {
        VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
        VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};

ushort_t vttoif_tab[] = {
        0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO,
        S_IFDOOR, 0, S_IFSOCK, S_IFPORT, 0
};
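
/*
 * Illustrative sketch of how the tables above are indexed; the IFTOVT()/
 * VTTOIF() macros in sys/vnode.h are assumed to wrap exactly this:
 *
 *	vtype_t vt = iftovt_tab[(mode & S_IFMT) >> 12];
 *	mode_t ifmt = vttoif_tab[(int)vt];
 */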

/*
 * The system vnode cache.
 */

kmem_cache_t *vn_cache;


/*
 * Vnode operations vector.
 */

static const fs_operation_trans_def_t vn_ops_table[] = {
        VOPNAME_OPEN, offsetof(struct vnodeops, vop_open),
            fs_nosys, fs_nosys,

        VOPNAME_CLOSE, offsetof(struct vnodeops, vop_close),
            fs_nosys, fs_nosys,

        VOPNAME_READ, offsetof(struct vnodeops, vop_read),
            fs_nosys, fs_nosys,

        VOPNAME_WRITE, offsetof(struct vnodeops, vop_write),
            fs_nosys, fs_nosys,

        VOPNAME_IOCTL, offsetof(struct vnodeops, vop_ioctl),
            fs_nosys, fs_nosys,

        VOPNAME_SETFL, offsetof(struct vnodeops, vop_setfl),
            fs_setfl, fs_nosys,

        VOPNAME_GETATTR, offsetof(struct vnodeops, vop_getattr),
            fs_nosys, fs_nosys,

        VOPNAME_SETATTR, offsetof(struct vnodeops, vop_setattr),
            fs_nosys, fs_nosys,

        VOPNAME_ACCESS, offsetof(struct vnodeops, vop_access),
            fs_nosys, fs_nosys,

        VOPNAME_LOOKUP, offsetof(struct vnodeops, vop_lookup),
            fs_nosys, fs_nosys,

        VOPNAME_CREATE, offsetof(struct vnodeops, vop_create),
            fs_nosys, fs_nosys,

        VOPNAME_REMOVE, offsetof(struct vnodeops, vop_remove),
            fs_nosys, fs_nosys,

        VOPNAME_LINK, offsetof(struct vnodeops, vop_link),
            fs_nosys, fs_nosys,

        VOPNAME_RENAME, offsetof(struct vnodeops, vop_rename),
            fs_nosys, fs_nosys,

        VOPNAME_MKDIR, offsetof(struct vnodeops, vop_mkdir),
            fs_nosys, fs_nosys,

        VOPNAME_RMDIR, offsetof(struct vnodeops, vop_rmdir),
            fs_nosys, fs_nosys,

        VOPNAME_READDIR, offsetof(struct vnodeops, vop_readdir),
            fs_nosys, fs_nosys,

        VOPNAME_SYMLINK, offsetof(struct vnodeops, vop_symlink),
            fs_nosys, fs_nosys,

        VOPNAME_READLINK, offsetof(struct vnodeops, vop_readlink),
            fs_nosys, fs_nosys,

        VOPNAME_FSYNC, offsetof(struct vnodeops, vop_fsync),
            fs_nosys, fs_nosys,

        VOPNAME_INACTIVE, offsetof(struct vnodeops, vop_inactive),
            fs_nosys, fs_nosys,

        VOPNAME_FID, offsetof(struct vnodeops, vop_fid),
            fs_nosys, fs_nosys,

        VOPNAME_RWLOCK, offsetof(struct vnodeops, vop_rwlock),
            fs_rwlock, fs_rwlock,

        VOPNAME_RWUNLOCK, offsetof(struct vnodeops, vop_rwunlock),
            (fs_generic_func_p) fs_rwunlock,
            (fs_generic_func_p) fs_rwunlock,	/* no errors allowed */

        VOPNAME_SEEK, offsetof(struct vnodeops, vop_seek),
            fs_nosys, fs_nosys,

        VOPNAME_CMP, offsetof(struct vnodeops, vop_cmp),
            fs_cmp, fs_cmp,		/* no errors allowed */

        VOPNAME_FRLOCK, offsetof(struct vnodeops, vop_frlock),
            fs_frlock, fs_nosys,

        VOPNAME_SPACE, offsetof(struct vnodeops, vop_space),
            fs_nosys, fs_nosys,

        VOPNAME_REALVP, offsetof(struct vnodeops, vop_realvp),
            fs_nosys, fs_nosys,

        VOPNAME_GETPAGE, offsetof(struct vnodeops, vop_getpage),
            fs_nosys, fs_nosys,

        VOPNAME_PUTPAGE, offsetof(struct vnodeops, vop_putpage),
            fs_nosys, fs_nosys,

        VOPNAME_MAP, offsetof(struct vnodeops, vop_map),
            (fs_generic_func_p) fs_nosys_map,
            (fs_generic_func_p) fs_nosys_map,

        VOPNAME_ADDMAP, offsetof(struct vnodeops, vop_addmap),
            (fs_generic_func_p) fs_nosys_addmap,
            (fs_generic_func_p) fs_nosys_addmap,

        VOPNAME_DELMAP, offsetof(struct vnodeops, vop_delmap),
            fs_nosys, fs_nosys,

        VOPNAME_POLL, offsetof(struct vnodeops, vop_poll),
            (fs_generic_func_p) fs_poll, (fs_generic_func_p) fs_nosys_poll,

        VOPNAME_DUMP, offsetof(struct vnodeops, vop_dump),
            fs_nosys, fs_nosys,

        VOPNAME_PATHCONF, offsetof(struct vnodeops, vop_pathconf),
            fs_pathconf, fs_nosys,

        VOPNAME_PAGEIO, offsetof(struct vnodeops, vop_pageio),
            fs_nosys, fs_nosys,

        VOPNAME_DUMPCTL, offsetof(struct vnodeops, vop_dumpctl),
            fs_nosys, fs_nosys,

        VOPNAME_DISPOSE, offsetof(struct vnodeops, vop_dispose),
            (fs_generic_func_p) fs_dispose,
            (fs_generic_func_p) fs_nodispose,

        VOPNAME_SETSECATTR, offsetof(struct vnodeops, vop_setsecattr),
            fs_nosys, fs_nosys,

        VOPNAME_GETSECATTR, offsetof(struct vnodeops, vop_getsecattr),
            fs_fab_acl, fs_nosys,

        VOPNAME_SHRLOCK, offsetof(struct vnodeops, vop_shrlock),
            fs_shrlock, fs_nosys,

        VOPNAME_VNEVENT, offsetof(struct vnodeops, vop_vnevent),
            (fs_generic_func_p) fs_vnevent_nosupport,
            (fs_generic_func_p) fs_vnevent_nosupport,

        VOPNAME_REQZCBUF, offsetof(struct vnodeops, vop_reqzcbuf),
            fs_nosys, fs_nosys,

        VOPNAME_RETZCBUF, offsetof(struct vnodeops, vop_retzcbuf),
            fs_nosys, fs_nosys,

        NULL, 0, NULL, NULL
};

/* Extensible attribute (xva) routines. */

/*
 * Zero out the structure, set the size of the requested/returned bitmaps,
 * set AT_XVATTR in the embedded vattr_t's va_mask, and set up the pointer
 * to the returned attributes array.
 */
void
xva_init(xvattr_t *xvap)
{
        bzero(xvap, sizeof (xvattr_t));
        xvap->xva_mapsize = XVA_MAPSIZE;
        xvap->xva_magic = XVA_MAGIC;
        xvap->xva_vattr.va_mask = AT_XVATTR;
        xvap->xva_rtnattrmapp = &(xvap->xva_rtnattrmap)[0];
}

/*
 * If AT_XVATTR is set, returns a pointer to the embedded xoptattr_t
 * structure.  Otherwise, returns NULL.
 */
xoptattr_t *
xva_getxoptattr(xvattr_t *xvap)
{
        xoptattr_t *xoap = NULL;
        if (xvap->xva_vattr.va_mask & AT_XVATTR)
                xoap = &xvap->xva_xoptattrs;
        return (xoap);
}
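
/*
 * Illustrative sketch of requesting an optional attribute through an xvattr;
 * the XVA_SET_REQ() macro and XAT_* attribute indices are assumed to come
 * from sys/vnode.h:
 *
 *	xvattr_t xva;
 *	xoptattr_t *xoap;
 *
 *	xva_init(&xva);
 *	if ((xoap = xva_getxoptattr(&xva)) != NULL) {
 *		XVA_SET_REQ(&xva, XAT_READONLY);
 *		... pass &xva.xva_vattr to VOP_GETATTR() ...
 *		readonly = xoap->xoa_readonly;
 *	}
 */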

/*
 * Used by the AVL routines to compare two vsk_anchor_t structures in the tree.
 * We use the f_fsid reported by VFS_STATVFS() since we use that for the
 * kstat name.
 */
static int
vska_compar(const void *n1, const void *n2)
{
        int ret;
        ulong_t p1 = ((vsk_anchor_t *)n1)->vsk_fsid;
        ulong_t p2 = ((vsk_anchor_t *)n2)->vsk_fsid;

        if (p1 < p2) {
                ret = -1;
        } else if (p1 > p2) {
                ret = 1;
        } else {
                ret = 0;
        }

        return (ret);
}

/*
 * Used to create a single template which will be bcopy()ed into each
 * newly initialized vopstats structure by initialize_vopstats(), below.
 */
static vopstats_t *
create_vopstats_template()
{
        vopstats_t *vsp;

        vsp = kmem_alloc(sizeof (vopstats_t), KM_SLEEP);
        bzero(vsp, sizeof (*vsp));	/* Start fresh */

        /* VOP_OPEN */
        kstat_named_init(&vsp->nopen, "nopen", KSTAT_DATA_UINT64);
        /* VOP_CLOSE */
        kstat_named_init(&vsp->nclose, "nclose", KSTAT_DATA_UINT64);
        /* VOP_READ I/O */
        kstat_named_init(&vsp->nread, "nread", KSTAT_DATA_UINT64);
        kstat_named_init(&vsp->read_bytes, "read_bytes", KSTAT_DATA_UINT64);
        /* VOP_WRITE I/O */
        kstat_named_init(&vsp->nwrite, "nwrite", KSTAT_DATA_UINT64);
        kstat_named_init(&vsp->write_bytes, "write_bytes", KSTAT_DATA_UINT64);
        /* VOP_IOCTL */
        kstat_named_init(&vsp->nioctl, "nioctl", KSTAT_DATA_UINT64);
        /* VOP_SETFL */
        kstat_named_init(&vsp->nsetfl, "nsetfl", KSTAT_DATA_UINT64);
        /* VOP_GETATTR */
        kstat_named_init(&vsp->ngetattr, "ngetattr", KSTAT_DATA_UINT64);
        /* VOP_SETATTR */
        kstat_named_init(&vsp->nsetattr, "nsetattr", KSTAT_DATA_UINT64);
        /* VOP_ACCESS */
        kstat_named_init(&vsp->naccess, "naccess", KSTAT_DATA_UINT64);
        /* VOP_LOOKUP */
        kstat_named_init(&vsp->nlookup, "nlookup", KSTAT_DATA_UINT64);
        /* VOP_CREATE */
        kstat_named_init(&vsp->ncreate, "ncreate", KSTAT_DATA_UINT64);
        /* VOP_REMOVE */
        kstat_named_init(&vsp->nremove, "nremove", KSTAT_DATA_UINT64);
        /* VOP_LINK */
        kstat_named_init(&vsp->nlink, "nlink", KSTAT_DATA_UINT64);
        /* VOP_RENAME */
        kstat_named_init(&vsp->nrename, "nrename", KSTAT_DATA_UINT64);
        /* VOP_MKDIR */
        kstat_named_init(&vsp->nmkdir, "nmkdir", KSTAT_DATA_UINT64);
        /* VOP_RMDIR */
        kstat_named_init(&vsp->nrmdir, "nrmdir", KSTAT_DATA_UINT64);
        /* VOP_READDIR I/O */
        kstat_named_init(&vsp->nreaddir, "nreaddir", KSTAT_DATA_UINT64);
        kstat_named_init(&vsp->readdir_bytes, "readdir_bytes",
            KSTAT_DATA_UINT64);
        /* VOP_SYMLINK */
        kstat_named_init(&vsp->nsymlink, "nsymlink", KSTAT_DATA_UINT64);
        /* VOP_READLINK */
        kstat_named_init(&vsp->nreadlink, "nreadlink", KSTAT_DATA_UINT64);
        /* VOP_FSYNC */
        kstat_named_init(&vsp->nfsync, "nfsync", KSTAT_DATA_UINT64);
        /* VOP_INACTIVE */
        kstat_named_init(&vsp->ninactive, "ninactive", KSTAT_DATA_UINT64);
        /* VOP_FID */
        kstat_named_init(&vsp->nfid, "nfid", KSTAT_DATA_UINT64);
        /* VOP_RWLOCK */
        kstat_named_init(&vsp->nrwlock, "nrwlock", KSTAT_DATA_UINT64);
        /* VOP_RWUNLOCK */
        kstat_named_init(&vsp->nrwunlock, "nrwunlock", KSTAT_DATA_UINT64);
        /* VOP_SEEK */
        kstat_named_init(&vsp->nseek, "nseek", KSTAT_DATA_UINT64);
        /* VOP_CMP */
        kstat_named_init(&vsp->ncmp, "ncmp", KSTAT_DATA_UINT64);
        /* VOP_FRLOCK */
        kstat_named_init(&vsp->nfrlock, "nfrlock", KSTAT_DATA_UINT64);
        /* VOP_SPACE */
        kstat_named_init(&vsp->nspace, "nspace", KSTAT_DATA_UINT64);
        /* VOP_REALVP */
        kstat_named_init(&vsp->nrealvp, "nrealvp", KSTAT_DATA_UINT64);
        /* VOP_GETPAGE */
        kstat_named_init(&vsp->ngetpage, "ngetpage", KSTAT_DATA_UINT64);
        /* VOP_PUTPAGE */
        kstat_named_init(&vsp->nputpage, "nputpage", KSTAT_DATA_UINT64);
        /* VOP_MAP */
        kstat_named_init(&vsp->nmap, "nmap", KSTAT_DATA_UINT64);
        /* VOP_ADDMAP */
        kstat_named_init(&vsp->naddmap, "naddmap", KSTAT_DATA_UINT64);
        /* VOP_DELMAP */
        kstat_named_init(&vsp->ndelmap, "ndelmap", KSTAT_DATA_UINT64);
        /* VOP_POLL */
        kstat_named_init(&vsp->npoll, "npoll", KSTAT_DATA_UINT64);
        /* VOP_DUMP */
        kstat_named_init(&vsp->ndump, "ndump", KSTAT_DATA_UINT64);
        /* VOP_PATHCONF */
        kstat_named_init(&vsp->npathconf, "npathconf", KSTAT_DATA_UINT64);
        /* VOP_PAGEIO */
        kstat_named_init(&vsp->npageio, "npageio", KSTAT_DATA_UINT64);
        /* VOP_DUMPCTL */
        kstat_named_init(&vsp->ndumpctl, "ndumpctl", KSTAT_DATA_UINT64);
        /* VOP_DISPOSE */
        kstat_named_init(&vsp->ndispose, "ndispose", KSTAT_DATA_UINT64);
        /* VOP_SETSECATTR */
        kstat_named_init(&vsp->nsetsecattr, "nsetsecattr", KSTAT_DATA_UINT64);
        /* VOP_GETSECATTR */
        kstat_named_init(&vsp->ngetsecattr, "ngetsecattr", KSTAT_DATA_UINT64);
        /* VOP_SHRLOCK */
        kstat_named_init(&vsp->nshrlock, "nshrlock", KSTAT_DATA_UINT64);
        /* VOP_VNEVENT */
        kstat_named_init(&vsp->nvnevent, "nvnevent", KSTAT_DATA_UINT64);
        /* VOP_REQZCBUF */
        kstat_named_init(&vsp->nreqzcbuf, "nreqzcbuf", KSTAT_DATA_UINT64);
        /* VOP_RETZCBUF */
        kstat_named_init(&vsp->nretzcbuf, "nretzcbuf", KSTAT_DATA_UINT64);

        return (vsp);
}

/*
 * Creates a kstat structure associated with a vopstats structure.
 */
kstat_t *
new_vskstat(char *ksname, vopstats_t *vsp)
{
        kstat_t *ksp;

        if (!vopstats_enabled) {
                return (NULL);
        }

        ksp = kstat_create("unix", 0, ksname, "misc", KSTAT_TYPE_NAMED,
            sizeof (vopstats_t)/sizeof (kstat_named_t),
            KSTAT_FLAG_VIRTUAL|KSTAT_FLAG_WRITABLE);
        if (ksp) {
                ksp->ks_data = vsp;
                kstat_install(ksp);
        }

        return (ksp);
}

/*
 * Called from vfsinit() to initialize the support mechanisms for vopstats
 */
void
vopstats_startup()
{
        if (!vopstats_enabled)
                return;

        /*
         * Creates the AVL tree which holds per-vfs vopstat anchors.  This
         * is necessary since we need to check if a kstat exists before we
         * attempt to create it.  Also, initialize its lock.
         */
        avl_create(&vskstat_tree, vska_compar, sizeof (vsk_anchor_t),
            offsetof(vsk_anchor_t, vsk_node));
        mutex_init(&vskstat_tree_lock, NULL, MUTEX_DEFAULT, NULL);

        vsk_anchor_cache = kmem_cache_create("vsk_anchor_cache",
            sizeof (vsk_anchor_t), sizeof (uintptr_t), NULL, NULL, NULL,
            NULL, NULL, 0);

        /*
         * Set up the array of pointers for the vopstats-by-FS-type.
         * The entries will be allocated/initialized as each file system
         * goes through modload/mod_installfs.
         */
        vopstats_fstype = (vopstats_t **)kmem_zalloc(
            (sizeof (vopstats_t *) * nfstype), KM_SLEEP);

        /* Set up the global vopstats initialization template */
        vs_templatep = create_vopstats_template();
}

/*
 * We need to have all of the counters zeroed.
 * The initialization of the vopstats_t includes on the order of
 * 50 calls to kstat_named_init().  Rather than do that on every call,
 * we do it once in a template (vs_templatep), then bcopy it over.
 */
void
initialize_vopstats(vopstats_t *vsp)
{
        if (vsp == NULL)
                return;

        bcopy(vs_templatep, vsp, sizeof (vopstats_t));
}

/*
 * If possible, determine which vopstats by fstype to use and
 * return a pointer to the caller.
 */
vopstats_t *
get_fstype_vopstats(vfs_t *vfsp, struct vfssw *vswp)
{
        int fstype = 0;	/* Index into vfssw[] */
        vopstats_t *vsp = NULL;

        if (vfsp == NULL || (vfsp->vfs_flag & VFS_STATS) == 0 ||
            !vopstats_enabled)
                return (NULL);
        /*
         * Set up the fstype.  We go to so much trouble because all versions
         * of NFS use the same fstype in their vfs even though they have
         * distinct entries in the vfssw[] table.
         * NOTE: A special vfs (e.g., EIO_vfs) may not have an entry.
         */
        if (vswp) {
                fstype = vswp - vfssw;	/* Gets us the index */
        } else {
                fstype = vfsp->vfs_fstype;
        }

        /*
         * Point to the per-fstype vopstats. The only valid values are
         * non-zero positive values less than the number of vfssw[] table
         * entries.
         */
        if (fstype > 0 && fstype < nfstype) {
                vsp = vopstats_fstype[fstype];
        }

        return (vsp);
}

/*
 * Generate a kstat name, create the kstat structure, and allocate a
 * vsk_anchor_t to hold it together.  Return the pointer to the vsk_anchor_t
 * to the caller.  This must only be called from a mount.
 */
vsk_anchor_t *
get_vskstat_anchor(vfs_t *vfsp)
{
        char kstatstr[KSTAT_STRLEN];	/* kstat name for vopstats */
        statvfs64_t statvfsbuf;		/* Needed to find f_fsid */
        vsk_anchor_t *vskp = NULL;	/* vfs <--> kstat anchor */
        kstat_t *ksp;			/* Ptr to new kstat */
        avl_index_t where;		/* Location in the AVL tree */

        if (vfsp == NULL || vfsp->vfs_implp == NULL ||
            (vfsp->vfs_flag & VFS_STATS) == 0 || !vopstats_enabled)
                return (NULL);

        /* Need to get the fsid to build a kstat name */
        if (VFS_STATVFS(vfsp, &statvfsbuf) == 0) {
                /* Create a name for our kstats based on fsid */
                (void) snprintf(kstatstr, KSTAT_STRLEN, "%s%lx",
                    VOPSTATS_STR, statvfsbuf.f_fsid);

                /* Allocate and initialize the vsk_anchor_t */
                vskp = kmem_cache_alloc(vsk_anchor_cache, KM_SLEEP);
                bzero(vskp, sizeof (*vskp));
                vskp->vsk_fsid = statvfsbuf.f_fsid;

                mutex_enter(&vskstat_tree_lock);
                if (avl_find(&vskstat_tree, vskp, &where) == NULL) {
                        avl_insert(&vskstat_tree, vskp, where);
                        mutex_exit(&vskstat_tree_lock);

                        /*
                         * Now that we've got the anchor in the AVL
                         * tree, we can create the kstat.
                         */
                        ksp = new_vskstat(kstatstr, &vfsp->vfs_vopstats);
                        if (ksp) {
                                vskp->vsk_ksp = ksp;
                        }
                } else {
                        /* Oops, found one! Release memory and lock. */
                        mutex_exit(&vskstat_tree_lock);
                        kmem_cache_free(vsk_anchor_cache, vskp);
                        vskp = NULL;
                }
        }
        return (vskp);
}

/*
 * We're in the process of tearing down the vfs and need to cleanup
 * the data structures associated with the vopstats.  Must only be called
 * from dounmount().
 */
void
teardown_vopstats(vfs_t *vfsp)
{
        vsk_anchor_t *vskap;
        avl_index_t where;

        if (vfsp == NULL || vfsp->vfs_implp == NULL ||
            (vfsp->vfs_flag & VFS_STATS) == 0 || !vopstats_enabled)
                return;

        /* This is a safe check since VFS_STATS must be set (see above) */
        if ((vskap = vfsp->vfs_vskap) == NULL)
                return;

        /* Whack the pointer right away */
        vfsp->vfs_vskap = NULL;

        /* Lock the tree, remove the node, and delete the kstat */
        mutex_enter(&vskstat_tree_lock);
        if (avl_find(&vskstat_tree, vskap, &where)) {
                avl_remove(&vskstat_tree, vskap);
        }

        if (vskap->vsk_ksp) {
                kstat_delete(vskap->vsk_ksp);
        }
        mutex_exit(&vskstat_tree_lock);

        kmem_cache_free(vsk_anchor_cache, vskap);
}

/*
 * Read or write a vnode.  Called from kernel code.
 */
int
vn_rdwr(
        enum uio_rw rw,
        struct vnode *vp,
        caddr_t base,
        ssize_t len,
        offset_t offset,
        enum uio_seg seg,
        int ioflag,
        rlim64_t ulimit,	/* meaningful only if rw is UIO_WRITE */
        cred_t *cr,
        ssize_t *residp)
{
        struct uio uio;
        struct iovec iov;
        int error;
        int in_crit = 0;

        if (rw == UIO_WRITE && ISROFILE(vp))
                return (EROFS);

        if (len < 0)
                return (EIO);

        VOPXID_MAP_CR(vp, cr);

        iov.iov_base = base;
        iov.iov_len = len;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_loffset = offset;
        uio.uio_segflg = (short)seg;
        uio.uio_resid = len;
        uio.uio_llimit = ulimit;

        /*
         * We have to enter the critical region before calling VOP_RWLOCK
         * to avoid a deadlock with ufs.
         */
        if (nbl_need_check(vp)) {
                int svmand;

                nbl_start_crit(vp, RW_READER);
                in_crit = 1;
                error = nbl_svmand(vp, cr, &svmand);
                if (error != 0)
                        goto done;
                if (nbl_conflict(vp, rw == UIO_WRITE ? NBL_WRITE : NBL_READ,
                    uio.uio_offset, uio.uio_resid, svmand, NULL)) {
                        error = EACCES;
                        goto done;
                }
        }

        (void) VOP_RWLOCK(vp,
            rw == UIO_WRITE ? V_WRITELOCK_TRUE : V_WRITELOCK_FALSE, NULL);
        if (rw == UIO_WRITE) {
                uio.uio_fmode = FWRITE;
                uio.uio_extflg = UIO_COPY_DEFAULT;
                error = VOP_WRITE(vp, &uio, ioflag, cr, NULL);
        } else {
                uio.uio_fmode = FREAD;
                uio.uio_extflg = UIO_COPY_CACHED;
                error = VOP_READ(vp, &uio, ioflag, cr, NULL);
        }
        VOP_RWUNLOCK(vp,
            rw == UIO_WRITE ? V_WRITELOCK_TRUE : V_WRITELOCK_FALSE, NULL);
        if (residp)
                *residp = uio.uio_resid;
        else if (uio.uio_resid)
                error = EIO;

done:
        if (in_crit)
                nbl_end_crit(vp);
        return (error);
}
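
/*
 * Illustrative sketch of a typical in-kernel read through vn_rdwr(); "vp",
 * "buf" and "buflen" are assumed to be set up by the caller, and ulimit is
 * irrelevant for reads:
 *
 *	ssize_t resid;
 *	int err;
 *
 *	err = vn_rdwr(UIO_READ, vp, (caddr_t)buf, buflen, (offset_t)0,
 *	    UIO_SYSSPACE, 0, (rlim64_t)0, CRED(), &resid);
 *
 * On success, buflen - resid bytes were read.
 */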

/*
 * Release a vnode.  Call VOP_INACTIVE on last reference or
 * decrement reference count.
 *
 * To avoid race conditions, the v_count is left at 1 for
 * the call to VOP_INACTIVE. This prevents another thread
 * from reclaiming and releasing the vnode *before* the
 * VOP_INACTIVE routine has a chance to destroy the vnode.
 * We can't have more than 1 thread calling VOP_INACTIVE
 * on a vnode.
 */
void
vn_rele(vnode_t *vp)
{
        VERIFY(vp->v_count > 0);
        mutex_enter(&vp->v_lock);
        if (vp->v_count == 1) {
                mutex_exit(&vp->v_lock);
                VOP_INACTIVE(vp, CRED(), NULL);
                return;
        }
        vp->v_count--;
        mutex_exit(&vp->v_lock);
}
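
/*
 * Illustrative sketch of the usual hold/release pairing; VN_HOLD() and
 * VN_RELE() are the standard wrappers from sys/vnode.h, and the work done
 * in between is up to the caller:
 *
 *	VN_HOLD(vp);
 *	... vp cannot be inactivated while the hold is in place ...
 *	VN_RELE(vp);	(last reference triggers VOP_INACTIVE, as above)
 */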

/*
 * Release a vnode referenced by the DNLC. Multiple DNLC references are treated
 * as a single reference, so v_count is not decremented until the last DNLC hold
 * is released. This makes it possible to distinguish vnodes that are referenced
 * only by the DNLC.
 */
void
vn_rele_dnlc(vnode_t *vp)
{
        VERIFY((vp->v_count > 0) && (vp->v_count_dnlc > 0));
        mutex_enter(&vp->v_lock);
        if (--vp->v_count_dnlc == 0) {
                if (vp->v_count == 1) {
                        mutex_exit(&vp->v_lock);
                        VOP_INACTIVE(vp, CRED(), NULL);
                        return;
                }
                vp->v_count--;
        }
        mutex_exit(&vp->v_lock);
}

/*
 * Like vn_rele() except that it clears v_stream under v_lock.
 * This is used by sockfs when it dismantles the association between
 * the sockfs node and the vnode in the underlying file system.
 * v_lock has to be held to prevent a thread coming through the lookupname
 * path from accessing a stream head that is going away.
 */
void
vn_rele_stream(vnode_t *vp)
{
        VERIFY(vp->v_count > 0);
        mutex_enter(&vp->v_lock);
        vp->v_stream = NULL;
        if (vp->v_count == 1) {
                mutex_exit(&vp->v_lock);
                VOP_INACTIVE(vp, CRED(), NULL);
                return;
        }
        vp->v_count--;
        mutex_exit(&vp->v_lock);
}

static void
vn_rele_inactive(vnode_t *vp)
{
        VOP_INACTIVE(vp, CRED(), NULL);
}

/*
 * Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
 * asynchronously using a taskq. This can avoid deadlocks caused by re-entering
 * the file system as a result of releasing the vnode. Note, file systems
 * already have to handle the race where the vnode's reference count is
 * incremented before the inactive routine is called and does its locking.
 *
 * Warning: Excessive use of this routine can lead to performance problems.
 * This is because taskqs throttle back allocation if too many are created.
 */
void
vn_rele_async(vnode_t *vp, taskq_t *taskq)
{
        VERIFY(vp->v_count > 0);
        mutex_enter(&vp->v_lock);
        if (vp->v_count == 1) {
                mutex_exit(&vp->v_lock);
                VERIFY(taskq_dispatch(taskq, (task_func_t *)vn_rele_inactive,
                    vp, TQ_SLEEP) != NULL);
                return;
        }
        vp->v_count--;
        mutex_exit(&vp->v_lock);
}
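
/*
 * Illustrative sketch of deferring the release to a taskq created by the
 * caller; the taskq name and sizing below are hypothetical:
 *
 *	taskq_t *tq = taskq_create("vn_rele_taskq", 1, minclsyspri,
 *	    1, INT_MAX, TASKQ_PREPOPULATE);
 *
 *	vn_rele_async(vp, tq);	(VOP_INACTIVE, if needed, runs in the taskq)
 */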

int
vn_open(
        char *pnamep,
        enum uio_seg seg,
        int filemode,
        int createmode,
        struct vnode **vpp,
        enum create crwhy,
        mode_t umask)
{
        return (vn_openat(pnamep, seg, filemode, createmode, vpp, crwhy,
            umask, NULL, -1));
}


/*
 * Open/create a vnode.
 * This may be called from within the kernel; the only known use of user
 * context is that the current user's credentials are used for permission
 * checks.  crwhy is defined iff filemode & FCREAT.
 */
int
vn_openat(
        char *pnamep,
        enum uio_seg seg,
        int filemode,
        int createmode,
        struct vnode **vpp,
        enum create crwhy,
        mode_t umask,
        struct vnode *startvp,
        int fd)
{
        struct vnode *vp;
        int mode;
        int accessflags;
        int error;
        int in_crit = 0;
        int open_done = 0;
        int shrlock_done = 0;
        struct vattr vattr;
        enum symfollow follow;
        int estale_retry = 0;
        struct shrlock shr;
        struct shr_locowner shr_own;

        mode = 0;
        accessflags = 0;
        if (filemode & FREAD)
                mode |= VREAD;
        if (filemode & (FWRITE|FTRUNC))
                mode |= VWRITE;
        if (filemode & (FSEARCH|FEXEC|FXATTRDIROPEN))
                mode |= VEXEC;

        /* symlink interpretation */
        if (filemode & FNOFOLLOW)
                follow = NO_FOLLOW;
        else
                follow = FOLLOW;

        if (filemode & FAPPEND)
                accessflags |= V_APPEND;

top:
        if (filemode & FCREAT) {
                enum vcexcl excl;

                /*
                 * Wish to create a file.
                 */
                vattr.va_type = VREG;
                vattr.va_mode = createmode;
                vattr.va_mask = AT_TYPE|AT_MODE;
                if (filemode & FTRUNC) {
                        vattr.va_size = 0;
                        vattr.va_mask |= AT_SIZE;
                }
                if (filemode & FEXCL)
                        excl = EXCL;
                else
                        excl = NONEXCL;

                if (error =
                    vn_createat(pnamep, seg, &vattr, excl, mode, &vp, crwhy,
                    (filemode & ~(FTRUNC|FEXCL)), umask, startvp))
                        return (error);
        } else {
                /*
                 * Wish to open a file.  Just look it up.
                 */
                if (error = lookupnameat(pnamep, seg, follow,
                    NULLVPP, &vp, startvp)) {
                        if ((error == ESTALE) &&
                            fs_need_estale_retry(estale_retry++))
                                goto top;
                        return (error);
                }

                /*
                 * Get the attributes to check whether file is large.
                 * We do this only if the FOFFMAX flag is not set and
                 * only for regular files.
                 */

                if (!(filemode & FOFFMAX) && (vp->v_type == VREG)) {
                        vattr.va_mask = AT_SIZE;
                        if ((error = VOP_GETATTR(vp, &vattr, 0,
                            CRED(), NULL))) {
                                goto out;
                        }
                        if (vattr.va_size > (u_offset_t)MAXOFF32_T) {
                                /*
                                 * Large File API - regular open fails
                                 * if FOFFMAX flag is set in file mode
                                 */
                                error = EOVERFLOW;
                                goto out;
                        }
                }
                /*
                 * Can't write directories, active texts, or
                 * read-only filesystems.  Can't truncate files
                 * on which mandatory locking is in effect.
                 */
                if (filemode & (FWRITE|FTRUNC)) {
                        /*
                         * Allow writable directory if VDIROPEN flag is set.
                         */
                        if (vp->v_type == VDIR && !(vp->v_flag & VDIROPEN)) {
                                error = EISDIR;
                                goto out;
                        }
                        if (ISROFILE(vp)) {
                                error = EROFS;
                                goto out;
                        }
                        /*
                         * Can't truncate files on which
                         * sysv mandatory locking is in effect.
                         */
                        if (filemode & FTRUNC) {
                                vnode_t *rvp;

                                if (VOP_REALVP(vp, &rvp, NULL) != 0)
                                        rvp = vp;
                                if (rvp->v_filocks != NULL) {
                                        vattr.va_mask = AT_MODE;
                                        if ((error = VOP_GETATTR(vp,
                                            &vattr, 0, CRED(), NULL)) == 0 &&
                                            MANDLOCK(vp, vattr.va_mode))
                                                error = EAGAIN;
                                }
                        }
                        if (error)
                                goto out;
                }
                /*
                 * Check permissions.
                 */
                if (error = VOP_ACCESS(vp, mode, accessflags, CRED(), NULL))
                        goto out;
                /*
                 * Require FSEARCH to return a directory.
                 * Require FEXEC to return a regular file.
                 */
                if ((filemode & FSEARCH) && vp->v_type != VDIR) {
                        error = ENOTDIR;
                        goto out;
                }
                if ((filemode & FEXEC) && vp->v_type != VREG) {
                        error = ENOEXEC;	/* XXX: error code? */
                        goto out;
                }
        }

        /*
         * Do remaining checks for FNOFOLLOW and FNOLINKS.
         */
        if ((filemode & FNOFOLLOW) && vp->v_type == VLNK) {
                error = ELOOP;
                goto out;
        }
        if (filemode & FNOLINKS) {
                vattr.va_mask = AT_NLINK;
                if ((error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))) {
                        goto out;
                }
                if (vattr.va_nlink != 1) {
                        error = EMLINK;
                        goto out;
                }
        }

        /*
         * Opening a socket corresponding to the AF_UNIX pathname
         * in the filesystem name space is not supported.
         * However, VSOCK nodes in namefs are supported in order
         * to make fattach work for sockets.
         *
         * XXX This uses VOP_REALVP to distinguish between
         * an unopened namefs node (where VOP_REALVP returns a
         * different VSOCK vnode) and a VSOCK created by vn_create
         * in some file system (where VOP_REALVP would never return
         * a different vnode).
         */
        if (vp->v_type == VSOCK) {
                struct vnode *nvp;

                error = VOP_REALVP(vp, &nvp, NULL);
                if (error != 0 || nvp == NULL || nvp == vp ||
                    nvp->v_type != VSOCK) {
                        error = EOPNOTSUPP;
                        goto out;
                }
        }

        if ((vp->v_type == VREG) && nbl_need_check(vp)) {
                /* get share reservation */
                shr.s_access = 0;
                if (filemode & FWRITE)
                        shr.s_access |= F_WRACC;
                if (filemode & FREAD)
                        shr.s_access |= F_RDACC;
                shr.s_deny = 0;
                shr.s_sysid = 0;
                shr.s_pid = ttoproc(curthread)->p_pid;
                shr_own.sl_pid = shr.s_pid;
                shr_own.sl_id = fd;
                shr.s_own_len = sizeof (shr_own);
                shr.s_owner = (caddr_t)&shr_own;
                error = VOP_SHRLOCK(vp, F_SHARE_NBMAND, &shr, filemode, CRED(),
                    NULL);
                if (error)
                        goto out;
                shrlock_done = 1;

                /* nbmand conflict check if truncating file */
                if ((filemode & FTRUNC) && !(filemode & FCREAT)) {
                        nbl_start_crit(vp, RW_READER);
                        in_crit = 1;

                        vattr.va_mask = AT_SIZE;
                        if (error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
                                goto out;
                        if (nbl_conflict(vp, NBL_WRITE, 0, vattr.va_size, 0,
                            NULL)) {
                                error = EACCES;
                                goto out;
                        }
                }
        }

        /*
         * Do opening protocol.
         */
        error = VOP_OPEN(&vp, filemode, CRED(), NULL);
        if (error)
                goto out;
        open_done = 1;

        /*
         * Truncate if required.
         */
        if ((filemode & FTRUNC) && !(filemode & FCREAT)) {
                vattr.va_size = 0;
                vattr.va_mask = AT_SIZE;
                if ((error = VOP_SETATTR(vp, &vattr, 0, CRED(), NULL)) != 0)
                        goto out;
        }
out:
        ASSERT(vp->v_count > 0);

        if (in_crit) {
                nbl_end_crit(vp);
                in_crit = 0;
        }
        if (error) {
                if (open_done) {
                        (void) VOP_CLOSE(vp, filemode, 1, (offset_t)0, CRED(),
                            NULL);
                        open_done = 0;
                        shrlock_done = 0;
                }
                if (shrlock_done) {
                        (void) VOP_SHRLOCK(vp, F_UNSHARE, &shr, 0, CRED(),
                            NULL);
                        shrlock_done = 0;
                }

                /*
                 * The following clause was added to handle a problem
                 * with NFS consistency.  It is possible that a lookup
                 * of the file to be opened succeeded, but the file
                 * itself doesn't actually exist on the server.  This
                 * is chiefly due to the DNLC containing an entry for
                 * the file which has been removed on the server.  In
                 * this case, we just start over.  If there was some
                 * other cause for the ESTALE error, then the lookup
                 * of the file will fail and the error will be returned
                 * above instead of looping around from here.
                 */
                VN_RELE(vp);
                if ((error == ESTALE) && fs_need_estale_retry(estale_retry++))
                        goto top;
        } else
                *vpp = vp;
        return (error);
}
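
/*
 * Illustrative sketch of a typical in-kernel open/use/close sequence built
 * on vn_open(); the path and flags are examples only:
 *
 *	vnode_t *vp;
 *	int err;
 *
 *	err = vn_open("/etc/motd", UIO_SYSSPACE, FREAD, 0, &vp, 0, 0);
 *	if (err == 0) {
 *		... vn_rdwr() or VOP_*() calls against vp ...
 *		(void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
 *		VN_RELE(vp);
 *	}
 */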
12260Sstevel@tonic-gate
12275331Samw /*
12285331Samw * The following two accessor functions are for the NFSv4 server. Since there
12295331Samw * is no VOP_OPEN_UP/DOWNGRADE we need a way for the NFS server to keep the
12305331Samw * vnode open counts correct when a client "upgrades" an open or does an
12315331Samw * open_downgrade. In NFS, an upgrade or downgrade can not only change the
12325331Samw * open mode (add or subtract read or write), but also change the share/deny
12335331Samw * modes. However, share reservations are not integrated with OPEN, yet, so
12345331Samw * we need to handle each separately. These functions are cleaner than having
12355331Samw * the NFS server manipulate the counts directly, however, nobody else should
12365331Samw * use these functions.
12375331Samw */
12385331Samw void
vn_open_upgrade(vnode_t * vp,int filemode)12395331Samw vn_open_upgrade(
12405331Samw vnode_t *vp,
12415331Samw int filemode)
12425331Samw {
12435331Samw ASSERT(vp->v_type == VREG);
12445331Samw
12455331Samw if (filemode & FREAD)
12465331Samw atomic_add_32(&(vp->v_rdcnt), 1);
12475331Samw if (filemode & FWRITE)
12485331Samw atomic_add_32(&(vp->v_wrcnt), 1);
12495331Samw
12505331Samw }
12515331Samw
12525331Samw void
vn_open_downgrade(vnode_t * vp,int filemode)12535331Samw vn_open_downgrade(
12545331Samw vnode_t *vp,
12555331Samw int filemode)
12565331Samw {
12575331Samw ASSERT(vp->v_type == VREG);
12585331Samw
12595331Samw if (filemode & FREAD) {
12605331Samw ASSERT(vp->v_rdcnt > 0);
12615331Samw atomic_add_32(&(vp->v_rdcnt), -1);
12625331Samw }
12635331Samw if (filemode & FWRITE) {
12645331Samw ASSERT(vp->v_wrcnt > 0);
12655331Samw atomic_add_32(&(vp->v_wrcnt), -1);
12665331Samw }
12675331Samw
12685331Samw }
12695331Samw
12700Sstevel@tonic-gate int
vn_create(char * pnamep,enum uio_seg seg,struct vattr * vap,enum vcexcl excl,int mode,struct vnode ** vpp,enum create why,int flag,mode_t umask)12710Sstevel@tonic-gate vn_create(
12720Sstevel@tonic-gate char *pnamep,
12730Sstevel@tonic-gate enum uio_seg seg,
12740Sstevel@tonic-gate struct vattr *vap,
12750Sstevel@tonic-gate enum vcexcl excl,
12760Sstevel@tonic-gate int mode,
12770Sstevel@tonic-gate struct vnode **vpp,
12780Sstevel@tonic-gate enum create why,
12790Sstevel@tonic-gate int flag,
12800Sstevel@tonic-gate mode_t umask)
12810Sstevel@tonic-gate {
12825331Samw return (vn_createat(pnamep, seg, vap, excl, mode, vpp, why, flag,
12835331Samw umask, NULL));
12840Sstevel@tonic-gate }
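/*
 * Illustrative sketch of a kernel caller creating a regular file.  The
 * path name, mode and flag values below are hypothetical and only show
 * how the arguments fit together; a umask of 0 means none is applied.
 *
 *	struct vattr va;
 *	vnode_t *vp;
 *	int error;
 *
 *	va.va_mask = AT_TYPE | AT_MODE;		// required by vn_createat()
 *	va.va_type = VREG;
 *	va.va_mode = 0600;
 *	error = vn_create("/var/tmp/example", UIO_SYSSPACE, &va, NONEXCL,
 *	    VWRITE, &vp, CRCREAT, FOFFMAX, 0);
 *	if (error == 0)
 *		VN_RELE(vp);
 */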
12850Sstevel@tonic-gate
12860Sstevel@tonic-gate /*
12870Sstevel@tonic-gate * Create a vnode (makenode).
12880Sstevel@tonic-gate */
12890Sstevel@tonic-gate int
12900Sstevel@tonic-gate vn_createat(
12910Sstevel@tonic-gate char *pnamep,
12920Sstevel@tonic-gate enum uio_seg seg,
12930Sstevel@tonic-gate struct vattr *vap,
12940Sstevel@tonic-gate enum vcexcl excl,
12950Sstevel@tonic-gate int mode,
12960Sstevel@tonic-gate struct vnode **vpp,
12970Sstevel@tonic-gate enum create why,
12980Sstevel@tonic-gate int flag,
12990Sstevel@tonic-gate mode_t umask,
13000Sstevel@tonic-gate struct vnode *startvp)
13010Sstevel@tonic-gate {
13020Sstevel@tonic-gate struct vnode *dvp; /* ptr to parent dir vnode */
13030Sstevel@tonic-gate struct vnode *vp = NULL;
13040Sstevel@tonic-gate struct pathname pn;
13050Sstevel@tonic-gate int error;
13060Sstevel@tonic-gate int in_crit = 0;
13070Sstevel@tonic-gate struct vattr vattr;
13080Sstevel@tonic-gate enum symfollow follow;
13092051Sprabahar int estale_retry = 0;
131011861SMarek.Pospisil@Sun.COM uint32_t auditing = AU_AUDITING();
13110Sstevel@tonic-gate
13120Sstevel@tonic-gate ASSERT((vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
13130Sstevel@tonic-gate
13140Sstevel@tonic-gate /* symlink interpretation */
13150Sstevel@tonic-gate if ((flag & FNOFOLLOW) || excl == EXCL)
13160Sstevel@tonic-gate follow = NO_FOLLOW;
13170Sstevel@tonic-gate else
13180Sstevel@tonic-gate follow = FOLLOW;
13190Sstevel@tonic-gate flag &= ~(FNOFOLLOW|FNOLINKS);
13200Sstevel@tonic-gate
13210Sstevel@tonic-gate top:
13220Sstevel@tonic-gate /*
13230Sstevel@tonic-gate * Lookup directory.
13240Sstevel@tonic-gate * If new object is a file, call lower level to create it.
13250Sstevel@tonic-gate * Note that it is up to the lower level to enforce exclusive
13260Sstevel@tonic-gate * creation, if the file is already there.
13270Sstevel@tonic-gate * This allows the lower level to do whatever
13280Sstevel@tonic-gate * locking or protocol that is needed to prevent races.
13290Sstevel@tonic-gate * If the new object is a directory, call the lower level to make
13300Sstevel@tonic-gate * the new directory, with "." and "..".
13310Sstevel@tonic-gate */
13320Sstevel@tonic-gate if (error = pn_get(pnamep, seg, &pn))
13330Sstevel@tonic-gate return (error);
133411861SMarek.Pospisil@Sun.COM if (auditing)
13350Sstevel@tonic-gate audit_vncreate_start();
13360Sstevel@tonic-gate dvp = NULL;
13370Sstevel@tonic-gate *vpp = NULL;
13380Sstevel@tonic-gate /*
13390Sstevel@tonic-gate * lookup will find the parent directory for the vnode.
13400Sstevel@tonic-gate * When it is done the pn holds the name of the entry
13410Sstevel@tonic-gate * in the directory.
13420Sstevel@tonic-gate * If this is a non-exclusive create we also find the node itself.
13430Sstevel@tonic-gate */
13440Sstevel@tonic-gate error = lookuppnat(&pn, NULL, follow, &dvp,
13450Sstevel@tonic-gate (excl == EXCL) ? NULLVPP : vpp, startvp);
13460Sstevel@tonic-gate if (error) {
13470Sstevel@tonic-gate pn_free(&pn);
13482051Sprabahar if ((error == ESTALE) && fs_need_estale_retry(estale_retry++))
13490Sstevel@tonic-gate goto top;
13500Sstevel@tonic-gate if (why == CRMKDIR && error == EINVAL)
13510Sstevel@tonic-gate error = EEXIST; /* SVID */
13520Sstevel@tonic-gate return (error);
13530Sstevel@tonic-gate }
13540Sstevel@tonic-gate
13550Sstevel@tonic-gate if (why != CRMKNOD)
13560Sstevel@tonic-gate vap->va_mode &= ~VSVTX;
13570Sstevel@tonic-gate
13580Sstevel@tonic-gate /*
13590Sstevel@tonic-gate * If a umask was passed, apply it only when no default ACLs are
13600Sstevel@tonic-gate * defined for the directory.
13610Sstevel@tonic-gate */
13620Sstevel@tonic-gate
13630Sstevel@tonic-gate if (umask) {
13640Sstevel@tonic-gate
13650Sstevel@tonic-gate vsecattr_t vsec;
13660Sstevel@tonic-gate
13670Sstevel@tonic-gate vsec.vsa_aclcnt = 0;
13680Sstevel@tonic-gate vsec.vsa_aclentp = NULL;
13690Sstevel@tonic-gate vsec.vsa_dfaclcnt = 0;
13700Sstevel@tonic-gate vsec.vsa_dfaclentp = NULL;
13710Sstevel@tonic-gate vsec.vsa_mask = VSA_DFACLCNT;
13725331Samw error = VOP_GETSECATTR(dvp, &vsec, 0, CRED(), NULL);
1373789Sahrens /*
1374789Sahrens * If error is ENOSYS, treat it as no error; we don't
1375789Sahrens * want to force all file systems to support the
1376789Sahrens * aclent_t style of ACLs.
1377789Sahrens */
1378789Sahrens if (error == ENOSYS)
1379789Sahrens error = 0;
1380789Sahrens if (error) {
13810Sstevel@tonic-gate if (*vpp != NULL)
13820Sstevel@tonic-gate VN_RELE(*vpp);
13830Sstevel@tonic-gate goto out;
1384789Sahrens } else {
1385789Sahrens /*
1386789Sahrens * Apply the umask if no default ACLs.
1387789Sahrens */
1388789Sahrens if (vsec.vsa_dfaclcnt == 0)
1389789Sahrens vap->va_mode &= ~umask;
13900Sstevel@tonic-gate
1391789Sahrens /*
1392789Sahrens * VOP_GETSECATTR() may have allocated memory for
1393789Sahrens * ACLs we didn't request, so double-check and
1394789Sahrens * free it if necessary.
1395789Sahrens */
1396789Sahrens if (vsec.vsa_aclcnt && vsec.vsa_aclentp != NULL)
1397789Sahrens kmem_free((caddr_t)vsec.vsa_aclentp,
1398789Sahrens vsec.vsa_aclcnt * sizeof (aclent_t));
1399789Sahrens if (vsec.vsa_dfaclcnt && vsec.vsa_dfaclentp != NULL)
1400789Sahrens kmem_free((caddr_t)vsec.vsa_dfaclentp,
1401789Sahrens vsec.vsa_dfaclcnt * sizeof (aclent_t));
1402789Sahrens }
14030Sstevel@tonic-gate }
14040Sstevel@tonic-gate
14050Sstevel@tonic-gate /*
14060Sstevel@tonic-gate * In general we want to generate EROFS if the file system is
14070Sstevel@tonic-gate * readonly. However, POSIX (IEEE Std. 1003.1) section 5.3.1
14080Sstevel@tonic-gate * documents the open system call, and it says that O_CREAT has no
14090Sstevel@tonic-gate * effect if the file already exists. Bug 1119649 states
14100Sstevel@tonic-gate * that open(path, O_CREAT, ...) fails when attempting to open an
14110Sstevel@tonic-gate * existing file on a read only file system. Thus, the first part
14120Sstevel@tonic-gate * of the following if statement has 3 checks:
14130Sstevel@tonic-gate * if the file exists &&
14140Sstevel@tonic-gate * it is being open with write access &&
14150Sstevel@tonic-gate * the file system is read only
14160Sstevel@tonic-gate * then generate EROFS
14170Sstevel@tonic-gate */
14180Sstevel@tonic-gate if ((*vpp != NULL && (mode & VWRITE) && ISROFILE(*vpp)) ||
14190Sstevel@tonic-gate (*vpp == NULL && dvp->v_vfsp->vfs_flag & VFS_RDONLY)) {
14200Sstevel@tonic-gate if (*vpp)
14210Sstevel@tonic-gate VN_RELE(*vpp);
14220Sstevel@tonic-gate error = EROFS;
14230Sstevel@tonic-gate } else if (excl == NONEXCL && *vpp != NULL) {
14240Sstevel@tonic-gate vnode_t *rvp;
14250Sstevel@tonic-gate
14260Sstevel@tonic-gate /*
14270Sstevel@tonic-gate * File already exists. If a mandatory lock has been
14280Sstevel@tonic-gate * applied, return error.
14290Sstevel@tonic-gate */
14300Sstevel@tonic-gate vp = *vpp;
14315331Samw if (VOP_REALVP(vp, &rvp, NULL) != 0)
14320Sstevel@tonic-gate rvp = vp;
14330Sstevel@tonic-gate if ((vap->va_mask & AT_SIZE) && nbl_need_check(vp)) {
14340Sstevel@tonic-gate nbl_start_crit(vp, RW_READER);
14350Sstevel@tonic-gate in_crit = 1;
14360Sstevel@tonic-gate }
14370Sstevel@tonic-gate if (rvp->v_filocks != NULL || rvp->v_shrlocks != NULL) {
14380Sstevel@tonic-gate vattr.va_mask = AT_MODE|AT_SIZE;
14395331Samw if (error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL)) {
14400Sstevel@tonic-gate goto out;
14410Sstevel@tonic-gate }
14420Sstevel@tonic-gate if (MANDLOCK(vp, vattr.va_mode)) {
14430Sstevel@tonic-gate error = EAGAIN;
14440Sstevel@tonic-gate goto out;
14450Sstevel@tonic-gate }
14460Sstevel@tonic-gate /*
14470Sstevel@tonic-gate * File cannot be truncated if non-blocking mandatory
14480Sstevel@tonic-gate * locks are currently on the file.
14490Sstevel@tonic-gate */
14500Sstevel@tonic-gate if ((vap->va_mask & AT_SIZE) && in_crit) {
14510Sstevel@tonic-gate u_offset_t offset;
14520Sstevel@tonic-gate ssize_t length;
14530Sstevel@tonic-gate
14540Sstevel@tonic-gate offset = vap->va_size > vattr.va_size ?
14554956Spf199842 vattr.va_size : vap->va_size;
14560Sstevel@tonic-gate length = vap->va_size > vattr.va_size ?
14574956Spf199842 vap->va_size - vattr.va_size :
14584956Spf199842 vattr.va_size - vap->va_size;
14590Sstevel@tonic-gate if (nbl_conflict(vp, NBL_WRITE, offset,
14605331Samw length, 0, NULL)) {
14610Sstevel@tonic-gate error = EACCES;
14620Sstevel@tonic-gate goto out;
14630Sstevel@tonic-gate }
14640Sstevel@tonic-gate }
14650Sstevel@tonic-gate }
14660Sstevel@tonic-gate
14670Sstevel@tonic-gate /*
14680Sstevel@tonic-gate * If the file is the root of a VFS, we've crossed a
14690Sstevel@tonic-gate * mount point and the "containing" directory that we
14700Sstevel@tonic-gate * acquired above (dvp) is irrelevant because it's in
14710Sstevel@tonic-gate * a different file system. We apply VOP_CREATE to the
14720Sstevel@tonic-gate * target itself instead of to the containing directory
14730Sstevel@tonic-gate * and supply a null path name to indicate (conventionally)
14740Sstevel@tonic-gate * the node itself as the "component" of interest.
14750Sstevel@tonic-gate *
14760Sstevel@tonic-gate * The intercession of the file system is necessary to
14770Sstevel@tonic-gate * ensure that the appropriate permission checks are
14780Sstevel@tonic-gate * done.
14790Sstevel@tonic-gate */
14800Sstevel@tonic-gate if (vp->v_flag & VROOT) {
14810Sstevel@tonic-gate ASSERT(why != CRMKDIR);
14825331Samw error = VOP_CREATE(vp, "", vap, excl, mode, vpp,
14835331Samw CRED(), flag, NULL, NULL);
14840Sstevel@tonic-gate /*
14850Sstevel@tonic-gate * If the create succeeded, it will have created
14860Sstevel@tonic-gate * a new reference to the vnode. Give up the
14870Sstevel@tonic-gate * original reference. The assertion should not
14880Sstevel@tonic-gate * get triggered because NBMAND locks only apply to
14890Sstevel@tonic-gate * VREG files. And if in_crit is non-zero for some
14900Sstevel@tonic-gate * reason, detect that here, rather than when we
14910Sstevel@tonic-gate * dereference a null vp.
14920Sstevel@tonic-gate */
14930Sstevel@tonic-gate ASSERT(in_crit == 0);
14940Sstevel@tonic-gate VN_RELE(vp);
14950Sstevel@tonic-gate vp = NULL;
14960Sstevel@tonic-gate goto out;
14970Sstevel@tonic-gate }
14980Sstevel@tonic-gate
14990Sstevel@tonic-gate /*
15000Sstevel@tonic-gate * Large File API - non-large open (FOFFMAX flag not set)
15010Sstevel@tonic-gate * of regular file fails if the file size exceeds MAXOFF32_T.
15020Sstevel@tonic-gate */
15030Sstevel@tonic-gate if (why != CRMKDIR &&
15040Sstevel@tonic-gate !(flag & FOFFMAX) &&
15050Sstevel@tonic-gate (vp->v_type == VREG)) {
15060Sstevel@tonic-gate vattr.va_mask = AT_SIZE;
15075331Samw if ((error = VOP_GETATTR(vp, &vattr, 0,
15085331Samw CRED(), NULL))) {
15090Sstevel@tonic-gate goto out;
15100Sstevel@tonic-gate }
15110Sstevel@tonic-gate if ((vattr.va_size > (u_offset_t)MAXOFF32_T)) {
15120Sstevel@tonic-gate error = EOVERFLOW;
15130Sstevel@tonic-gate goto out;
15140Sstevel@tonic-gate }
15150Sstevel@tonic-gate }
15160Sstevel@tonic-gate }
15170Sstevel@tonic-gate
15180Sstevel@tonic-gate if (error == 0) {
15190Sstevel@tonic-gate /*
15200Sstevel@tonic-gate * Call mkdir() if specified, otherwise create().
15210Sstevel@tonic-gate */
15220Sstevel@tonic-gate int must_be_dir = pn_fixslash(&pn); /* trailing '/'? */
15230Sstevel@tonic-gate
15240Sstevel@tonic-gate if (why == CRMKDIR)
15255331Samw /*
15265331Samw * N.B., if vn_createat() ever requests
15275331Samw * case-insensitive behavior then it will need
15285331Samw * to be passed to VOP_MKDIR(). VOP_CREATE()
15295331Samw * will already get it via "flag"
15305331Samw */
15315331Samw error = VOP_MKDIR(dvp, pn.pn_path, vap, vpp, CRED(),
15325331Samw NULL, 0, NULL);
15330Sstevel@tonic-gate else if (!must_be_dir)
15340Sstevel@tonic-gate error = VOP_CREATE(dvp, pn.pn_path, vap,
15355331Samw excl, mode, vpp, CRED(), flag, NULL, NULL);
15360Sstevel@tonic-gate else
15370Sstevel@tonic-gate error = ENOTDIR;
15380Sstevel@tonic-gate }
15390Sstevel@tonic-gate
15400Sstevel@tonic-gate out:
15410Sstevel@tonic-gate
154211861SMarek.Pospisil@Sun.COM if (auditing)
15430Sstevel@tonic-gate audit_vncreate_finish(*vpp, error);
15440Sstevel@tonic-gate if (in_crit) {
15450Sstevel@tonic-gate nbl_end_crit(vp);
15460Sstevel@tonic-gate in_crit = 0;
15470Sstevel@tonic-gate }
15480Sstevel@tonic-gate if (vp != NULL) {
15490Sstevel@tonic-gate VN_RELE(vp);
15500Sstevel@tonic-gate vp = NULL;
15510Sstevel@tonic-gate }
15520Sstevel@tonic-gate pn_free(&pn);
15530Sstevel@tonic-gate VN_RELE(dvp);
15540Sstevel@tonic-gate /*
15550Sstevel@tonic-gate * The following clause was added to handle a problem
15560Sstevel@tonic-gate * with NFS consistency. It is possible that a lookup
15570Sstevel@tonic-gate * of the file to be created succeeded, but the file
15580Sstevel@tonic-gate * itself doesn't actually exist on the server. This
15590Sstevel@tonic-gate * is chiefly due to the DNLC containing an entry for
15600Sstevel@tonic-gate * the file which has been removed on the server. In
15610Sstevel@tonic-gate * this case, we just start over. If there was some
15620Sstevel@tonic-gate * other cause for the ESTALE error, then the lookup
15630Sstevel@tonic-gate * of the file will fail and the error will be returned
15640Sstevel@tonic-gate * above instead of looping around from here.
15650Sstevel@tonic-gate */
15662051Sprabahar if ((error == ESTALE) && fs_need_estale_retry(estale_retry++))
15670Sstevel@tonic-gate goto top;
15680Sstevel@tonic-gate return (error);
15690Sstevel@tonic-gate }
15700Sstevel@tonic-gate
15710Sstevel@tonic-gate int
15720Sstevel@tonic-gate vn_link(char *from, char *to, enum uio_seg seg)
15730Sstevel@tonic-gate {
1574*12789SRoger.Faulkner@Oracle.COM return (vn_linkat(NULL, from, NO_FOLLOW, NULL, to, seg));
1575*12789SRoger.Faulkner@Oracle.COM }
1576*12789SRoger.Faulkner@Oracle.COM
1577*12789SRoger.Faulkner@Oracle.COM int
1578*12789SRoger.Faulkner@Oracle.COM vn_linkat(vnode_t *fstartvp, char *from, enum symfollow follow,
1579*12789SRoger.Faulkner@Oracle.COM vnode_t *tstartvp, char *to, enum uio_seg seg)
1580*12789SRoger.Faulkner@Oracle.COM {
15810Sstevel@tonic-gate struct vnode *fvp; /* from vnode ptr */
15820Sstevel@tonic-gate struct vnode *tdvp; /* to directory vnode ptr */
15830Sstevel@tonic-gate struct pathname pn;
15840Sstevel@tonic-gate int error;
15850Sstevel@tonic-gate struct vattr vattr;
15860Sstevel@tonic-gate dev_t fsid;
15872051Sprabahar int estale_retry = 0;
1588*12789SRoger.Faulkner@Oracle.COM uint32_t auditing = AU_AUDITING();
15890Sstevel@tonic-gate
15900Sstevel@tonic-gate top:
15910Sstevel@tonic-gate fvp = tdvp = NULL;
15920Sstevel@tonic-gate if (error = pn_get(to, seg, &pn))
15930Sstevel@tonic-gate return (error);
1594*12789SRoger.Faulkner@Oracle.COM if (auditing && fstartvp != NULL)
1595*12789SRoger.Faulkner@Oracle.COM audit_setfsat_path(1);
1596*12789SRoger.Faulkner@Oracle.COM if (error = lookupnameat(from, seg, follow, NULLVPP, &fvp, fstartvp))
15970Sstevel@tonic-gate goto out;
1598*12789SRoger.Faulkner@Oracle.COM if (auditing && tstartvp != NULL)
1599*12789SRoger.Faulkner@Oracle.COM audit_setfsat_path(3);
1600*12789SRoger.Faulkner@Oracle.COM if (error = lookuppnat(&pn, NULL, NO_FOLLOW, &tdvp, NULLVPP, tstartvp))
16010Sstevel@tonic-gate goto out;
16020Sstevel@tonic-gate /*
16030Sstevel@tonic-gate * Make sure both source vnode and target directory vnode are
16040Sstevel@tonic-gate * in the same vfs and that it is writeable.
16050Sstevel@tonic-gate */
16060Sstevel@tonic-gate vattr.va_mask = AT_FSID;
16075331Samw if (error = VOP_GETATTR(fvp, &vattr, 0, CRED(), NULL))
16080Sstevel@tonic-gate goto out;
16090Sstevel@tonic-gate fsid = vattr.va_fsid;
16100Sstevel@tonic-gate vattr.va_mask = AT_FSID;
16115331Samw if (error = VOP_GETATTR(tdvp, &vattr, 0, CRED(), NULL))
16120Sstevel@tonic-gate goto out;
16130Sstevel@tonic-gate if (fsid != vattr.va_fsid) {
16140Sstevel@tonic-gate error = EXDEV;
16150Sstevel@tonic-gate goto out;
16160Sstevel@tonic-gate }
16170Sstevel@tonic-gate if (tdvp->v_vfsp->vfs_flag & VFS_RDONLY) {
16180Sstevel@tonic-gate error = EROFS;
16190Sstevel@tonic-gate goto out;
16200Sstevel@tonic-gate }
16210Sstevel@tonic-gate /*
16220Sstevel@tonic-gate * Do the link.
16230Sstevel@tonic-gate */
16240Sstevel@tonic-gate (void) pn_fixslash(&pn);
16255331Samw error = VOP_LINK(tdvp, fvp, pn.pn_path, CRED(), NULL, 0);
16260Sstevel@tonic-gate out:
16270Sstevel@tonic-gate pn_free(&pn);
16280Sstevel@tonic-gate if (fvp)
16290Sstevel@tonic-gate VN_RELE(fvp);
16300Sstevel@tonic-gate if (tdvp)
16310Sstevel@tonic-gate VN_RELE(tdvp);
16322051Sprabahar if ((error == ESTALE) && fs_need_estale_retry(estale_retry++))
16330Sstevel@tonic-gate goto top;
16340Sstevel@tonic-gate return (error);
16350Sstevel@tonic-gate }
16360Sstevel@tonic-gate
16370Sstevel@tonic-gate int
16380Sstevel@tonic-gate vn_rename(char *from, char *to, enum uio_seg seg)
16390Sstevel@tonic-gate {
16400Sstevel@tonic-gate return (vn_renameat(NULL, from, NULL, to, seg));
16410Sstevel@tonic-gate }
16420Sstevel@tonic-gate
16430Sstevel@tonic-gate int
16440Sstevel@tonic-gate vn_renameat(vnode_t *fdvp, char *fname, vnode_t *tdvp,
16450Sstevel@tonic-gate char *tname, enum uio_seg seg)
16460Sstevel@tonic-gate {
16470Sstevel@tonic-gate int error;
16480Sstevel@tonic-gate struct vattr vattr;
16490Sstevel@tonic-gate struct pathname fpn; /* from pathname */
16500Sstevel@tonic-gate struct pathname tpn; /* to pathname */
16510Sstevel@tonic-gate dev_t fsid;
16525331Samw int in_crit_src, in_crit_targ;
16530Sstevel@tonic-gate vnode_t *fromvp, *fvp;
16545331Samw vnode_t *tovp, *targvp;
16552051Sprabahar int estale_retry = 0;
165611861SMarek.Pospisil@Sun.COM uint32_t auditing = AU_AUDITING();
16570Sstevel@tonic-gate
16580Sstevel@tonic-gate top:
16595331Samw fvp = fromvp = tovp = targvp = NULL;
16605331Samw in_crit_src = in_crit_targ = 0;
16610Sstevel@tonic-gate /*
16620Sstevel@tonic-gate * Get to and from pathnames.
16630Sstevel@tonic-gate */
16640Sstevel@tonic-gate if (error = pn_get(fname, seg, &fpn))
16650Sstevel@tonic-gate return (error);
16660Sstevel@tonic-gate if (error = pn_get(tname, seg, &tpn)) {
16670Sstevel@tonic-gate pn_free(&fpn);
16680Sstevel@tonic-gate return (error);
16690Sstevel@tonic-gate }
16700Sstevel@tonic-gate
16710Sstevel@tonic-gate /*
16720Sstevel@tonic-gate * First we need to resolve the correct directories
16730Sstevel@tonic-gate * The passed in directories may only be a starting point,
16740Sstevel@tonic-gate * but we need the real directories the file(s) live in.
16750Sstevel@tonic-gate * For example the fname may be something like usr/lib/sparc
16760Sstevel@tonic-gate * and we were passed in the / directory, but we need to
16770Sstevel@tonic-gate * use the lib directory for the rename.
16780Sstevel@tonic-gate */
16790Sstevel@tonic-gate
1680*12789SRoger.Faulkner@Oracle.COM if (auditing && fdvp != NULL)
16810Sstevel@tonic-gate audit_setfsat_path(1);
16820Sstevel@tonic-gate /*
16830Sstevel@tonic-gate * Lookup to and from directories.
16840Sstevel@tonic-gate */
16850Sstevel@tonic-gate if (error = lookuppnat(&fpn, NULL, NO_FOLLOW, &fromvp, &fvp, fdvp)) {
16860Sstevel@tonic-gate goto out;
16870Sstevel@tonic-gate }
16880Sstevel@tonic-gate
16890Sstevel@tonic-gate /*
16900Sstevel@tonic-gate * Make sure there is an entry.
16910Sstevel@tonic-gate */
16920Sstevel@tonic-gate if (fvp == NULL) {
16930Sstevel@tonic-gate error = ENOENT;
16940Sstevel@tonic-gate goto out;
16950Sstevel@tonic-gate }
16960Sstevel@tonic-gate
1697*12789SRoger.Faulkner@Oracle.COM if (auditing && tdvp != NULL)
16980Sstevel@tonic-gate audit_setfsat_path(3);
16995331Samw if (error = lookuppnat(&tpn, NULL, NO_FOLLOW, &tovp, &targvp, tdvp)) {
17000Sstevel@tonic-gate goto out;
17010Sstevel@tonic-gate }
17020Sstevel@tonic-gate
17030Sstevel@tonic-gate /*
17040Sstevel@tonic-gate * Make sure both the from vnode directory and the to directory
17050Sstevel@tonic-gate * are in the same vfs and the to directory is writable.
17060Sstevel@tonic-gate * We check fsid's, not vfs pointers, so loopback fs works.
17070Sstevel@tonic-gate */
17080Sstevel@tonic-gate if (fromvp != tovp) {
17090Sstevel@tonic-gate vattr.va_mask = AT_FSID;
17105331Samw if (error = VOP_GETATTR(fromvp, &vattr, 0, CRED(), NULL))
17110Sstevel@tonic-gate goto out;
17120Sstevel@tonic-gate fsid = vattr.va_fsid;
17130Sstevel@tonic-gate vattr.va_mask = AT_FSID;
17145331Samw if (error = VOP_GETATTR(tovp, &vattr, 0, CRED(), NULL))
17150Sstevel@tonic-gate goto out;
17160Sstevel@tonic-gate if (fsid != vattr.va_fsid) {
17170Sstevel@tonic-gate error = EXDEV;
17180Sstevel@tonic-gate goto out;
17190Sstevel@tonic-gate }
17200Sstevel@tonic-gate }
17210Sstevel@tonic-gate
17220Sstevel@tonic-gate if (tovp->v_vfsp->vfs_flag & VFS_RDONLY) {
17230Sstevel@tonic-gate error = EROFS;
17240Sstevel@tonic-gate goto out;
17250Sstevel@tonic-gate }
17260Sstevel@tonic-gate
17275331Samw if (targvp && (fvp != targvp)) {
17285331Samw nbl_start_crit(targvp, RW_READER);
17295331Samw in_crit_targ = 1;
17305331Samw if (nbl_conflict(targvp, NBL_REMOVE, 0, 0, 0, NULL)) {
17315331Samw error = EACCES;
17325331Samw goto out;
17335331Samw }
17345331Samw }
17355331Samw
17360Sstevel@tonic-gate if (nbl_need_check(fvp)) {
17370Sstevel@tonic-gate nbl_start_crit(fvp, RW_READER);
17385331Samw in_crit_src = 1;
17395331Samw if (nbl_conflict(fvp, NBL_RENAME, 0, 0, 0, NULL)) {
17400Sstevel@tonic-gate error = EACCES;
17410Sstevel@tonic-gate goto out;
17420Sstevel@tonic-gate }
17430Sstevel@tonic-gate }
17440Sstevel@tonic-gate
17450Sstevel@tonic-gate /*
17460Sstevel@tonic-gate * Do the rename.
17470Sstevel@tonic-gate */
17480Sstevel@tonic-gate (void) pn_fixslash(&tpn);
17495331Samw error = VOP_RENAME(fromvp, fpn.pn_path, tovp, tpn.pn_path, CRED(),
17505331Samw NULL, 0);
17510Sstevel@tonic-gate
17520Sstevel@tonic-gate out:
17530Sstevel@tonic-gate pn_free(&fpn);
17540Sstevel@tonic-gate pn_free(&tpn);
17555331Samw if (in_crit_src)
17560Sstevel@tonic-gate nbl_end_crit(fvp);
17575331Samw if (in_crit_targ)
17585331Samw nbl_end_crit(targvp);
17590Sstevel@tonic-gate if (fromvp)
17600Sstevel@tonic-gate VN_RELE(fromvp);
17610Sstevel@tonic-gate if (tovp)
17620Sstevel@tonic-gate VN_RELE(tovp);
17635331Samw if (targvp)
17645331Samw VN_RELE(targvp);
17650Sstevel@tonic-gate if (fvp)
17660Sstevel@tonic-gate VN_RELE(fvp);
17672051Sprabahar if ((error == ESTALE) && fs_need_estale_retry(estale_retry++))
17680Sstevel@tonic-gate goto top;
17690Sstevel@tonic-gate return (error);
17700Sstevel@tonic-gate }
17710Sstevel@tonic-gate
17720Sstevel@tonic-gate /*
17730Sstevel@tonic-gate * Remove a file or directory.
17740Sstevel@tonic-gate */
17750Sstevel@tonic-gate int
17760Sstevel@tonic-gate vn_remove(char *fnamep, enum uio_seg seg, enum rm dirflag)
17770Sstevel@tonic-gate {
17780Sstevel@tonic-gate return (vn_removeat(NULL, fnamep, seg, dirflag));
17790Sstevel@tonic-gate }
17800Sstevel@tonic-gate
17810Sstevel@tonic-gate int
17820Sstevel@tonic-gate vn_removeat(vnode_t *startvp, char *fnamep, enum uio_seg seg, enum rm dirflag)
17830Sstevel@tonic-gate {
17840Sstevel@tonic-gate struct vnode *vp; /* entry vnode */
17850Sstevel@tonic-gate struct vnode *dvp; /* ptr to parent dir vnode */
17860Sstevel@tonic-gate struct vnode *coveredvp;
17870Sstevel@tonic-gate struct pathname pn; /* name of entry */
17880Sstevel@tonic-gate enum vtype vtype;
17890Sstevel@tonic-gate int error;
17900Sstevel@tonic-gate struct vfs *vfsp;
17910Sstevel@tonic-gate struct vfs *dvfsp; /* ptr to parent dir vfs */
17920Sstevel@tonic-gate int in_crit = 0;
17932051Sprabahar int estale_retry = 0;
17940Sstevel@tonic-gate
17950Sstevel@tonic-gate top:
17960Sstevel@tonic-gate if (error = pn_get(fnamep, seg, &pn))
17970Sstevel@tonic-gate return (error);
17980Sstevel@tonic-gate dvp = vp = NULL;
17990Sstevel@tonic-gate if (error = lookuppnat(&pn, NULL, NO_FOLLOW, &dvp, &vp, startvp)) {
18000Sstevel@tonic-gate pn_free(&pn);
18012051Sprabahar if ((error == ESTALE) && fs_need_estale_retry(estale_retry++))
18020Sstevel@tonic-gate goto top;
18030Sstevel@tonic-gate return (error);
18040Sstevel@tonic-gate }
18050Sstevel@tonic-gate
18060Sstevel@tonic-gate /*
18070Sstevel@tonic-gate * Make sure there is an entry.
18080Sstevel@tonic-gate */
18090Sstevel@tonic-gate if (vp == NULL) {
18100Sstevel@tonic-gate error = ENOENT;
18110Sstevel@tonic-gate goto out;
18120Sstevel@tonic-gate }
18130Sstevel@tonic-gate
18140Sstevel@tonic-gate vfsp = vp->v_vfsp;
18150Sstevel@tonic-gate dvfsp = dvp->v_vfsp;
18160Sstevel@tonic-gate
18170Sstevel@tonic-gate /*
18180Sstevel@tonic-gate * If the named file is the root of a mounted filesystem, fail,
18190Sstevel@tonic-gate * unless it's marked unlinkable. In that case, unmount the
18200Sstevel@tonic-gate * filesystem and proceed to unlink the covered vnode. (If the
18210Sstevel@tonic-gate * covered vnode is a directory, use rmdir instead of unlink,
18220Sstevel@tonic-gate * to avoid file system corruption.)
18230Sstevel@tonic-gate */
18240Sstevel@tonic-gate if (vp->v_flag & VROOT) {
18254956Spf199842 if ((vfsp->vfs_flag & VFS_UNLINKABLE) == 0) {
18264956Spf199842 error = EBUSY;
18274956Spf199842 goto out;
18284956Spf199842 }
18294956Spf199842
18304956Spf199842 /*
18314956Spf199842 * Namefs specific code starts here.
18324956Spf199842 */
18334956Spf199842
18344956Spf199842 if (dirflag == RMDIRECTORY) {
18350Sstevel@tonic-gate /*
18364956Spf199842 * User called rmdir(2) on a file that has
18374956Spf199842 * been namefs mounted on top of. Since
18384956Spf199842 * namefs doesn't allow directories to
18394956Spf199842 * be mounted on other files, we know
18404956Spf199842 * vp is not of type VDIR, so fail the operation.
18410Sstevel@tonic-gate */
18424956Spf199842 error = ENOTDIR;
18434956Spf199842 goto out;
18444956Spf199842 }
18454956Spf199842
18464956Spf199842 /*
18474956Spf199842 * If VROOT is still set after grabbing vp->v_lock,
18484956Spf199842 * no one has finished nm_unmount so far and coveredvp
18494956Spf199842 * is valid.
18504956Spf199842 * If we manage to grab vn_vfswlock(coveredvp) before releasing
18514956Spf199842 * vp->v_lock, any race window is eliminated.
18524956Spf199842 */
18534956Spf199842
18544956Spf199842 mutex_enter(&vp->v_lock);
18554956Spf199842 if ((vp->v_flag & VROOT) == 0) {
18564956Spf199842 /* Someone beat us to the unmount */
18574956Spf199842 mutex_exit(&vp->v_lock);
18580Sstevel@tonic-gate error = EBUSY;
18594956Spf199842 goto out;
18604956Spf199842 }
18614956Spf199842 vfsp = vp->v_vfsp;
18624956Spf199842 coveredvp = vfsp->vfs_vnodecovered;
18634956Spf199842 ASSERT(coveredvp);
18644956Spf199842 /*
18654956Spf199842 * Note: Implementation of vn_vfswlock shows that ordering of
18664956Spf199842 * v_lock / vn_vfswlock is not an issue here.
18674956Spf199842 */
18684956Spf199842 error = vn_vfswlock(coveredvp);
18694956Spf199842 mutex_exit(&vp->v_lock);
18704956Spf199842
18714956Spf199842 if (error)
18724956Spf199842 goto out;
18734956Spf199842
18744956Spf199842 VN_HOLD(coveredvp);
18754956Spf199842 VN_RELE(vp);
18764956Spf199842 error = dounmount(vfsp, 0, CRED());
18774956Spf199842
18784956Spf199842 /*
18794956Spf199842 * Unmounted the namefs file system; now get
18804956Spf199842 * the object it was mounted over.
18814956Spf199842 */
18824956Spf199842 vp = coveredvp;
18834956Spf199842 /*
18844956Spf199842 * If namefs was mounted over a directory, then
18854956Spf199842 * we want to use rmdir() instead of unlink().
18864956Spf199842 */
18874956Spf199842 if (vp->v_type == VDIR)
18884956Spf199842 dirflag = RMDIRECTORY;
18890Sstevel@tonic-gate
18900Sstevel@tonic-gate if (error)
18910Sstevel@tonic-gate goto out;
18920Sstevel@tonic-gate }
18930Sstevel@tonic-gate
18940Sstevel@tonic-gate /*
18950Sstevel@tonic-gate * Make sure filesystem is writeable.
18960Sstevel@tonic-gate * We check the parent directory's vfs in case this is an lofs vnode.
18970Sstevel@tonic-gate */
18980Sstevel@tonic-gate if (dvfsp && dvfsp->vfs_flag & VFS_RDONLY) {
18990Sstevel@tonic-gate error = EROFS;
19000Sstevel@tonic-gate goto out;
19010Sstevel@tonic-gate }
19020Sstevel@tonic-gate
19030Sstevel@tonic-gate vtype = vp->v_type;
19040Sstevel@tonic-gate
19050Sstevel@tonic-gate /*
19060Sstevel@tonic-gate * If there is the possibility of an nbmand share reservation, make
19070Sstevel@tonic-gate * sure it's okay to remove the file. Keep a reference to the
19080Sstevel@tonic-gate * vnode, so that we can exit the nbl critical region after
19090Sstevel@tonic-gate * calling VOP_REMOVE.
19100Sstevel@tonic-gate * If there is no possibility of an nbmand share reservation,
19110Sstevel@tonic-gate * release the vnode reference now. Filesystems like NFS may
19120Sstevel@tonic-gate * behave differently if there is an extra reference, so get rid of
19130Sstevel@tonic-gate * this one. Fortunately, we can't have nbmand mounts on NFS
19140Sstevel@tonic-gate * filesystems.
19150Sstevel@tonic-gate */
19160Sstevel@tonic-gate if (nbl_need_check(vp)) {
19170Sstevel@tonic-gate nbl_start_crit(vp, RW_READER);
19180Sstevel@tonic-gate in_crit = 1;
19195331Samw if (nbl_conflict(vp, NBL_REMOVE, 0, 0, 0, NULL)) {
19200Sstevel@tonic-gate error = EACCES;
19210Sstevel@tonic-gate goto out;
19220Sstevel@tonic-gate }
19230Sstevel@tonic-gate } else {
19240Sstevel@tonic-gate VN_RELE(vp);
19250Sstevel@tonic-gate vp = NULL;
19260Sstevel@tonic-gate }
19270Sstevel@tonic-gate
19280Sstevel@tonic-gate if (dirflag == RMDIRECTORY) {
19290Sstevel@tonic-gate /*
19300Sstevel@tonic-gate * Caller is using rmdir(2), which can only be applied to
19310Sstevel@tonic-gate * directories.
19320Sstevel@tonic-gate */
19330Sstevel@tonic-gate if (vtype != VDIR) {
19340Sstevel@tonic-gate error = ENOTDIR;
19350Sstevel@tonic-gate } else {
19360Sstevel@tonic-gate vnode_t *cwd;
19370Sstevel@tonic-gate proc_t *pp = curproc;
19380Sstevel@tonic-gate
19390Sstevel@tonic-gate mutex_enter(&pp->p_lock);
19400Sstevel@tonic-gate cwd = PTOU(pp)->u_cdir;
19410Sstevel@tonic-gate VN_HOLD(cwd);
19420Sstevel@tonic-gate mutex_exit(&pp->p_lock);
19435331Samw error = VOP_RMDIR(dvp, pn.pn_path, cwd, CRED(),
19445331Samw NULL, 0);
19450Sstevel@tonic-gate VN_RELE(cwd);
19460Sstevel@tonic-gate }
19470Sstevel@tonic-gate } else {
19480Sstevel@tonic-gate /*
19490Sstevel@tonic-gate * Unlink(2) can be applied to anything.
19500Sstevel@tonic-gate */
19515331Samw error = VOP_REMOVE(dvp, pn.pn_path, CRED(), NULL, 0);
19520Sstevel@tonic-gate }
19530Sstevel@tonic-gate
19540Sstevel@tonic-gate out:
19550Sstevel@tonic-gate pn_free(&pn);
19560Sstevel@tonic-gate if (in_crit) {
19570Sstevel@tonic-gate nbl_end_crit(vp);
19580Sstevel@tonic-gate in_crit = 0;
19590Sstevel@tonic-gate }
19600Sstevel@tonic-gate if (vp != NULL)
19610Sstevel@tonic-gate VN_RELE(vp);
19620Sstevel@tonic-gate if (dvp != NULL)
19630Sstevel@tonic-gate VN_RELE(dvp);
19642051Sprabahar if ((error == ESTALE) && fs_need_estale_retry(estale_retry++))
19650Sstevel@tonic-gate goto top;
19660Sstevel@tonic-gate return (error);
19670Sstevel@tonic-gate }
19680Sstevel@tonic-gate
19690Sstevel@tonic-gate /*
19700Sstevel@tonic-gate * Utility function to compare equality of vnodes.
19710Sstevel@tonic-gate * Compare the underlying real vnodes, if there are underlying vnodes.
19720Sstevel@tonic-gate * This is a more thorough comparison than the VN_CMP() macro provides.
19730Sstevel@tonic-gate */
19740Sstevel@tonic-gate int
19750Sstevel@tonic-gate vn_compare(vnode_t *vp1, vnode_t *vp2)
19760Sstevel@tonic-gate {
19770Sstevel@tonic-gate vnode_t *realvp;
19780Sstevel@tonic-gate
19795331Samw if (vp1 != NULL && VOP_REALVP(vp1, &realvp, NULL) == 0)
19800Sstevel@tonic-gate vp1 = realvp;
19815331Samw if (vp2 != NULL && VOP_REALVP(vp2, &realvp, NULL) == 0)
19820Sstevel@tonic-gate vp2 = realvp;
19830Sstevel@tonic-gate return (VN_CMP(vp1, vp2));
19840Sstevel@tonic-gate }
19850Sstevel@tonic-gate
19860Sstevel@tonic-gate /*
19870Sstevel@tonic-gate * The number of locks to hash into. This value must be a power
19880Sstevel@tonic-gate * of 2 minus 1 and should probably also be prime.
19890Sstevel@tonic-gate */
19900Sstevel@tonic-gate #define NUM_BUCKETS 1023
19910Sstevel@tonic-gate
19920Sstevel@tonic-gate struct vn_vfslocks_bucket {
19930Sstevel@tonic-gate kmutex_t vb_lock;
19940Sstevel@tonic-gate vn_vfslocks_entry_t *vb_list;
19950Sstevel@tonic-gate char pad[64 - sizeof (kmutex_t) - sizeof (void *)];
19960Sstevel@tonic-gate };
19970Sstevel@tonic-gate
19980Sstevel@tonic-gate /*
19990Sstevel@tonic-gate * Total number of buckets will be NUM_BUCKETS + 1.
20000Sstevel@tonic-gate */
20010Sstevel@tonic-gate
20020Sstevel@tonic-gate #pragma align 64(vn_vfslocks_buckets)
20030Sstevel@tonic-gate static struct vn_vfslocks_bucket vn_vfslocks_buckets[NUM_BUCKETS + 1];
20040Sstevel@tonic-gate
20050Sstevel@tonic-gate #define VN_VFSLOCKS_SHIFT 9
20060Sstevel@tonic-gate
20070Sstevel@tonic-gate #define VN_VFSLOCKS_HASH(vfsvpptr) \
20080Sstevel@tonic-gate ((((intptr_t)(vfsvpptr)) >> VN_VFSLOCKS_SHIFT) & NUM_BUCKETS)
20090Sstevel@tonic-gate
20100Sstevel@tonic-gate /*
20110Sstevel@tonic-gate * vn_vfslocks_getlock() uses a hash scheme to generate an
20120Sstevel@tonic-gate * rwstlock using the vfs/vnode pointer passed to it.
20130Sstevel@tonic-gate *
20140Sstevel@tonic-gate * vn_vfslocks_rele() releases a reference in the
20150Sstevel@tonic-gate * HASH table which allows the entry allocated by
20160Sstevel@tonic-gate * vn_vfslocks_getlock() to be freed at a later
20170Sstevel@tonic-gate * stage when the refcount drops to zero.
20180Sstevel@tonic-gate */
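/*
 * Illustrative pairing (a sketch, not an excerpt from any caller): every
 * reference obtained through vn_vfslocks_getlock() must eventually be
 * dropped with vn_vfslocks_rele() so the hash entry can be freed.
 *
 *	vn_vfslocks_entry_t *vep;
 *
 *	vep = vn_vfslocks_getlock(vp);
 *	if (!rwst_tryenter(&vep->ve_lock, RW_WRITER)) {
 *		vn_vfslocks_rele(vep);
 *		return (EBUSY);
 *	}
 *	... v_vfsmountedhere is protected here ...
 *	rwst_exit(&vep->ve_lock);
 *	vn_vfslocks_rele(vep);
 */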
20190Sstevel@tonic-gate
20200Sstevel@tonic-gate vn_vfslocks_entry_t *
20210Sstevel@tonic-gate vn_vfslocks_getlock(void *vfsvpptr)
20220Sstevel@tonic-gate {
20230Sstevel@tonic-gate struct vn_vfslocks_bucket *bp;
20240Sstevel@tonic-gate vn_vfslocks_entry_t *vep;
20250Sstevel@tonic-gate vn_vfslocks_entry_t *tvep;
20260Sstevel@tonic-gate
20270Sstevel@tonic-gate ASSERT(vfsvpptr != NULL);
20280Sstevel@tonic-gate bp = &vn_vfslocks_buckets[VN_VFSLOCKS_HASH(vfsvpptr)];
20290Sstevel@tonic-gate
20300Sstevel@tonic-gate mutex_enter(&bp->vb_lock);
20310Sstevel@tonic-gate for (vep = bp->vb_list; vep != NULL; vep = vep->ve_next) {
20320Sstevel@tonic-gate if (vep->ve_vpvfs == vfsvpptr) {
20330Sstevel@tonic-gate vep->ve_refcnt++;
20340Sstevel@tonic-gate mutex_exit(&bp->vb_lock);
20350Sstevel@tonic-gate return (vep);
20360Sstevel@tonic-gate }
20370Sstevel@tonic-gate }
20380Sstevel@tonic-gate mutex_exit(&bp->vb_lock);
20390Sstevel@tonic-gate vep = kmem_alloc(sizeof (*vep), KM_SLEEP);
20400Sstevel@tonic-gate rwst_init(&vep->ve_lock, NULL, RW_DEFAULT, NULL);
20410Sstevel@tonic-gate vep->ve_vpvfs = (char *)vfsvpptr;
20420Sstevel@tonic-gate vep->ve_refcnt = 1;
20430Sstevel@tonic-gate mutex_enter(&bp->vb_lock);
20440Sstevel@tonic-gate for (tvep = bp->vb_list; tvep != NULL; tvep = tvep->ve_next) {
20450Sstevel@tonic-gate if (tvep->ve_vpvfs == vfsvpptr) {
20460Sstevel@tonic-gate tvep->ve_refcnt++;
20470Sstevel@tonic-gate mutex_exit(&bp->vb_lock);
20480Sstevel@tonic-gate
20490Sstevel@tonic-gate /*
20500Sstevel@tonic-gate * There is already an entry in the hash;
20510Sstevel@tonic-gate * destroy what we just allocated.
20520Sstevel@tonic-gate */
20530Sstevel@tonic-gate rwst_destroy(&vep->ve_lock);
20540Sstevel@tonic-gate kmem_free(vep, sizeof (*vep));
20550Sstevel@tonic-gate return (tvep);
20560Sstevel@tonic-gate }
20570Sstevel@tonic-gate }
20580Sstevel@tonic-gate vep->ve_next = bp->vb_list;
20590Sstevel@tonic-gate bp->vb_list = vep;
20600Sstevel@tonic-gate mutex_exit(&bp->vb_lock);
20610Sstevel@tonic-gate return (vep);
20620Sstevel@tonic-gate }
20630Sstevel@tonic-gate
20640Sstevel@tonic-gate void
20650Sstevel@tonic-gate vn_vfslocks_rele(vn_vfslocks_entry_t *vepent)
20660Sstevel@tonic-gate {
20670Sstevel@tonic-gate struct vn_vfslocks_bucket *bp;
20680Sstevel@tonic-gate vn_vfslocks_entry_t *vep;
20690Sstevel@tonic-gate vn_vfslocks_entry_t *pvep;
20700Sstevel@tonic-gate
20710Sstevel@tonic-gate ASSERT(vepent != NULL);
20720Sstevel@tonic-gate ASSERT(vepent->ve_vpvfs != NULL);
20730Sstevel@tonic-gate
20740Sstevel@tonic-gate bp = &vn_vfslocks_buckets[VN_VFSLOCKS_HASH(vepent->ve_vpvfs)];
20750Sstevel@tonic-gate
20760Sstevel@tonic-gate mutex_enter(&bp->vb_lock);
20770Sstevel@tonic-gate vepent->ve_refcnt--;
20780Sstevel@tonic-gate
20790Sstevel@tonic-gate if ((int32_t)vepent->ve_refcnt < 0)
20800Sstevel@tonic-gate cmn_err(CE_PANIC, "vn_vfslocks_rele: refcount negative");
20810Sstevel@tonic-gate
20820Sstevel@tonic-gate if (vepent->ve_refcnt == 0) {
20830Sstevel@tonic-gate for (vep = bp->vb_list; vep != NULL; vep = vep->ve_next) {
20840Sstevel@tonic-gate if (vep->ve_vpvfs == vepent->ve_vpvfs) {
20850Sstevel@tonic-gate if (bp->vb_list == vep)
20860Sstevel@tonic-gate bp->vb_list = vep->ve_next;
20870Sstevel@tonic-gate else {
20880Sstevel@tonic-gate /* LINTED */
20890Sstevel@tonic-gate pvep->ve_next = vep->ve_next;
20900Sstevel@tonic-gate }
20910Sstevel@tonic-gate mutex_exit(&bp->vb_lock);
20920Sstevel@tonic-gate rwst_destroy(&vep->ve_lock);
20930Sstevel@tonic-gate kmem_free(vep, sizeof (*vep));
20940Sstevel@tonic-gate return;
20950Sstevel@tonic-gate }
20960Sstevel@tonic-gate pvep = vep;
20970Sstevel@tonic-gate }
20980Sstevel@tonic-gate cmn_err(CE_PANIC, "vn_vfslocks_rele: vp/vfs not found");
20990Sstevel@tonic-gate }
21000Sstevel@tonic-gate mutex_exit(&bp->vb_lock);
21010Sstevel@tonic-gate }
21020Sstevel@tonic-gate
21030Sstevel@tonic-gate /*
21040Sstevel@tonic-gate * vn_vfswlock_wait is used to implement a lock which is logically a writers
21050Sstevel@tonic-gate * lock protecting the v_vfsmountedhere field.
21060Sstevel@tonic-gate * vn_vfswlock_wait has been modified to be similar to vn_vfswlock,
21070Sstevel@tonic-gate * except that it blocks to acquire the lock VVFSLOCK.
21080Sstevel@tonic-gate *
21090Sstevel@tonic-gate * traverse() and routines re-implementing part of traverse (e.g. autofs)
21100Sstevel@tonic-gate * need to hold this lock. mount(), vn_rename(), vn_remove() and so on
21110Sstevel@tonic-gate * need the non-blocking version of the writers lock i.e. vn_vfswlock
21120Sstevel@tonic-gate */
21130Sstevel@tonic-gate int
21140Sstevel@tonic-gate vn_vfswlock_wait(vnode_t *vp)
21150Sstevel@tonic-gate {
21160Sstevel@tonic-gate int retval;
21170Sstevel@tonic-gate vn_vfslocks_entry_t *vpvfsentry;
21180Sstevel@tonic-gate ASSERT(vp != NULL);
21190Sstevel@tonic-gate
21200Sstevel@tonic-gate vpvfsentry = vn_vfslocks_getlock(vp);
21210Sstevel@tonic-gate retval = rwst_enter_sig(&vpvfsentry->ve_lock, RW_WRITER);
21220Sstevel@tonic-gate
21230Sstevel@tonic-gate if (retval == EINTR) {
21240Sstevel@tonic-gate vn_vfslocks_rele(vpvfsentry);
21250Sstevel@tonic-gate return (EINTR);
21260Sstevel@tonic-gate }
21270Sstevel@tonic-gate return (retval);
21280Sstevel@tonic-gate }
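/*
 * Illustrative use of the blocking variant, roughly how a traverse()-style
 * caller might take and drop the lock (simplified, not copied from
 * traverse()):
 *
 *	if ((error = vn_vfswlock_wait(cvp)) != 0)
 *		return (error);		// interrupted by a signal
 *	... examine/update cvp->v_vfsmountedhere ...
 *	vn_vfsunlock(cvp);
 */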
21290Sstevel@tonic-gate
21300Sstevel@tonic-gate int
21310Sstevel@tonic-gate vn_vfsrlock_wait(vnode_t *vp)
21320Sstevel@tonic-gate {
21330Sstevel@tonic-gate int retval;
21340Sstevel@tonic-gate vn_vfslocks_entry_t *vpvfsentry;
21350Sstevel@tonic-gate ASSERT(vp != NULL);
21360Sstevel@tonic-gate
21370Sstevel@tonic-gate vpvfsentry = vn_vfslocks_getlock(vp);
21380Sstevel@tonic-gate retval = rwst_enter_sig(&vpvfsentry->ve_lock, RW_READER);
21390Sstevel@tonic-gate
21400Sstevel@tonic-gate if (retval == EINTR) {
21410Sstevel@tonic-gate vn_vfslocks_rele(vpvfsentry);
21420Sstevel@tonic-gate return (EINTR);
21430Sstevel@tonic-gate }
21440Sstevel@tonic-gate
21450Sstevel@tonic-gate return (retval);
21460Sstevel@tonic-gate }
21470Sstevel@tonic-gate
21480Sstevel@tonic-gate
21490Sstevel@tonic-gate /*
21500Sstevel@tonic-gate * vn_vfswlock is used to implement a lock which is logically a writers lock
21510Sstevel@tonic-gate * protecting the v_vfsmountedhere field.
21520Sstevel@tonic-gate */
21530Sstevel@tonic-gate int
21540Sstevel@tonic-gate vn_vfswlock(vnode_t *vp)
21550Sstevel@tonic-gate {
21560Sstevel@tonic-gate vn_vfslocks_entry_t *vpvfsentry;
21570Sstevel@tonic-gate
21580Sstevel@tonic-gate /*
21590Sstevel@tonic-gate * If vp is NULL then somebody is trying to lock the covered vnode
21600Sstevel@tonic-gate * of /. (vfs_vnodecovered is NULL for /). This situation will
21610Sstevel@tonic-gate * only happen when unmounting /. Since that operation will fail
21620Sstevel@tonic-gate * anyway, return EBUSY here instead of in VFS_UNMOUNT.
21630Sstevel@tonic-gate */
21640Sstevel@tonic-gate if (vp == NULL)
21650Sstevel@tonic-gate return (EBUSY);
21660Sstevel@tonic-gate
21670Sstevel@tonic-gate vpvfsentry = vn_vfslocks_getlock(vp);
21680Sstevel@tonic-gate
21690Sstevel@tonic-gate if (rwst_tryenter(&vpvfsentry->ve_lock, RW_WRITER))
21700Sstevel@tonic-gate return (0);
21710Sstevel@tonic-gate
21720Sstevel@tonic-gate vn_vfslocks_rele(vpvfsentry);
21730Sstevel@tonic-gate return (EBUSY);
21740Sstevel@tonic-gate }
21750Sstevel@tonic-gate
21760Sstevel@tonic-gate int
21770Sstevel@tonic-gate vn_vfsrlock(vnode_t *vp)
21780Sstevel@tonic-gate {
21790Sstevel@tonic-gate vn_vfslocks_entry_t *vpvfsentry;
21800Sstevel@tonic-gate
21810Sstevel@tonic-gate /*
21820Sstevel@tonic-gate * If vp is NULL then somebody is trying to lock the covered vnode
21830Sstevel@tonic-gate * of /. (vfs_vnodecovered is NULL for /). This situation will
21840Sstevel@tonic-gate * only happen when unmounting /. Since that operation will fail
21850Sstevel@tonic-gate * anyway, return EBUSY here instead of in VFS_UNMOUNT.
21860Sstevel@tonic-gate */
21870Sstevel@tonic-gate if (vp == NULL)
21880Sstevel@tonic-gate return (EBUSY);
21890Sstevel@tonic-gate
21900Sstevel@tonic-gate vpvfsentry = vn_vfslocks_getlock(vp);
21910Sstevel@tonic-gate
21920Sstevel@tonic-gate if (rwst_tryenter(&vpvfsentry->ve_lock, RW_READER))
21930Sstevel@tonic-gate return (0);
21940Sstevel@tonic-gate
21950Sstevel@tonic-gate vn_vfslocks_rele(vpvfsentry);
21960Sstevel@tonic-gate return (EBUSY);
21970Sstevel@tonic-gate }
21980Sstevel@tonic-gate
21990Sstevel@tonic-gate void
22000Sstevel@tonic-gate vn_vfsunlock(vnode_t *vp)
22010Sstevel@tonic-gate {
22020Sstevel@tonic-gate vn_vfslocks_entry_t *vpvfsentry;
22030Sstevel@tonic-gate
22040Sstevel@tonic-gate /*
22050Sstevel@tonic-gate * ve_refcnt needs to be decremented twice.
22060Sstevel@tonic-gate * 1. To release the reference after a call to vn_vfslocks_getlock()
22070Sstevel@tonic-gate * 2. To release the reference from the locking routines like
22080Sstevel@tonic-gate * vn_vfsrlock/vn_vfswlock, etc.
22090Sstevel@tonic-gate */
22100Sstevel@tonic-gate vpvfsentry = vn_vfslocks_getlock(vp);
22110Sstevel@tonic-gate vn_vfslocks_rele(vpvfsentry);
22120Sstevel@tonic-gate
22130Sstevel@tonic-gate rwst_exit(&vpvfsentry->ve_lock);
22140Sstevel@tonic-gate vn_vfslocks_rele(vpvfsentry);
22150Sstevel@tonic-gate }
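/*
 * Illustrative reference-count trace for one write-lock cycle, assuming no
 * other users of the same vnode (a sketch, not a trace of real output):
 *
 *	vn_vfswlock(vp)		getlock: entry created, ve_refcnt = 1
 *				(reference kept while the lock is held)
 *	vn_vfsunlock(vp)	getlock: ve_refcnt 1 -> 2
 *				rele:    ve_refcnt 2 -> 1  (this lookup)
 *				rwst_exit()
 *				rele:    ve_refcnt 1 -> 0, entry freed
 */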
22160Sstevel@tonic-gate
22170Sstevel@tonic-gate int
22180Sstevel@tonic-gate vn_vfswlock_held(vnode_t *vp)
22190Sstevel@tonic-gate {
22200Sstevel@tonic-gate int held;
22210Sstevel@tonic-gate vn_vfslocks_entry_t *vpvfsentry;
22220Sstevel@tonic-gate
22230Sstevel@tonic-gate ASSERT(vp != NULL);
22240Sstevel@tonic-gate
22250Sstevel@tonic-gate vpvfsentry = vn_vfslocks_getlock(vp);
22260Sstevel@tonic-gate held = rwst_lock_held(&vpvfsentry->ve_lock, RW_WRITER);
22270Sstevel@tonic-gate
22280Sstevel@tonic-gate vn_vfslocks_rele(vpvfsentry);
22290Sstevel@tonic-gate return (held);
22300Sstevel@tonic-gate }
22310Sstevel@tonic-gate
22320Sstevel@tonic-gate
22330Sstevel@tonic-gate int
22340Sstevel@tonic-gate vn_make_ops(
22350Sstevel@tonic-gate const char *name, /* Name of file system */
22360Sstevel@tonic-gate const fs_operation_def_t *templ, /* Operation specification */
22370Sstevel@tonic-gate vnodeops_t **actual) /* Return the vnodeops */
22380Sstevel@tonic-gate {
22390Sstevel@tonic-gate int unused_ops;
22400Sstevel@tonic-gate int error;
22410Sstevel@tonic-gate
22420Sstevel@tonic-gate *actual = (vnodeops_t *)kmem_alloc(sizeof (vnodeops_t), KM_SLEEP);
22430Sstevel@tonic-gate
22440Sstevel@tonic-gate (*actual)->vnop_name = name;
22450Sstevel@tonic-gate
22460Sstevel@tonic-gate error = fs_build_vector(*actual, &unused_ops, vn_ops_table, templ);
22470Sstevel@tonic-gate if (error) {
22480Sstevel@tonic-gate kmem_free(*actual, sizeof (vnodeops_t));
22490Sstevel@tonic-gate }
22500Sstevel@tonic-gate
22510Sstevel@tonic-gate #if DEBUG
22520Sstevel@tonic-gate if (unused_ops != 0)
22530Sstevel@tonic-gate cmn_err(CE_WARN, "vn_make_ops: %s: %d operations supplied "
22540Sstevel@tonic-gate "but not used", name, unused_ops);
22550Sstevel@tonic-gate #endif
22560Sstevel@tonic-gate
22570Sstevel@tonic-gate return (error);
22580Sstevel@tonic-gate }
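/*
 * Illustrative sketch of how a file system typically builds its vnodeops
 * with vn_make_ops(); "myfs" and the myfs_* entry points are hypothetical.
 *
 *	static const fs_operation_def_t myfs_vnodeops_template[] = {
 *		VOPNAME_OPEN,	{ .vop_open = myfs_open },
 *		VOPNAME_CLOSE,	{ .vop_close = myfs_close },
 *		VOPNAME_READ,	{ .vop_read = myfs_read },
 *		NULL,		NULL
 *	};
 *	static vnodeops_t *myfs_vnodeops;
 *
 *	error = vn_make_ops("myfs", myfs_vnodeops_template, &myfs_vnodeops);
 *	if (error != 0)
 *		cmn_err(CE_WARN, "myfs: bad vnode ops template");
 */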
22590Sstevel@tonic-gate
22600Sstevel@tonic-gate /*
22610Sstevel@tonic-gate * Free the vnodeops created as a result of vn_make_ops()
22620Sstevel@tonic-gate */
22630Sstevel@tonic-gate void
22640Sstevel@tonic-gate vn_freevnodeops(vnodeops_t *vnops)
22650Sstevel@tonic-gate {
22660Sstevel@tonic-gate kmem_free(vnops, sizeof (vnodeops_t));
22670Sstevel@tonic-gate }
22680Sstevel@tonic-gate
22690Sstevel@tonic-gate /*
22700Sstevel@tonic-gate * Vnode cache.
22710Sstevel@tonic-gate */
22720Sstevel@tonic-gate
22730Sstevel@tonic-gate /* ARGSUSED */
22740Sstevel@tonic-gate static int
22750Sstevel@tonic-gate vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
22760Sstevel@tonic-gate {
22770Sstevel@tonic-gate struct vnode *vp;
22780Sstevel@tonic-gate
22790Sstevel@tonic-gate vp = buf;
22800Sstevel@tonic-gate
22810Sstevel@tonic-gate mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);
22829885SRobert.Mastors@Sun.COM mutex_init(&vp->v_vsd_lock, NULL, MUTEX_DEFAULT, NULL);
22830Sstevel@tonic-gate cv_init(&vp->v_cv, NULL, CV_DEFAULT, NULL);
22840Sstevel@tonic-gate rw_init(&vp->v_nbllock, NULL, RW_DEFAULT, NULL);
22850Sstevel@tonic-gate vp->v_femhead = NULL; /* Must be done before vn_reinit() */
22860Sstevel@tonic-gate vp->v_path = NULL;
22870Sstevel@tonic-gate vp->v_mpssdata = NULL;
22885050Sjwahlig vp->v_vsd = NULL;
22894863Spraks vp->v_fopdata = NULL;
22900Sstevel@tonic-gate
22910Sstevel@tonic-gate return (0);
22920Sstevel@tonic-gate }
22930Sstevel@tonic-gate
22940Sstevel@tonic-gate /* ARGSUSED */
22950Sstevel@tonic-gate static void
22960Sstevel@tonic-gate vn_cache_destructor(void *buf, void *cdrarg)
22970Sstevel@tonic-gate {
22980Sstevel@tonic-gate struct vnode *vp;
22990Sstevel@tonic-gate
23000Sstevel@tonic-gate vp = buf;
23010Sstevel@tonic-gate
23020Sstevel@tonic-gate rw_destroy(&vp->v_nbllock);
23030Sstevel@tonic-gate cv_destroy(&vp->v_cv);
23049885SRobert.Mastors@Sun.COM mutex_destroy(&vp->v_vsd_lock);
23050Sstevel@tonic-gate mutex_destroy(&vp->v_lock);
23060Sstevel@tonic-gate }
23070Sstevel@tonic-gate
23080Sstevel@tonic-gate void
23090Sstevel@tonic-gate vn_create_cache(void)
23100Sstevel@tonic-gate {
231112230SFrank.Rival@oracle.com /* LINTED */
231212230SFrank.Rival@oracle.com ASSERT((1 << VNODE_ALIGN_LOG2) ==
231312230SFrank.Rival@oracle.com P2ROUNDUP(sizeof (struct vnode), VNODE_ALIGN));
231412230SFrank.Rival@oracle.com vn_cache = kmem_cache_create("vn_cache", sizeof (struct vnode),
231512230SFrank.Rival@oracle.com VNODE_ALIGN, vn_cache_constructor, vn_cache_destructor, NULL, NULL,
23160Sstevel@tonic-gate NULL, 0);
23170Sstevel@tonic-gate }
23180Sstevel@tonic-gate
23190Sstevel@tonic-gate void
23200Sstevel@tonic-gate vn_destroy_cache(void)
23210Sstevel@tonic-gate {
23220Sstevel@tonic-gate kmem_cache_destroy(vn_cache);
23230Sstevel@tonic-gate }
23240Sstevel@tonic-gate
23250Sstevel@tonic-gate /*
23260Sstevel@tonic-gate * Used by file systems when fs-specific nodes (e.g., ufs inodes) are
23270Sstevel@tonic-gate * cached by the file system and vnodes remain associated.
23280Sstevel@tonic-gate */
23290Sstevel@tonic-gate void
23300Sstevel@tonic-gate vn_recycle(vnode_t *vp)
23310Sstevel@tonic-gate {
23320Sstevel@tonic-gate ASSERT(vp->v_pages == NULL);
23330Sstevel@tonic-gate
23340Sstevel@tonic-gate /*
23350Sstevel@tonic-gate * XXX - This really belongs in vn_reinit(), but we have some issues
23360Sstevel@tonic-gate * with the counts. Best to have it here for clean initialization.
23370Sstevel@tonic-gate */
23380Sstevel@tonic-gate vp->v_rdcnt = 0;
23390Sstevel@tonic-gate vp->v_wrcnt = 0;
23400Sstevel@tonic-gate vp->v_mmap_read = 0;
23410Sstevel@tonic-gate vp->v_mmap_write = 0;
23420Sstevel@tonic-gate
23430Sstevel@tonic-gate /*
23440Sstevel@tonic-gate * If FEM was in use, make sure everything gets cleaned up
23450Sstevel@tonic-gate * NOTE: vp->v_femhead is initialized to NULL in the vnode
23460Sstevel@tonic-gate * constructor.
23470Sstevel@tonic-gate */
23480Sstevel@tonic-gate if (vp->v_femhead) {
23490Sstevel@tonic-gate /* XXX - There should be a free_femhead() that does all this */
23500Sstevel@tonic-gate ASSERT(vp->v_femhead->femh_list == NULL);
23510Sstevel@tonic-gate mutex_destroy(&vp->v_femhead->femh_lock);
23520Sstevel@tonic-gate kmem_free(vp->v_femhead, sizeof (*(vp->v_femhead)));
23530Sstevel@tonic-gate vp->v_femhead = NULL;
23540Sstevel@tonic-gate }
23550Sstevel@tonic-gate if (vp->v_path) {
23560Sstevel@tonic-gate kmem_free(vp->v_path, strlen(vp->v_path) + 1);
23570Sstevel@tonic-gate vp->v_path = NULL;
23580Sstevel@tonic-gate }
23594863Spraks
23604863Spraks if (vp->v_fopdata != NULL) {
23614863Spraks free_fopdata(vp);
23624863Spraks }
23630Sstevel@tonic-gate vp->v_mpssdata = NULL;
23645050Sjwahlig vsd_free(vp);
23650Sstevel@tonic-gate }
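/*
 * Illustrative sketch (hypothetical file system): a file system that keeps
 * its nodes cached might reset the per-use vnode state before handing a
 * cached node/vnode pair out again.  v_data and v_op are left alone.
 *
 *	np = myfs_cache_get();		// node still paired with its vnode
 *	vn_recycle(np->n_vnode);	// clear counts, v_path, FEM state
 */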
23660Sstevel@tonic-gate
23670Sstevel@tonic-gate /*
23680Sstevel@tonic-gate * Used to reset the vnode fields including those that are directly accessible
23690Sstevel@tonic-gate * as well as those which require an accessor function.
23700Sstevel@tonic-gate *
23710Sstevel@tonic-gate * Does not initialize:
23729885SRobert.Mastors@Sun.COM * synchronization objects: v_lock, v_vsd_lock, v_nbllock, v_cv
23730Sstevel@tonic-gate * v_data (since FS-nodes and vnodes point to each other and should
23740Sstevel@tonic-gate * be updated simultaneously)
23750Sstevel@tonic-gate * v_op (in case someone needs to make a VOP call on this object)
23760Sstevel@tonic-gate */
23770Sstevel@tonic-gate void
23780Sstevel@tonic-gate vn_reinit(vnode_t *vp)
23790Sstevel@tonic-gate {
23800Sstevel@tonic-gate vp->v_count = 1;
23816712Stomee vp->v_count_dnlc = 0;
23820Sstevel@tonic-gate vp->v_vfsp = NULL;
23830Sstevel@tonic-gate vp->v_stream = NULL;
23840Sstevel@tonic-gate vp->v_vfsmountedhere = NULL;
23850Sstevel@tonic-gate vp->v_flag = 0;
23860Sstevel@tonic-gate vp->v_type = VNON;
23870Sstevel@tonic-gate vp->v_rdev = NODEV;
23880Sstevel@tonic-gate
23890Sstevel@tonic-gate vp->v_filocks = NULL;
23900Sstevel@tonic-gate vp->v_shrlocks = NULL;
23910Sstevel@tonic-gate vp->v_pages = NULL;
23920Sstevel@tonic-gate
23930Sstevel@tonic-gate vp->v_locality = NULL;
23945331Samw vp->v_xattrdir = NULL;
23950Sstevel@tonic-gate
23960Sstevel@tonic-gate /* Handles v_femhead, v_path, and the r/w/map counts */
23970Sstevel@tonic-gate vn_recycle(vp);
23980Sstevel@tonic-gate }
23990Sstevel@tonic-gate
24000Sstevel@tonic-gate vnode_t *
24010Sstevel@tonic-gate vn_alloc(int kmflag)
24020Sstevel@tonic-gate {
24030Sstevel@tonic-gate vnode_t *vp;
24040Sstevel@tonic-gate
24050Sstevel@tonic-gate vp = kmem_cache_alloc(vn_cache, kmflag);
24060Sstevel@tonic-gate
24070Sstevel@tonic-gate if (vp != NULL) {
24080Sstevel@tonic-gate vp->v_femhead = NULL; /* Must be done before vn_reinit() */
24094863Spraks vp->v_fopdata = NULL;
24100Sstevel@tonic-gate vn_reinit(vp);
24110Sstevel@tonic-gate }
24120Sstevel@tonic-gate
24130Sstevel@tonic-gate return (vp);
24140Sstevel@tonic-gate }
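/*
 * Illustrative allocation sketch (hypothetical file system "myfs"): a file
 * system pairs vn_alloc()/vn_free() around its own node setup and teardown.
 *
 *	vp = vn_alloc(KM_SLEEP);	// cannot fail with KM_SLEEP
 *	vn_setops(vp, myfs_vnodeops);
 *	vp->v_type = VREG;
 *	vp->v_vfsp = vfsp;
 *	vp->v_data = np;
 *	...
 *	// teardown, once the last reference is gone:
 *	vn_free(vp);
 */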
24150Sstevel@tonic-gate
24160Sstevel@tonic-gate void
24170Sstevel@tonic-gate vn_free(vnode_t *vp)
24180Sstevel@tonic-gate {
24195331Samw ASSERT(vp->v_shrlocks == NULL);
24205331Samw ASSERT(vp->v_filocks == NULL);
24215331Samw
24220Sstevel@tonic-gate /*
24230Sstevel@tonic-gate * Some file systems call vn_free() with v_count of zero,
24240Sstevel@tonic-gate * some with v_count of 1. In any case, the value should
24250Sstevel@tonic-gate * never be anything else.
24260Sstevel@tonic-gate */
24270Sstevel@tonic-gate ASSERT((vp->v_count == 0) || (vp->v_count == 1));
24286712Stomee ASSERT(vp->v_count_dnlc == 0);
24290Sstevel@tonic-gate if (vp->v_path != NULL) {
24300Sstevel@tonic-gate kmem_free(vp->v_path, strlen(vp->v_path) + 1);
24310Sstevel@tonic-gate vp->v_path = NULL;
24320Sstevel@tonic-gate }
24330Sstevel@tonic-gate
24340Sstevel@tonic-gate /* If FEM was in use, make sure everything gets cleaned up */
24350Sstevel@tonic-gate if (vp->v_femhead) {
24360Sstevel@tonic-gate /* XXX - There should be a free_femhead() that does all this */
24370Sstevel@tonic-gate ASSERT(vp->v_femhead->femh_list == NULL);
24380Sstevel@tonic-gate mutex_destroy(&vp->v_femhead->femh_lock);
24390Sstevel@tonic-gate kmem_free(vp->v_femhead, sizeof (*(vp->v_femhead)));
24400Sstevel@tonic-gate vp->v_femhead = NULL;
24410Sstevel@tonic-gate }
24424863Spraks
24434863Spraks if (vp->v_fopdata != NULL) {
24444863Spraks free_fopdata(vp);
24454863Spraks }
24460Sstevel@tonic-gate vp->v_mpssdata = NULL;
24475050Sjwahlig vsd_free(vp);
24480Sstevel@tonic-gate kmem_cache_free(vn_cache, vp);
24490Sstevel@tonic-gate }
24500Sstevel@tonic-gate
24510Sstevel@tonic-gate /*
24520Sstevel@tonic-gate * vnode status changes, should define better states than 1, 0.
24530Sstevel@tonic-gate */
24540Sstevel@tonic-gate void
24550Sstevel@tonic-gate vn_reclaim(vnode_t *vp)
24560Sstevel@tonic-gate {
24570Sstevel@tonic-gate vfs_t *vfsp = vp->v_vfsp;
24580Sstevel@tonic-gate
24591925Srsb if (vfsp == NULL ||
24601925Srsb vfsp->vfs_implp == NULL || vfsp->vfs_femhead == NULL) {
24610Sstevel@tonic-gate return;
24620Sstevel@tonic-gate }
24630Sstevel@tonic-gate (void) VFS_VNSTATE(vfsp, vp, VNTRANS_RECLAIMED);
24640Sstevel@tonic-gate }
24650Sstevel@tonic-gate
24660Sstevel@tonic-gate void
24670Sstevel@tonic-gate vn_idle(vnode_t *vp)
24680Sstevel@tonic-gate {
24690Sstevel@tonic-gate vfs_t *vfsp = vp->v_vfsp;
24700Sstevel@tonic-gate
24711925Srsb if (vfsp == NULL ||
24721925Srsb vfsp->vfs_implp == NULL || vfsp->vfs_femhead == NULL) {
24730Sstevel@tonic-gate return;
24740Sstevel@tonic-gate }
24750Sstevel@tonic-gate (void) VFS_VNSTATE(vfsp, vp, VNTRANS_IDLED);
24760Sstevel@tonic-gate }
24770Sstevel@tonic-gate void
24780Sstevel@tonic-gate vn_exists(vnode_t *vp)
24790Sstevel@tonic-gate {
24800Sstevel@tonic-gate vfs_t *vfsp = vp->v_vfsp;
24810Sstevel@tonic-gate
24821925Srsb if (vfsp == NULL ||
24831925Srsb vfsp->vfs_implp == NULL || vfsp->vfs_femhead == NULL) {
24840Sstevel@tonic-gate return;
24850Sstevel@tonic-gate }
24860Sstevel@tonic-gate (void) VFS_VNSTATE(vfsp, vp, VNTRANS_EXISTS);
24870Sstevel@tonic-gate }
24880Sstevel@tonic-gate
24890Sstevel@tonic-gate void
24900Sstevel@tonic-gate vn_invalid(vnode_t *vp)
24910Sstevel@tonic-gate {
24920Sstevel@tonic-gate vfs_t *vfsp = vp->v_vfsp;
24930Sstevel@tonic-gate
24941925Srsb if (vfsp == NULL ||
24951925Srsb vfsp->vfs_implp == NULL || vfsp->vfs_femhead == NULL) {
24960Sstevel@tonic-gate return;
24970Sstevel@tonic-gate }
24980Sstevel@tonic-gate (void) VFS_VNSTATE(vfsp, vp, VNTRANS_DESTROYED);
24990Sstevel@tonic-gate }
25000Sstevel@tonic-gate
25010Sstevel@tonic-gate /* Vnode event notification */
25020Sstevel@tonic-gate
25030Sstevel@tonic-gate int
25045331Samw vnevent_support(vnode_t *vp, caller_context_t *ct)
25050Sstevel@tonic-gate {
25060Sstevel@tonic-gate if (vp == NULL)
25070Sstevel@tonic-gate return (EINVAL);
25080Sstevel@tonic-gate
25095331Samw return (VOP_VNEVENT(vp, VE_SUPPORT, NULL, NULL, ct));
25100Sstevel@tonic-gate }
25110Sstevel@tonic-gate
25120Sstevel@tonic-gate void
25135331Samw vnevent_rename_src(vnode_t *vp, vnode_t *dvp, char *name, caller_context_t *ct)
25140Sstevel@tonic-gate {
25150Sstevel@tonic-gate if (vp == NULL || vp->v_femhead == NULL) {
25160Sstevel@tonic-gate return;
25170Sstevel@tonic-gate }
25185331Samw (void) VOP_VNEVENT(vp, VE_RENAME_SRC, dvp, name, ct);
25190Sstevel@tonic-gate }
25200Sstevel@tonic-gate
25210Sstevel@tonic-gate void
25225331Samw vnevent_rename_dest(vnode_t *vp, vnode_t *dvp, char *name,
25235331Samw caller_context_t *ct)
25244863Spraks {
25254863Spraks if (vp == NULL || vp->v_femhead == NULL) {
25264863Spraks return;
25274863Spraks }
25285331Samw (void) VOP_VNEVENT(vp, VE_RENAME_DEST, dvp, name, ct);
25294863Spraks }
25304863Spraks
25314863Spraks void
25325331Samw vnevent_rename_dest_dir(vnode_t *vp, caller_context_t *ct)
25334863Spraks {
25344863Spraks if (vp == NULL || vp->v_femhead == NULL) {
25354863Spraks return;
25364863Spraks }
25375331Samw (void) VOP_VNEVENT(vp, VE_RENAME_DEST_DIR, NULL, NULL, ct);
25384863Spraks }
25394863Spraks
25404863Spraks void
25415331Samw vnevent_remove(vnode_t *vp, vnode_t *dvp, char *name, caller_context_t *ct)
25420Sstevel@tonic-gate {
25430Sstevel@tonic-gate if (vp == NULL || vp->v_femhead == NULL) {
25440Sstevel@tonic-gate return;
25450Sstevel@tonic-gate }
25465331Samw (void) VOP_VNEVENT(vp, VE_REMOVE, dvp, name, ct);
25470Sstevel@tonic-gate }
25480Sstevel@tonic-gate
25490Sstevel@tonic-gate void
25505331Samw vnevent_rmdir(vnode_t *vp, vnode_t *dvp, char *name, caller_context_t *ct)
25514863Spraks {
25524863Spraks if (vp == NULL || vp->v_femhead == NULL) {
25534863Spraks return;
25544863Spraks }
25555331Samw (void) VOP_VNEVENT(vp, VE_RMDIR, dvp, name, ct);
25564863Spraks }
25574863Spraks
25584863Spraks void
25595331Samw vnevent_create(vnode_t *vp, caller_context_t *ct)
25600Sstevel@tonic-gate {
25610Sstevel@tonic-gate if (vp == NULL || vp->v_femhead == NULL) {
25620Sstevel@tonic-gate return;
25630Sstevel@tonic-gate }
25645331Samw (void) VOP_VNEVENT(vp, VE_CREATE, NULL, NULL, ct);
25650Sstevel@tonic-gate }
25660Sstevel@tonic-gate
25670Sstevel@tonic-gate void
25685331Samw vnevent_link(vnode_t *vp, caller_context_t *ct)
25690Sstevel@tonic-gate {
25700Sstevel@tonic-gate if (vp == NULL || vp->v_femhead == NULL) {
25710Sstevel@tonic-gate return;
25720Sstevel@tonic-gate }
25735331Samw (void) VOP_VNEVENT(vp, VE_LINK, NULL, NULL, ct);
25744863Spraks }
25754863Spraks
25764863Spraks void
25775331Samw vnevent_mountedover(vnode_t *vp, caller_context_t *ct)
25784863Spraks {
25794863Spraks if (vp == NULL || vp->v_femhead == NULL) {
25804863Spraks return;
25814863Spraks }
25825331Samw (void) VOP_VNEVENT(vp, VE_MOUNTEDOVER, NULL, NULL, ct);
25830Sstevel@tonic-gate }
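
/*
 * Illustrative sketch: the vnevent_*() routines above are called by a
 * filesystem's own VOP implementations once the corresponding namespace
 * change has succeeded, so FEM monitors watching the vnode can react.
 * For example, a remove implementation would typically finish with
 *
 *	vnevent_remove(vp, dvp, nm, ct);
 *
 * where vp is the vnode being unlinked, dvp its directory and nm the name
 * used.  Since each routine returns early when v_femhead is NULL, the
 * notifications are cheap when no monitor is installed.
 */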
25840Sstevel@tonic-gate
25850Sstevel@tonic-gate /*
25860Sstevel@tonic-gate * Vnode accessors.
25870Sstevel@tonic-gate */
25880Sstevel@tonic-gate
25890Sstevel@tonic-gate int
25900Sstevel@tonic-gate vn_is_readonly(vnode_t *vp)
25910Sstevel@tonic-gate {
25920Sstevel@tonic-gate return (vp->v_vfsp->vfs_flag & VFS_RDONLY);
25930Sstevel@tonic-gate }
25940Sstevel@tonic-gate
25950Sstevel@tonic-gate int
25960Sstevel@tonic-gate vn_has_flocks(vnode_t *vp)
25970Sstevel@tonic-gate {
25980Sstevel@tonic-gate return (vp->v_filocks != NULL);
25990Sstevel@tonic-gate }
26000Sstevel@tonic-gate
26010Sstevel@tonic-gate int
26020Sstevel@tonic-gate vn_has_mandatory_locks(vnode_t *vp, int mode)
26030Sstevel@tonic-gate {
26040Sstevel@tonic-gate return ((vp->v_filocks != NULL) && (MANDLOCK(vp, mode)));
26050Sstevel@tonic-gate }
26060Sstevel@tonic-gate
26070Sstevel@tonic-gate int
26080Sstevel@tonic-gate vn_has_cached_data(vnode_t *vp)
26090Sstevel@tonic-gate {
26100Sstevel@tonic-gate return (vp->v_pages != NULL);
26110Sstevel@tonic-gate }
26120Sstevel@tonic-gate
26130Sstevel@tonic-gate /*
26140Sstevel@tonic-gate * Return 0 if the vnode in question shouldn't be permitted into a zone via
26150Sstevel@tonic-gate * zone_enter(2).
26160Sstevel@tonic-gate */
26170Sstevel@tonic-gate int
26180Sstevel@tonic-gate vn_can_change_zones(vnode_t *vp)
26190Sstevel@tonic-gate {
26200Sstevel@tonic-gate struct vfssw *vswp;
26210Sstevel@tonic-gate int allow = 1;
26220Sstevel@tonic-gate vnode_t *rvp;
26230Sstevel@tonic-gate
2624766Scarlsonj if (nfs_global_client_only != 0)
2625766Scarlsonj return (1);
2626766Scarlsonj
26270Sstevel@tonic-gate /*
26280Sstevel@tonic-gate * We always want to look at the underlying vnode if there is one.
26290Sstevel@tonic-gate */
26305331Samw if (VOP_REALVP(vp, &rvp, NULL) != 0)
26310Sstevel@tonic-gate rvp = vp;
26320Sstevel@tonic-gate /*
26330Sstevel@tonic-gate * Some pseudo filesystems (including doorfs) don't actually register
26340Sstevel@tonic-gate * their vfsops_t, so the following may return NULL; we happily let
26350Sstevel@tonic-gate * such vnodes switch zones.
26360Sstevel@tonic-gate */
26370Sstevel@tonic-gate vswp = vfs_getvfsswbyvfsops(vfs_getops(rvp->v_vfsp));
26380Sstevel@tonic-gate if (vswp != NULL) {
26390Sstevel@tonic-gate if (vswp->vsw_flag & VSW_NOTZONESAFE)
26400Sstevel@tonic-gate allow = 0;
26410Sstevel@tonic-gate vfs_unrefvfssw(vswp);
26420Sstevel@tonic-gate }
26430Sstevel@tonic-gate return (allow);
26440Sstevel@tonic-gate }
26450Sstevel@tonic-gate
26460Sstevel@tonic-gate /*
26470Sstevel@tonic-gate * Return nonzero if the vnode is a mount point, zero if not.
26480Sstevel@tonic-gate */
26490Sstevel@tonic-gate int
26500Sstevel@tonic-gate vn_ismntpt(vnode_t *vp)
26510Sstevel@tonic-gate {
26520Sstevel@tonic-gate return (vp->v_vfsmountedhere != NULL);
26530Sstevel@tonic-gate }
26540Sstevel@tonic-gate
26550Sstevel@tonic-gate /* Retrieve the vfs (if any) mounted on this vnode */
26560Sstevel@tonic-gate vfs_t *
26570Sstevel@tonic-gate vn_mountedvfs(vnode_t *vp)
26580Sstevel@tonic-gate {
26590Sstevel@tonic-gate return (vp->v_vfsmountedhere);
26600Sstevel@tonic-gate }
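
/*
 * Illustrative sketch: namespace-walking code checks for a covered vnode
 * before descending, along the lines of
 *
 *	if (vn_ismntpt(vp)) {
 *		vfs_t *mvfsp = vn_mountedvfs(vp);
 *		...cross into the root vnode of mvfsp...
 *	}
 *
 * The real crossing logic (including the locking these accessors leave to
 * the caller) lives in traverse().
 */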
26610Sstevel@tonic-gate
26620Sstevel@tonic-gate /*
26636712Stomee * Return nonzero if the vnode is referenced by the dnlc, zero if not.
26646712Stomee */
26656712Stomee int
26666712Stomee vn_in_dnlc(vnode_t *vp)
26676712Stomee {
26686712Stomee return (vp->v_count_dnlc > 0);
26696712Stomee }
26706712Stomee
26716712Stomee /*
26725331Samw * vn_has_other_opens() checks whether a particular file is opened by more than
26735331Samw * just the caller and whether the open is for read and/or write.
26745331Samw * This routine is for calling after the caller has already called VOP_OPEN()
26755331Samw * and the caller wishes to know if they are the only one with it open for
26765331Samw * the mode(s) specified.
26775331Samw *
26785331Samw * Vnode counts are only kept on regular files (v_type=VREG).
26795331Samw */
26805331Samw int
26815331Samw vn_has_other_opens(
26825331Samw vnode_t *vp,
26835331Samw v_mode_t mode)
26845331Samw {
26855331Samw
26865331Samw ASSERT(vp != NULL);
26875331Samw
26885331Samw switch (mode) {
26895331Samw case V_WRITE:
26905331Samw if (vp->v_wrcnt > 1)
26915331Samw return (V_TRUE);
26925331Samw break;
26935331Samw case V_RDORWR:
26945331Samw if ((vp->v_rdcnt > 1) || (vp->v_wrcnt > 1))
26955331Samw return (V_TRUE);
26965331Samw break;
26975331Samw case V_RDANDWR:
26985331Samw if ((vp->v_rdcnt > 1) && (vp->v_wrcnt > 1))
26995331Samw return (V_TRUE);
27005331Samw break;
27015331Samw case V_READ:
27025331Samw if (vp->v_rdcnt > 1)
27035331Samw return (V_TRUE);
27045331Samw break;
27055331Samw }
27065331Samw
27075331Samw return (V_FALSE);
27085331Samw }
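
/*
 * Illustrative sketch: this is the check behind the delegation decision
 * mentioned in fop_open() below.  After its own VOP_OPEN() has succeeded,
 * a caller such as the NFS server can ask
 *
 *	if (vn_has_other_opens(vp, V_WRITE))
 *		...someone else has the file open for write, don't delegate...
 *
 * Because the caller's own open is already counted, the "> 1" tests above
 * effectively mean "anyone besides me".
 */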
27095331Samw
27105331Samw /*
27110Sstevel@tonic-gate * vn_is_opened() checks whether a particular file is opened and
27120Sstevel@tonic-gate * whether the open is for read and/or write.
27130Sstevel@tonic-gate *
27140Sstevel@tonic-gate * Vnode counts are only kept on regular files (v_type=VREG).
27150Sstevel@tonic-gate */
27160Sstevel@tonic-gate int
27170Sstevel@tonic-gate vn_is_opened(
27180Sstevel@tonic-gate vnode_t *vp,
27190Sstevel@tonic-gate v_mode_t mode)
27200Sstevel@tonic-gate {
27210Sstevel@tonic-gate
27220Sstevel@tonic-gate ASSERT(vp != NULL);
27230Sstevel@tonic-gate
27240Sstevel@tonic-gate switch (mode) {
27250Sstevel@tonic-gate case V_WRITE:
27260Sstevel@tonic-gate if (vp->v_wrcnt)
27270Sstevel@tonic-gate return (V_TRUE);
27280Sstevel@tonic-gate break;
27290Sstevel@tonic-gate case V_RDANDWR:
27300Sstevel@tonic-gate if (vp->v_rdcnt && vp->v_wrcnt)
27310Sstevel@tonic-gate return (V_TRUE);
27320Sstevel@tonic-gate break;
27330Sstevel@tonic-gate case V_RDORWR:
27340Sstevel@tonic-gate if (vp->v_rdcnt || vp->v_wrcnt)
27350Sstevel@tonic-gate return (V_TRUE);
27360Sstevel@tonic-gate break;
27370Sstevel@tonic-gate case V_READ:
27380Sstevel@tonic-gate if (vp->v_rdcnt)
27390Sstevel@tonic-gate return (V_TRUE);
27400Sstevel@tonic-gate break;
27410Sstevel@tonic-gate }
27420Sstevel@tonic-gate
27430Sstevel@tonic-gate return (V_FALSE);
27440Sstevel@tonic-gate }
27450Sstevel@tonic-gate
27460Sstevel@tonic-gate /*
27470Sstevel@tonic-gate * vn_is_mapped() checks whether a particular file is mapped and whether
27480Sstevel@tonic-gate * the file is mapped read and/or write.
27490Sstevel@tonic-gate */
27500Sstevel@tonic-gate int
27510Sstevel@tonic-gate vn_is_mapped(
27520Sstevel@tonic-gate vnode_t *vp,
27530Sstevel@tonic-gate v_mode_t mode)
27540Sstevel@tonic-gate {
27550Sstevel@tonic-gate
27560Sstevel@tonic-gate ASSERT(vp != NULL);
27570Sstevel@tonic-gate
27580Sstevel@tonic-gate #if !defined(_LP64)
27590Sstevel@tonic-gate switch (mode) {
27600Sstevel@tonic-gate /*
27610Sstevel@tonic-gate * The atomic_add_64_nv functions force atomicity in the
27620Sstevel@tonic-gate * case of 32 bit architectures. Otherwise the 64 bit values
27630Sstevel@tonic-gate * require two fetches. The value of the fields may
27640Sstevel@tonic-gate * (potentially) change between the first fetch and the
27650Sstevel@tonic-gate * second.
27660Sstevel@tonic-gate */
27670Sstevel@tonic-gate case V_WRITE:
27680Sstevel@tonic-gate if (atomic_add_64_nv((&(vp->v_mmap_write)), 0))
27690Sstevel@tonic-gate return (V_TRUE);
27700Sstevel@tonic-gate break;
27710Sstevel@tonic-gate case V_RDANDWR:
27720Sstevel@tonic-gate if ((atomic_add_64_nv((&(vp->v_mmap_read)), 0)) &&
27730Sstevel@tonic-gate (atomic_add_64_nv((&(vp->v_mmap_write)), 0)))
27740Sstevel@tonic-gate return (V_TRUE);
27750Sstevel@tonic-gate break;
27760Sstevel@tonic-gate case V_RDORWR:
27770Sstevel@tonic-gate if ((atomic_add_64_nv((&(vp->v_mmap_read)), 0)) ||
27780Sstevel@tonic-gate (atomic_add_64_nv((&(vp->v_mmap_write)), 0)))
27790Sstevel@tonic-gate return (V_TRUE);
27800Sstevel@tonic-gate break;
27810Sstevel@tonic-gate case V_READ:
27820Sstevel@tonic-gate if (atomic_add_64_nv((&(vp->v_mmap_read)), 0))
27830Sstevel@tonic-gate return (V_TRUE);
27840Sstevel@tonic-gate break;
27850Sstevel@tonic-gate }
27860Sstevel@tonic-gate #else
27870Sstevel@tonic-gate switch (mode) {
27880Sstevel@tonic-gate case V_WRITE:
27890Sstevel@tonic-gate if (vp->v_mmap_write)
27900Sstevel@tonic-gate return (V_TRUE);
27910Sstevel@tonic-gate break;
27920Sstevel@tonic-gate case V_RDANDWR:
27930Sstevel@tonic-gate if (vp->v_mmap_read && vp->v_mmap_write)
27940Sstevel@tonic-gate return (V_TRUE);
27950Sstevel@tonic-gate break;
27960Sstevel@tonic-gate case V_RDORWR:
27970Sstevel@tonic-gate if (vp->v_mmap_read || vp->v_mmap_write)
27980Sstevel@tonic-gate return (V_TRUE);
27990Sstevel@tonic-gate break;
28000Sstevel@tonic-gate case V_READ:
28010Sstevel@tonic-gate if (vp->v_mmap_read)
28020Sstevel@tonic-gate return (V_TRUE);
28030Sstevel@tonic-gate break;
28040Sstevel@tonic-gate }
28050Sstevel@tonic-gate #endif
28060Sstevel@tonic-gate
28070Sstevel@tonic-gate return (V_FALSE);
28080Sstevel@tonic-gate }
28090Sstevel@tonic-gate
28100Sstevel@tonic-gate /*
28110Sstevel@tonic-gate * Set the operations vector for a vnode.
28120Sstevel@tonic-gate *
28130Sstevel@tonic-gate * FEM ensures that the v_femhead pointer is filled in before the
28140Sstevel@tonic-gate * v_op pointer is changed. This means that if the v_femhead pointer
28150Sstevel@tonic-gate * is NULL, and the v_op field hasn't changed since we last checked
28160Sstevel@tonic-gate * the v_femhead pointer, then our update is ok - we are not racing with
28170Sstevel@tonic-gate * FEM.
28180Sstevel@tonic-gate */
28190Sstevel@tonic-gate void
28200Sstevel@tonic-gate vn_setops(vnode_t *vp, vnodeops_t *vnodeops)
28210Sstevel@tonic-gate {
28220Sstevel@tonic-gate vnodeops_t *op;
28230Sstevel@tonic-gate
28240Sstevel@tonic-gate ASSERT(vp != NULL);
28250Sstevel@tonic-gate ASSERT(vnodeops != NULL);
28260Sstevel@tonic-gate
28270Sstevel@tonic-gate op = vp->v_op;
28280Sstevel@tonic-gate membar_consumer();
28290Sstevel@tonic-gate /*
28300Sstevel@tonic-gate * If vp->v_femhead == NULL, then we'll call casptr() to do the
28310Sstevel@tonic-gate * compare-and-swap on vp->v_op. If either fails, then FEM is
28320Sstevel@tonic-gate * in effect on the vnode and we need to have FEM deal with it.
28330Sstevel@tonic-gate */
28340Sstevel@tonic-gate if (vp->v_femhead != NULL || casptr(&vp->v_op, op, vnodeops) != op) {
28350Sstevel@tonic-gate fem_setvnops(vp, vnodeops);
28360Sstevel@tonic-gate }
28370Sstevel@tonic-gate }
28380Sstevel@tonic-gate
28390Sstevel@tonic-gate /*
28400Sstevel@tonic-gate * Retrieve the operations vector for a vnode
28410Sstevel@tonic-gate * As with vn_setops() above, make sure we aren't racing with FEM.
28420Sstevel@tonic-gate * FEM sets the v_op to a special, internal vnodeops that wouldn't
28430Sstevel@tonic-gate * make sense to the callers of this routine.
28440Sstevel@tonic-gate */
28450Sstevel@tonic-gate vnodeops_t *
28460Sstevel@tonic-gate vn_getops(vnode_t *vp)
28470Sstevel@tonic-gate {
28480Sstevel@tonic-gate vnodeops_t *op;
28490Sstevel@tonic-gate
28500Sstevel@tonic-gate ASSERT(vp != NULL);
28510Sstevel@tonic-gate
28520Sstevel@tonic-gate op = vp->v_op;
28530Sstevel@tonic-gate membar_consumer();
28540Sstevel@tonic-gate if (vp->v_femhead == NULL && op == vp->v_op) {
28550Sstevel@tonic-gate return (op);
28560Sstevel@tonic-gate } else {
28570Sstevel@tonic-gate return (fem_getvnops(vp));
28580Sstevel@tonic-gate }
28590Sstevel@tonic-gate }
28600Sstevel@tonic-gate
28610Sstevel@tonic-gate /*
28620Sstevel@tonic-gate * Returns non-zero (1) if the vnodeops matches that of the vnode.
28630Sstevel@tonic-gate * Returns zero (0) if not.
28640Sstevel@tonic-gate */
28650Sstevel@tonic-gate int
28660Sstevel@tonic-gate vn_matchops(vnode_t *vp, vnodeops_t *vnodeops)
28670Sstevel@tonic-gate {
28680Sstevel@tonic-gate return (vn_getops(vp) == vnodeops);
28690Sstevel@tonic-gate }
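
/*
 * Illustrative sketch (myfs_vnodeops is a hypothetical name): going through
 * vn_matchops() rather than comparing vp->v_op directly keeps the test
 * correct when FEM has interposed on the vnode:
 *
 *	extern vnodeops_t *myfs_vnodeops;
 *
 *	if (vn_matchops(vp, myfs_vnodeops))
 *		...vp->v_data is one of myfs's private nodes...
 */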
28700Sstevel@tonic-gate
28710Sstevel@tonic-gate /*
28720Sstevel@tonic-gate * Returns non-zero (1) if the specified operation matches the
28730Sstevel@tonic-gate * corresponding operation of the vnode.
28740Sstevel@tonic-gate * Returns zero (0) if not.
28750Sstevel@tonic-gate */
28760Sstevel@tonic-gate
28770Sstevel@tonic-gate #define MATCHNAME(n1, n2) (((n1)[0] == (n2)[0]) && (strcmp((n1), (n2)) == 0))
28780Sstevel@tonic-gate
28790Sstevel@tonic-gate int
28800Sstevel@tonic-gate vn_matchopval(vnode_t *vp, char *vopname, fs_generic_func_p funcp)
28810Sstevel@tonic-gate {
28820Sstevel@tonic-gate const fs_operation_trans_def_t *otdp;
28830Sstevel@tonic-gate fs_generic_func_p *loc = NULL;
28840Sstevel@tonic-gate vnodeops_t *vop = vn_getops(vp);
28850Sstevel@tonic-gate
28860Sstevel@tonic-gate ASSERT(vopname != NULL);
28870Sstevel@tonic-gate
28880Sstevel@tonic-gate for (otdp = vn_ops_table; otdp->name != NULL; otdp++) {
28890Sstevel@tonic-gate if (MATCHNAME(otdp->name, vopname)) {
28904956Spf199842 loc = (fs_generic_func_p *)
28914956Spf199842 ((char *)(vop) + otdp->offset);
28920Sstevel@tonic-gate break;
28930Sstevel@tonic-gate }
28940Sstevel@tonic-gate }
28950Sstevel@tonic-gate
28960Sstevel@tonic-gate return ((loc != NULL) && (*loc == funcp));
28970Sstevel@tonic-gate }
28980Sstevel@tonic-gate
28990Sstevel@tonic-gate /*
29000Sstevel@tonic-gate * fs_new_caller_id() needs to return a unique ID on a given local system.
29010Sstevel@tonic-gate * The IDs do not need to survive across reboots. These are primarily
29020Sstevel@tonic-gate * used so that (FEM) monitors can detect particular callers (such as
29030Sstevel@tonic-gate * the NFS server) of a given vnode/vfs operation.
29040Sstevel@tonic-gate */
29050Sstevel@tonic-gate u_longlong_t
29060Sstevel@tonic-gate fs_new_caller_id()
29070Sstevel@tonic-gate {
29080Sstevel@tonic-gate static uint64_t next_caller_id = 0LL; /* First call returns 1 */
29090Sstevel@tonic-gate
29100Sstevel@tonic-gate return ((u_longlong_t)atomic_add_64_nv(&next_caller_id, 1));
29110Sstevel@tonic-gate }
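
/*
 * Illustrative sketch (my_caller_id is a hypothetical name): a subsystem
 * that wants FEM monitors to recognize its calls obtains an ID once and
 * stamps it into the caller_context_t it passes to the VOP_*() macros:
 *
 *	static u_longlong_t my_caller_id;
 *	caller_context_t ct;
 *
 *	my_caller_id = fs_new_caller_id();
 *	...
 *	ct.cc_pid = curproc->p_pid;
 *	ct.cc_sysid = 0;
 *	ct.cc_caller_id = my_caller_id;
 *	ct.cc_flags = 0;
 *	(void) VOP_RWLOCK(vp, V_WRITELOCK_TRUE, &ct);
 */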
29120Sstevel@tonic-gate
29130Sstevel@tonic-gate /*
29140Sstevel@tonic-gate * Given a starting vnode and a path, updates the path in the target vnode in
29150Sstevel@tonic-gate * a safe manner. If the vnode already has path information embedded, then the
2916254Seschrock * cached path is left untouched.
29170Sstevel@tonic-gate */
29183855Ssn199410
29193855Ssn199410 size_t max_vnode_path = 4 * MAXPATHLEN;
29203855Ssn199410
29210Sstevel@tonic-gate void
29220Sstevel@tonic-gate vn_setpath(vnode_t *rootvp, struct vnode *startvp, struct vnode *vp,
29230Sstevel@tonic-gate const char *path, size_t plen)
29240Sstevel@tonic-gate {
29250Sstevel@tonic-gate char *rpath;
29260Sstevel@tonic-gate vnode_t *base;
29270Sstevel@tonic-gate size_t rpathlen, rpathalloc;
29280Sstevel@tonic-gate int doslash = 1;
29290Sstevel@tonic-gate
29300Sstevel@tonic-gate if (*path == '/') {
29310Sstevel@tonic-gate base = rootvp;
29320Sstevel@tonic-gate path++;
29330Sstevel@tonic-gate plen--;
29340Sstevel@tonic-gate } else {
29350Sstevel@tonic-gate base = startvp;
29360Sstevel@tonic-gate }
29370Sstevel@tonic-gate
29380Sstevel@tonic-gate /*
29390Sstevel@tonic-gate * We cannot grab base->v_lock while we hold vp->v_lock because of
29400Sstevel@tonic-gate * the potential for deadlock.
29410Sstevel@tonic-gate */
29420Sstevel@tonic-gate mutex_enter(&base->v_lock);
29430Sstevel@tonic-gate if (base->v_path == NULL) {
29440Sstevel@tonic-gate mutex_exit(&base->v_lock);
29450Sstevel@tonic-gate return;
29460Sstevel@tonic-gate }
29470Sstevel@tonic-gate
29480Sstevel@tonic-gate rpathlen = strlen(base->v_path);
29490Sstevel@tonic-gate rpathalloc = rpathlen + plen + 1;
29500Sstevel@tonic-gate /* Avoid adding a slash if there's already one there */
29510Sstevel@tonic-gate if (base->v_path[rpathlen-1] == '/')
29520Sstevel@tonic-gate doslash = 0;
29530Sstevel@tonic-gate else
29540Sstevel@tonic-gate rpathalloc++;
29550Sstevel@tonic-gate
29560Sstevel@tonic-gate /*
29570Sstevel@tonic-gate * We don't want to call kmem_alloc(KM_SLEEP) with kernel locks held,
29580Sstevel@tonic-gate * so we must do this dance. If, by chance, something changes the path,
29590Sstevel@tonic-gate * just give up since there is no real harm.
29600Sstevel@tonic-gate */
29610Sstevel@tonic-gate mutex_exit(&base->v_lock);
29620Sstevel@tonic-gate
29633855Ssn199410 /* Paths should stay within reason */
29643855Ssn199410 if (rpathalloc > max_vnode_path)
29653855Ssn199410 return;
29663855Ssn199410
29670Sstevel@tonic-gate rpath = kmem_alloc(rpathalloc, KM_SLEEP);
29680Sstevel@tonic-gate
29690Sstevel@tonic-gate mutex_enter(&base->v_lock);
29700Sstevel@tonic-gate if (base->v_path == NULL || strlen(base->v_path) != rpathlen) {
29710Sstevel@tonic-gate mutex_exit(&base->v_lock);
29720Sstevel@tonic-gate kmem_free(rpath, rpathalloc);
29730Sstevel@tonic-gate return;
29740Sstevel@tonic-gate }
29750Sstevel@tonic-gate bcopy(base->v_path, rpath, rpathlen);
29760Sstevel@tonic-gate mutex_exit(&base->v_lock);
29770Sstevel@tonic-gate
29780Sstevel@tonic-gate if (doslash)
29790Sstevel@tonic-gate rpath[rpathlen++] = '/';
29800Sstevel@tonic-gate bcopy(path, rpath + rpathlen, plen);
29810Sstevel@tonic-gate rpath[rpathlen + plen] = '\0';
29820Sstevel@tonic-gate
29830Sstevel@tonic-gate mutex_enter(&vp->v_lock);
29840Sstevel@tonic-gate if (vp->v_path != NULL) {
29850Sstevel@tonic-gate mutex_exit(&vp->v_lock);
29860Sstevel@tonic-gate kmem_free(rpath, rpathalloc);
29870Sstevel@tonic-gate } else {
29880Sstevel@tonic-gate vp->v_path = rpath;
29890Sstevel@tonic-gate mutex_exit(&vp->v_lock);
29900Sstevel@tonic-gate }
29910Sstevel@tonic-gate }
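
/*
 * Illustrative sketch: the canonical use of vn_setpath() is the one in
 * fop_lookup() below - after a successful per-filesystem lookup, record
 * how the vnode was reached if it has no cached path yet:
 *
 *	if ((*vpp)->v_path == NULL)
 *		vn_setpath(rootdir, dvp, *vpp, nm, strlen(nm));
 */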
29920Sstevel@tonic-gate
29930Sstevel@tonic-gate /*
29940Sstevel@tonic-gate * Sets the path to the vnode to be the given string, regardless of current
29950Sstevel@tonic-gate * context. The string must be a complete path from rootdir. This is only used
29960Sstevel@tonic-gate * by fsop_root() for setting the path based on the mountpoint.
29970Sstevel@tonic-gate */
29980Sstevel@tonic-gate void
29990Sstevel@tonic-gate vn_setpath_str(struct vnode *vp, const char *str, size_t len)
30000Sstevel@tonic-gate {
30010Sstevel@tonic-gate char *buf = kmem_alloc(len + 1, KM_SLEEP);
30020Sstevel@tonic-gate
30030Sstevel@tonic-gate mutex_enter(&vp->v_lock);
30040Sstevel@tonic-gate if (vp->v_path != NULL) {
30050Sstevel@tonic-gate mutex_exit(&vp->v_lock);
30060Sstevel@tonic-gate kmem_free(buf, len + 1);
30070Sstevel@tonic-gate return;
30080Sstevel@tonic-gate }
30090Sstevel@tonic-gate
30100Sstevel@tonic-gate vp->v_path = buf;
30110Sstevel@tonic-gate bcopy(str, vp->v_path, len);
30120Sstevel@tonic-gate vp->v_path[len] = '\0';
30130Sstevel@tonic-gate
30140Sstevel@tonic-gate mutex_exit(&vp->v_lock);
30150Sstevel@tonic-gate }
30160Sstevel@tonic-gate
30170Sstevel@tonic-gate /*
30186976Seschrock * Called from within a filesystem's vop_rename() to handle renames once the
30196976Seschrock * target vnode is available.
30206976Seschrock */
30216976Seschrock void
30226976Seschrock vn_renamepath(vnode_t *dvp, vnode_t *vp, const char *nm, size_t len)
30236976Seschrock {
30246976Seschrock char *tmp;
30256976Seschrock
30266976Seschrock mutex_enter(&vp->v_lock);
30276976Seschrock tmp = vp->v_path;
30286976Seschrock vp->v_path = NULL;
30296976Seschrock mutex_exit(&vp->v_lock);
30306976Seschrock vn_setpath(rootdir, dvp, vp, nm, len);
30316976Seschrock if (tmp != NULL)
30326976Seschrock kmem_free(tmp, strlen(tmp) + 1);
30336976Seschrock }
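
/*
 * Illustrative sketch: a filesystem's vop_rename() calls this once the
 * rename has succeeded so the cached path tracks the new name, e.g.
 *
 *	...rename svp from sdvp/snm to tdvp/tnm...
 *	if (error == 0)
 *		vn_renamepath(tdvp, svp, tnm, strlen(tnm));
 */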
30346976Seschrock
30356976Seschrock /*
30360Sstevel@tonic-gate * Similar to vn_setpath_str(), this function sets the path of the destination
30370Sstevel@tonic-gate * vnode to be the same as that of the source vnode.
30380Sstevel@tonic-gate */
30390Sstevel@tonic-gate void
30400Sstevel@tonic-gate vn_copypath(struct vnode *src, struct vnode *dst)
30410Sstevel@tonic-gate {
30420Sstevel@tonic-gate char *buf;
30430Sstevel@tonic-gate int alloc;
30440Sstevel@tonic-gate
30450Sstevel@tonic-gate mutex_enter(&src->v_lock);
30460Sstevel@tonic-gate if (src->v_path == NULL) {
30470Sstevel@tonic-gate mutex_exit(&src->v_lock);
30480Sstevel@tonic-gate return;
30490Sstevel@tonic-gate }
30500Sstevel@tonic-gate alloc = strlen(src->v_path) + 1;
30510Sstevel@tonic-gate
30520Sstevel@tonic-gate /* avoid kmem_alloc() with lock held */
30530Sstevel@tonic-gate mutex_exit(&src->v_lock);
30540Sstevel@tonic-gate buf = kmem_alloc(alloc, KM_SLEEP);
30550Sstevel@tonic-gate mutex_enter(&src->v_lock);
30560Sstevel@tonic-gate if (src->v_path == NULL || strlen(src->v_path) + 1 != alloc) {
30570Sstevel@tonic-gate mutex_exit(&src->v_lock);
30580Sstevel@tonic-gate kmem_free(buf, alloc);
30590Sstevel@tonic-gate return;
30600Sstevel@tonic-gate }
30610Sstevel@tonic-gate bcopy(src->v_path, buf, alloc);
30620Sstevel@tonic-gate mutex_exit(&src->v_lock);
30630Sstevel@tonic-gate
30640Sstevel@tonic-gate mutex_enter(&dst->v_lock);
30650Sstevel@tonic-gate if (dst->v_path != NULL) {
30660Sstevel@tonic-gate mutex_exit(&dst->v_lock);
30670Sstevel@tonic-gate kmem_free(buf, alloc);
30680Sstevel@tonic-gate return;
30690Sstevel@tonic-gate }
30700Sstevel@tonic-gate dst->v_path = buf;
30710Sstevel@tonic-gate mutex_exit(&dst->v_lock);
30720Sstevel@tonic-gate }
30730Sstevel@tonic-gate
30740Sstevel@tonic-gate /*
30750Sstevel@tonic-gate * XXX Private interface for segvn routines that handle vnode
30760Sstevel@tonic-gate * large page segments.
30770Sstevel@tonic-gate *
30780Sstevel@tonic-gate * return 1 if vp's file system VOP_PAGEIO() implementation
30790Sstevel@tonic-gate * can be safely used instead of VOP_GETPAGE() for handling
30800Sstevel@tonic-gate * pagefaults against regular non swap files. VOP_PAGEIO()
30810Sstevel@tonic-gate * interface is considered safe here if its implementation
30820Sstevel@tonic-gate * is very close to VOP_GETPAGE() implementation.
30830Sstevel@tonic-gate * e.g. it zeroes out the part of the page beyond EOF, doesn't
30840Sstevel@tonic-gate * panic if there are file holes but instead returns an error,
30850Sstevel@tonic-gate * and doesn't assume the file won't be changed by user writes, etc.
30860Sstevel@tonic-gate *
30870Sstevel@tonic-gate * return 0 otherwise.
30880Sstevel@tonic-gate *
30890Sstevel@tonic-gate * For now allow segvn to only use VOP_PAGEIO() with ufs and nfs.
30900Sstevel@tonic-gate */
30910Sstevel@tonic-gate int
30920Sstevel@tonic-gate vn_vmpss_usepageio(vnode_t *vp)
30930Sstevel@tonic-gate {
30940Sstevel@tonic-gate vfs_t *vfsp = vp->v_vfsp;
30950Sstevel@tonic-gate char *fsname = vfssw[vfsp->vfs_fstype].vsw_name;
30960Sstevel@tonic-gate char *pageio_ok_fss[] = {"ufs", "nfs", NULL};
30970Sstevel@tonic-gate char **fsok = pageio_ok_fss;
30980Sstevel@tonic-gate
30990Sstevel@tonic-gate if (fsname == NULL) {
31000Sstevel@tonic-gate return (0);
31010Sstevel@tonic-gate }
31020Sstevel@tonic-gate
31030Sstevel@tonic-gate for (; *fsok; fsok++) {
31040Sstevel@tonic-gate if (strcmp(*fsok, fsname) == 0) {
31050Sstevel@tonic-gate return (1);
31060Sstevel@tonic-gate }
31070Sstevel@tonic-gate }
31080Sstevel@tonic-gate return (0);
31090Sstevel@tonic-gate }
31100Sstevel@tonic-gate
31110Sstevel@tonic-gate /* VOP_XXX() macros call the corresponding fop_xxx() function */
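
/*
 * For example, sys/vnode.h defines the dispatch macros along the lines of
 *
 *	#define	VOP_CLOSE(vp, f, c, o, cr, ct) \
 *		fop_close(vp, f, c, o, cr, ct)
 *
 * so every VOP_*() call in the kernel funnels through the fop_*() wrappers
 * below, which layer vopstats accounting and feature checks on top of the
 * filesystem's own vnodeops entry points.
 */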
31120Sstevel@tonic-gate
31130Sstevel@tonic-gate int
31140Sstevel@tonic-gate fop_open(
31150Sstevel@tonic-gate vnode_t **vpp,
31160Sstevel@tonic-gate int mode,
31175331Samw cred_t *cr,
31185331Samw caller_context_t *ct)
31190Sstevel@tonic-gate {
31200Sstevel@tonic-gate int ret;
31210Sstevel@tonic-gate vnode_t *vp = *vpp;
31220Sstevel@tonic-gate
31230Sstevel@tonic-gate VN_HOLD(vp);
31240Sstevel@tonic-gate /*
31250Sstevel@tonic-gate * Adding to the vnode counts before calling open
31260Sstevel@tonic-gate * avoids the need for a mutex. It circumvents a race
31270Sstevel@tonic-gate * condition where a query made on the vnode counts results in a
31280Sstevel@tonic-gate * false negative. The inquirer goes away believing the file is
31290Sstevel@tonic-gate * not open when there is an open on the file already under way.
31300Sstevel@tonic-gate *
31310Sstevel@tonic-gate * The counts are meant to prevent NFS from granting a delegation
31320Sstevel@tonic-gate * when it would be dangerous to do so.
31330Sstevel@tonic-gate *
31340Sstevel@tonic-gate * The vnode counts are only kept on regular files
31350Sstevel@tonic-gate */
31360Sstevel@tonic-gate if ((*vpp)->v_type == VREG) {
31370Sstevel@tonic-gate if (mode & FREAD)
31380Sstevel@tonic-gate atomic_add_32(&((*vpp)->v_rdcnt), 1);
31390Sstevel@tonic-gate if (mode & FWRITE)
31400Sstevel@tonic-gate atomic_add_32(&((*vpp)->v_wrcnt), 1);
31410Sstevel@tonic-gate }
31420Sstevel@tonic-gate
31434321Scasper VOPXID_MAP_CR(vp, cr);
31444321Scasper
31455331Samw ret = (*(*(vpp))->v_op->vop_open)(vpp, mode, cr, ct);
31460Sstevel@tonic-gate
31470Sstevel@tonic-gate if (ret) {
31480Sstevel@tonic-gate /*
31490Sstevel@tonic-gate * Use the saved vp just in case the vnode ptr got trashed
31500Sstevel@tonic-gate * by the error.
31510Sstevel@tonic-gate */
31521738Sbmc VOPSTATS_UPDATE(vp, open);
31530Sstevel@tonic-gate if ((vp->v_type == VREG) && (mode & FREAD))
31540Sstevel@tonic-gate atomic_add_32(&(vp->v_rdcnt), -1);
31550Sstevel@tonic-gate if ((vp->v_type == VREG) && (mode & FWRITE))
31560Sstevel@tonic-gate atomic_add_32(&(vp->v_wrcnt), -1);
31570Sstevel@tonic-gate } else {
31580Sstevel@tonic-gate /*
31590Sstevel@tonic-gate * Some filesystems will return a different vnode,
31600Sstevel@tonic-gate * but the same path was still used to open it.
31610Sstevel@tonic-gate * So if we do change the vnode and need to
31620Sstevel@tonic-gate * copy over the path, do so here, rather than special
31630Sstevel@tonic-gate * casing each filesystem. Adjust the vnode counts to
31640Sstevel@tonic-gate * reflect the vnode switch.
31650Sstevel@tonic-gate */
31661738Sbmc VOPSTATS_UPDATE(*vpp, open);
31670Sstevel@tonic-gate if (*vpp != vp && *vpp != NULL) {
3168254Seschrock vn_copypath(vp, *vpp);
3169254Seschrock if (((*vpp)->v_type == VREG) && (mode & FREAD))
3170254Seschrock atomic_add_32(&((*vpp)->v_rdcnt), 1);
3171254Seschrock if ((vp->v_type == VREG) && (mode & FREAD))
3172254Seschrock atomic_add_32(&(vp->v_rdcnt), -1);
3173254Seschrock if (((*vpp)->v_type == VREG) && (mode & FWRITE))
3174254Seschrock atomic_add_32(&((*vpp)->v_wrcnt), 1);
3175254Seschrock if ((vp->v_type == VREG) && (mode & FWRITE))
3176254Seschrock atomic_add_32(&(vp->v_wrcnt), -1);
31770Sstevel@tonic-gate }
31780Sstevel@tonic-gate }
31790Sstevel@tonic-gate VN_RELE(vp);
31800Sstevel@tonic-gate return (ret);
31810Sstevel@tonic-gate }
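
/*
 * Note: the v_rdcnt/v_wrcnt increments above are undone in fop_close(), so
 * callers must close with the same mode flags they opened with, e.g.
 *
 *	if (VOP_OPEN(&vp, FREAD|FWRITE, cr, ct) == 0) {
 *		...
 *		(void) VOP_CLOSE(vp, FREAD|FWRITE, 1, (offset_t)0, cr, ct);
 *	}
 *
 * fop_close() only drops the counts when count == 1, i.e. on the last
 * close of that open file, matching the single increment done here.
 */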
31820Sstevel@tonic-gate
31830Sstevel@tonic-gate int
31840Sstevel@tonic-gate fop_close(
31850Sstevel@tonic-gate vnode_t *vp,
31860Sstevel@tonic-gate int flag,
31870Sstevel@tonic-gate int count,
31880Sstevel@tonic-gate offset_t offset,
31895331Samw cred_t *cr,
31905331Samw caller_context_t *ct)
31910Sstevel@tonic-gate {
31921488Srsb int err;
31931488Srsb
31944321Scasper VOPXID_MAP_CR(vp, cr);
31954321Scasper
31965331Samw err = (*(vp)->v_op->vop_close)(vp, flag, count, offset, cr, ct);
31971738Sbmc VOPSTATS_UPDATE(vp, close);
31980Sstevel@tonic-gate /*
31990Sstevel@tonic-gate * Check passed in count to handle possible dups. Vnode counts are only
32000Sstevel@tonic-gate * kept on regular files
32010Sstevel@tonic-gate */
32020Sstevel@tonic-gate if ((vp->v_type == VREG) && (count == 1)) {
32030Sstevel@tonic-gate if (flag & FREAD) {
32040Sstevel@tonic-gate ASSERT(vp->v_rdcnt > 0);
32050Sstevel@tonic-gate atomic_add_32(&(vp->v_rdcnt), -1);
32060Sstevel@tonic-gate }
32070Sstevel@tonic-gate if (flag & FWRITE) {
32080Sstevel@tonic-gate ASSERT(vp->v_wrcnt > 0);
32090Sstevel@tonic-gate atomic_add_32(&(vp->v_wrcnt), -1);
32100Sstevel@tonic-gate }
32110Sstevel@tonic-gate }
32121488Srsb return (err);
32130Sstevel@tonic-gate }
32140Sstevel@tonic-gate
32150Sstevel@tonic-gate int
32160Sstevel@tonic-gate fop_read(
32170Sstevel@tonic-gate vnode_t *vp,
32180Sstevel@tonic-gate uio_t *uiop,
32190Sstevel@tonic-gate int ioflag,
32200Sstevel@tonic-gate cred_t *cr,
32215331Samw caller_context_t *ct)
32220Sstevel@tonic-gate {
32231488Srsb int err;
32241488Srsb ssize_t resid_start = uiop->uio_resid;
32251488Srsb
32264321Scasper VOPXID_MAP_CR(vp, cr);
32274321Scasper
32281488Srsb err = (*(vp)->v_op->vop_read)(vp, uiop, ioflag, cr, ct);
32291738Sbmc VOPSTATS_UPDATE_IO(vp, read,
32301488Srsb read_bytes, (resid_start - uiop->uio_resid));
32311488Srsb return (err);
32320Sstevel@tonic-gate }
32330Sstevel@tonic-gate
32340Sstevel@tonic-gate int
32350Sstevel@tonic-gate fop_write(
32360Sstevel@tonic-gate vnode_t *vp,
32370Sstevel@tonic-gate uio_t *uiop,
32380Sstevel@tonic-gate int ioflag,
32390Sstevel@tonic-gate cred_t *cr,
32405331Samw caller_context_t *ct)
32410Sstevel@tonic-gate {
32421488Srsb int err;
32431488Srsb ssize_t resid_start = uiop->uio_resid;
32441488Srsb
32454321Scasper VOPXID_MAP_CR(vp, cr);
32464321Scasper
32471488Srsb err = (*(vp)->v_op->vop_write)(vp, uiop, ioflag, cr, ct);
32481738Sbmc VOPSTATS_UPDATE_IO(vp, write,
32491488Srsb write_bytes, (resid_start - uiop->uio_resid));
32501488Srsb return (err);
32510Sstevel@tonic-gate }
32520Sstevel@tonic-gate
32530Sstevel@tonic-gate int
32540Sstevel@tonic-gate fop_ioctl(
32550Sstevel@tonic-gate vnode_t *vp,
32560Sstevel@tonic-gate int cmd,
32570Sstevel@tonic-gate intptr_t arg,
32580Sstevel@tonic-gate int flag,
32590Sstevel@tonic-gate cred_t *cr,
32605331Samw int *rvalp,
32615331Samw caller_context_t *ct)
32620Sstevel@tonic-gate {
32631488Srsb int err;
32641488Srsb
32654321Scasper VOPXID_MAP_CR(vp, cr);
32664321Scasper
32675331Samw err = (*(vp)->v_op->vop_ioctl)(vp, cmd, arg, flag, cr, rvalp, ct);
32681738Sbmc VOPSTATS_UPDATE(vp, ioctl);
32691488Srsb return (err);
32700Sstevel@tonic-gate }
32710Sstevel@tonic-gate
32720Sstevel@tonic-gate int
32730Sstevel@tonic-gate fop_setfl(
32740Sstevel@tonic-gate vnode_t *vp,
32750Sstevel@tonic-gate int oflags,
32760Sstevel@tonic-gate int nflags,
32775331Samw cred_t *cr,
32785331Samw caller_context_t *ct)
32790Sstevel@tonic-gate {
32801488Srsb int err;
32811488Srsb
32824321Scasper VOPXID_MAP_CR(vp, cr);
32834321Scasper
32845331Samw err = (*(vp)->v_op->vop_setfl)(vp, oflags, nflags, cr, ct);
32851738Sbmc VOPSTATS_UPDATE(vp, setfl);
32861488Srsb return (err);
32870Sstevel@tonic-gate }
32880Sstevel@tonic-gate
32890Sstevel@tonic-gate int
32900Sstevel@tonic-gate fop_getattr(
32910Sstevel@tonic-gate vnode_t *vp,
32920Sstevel@tonic-gate vattr_t *vap,
32930Sstevel@tonic-gate int flags,
32945331Samw cred_t *cr,
32955331Samw caller_context_t *ct)
32960Sstevel@tonic-gate {
32971488Srsb int err;
32981488Srsb
32994321Scasper VOPXID_MAP_CR(vp, cr);
33004321Scasper
33015331Samw /*
33025331Samw * If this file system doesn't understand the xvattr extensions
33035331Samw * then turn off the xvattr bit.
33045331Samw */
33055331Samw if (vfs_has_feature(vp->v_vfsp, VFSFT_XVATTR) == 0) {
33065331Samw vap->va_mask &= ~AT_XVATTR;
33075331Samw }
33085331Samw
33095331Samw /*
33105331Samw * We're only allowed to skip the ACL check iff we used a 32 bit
33115331Samw * ACE mask with VOP_ACCESS() to determine permissions.
33125331Samw */
33135331Samw if ((flags & ATTR_NOACLCHECK) &&
33145331Samw vfs_has_feature(vp->v_vfsp, VFSFT_ACEMASKONACCESS) == 0) {
33155331Samw return (EINVAL);
33165331Samw }
33175331Samw err = (*(vp)->v_op->vop_getattr)(vp, vap, flags, cr, ct);
33181738Sbmc VOPSTATS_UPDATE(vp, getattr);
33191488Srsb return (err);
33200Sstevel@tonic-gate }
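
/*
 * Illustrative sketch: callers ask for extended system attributes through
 * an xvattr_t; the AT_XVATTR check above quietly strips the request when
 * the filesystem lacks VFSFT_XVATTR:
 *
 *	xvattr_t xva;
 *
 *	xva_init(&xva);
 *	XVA_SET_REQ(&xva, XAT_READONLY);
 *	if (VOP_GETATTR(vp, &xva.xva_vattr, 0, cr, ct) == 0 &&
 *	    XVA_ISSET_RTN(&xva, XAT_READONLY))
 *		...xva.xva_xoptattrs.xoa_readonly is valid...
 */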
33210Sstevel@tonic-gate
33220Sstevel@tonic-gate int
33230Sstevel@tonic-gate fop_setattr(
33240Sstevel@tonic-gate vnode_t *vp,
33250Sstevel@tonic-gate vattr_t *vap,
33260Sstevel@tonic-gate int flags,
33270Sstevel@tonic-gate cred_t *cr,
33280Sstevel@tonic-gate caller_context_t *ct)
33290Sstevel@tonic-gate {
33301488Srsb int err;
33311488Srsb
33324321Scasper VOPXID_MAP_CR(vp, cr);
33334321Scasper
33345331Samw /*
33355331Samw * If this file system doesn't understand the xvattr extensions
33365331Samw * then turn off the xvattr bit.
33375331Samw */
33385331Samw if (vfs_has_feature(vp->v_vfsp, VFSFT_XVATTR) == 0) {
33395331Samw vap->va_mask &= ~AT_XVATTR;
33405331Samw }
33415331Samw
33425331Samw /*
33435331Samw * We're only allowed to skip the ACL check iff we used a 32 bit
33445331Samw * ACE mask with VOP_ACCESS() to determine permissions.
33455331Samw */
33465331Samw if ((flags & ATTR_NOACLCHECK) &&
33475331Samw vfs_has_feature(vp->v_vfsp, VFSFT_ACEMASKONACCESS) == 0) {
33485331Samw return (EINVAL);
33495331Samw }
33501488Srsb err = (*(vp)->v_op->vop_setattr)(vp, vap, flags, cr, ct);
33511738Sbmc VOPSTATS_UPDATE(vp, setattr);
33521488Srsb return (err);
33530Sstevel@tonic-gate }
33540Sstevel@tonic-gate
33550Sstevel@tonic-gate int
33560Sstevel@tonic-gate fop_access(
33570Sstevel@tonic-gate vnode_t *vp,
33580Sstevel@tonic-gate int mode,
33590Sstevel@tonic-gate int flags,
33605331Samw cred_t *cr,
33615331Samw caller_context_t *ct)
33620Sstevel@tonic-gate {
33631488Srsb int err;
33641488Srsb
33655331Samw if ((flags & V_ACE_MASK) &&
33665331Samw vfs_has_feature(vp->v_vfsp, VFSFT_ACEMASKONACCESS) == 0) {
33675331Samw return (EINVAL);
33685331Samw }
33695331Samw
33704321Scasper VOPXID_MAP_CR(vp, cr);
33714321Scasper
33725331Samw err = (*(vp)->v_op->vop_access)(vp, mode, flags, cr, ct);
33731738Sbmc VOPSTATS_UPDATE(vp, access);
33741488Srsb return (err);
33750Sstevel@tonic-gate }
33760Sstevel@tonic-gate
33770Sstevel@tonic-gate int
33780Sstevel@tonic-gate fop_lookup(
33790Sstevel@tonic-gate vnode_t *dvp,
33800Sstevel@tonic-gate char *nm,
33810Sstevel@tonic-gate vnode_t **vpp,
33820Sstevel@tonic-gate pathname_t *pnp,
33830Sstevel@tonic-gate int flags,
33840Sstevel@tonic-gate vnode_t *rdir,
33855331Samw cred_t *cr,
33865331Samw caller_context_t *ct,
33875331Samw int *deflags, /* Returned per-dirent flags */
33885331Samw pathname_t *ppnp) /* Returned case-preserved name in directory */
33890Sstevel@tonic-gate {
3390254Seschrock int ret;
3391254Seschrock
33925331Samw /*
33935331Samw * If this file system doesn't support case-insensitive access
33945331Samw * and said access is requested, fail quickly. It is required
33955331Samw * that if the vfs supports case-insensitive lookup, it also
33965331Samw * supports extended dirent flags.
33975331Samw */
33985331Samw if (flags & FIGNORECASE &&
33995331Samw (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
34005331Samw vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
34015331Samw return (EINVAL);
34025331Samw
34034321Scasper VOPXID_MAP_CR(dvp, cr);
34044321Scasper
34055331Samw if ((flags & LOOKUP_XATTR) && (flags & LOOKUP_HAVE_SYSATTR_DIR) == 0) {
34065331Samw ret = xattr_dir_lookup(dvp, vpp, flags, cr);
34075331Samw } else {
34085331Samw ret = (*(dvp)->v_op->vop_lookup)
34095331Samw (dvp, nm, vpp, pnp, flags, rdir, cr, ct, deflags, ppnp);
34105331Samw }
34111488Srsb if (ret == 0 && *vpp) {
34121738Sbmc VOPSTATS_UPDATE(*vpp, lookup);
34131488Srsb if ((*vpp)->v_path == NULL) {
34141488Srsb vn_setpath(rootdir, dvp, *vpp, nm, strlen(nm));
34151488Srsb }
34161488Srsb }
3417254Seschrock
3418254Seschrock return (ret);
34190Sstevel@tonic-gate }
34200Sstevel@tonic-gate
34210Sstevel@tonic-gate int
34220Sstevel@tonic-gate fop_create(
34230Sstevel@tonic-gate vnode_t *dvp,
34240Sstevel@tonic-gate char *name,
34250Sstevel@tonic-gate vattr_t *vap,
34260Sstevel@tonic-gate vcexcl_t excl,
34270Sstevel@tonic-gate int mode,
34280Sstevel@tonic-gate vnode_t **vpp,
34290Sstevel@tonic-gate cred_t *cr,
34305331Samw int flags,
34315331Samw caller_context_t *ct,
34325331Samw vsecattr_t *vsecp) /* ACL to set during create */
34330Sstevel@tonic-gate {
34340Sstevel@tonic-gate int ret;
34350Sstevel@tonic-gate
34365331Samw if (vsecp != NULL &&
34375331Samw vfs_has_feature(dvp->v_vfsp, VFSFT_ACLONCREATE) == 0) {
34385331Samw return (EINVAL);
34395331Samw }
34405331Samw /*
34415331Samw * If this file system doesn't support case-insensitive access
34425331Samw * and said access is requested, fail quickly.
34435331Samw */
34445331Samw if (flags & FIGNORECASE &&
34455331Samw (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
34465331Samw vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
34475331Samw return (EINVAL);
34485331Samw
34494321Scasper VOPXID_MAP_CR(dvp, cr);
34504321Scasper
34510Sstevel@tonic-gate ret = (*(dvp)->v_op->vop_create)
34525331Samw (dvp, name, vap, excl, mode, vpp, cr, flags, ct, vsecp);
34531488Srsb if (ret == 0 && *vpp) {
34541738Sbmc VOPSTATS_UPDATE(*vpp, create);
34551488Srsb if ((*vpp)->v_path == NULL) {
34561488Srsb vn_setpath(rootdir, dvp, *vpp, name, strlen(name));
34571488Srsb }
34581488Srsb }
34590Sstevel@tonic-gate
34600Sstevel@tonic-gate return (ret);
34610Sstevel@tonic-gate }
34620Sstevel@tonic-gate
34630Sstevel@tonic-gate int
34640Sstevel@tonic-gate fop_remove(
34650Sstevel@tonic-gate vnode_t *dvp,
34660Sstevel@tonic-gate char *nm,
34675331Samw cred_t *cr,
34685331Samw caller_context_t *ct,
34695331Samw int flags)
34700Sstevel@tonic-gate {
34711488Srsb int err;
34721488Srsb
34735331Samw /*
34745331Samw * If this file system doesn't support case-insensitive access
34755331Samw * and said access is requested, fail quickly.
34765331Samw */
34775331Samw if (flags & FIGNORECASE &&
34785331Samw (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
34795331Samw vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
34805331Samw return (EINVAL);
34815331Samw
34824321Scasper VOPXID_MAP_CR(dvp, cr);
34834321Scasper
34845331Samw err = (*(dvp)->v_op->vop_remove)(dvp, nm, cr, ct, flags);
34851738Sbmc VOPSTATS_UPDATE(dvp, remove);
34861488Srsb return (err);
34870Sstevel@tonic-gate }
34880Sstevel@tonic-gate
34890Sstevel@tonic-gate int
34900Sstevel@tonic-gate fop_link(
34910Sstevel@tonic-gate vnode_t *tdvp,
34920Sstevel@tonic-gate vnode_t *svp,
34930Sstevel@tonic-gate char *tnm,
34945331Samw cred_t *cr,
34955331Samw caller_context_t *ct,
34965331Samw int flags)
34970Sstevel@tonic-gate {
34981488Srsb int err;
34991488Srsb
35005331Samw /*
35015331Samw * If the target file system doesn't support case-insensitive access
35025331Samw * and said access is requested, fail quickly.
35035331Samw */
35045331Samw if (flags & FIGNORECASE &&
35055331Samw (vfs_has_feature(tdvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
35065331Samw vfs_has_feature(tdvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
35075331Samw return (EINVAL);
35085331Samw
35094321Scasper VOPXID_MAP_CR(tdvp, cr);
35104321Scasper
35115331Samw err = (*(tdvp)->v_op->vop_link)(tdvp, svp, tnm, cr, ct, flags);
35121738Sbmc VOPSTATS_UPDATE(tdvp, link);
35131488Srsb return (err);
35140Sstevel@tonic-gate }
35150Sstevel@tonic-gate
35160Sstevel@tonic-gate int
35170Sstevel@tonic-gate fop_rename(
35180Sstevel@tonic-gate vnode_t *sdvp,
35190Sstevel@tonic-gate char *snm,
35200Sstevel@tonic-gate vnode_t *tdvp,
35210Sstevel@tonic-gate char *tnm,
35225331Samw cred_t *cr,
35235331Samw caller_context_t *ct,
35245331Samw int flags)
35250Sstevel@tonic-gate {
35261488Srsb int err;
35271488Srsb
35285331Samw /*
35295331Samw * If the file system involved does not support
35305331Samw * case-insensitive access and said access is requested, fail
35315331Samw * quickly.
35325331Samw */
35335331Samw if (flags & FIGNORECASE &&
35345331Samw ((vfs_has_feature(sdvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
35355331Samw vfs_has_feature(sdvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0)))
35365331Samw return (EINVAL);
35375331Samw
35384321Scasper VOPXID_MAP_CR(tdvp, cr);
35394321Scasper
35405331Samw err = (*(sdvp)->v_op->vop_rename)(sdvp, snm, tdvp, tnm, cr, ct, flags);
35411738Sbmc VOPSTATS_UPDATE(sdvp, rename);
35421488Srsb return (err);
35430Sstevel@tonic-gate }
35440Sstevel@tonic-gate
35450Sstevel@tonic-gate int
35460Sstevel@tonic-gate fop_mkdir(
35470Sstevel@tonic-gate vnode_t *dvp,
35480Sstevel@tonic-gate char *dirname,
35490Sstevel@tonic-gate vattr_t *vap,
35500Sstevel@tonic-gate vnode_t **vpp,
35515331Samw cred_t *cr,
35525331Samw caller_context_t *ct,
35535331Samw int flags,
35545331Samw vsecattr_t *vsecp) /* ACL to set during create */
35550Sstevel@tonic-gate {
35560Sstevel@tonic-gate int ret;
35570Sstevel@tonic-gate
35585331Samw if (vsecp != NULL &&
35595331Samw vfs_has_feature(dvp->v_vfsp, VFSFT_ACLONCREATE) == 0) {
35605331Samw return (EINVAL);
35615331Samw }
35625331Samw /*
35635331Samw * If this file system doesn't support case-insensitive access
35645331Samw * and said access is requested, fail quickly.
35655331Samw */
35665331Samw if (flags & FIGNORECASE &&
35675331Samw (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
35685331Samw vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
35695331Samw return (EINVAL);
35705331Samw
35714321Scasper VOPXID_MAP_CR(dvp, cr);
35724321Scasper
35735331Samw ret = (*(dvp)->v_op->vop_mkdir)
35745331Samw (dvp, dirname, vap, vpp, cr, ct, flags, vsecp);
35751488Srsb if (ret == 0 && *vpp) {
35761738Sbmc VOPSTATS_UPDATE(*vpp, mkdir);
35771488Srsb if ((*vpp)->v_path == NULL) {
35781488Srsb vn_setpath(rootdir, dvp, *vpp, dirname,
35791488Srsb strlen(dirname));
35801488Srsb }
35811488Srsb }
35820Sstevel@tonic-gate
35830Sstevel@tonic-gate return (ret);
35840Sstevel@tonic-gate }
35850Sstevel@tonic-gate
35860Sstevel@tonic-gate int
35870Sstevel@tonic-gate fop_rmdir(
35880Sstevel@tonic-gate vnode_t *dvp,
35890Sstevel@tonic-gate char *nm,
35900Sstevel@tonic-gate vnode_t *cdir,
35915331Samw cred_t *cr,
35925331Samw caller_context_t *ct,
35935331Samw int flags)
35940Sstevel@tonic-gate {
35951488Srsb int err;
35961488Srsb
35975331Samw /*
35985331Samw * If this file system doesn't support case-insensitive access
35995331Samw * and said access is requested, fail quickly.
36005331Samw */
36015331Samw if (flags & FIGNORECASE &&
36025331Samw (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
36035331Samw vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
36045331Samw return (EINVAL);
36055331Samw
36064321Scasper VOPXID_MAP_CR(dvp, cr);
36074321Scasper
36085331Samw err = (*(dvp)->v_op->vop_rmdir)(dvp, nm, cdir, cr, ct, flags);
36091738Sbmc VOPSTATS_UPDATE(dvp, rmdir);
36101488Srsb return (err);
36110Sstevel@tonic-gate }
36120Sstevel@tonic-gate
36130Sstevel@tonic-gate int
36140Sstevel@tonic-gate fop_readdir(
36150Sstevel@tonic-gate vnode_t *vp,
36160Sstevel@tonic-gate uio_t *uiop,
36170Sstevel@tonic-gate cred_t *cr,
36185331Samw int *eofp,
36195331Samw caller_context_t *ct,
36205331Samw int flags)
36210Sstevel@tonic-gate {
36221488Srsb int err;
36231488Srsb ssize_t resid_start = uiop->uio_resid;
36241488Srsb
36255331Samw /*
36265331Samw * If this file system doesn't support retrieving directory
36275331Samw * entry flags and said access is requested, fail quickly.
36285331Samw */
36295331Samw if (flags & V_RDDIR_ENTFLAGS &&
36305331Samw vfs_has_feature(vp->v_vfsp, VFSFT_DIRENTFLAGS) == 0)
36315331Samw return (EINVAL);
36325331Samw
36334321Scasper VOPXID_MAP_CR(vp, cr);
36344321Scasper
36355331Samw err = (*(vp)->v_op->vop_readdir)(vp, uiop, cr, eofp, ct, flags);
36361738Sbmc VOPSTATS_UPDATE_IO(vp, readdir,
36371488Srsb readdir_bytes, (resid_start - uiop->uio_resid));
36381488Srsb return (err);
36390Sstevel@tonic-gate }
36400Sstevel@tonic-gate
36410Sstevel@tonic-gate int
36420Sstevel@tonic-gate fop_symlink(
36430Sstevel@tonic-gate vnode_t *dvp,
36440Sstevel@tonic-gate char *linkname,
36450Sstevel@tonic-gate vattr_t *vap,
36460Sstevel@tonic-gate char *target,
36475331Samw cred_t *cr,
36485331Samw caller_context_t *ct,
36495331Samw int flags)
36500Sstevel@tonic-gate {
36511488Srsb int err;
365210793Sdai.ngo@sun.com xvattr_t xvattr;
36531488Srsb
36545331Samw /*
36555331Samw * If this file system doesn't support case-insensitive access
36565331Samw * and said access is requested, fail quickly.
36575331Samw */
36585331Samw if (flags & FIGNORECASE &&
36595331Samw (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
36605331Samw vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
36615331Samw return (EINVAL);
36625331Samw
36634321Scasper VOPXID_MAP_CR(dvp, cr);
36644321Scasper
366510793Sdai.ngo@sun.com /* check for reparse point */
366610793Sdai.ngo@sun.com if ((vfs_has_feature(dvp->v_vfsp, VFSFT_REPARSE)) &&
366710793Sdai.ngo@sun.com (strncmp(target, FS_REPARSE_TAG_STR,
366810793Sdai.ngo@sun.com strlen(FS_REPARSE_TAG_STR)) == 0)) {
366910793Sdai.ngo@sun.com if (!fs_reparse_mark(target, vap, &xvattr))
367010793Sdai.ngo@sun.com vap = (vattr_t *)&xvattr;
367110793Sdai.ngo@sun.com }
367210793Sdai.ngo@sun.com
36735331Samw err = (*(dvp)->v_op->vop_symlink)
36745331Samw (dvp, linkname, vap, target, cr, ct, flags);
36751738Sbmc VOPSTATS_UPDATE(dvp, symlink);
36761488Srsb return (err);
36770Sstevel@tonic-gate }
36780Sstevel@tonic-gate
36790Sstevel@tonic-gate int
36800Sstevel@tonic-gate fop_readlink(
36810Sstevel@tonic-gate vnode_t *vp,
36820Sstevel@tonic-gate uio_t *uiop,
36835331Samw cred_t *cr,
36845331Samw caller_context_t *ct)
36850Sstevel@tonic-gate {
36861488Srsb int err;
36871488Srsb
36884321Scasper VOPXID_MAP_CR(vp, cr);
36894321Scasper
36905331Samw err = (*(vp)->v_op->vop_readlink)(vp, uiop, cr, ct);
36911738Sbmc VOPSTATS_UPDATE(vp, readlink);
36921488Srsb return (err);
36930Sstevel@tonic-gate }
36940Sstevel@tonic-gate
36950Sstevel@tonic-gate int
36960Sstevel@tonic-gate fop_fsync(
36970Sstevel@tonic-gate vnode_t *vp,
36980Sstevel@tonic-gate int syncflag,
36995331Samw cred_t *cr,
37005331Samw caller_context_t *ct)
37010Sstevel@tonic-gate {
37021488Srsb int err;
37031488Srsb
37044321Scasper VOPXID_MAP_CR(vp, cr);
37054321Scasper
37065331Samw err = (*(vp)->v_op->vop_fsync)(vp, syncflag, cr, ct);
37071738Sbmc VOPSTATS_UPDATE(vp, fsync);
37081488Srsb return (err);
37090Sstevel@tonic-gate }
37100Sstevel@tonic-gate
37110Sstevel@tonic-gate void
37120Sstevel@tonic-gate fop_inactive(
37130Sstevel@tonic-gate vnode_t *vp,
37145331Samw cred_t *cr,
37155331Samw caller_context_t *ct)
37160Sstevel@tonic-gate {
37171488Srsb /* Need to update stats before vop call since we may lose the vnode */
37181738Sbmc VOPSTATS_UPDATE(vp, inactive);
37194321Scasper
37204321Scasper VOPXID_MAP_CR(vp, cr);
37214321Scasper
37225331Samw (*(vp)->v_op->vop_inactive)(vp, cr, ct);
37230Sstevel@tonic-gate }
37240Sstevel@tonic-gate
37250Sstevel@tonic-gate int
37260Sstevel@tonic-gate fop_fid(
37270Sstevel@tonic-gate vnode_t *vp,
37285331Samw fid_t *fidp,
37295331Samw caller_context_t *ct)
37300Sstevel@tonic-gate {
37311488Srsb int err;
37321488Srsb
37335331Samw err = (*(vp)->v_op->vop_fid)(vp, fidp, ct);
37341738Sbmc VOPSTATS_UPDATE(vp, fid);
37351488Srsb return (err);
37360Sstevel@tonic-gate }
37370Sstevel@tonic-gate
37380Sstevel@tonic-gate int
37390Sstevel@tonic-gate fop_rwlock(
37400Sstevel@tonic-gate vnode_t *vp,
37410Sstevel@tonic-gate int write_lock,
37420Sstevel@tonic-gate caller_context_t *ct)
37430Sstevel@tonic-gate {
37441488Srsb int ret;
37451488Srsb
37461488Srsb ret = ((*(vp)->v_op->vop_rwlock)(vp, write_lock, ct));
37471738Sbmc VOPSTATS_UPDATE(vp, rwlock);
37481488Srsb return (ret);
37490Sstevel@tonic-gate }
37500Sstevel@tonic-gate
37510Sstevel@tonic-gate void
37520Sstevel@tonic-gate fop_rwunlock(
37530Sstevel@tonic-gate vnode_t *vp,
37540Sstevel@tonic-gate int write_lock,
37550Sstevel@tonic-gate caller_context_t *ct)
37560Sstevel@tonic-gate {
37570Sstevel@tonic-gate (*(vp)->v_op->vop_rwunlock)(vp, write_lock, ct);
37581738Sbmc VOPSTATS_UPDATE(vp, rwunlock);
37590Sstevel@tonic-gate }
37600Sstevel@tonic-gate
37610Sstevel@tonic-gate int
37620Sstevel@tonic-gate fop_seek(
37630Sstevel@tonic-gate vnode_t *vp,
37640Sstevel@tonic-gate offset_t ooff,
37655331Samw offset_t *noffp,
37665331Samw caller_context_t *ct)
37670Sstevel@tonic-gate {
37681488Srsb int err;
37691488Srsb
37705331Samw err = (*(vp)->v_op->vop_seek)(vp, ooff, noffp, ct);
37711738Sbmc VOPSTATS_UPDATE(vp, seek);
37721488Srsb return (err);
37730Sstevel@tonic-gate }
37740Sstevel@tonic-gate
37750Sstevel@tonic-gate int
37760Sstevel@tonic-gate fop_cmp(
37770Sstevel@tonic-gate vnode_t *vp1,
37785331Samw vnode_t *vp2,
37795331Samw caller_context_t *ct)
37800Sstevel@tonic-gate {
37811488Srsb int err;
37821488Srsb
37835331Samw err = (*(vp1)->v_op->vop_cmp)(vp1, vp2, ct);
37841738Sbmc VOPSTATS_UPDATE(vp1, cmp);
37851488Srsb return (err);
37860Sstevel@tonic-gate }
37870Sstevel@tonic-gate
37880Sstevel@tonic-gate int
37890Sstevel@tonic-gate fop_frlock(
37900Sstevel@tonic-gate vnode_t *vp,
37910Sstevel@tonic-gate int cmd,
37920Sstevel@tonic-gate flock64_t *bfp,
37930Sstevel@tonic-gate int flag,
37940Sstevel@tonic-gate offset_t offset,
37950Sstevel@tonic-gate struct flk_callback *flk_cbp,
37965331Samw cred_t *cr,
37975331Samw caller_context_t *ct)
37980Sstevel@tonic-gate {
37991488Srsb int err;
38001488Srsb
38014321Scasper VOPXID_MAP_CR(vp, cr);
38024321Scasper
38031488Srsb err = (*(vp)->v_op->vop_frlock)
38045331Samw (vp, cmd, bfp, flag, offset, flk_cbp, cr, ct);
38051738Sbmc VOPSTATS_UPDATE(vp, frlock);
38061488Srsb return (err);
38070Sstevel@tonic-gate }
38080Sstevel@tonic-gate
38090Sstevel@tonic-gate int
38100Sstevel@tonic-gate fop_space(
38110Sstevel@tonic-gate vnode_t *vp,
38120Sstevel@tonic-gate int cmd,
38130Sstevel@tonic-gate flock64_t *bfp,
38140Sstevel@tonic-gate int flag,
38150Sstevel@tonic-gate offset_t offset,
38160Sstevel@tonic-gate cred_t *cr,
38170Sstevel@tonic-gate caller_context_t *ct)
38180Sstevel@tonic-gate {
38191488Srsb int err;
38201488Srsb
38214321Scasper VOPXID_MAP_CR(vp, cr);
38224321Scasper
38231488Srsb err = (*(vp)->v_op->vop_space)(vp, cmd, bfp, flag, offset, cr, ct);
38241738Sbmc VOPSTATS_UPDATE(vp, space);
38251488Srsb return (err);
38260Sstevel@tonic-gate }
38270Sstevel@tonic-gate
38280Sstevel@tonic-gate int
38290Sstevel@tonic-gate fop_realvp(
38300Sstevel@tonic-gate vnode_t *vp,
38315331Samw vnode_t **vpp,
38325331Samw caller_context_t *ct)
38330Sstevel@tonic-gate {
38341488Srsb int err;
38351488Srsb
38365331Samw err = (*(vp)->v_op->vop_realvp)(vp, vpp, ct);
38371738Sbmc VOPSTATS_UPDATE(vp, realvp);
38381488Srsb return (err);
38390Sstevel@tonic-gate }
38400Sstevel@tonic-gate
38410Sstevel@tonic-gate int
38420Sstevel@tonic-gate fop_getpage(
38430Sstevel@tonic-gate vnode_t *vp,
38440Sstevel@tonic-gate offset_t off,
38450Sstevel@tonic-gate size_t len,
38460Sstevel@tonic-gate uint_t *protp,
38470Sstevel@tonic-gate page_t **plarr,
38480Sstevel@tonic-gate size_t plsz,
38490Sstevel@tonic-gate struct seg *seg,
38500Sstevel@tonic-gate caddr_t addr,
38510Sstevel@tonic-gate enum seg_rw rw,
38525331Samw cred_t *cr,
38535331Samw caller_context_t *ct)
38540Sstevel@tonic-gate {
38551488Srsb int err;
38561488Srsb
38574321Scasper VOPXID_MAP_CR(vp, cr);
38584321Scasper
38591488Srsb err = (*(vp)->v_op->vop_getpage)
38605331Samw (vp, off, len, protp, plarr, plsz, seg, addr, rw, cr, ct);
38611738Sbmc VOPSTATS_UPDATE(vp, getpage);
38621488Srsb return (err);
38630Sstevel@tonic-gate }
38640Sstevel@tonic-gate
38650Sstevel@tonic-gate int
38660Sstevel@tonic-gate fop_putpage(
38670Sstevel@tonic-gate vnode_t *vp,
38680Sstevel@tonic-gate offset_t off,
38690Sstevel@tonic-gate size_t len,
38700Sstevel@tonic-gate int flags,
38715331Samw cred_t *cr,
38725331Samw caller_context_t *ct)
38730Sstevel@tonic-gate {
38741488Srsb int err;
38751488Srsb
38764321Scasper VOPXID_MAP_CR(vp, cr);
38774321Scasper
38785331Samw err = (*(vp)->v_op->vop_putpage)(vp, off, len, flags, cr, ct);
38791738Sbmc VOPSTATS_UPDATE(vp, putpage);
38801488Srsb return (err);
38810Sstevel@tonic-gate }
38820Sstevel@tonic-gate
38830Sstevel@tonic-gate int
38840Sstevel@tonic-gate fop_map(
38850Sstevel@tonic-gate vnode_t *vp,
38860Sstevel@tonic-gate offset_t off,
38870Sstevel@tonic-gate struct as *as,
38880Sstevel@tonic-gate caddr_t *addrp,
38890Sstevel@tonic-gate size_t len,
38900Sstevel@tonic-gate uchar_t prot,
38910Sstevel@tonic-gate uchar_t maxprot,
38920Sstevel@tonic-gate uint_t flags,
38935331Samw cred_t *cr,
38945331Samw caller_context_t *ct)
38950Sstevel@tonic-gate {
38961488Srsb int err;
38971488Srsb
38984321Scasper VOPXID_MAP_CR(vp, cr);
38994321Scasper
39001488Srsb err = (*(vp)->v_op->vop_map)
39015331Samw (vp, off, as, addrp, len, prot, maxprot, flags, cr, ct);
39021738Sbmc VOPSTATS_UPDATE(vp, map);
39031488Srsb return (err);
39040Sstevel@tonic-gate }
39050Sstevel@tonic-gate
39060Sstevel@tonic-gate int
39070Sstevel@tonic-gate fop_addmap(
39080Sstevel@tonic-gate vnode_t *vp,
39090Sstevel@tonic-gate offset_t off,
39100Sstevel@tonic-gate struct as *as,
39110Sstevel@tonic-gate caddr_t addr,
39120Sstevel@tonic-gate size_t len,
39130Sstevel@tonic-gate uchar_t prot,
39140Sstevel@tonic-gate uchar_t maxprot,
39150Sstevel@tonic-gate uint_t flags,
39165331Samw cred_t *cr,
39175331Samw caller_context_t *ct)
39180Sstevel@tonic-gate {
39190Sstevel@tonic-gate int error;
39200Sstevel@tonic-gate u_longlong_t delta;
39210Sstevel@tonic-gate
39224321Scasper VOPXID_MAP_CR(vp, cr);
39234321Scasper
39240Sstevel@tonic-gate error = (*(vp)->v_op->vop_addmap)
39255331Samw (vp, off, as, addr, len, prot, maxprot, flags, cr, ct);
39260Sstevel@tonic-gate
39270Sstevel@tonic-gate if ((!error) && (vp->v_type == VREG)) {
39280Sstevel@tonic-gate delta = (u_longlong_t)btopr(len);
39290Sstevel@tonic-gate /*
39300Sstevel@tonic-gate * If the mapping is MAP_PRIVATE, the pages can't be written
39310Sstevel@tonic-gate * back even if the file is open for write. Handle it as read.
39320Sstevel@tonic-gate */
39330Sstevel@tonic-gate if (flags & MAP_PRIVATE) {
39340Sstevel@tonic-gate atomic_add_64((uint64_t *)(&(vp->v_mmap_read)),
39354956Spf199842 (int64_t)delta);
39360Sstevel@tonic-gate } else {
39370Sstevel@tonic-gate /*
39380Sstevel@tonic-gate * atomic_add_64 forces the fetch of a 64 bit value to
39390Sstevel@tonic-gate * be atomic on 32 bit machines
39400Sstevel@tonic-gate */
39410Sstevel@tonic-gate if (maxprot & PROT_WRITE)
39420Sstevel@tonic-gate atomic_add_64((uint64_t *)(&(vp->v_mmap_write)),
39434956Spf199842 (int64_t)delta);
39440Sstevel@tonic-gate if (maxprot & PROT_READ)
39450Sstevel@tonic-gate atomic_add_64((uint64_t *)(&(vp->v_mmap_read)),
39464956Spf199842 (int64_t)delta);
39470Sstevel@tonic-gate if (maxprot & PROT_EXEC)
39480Sstevel@tonic-gate atomic_add_64((uint64_t *)(&(vp->v_mmap_read)),
39494956Spf199842 (int64_t)delta);
39500Sstevel@tonic-gate }
39510Sstevel@tonic-gate }
39521738Sbmc VOPSTATS_UPDATE(vp, addmap);
39530Sstevel@tonic-gate return (error);
39540Sstevel@tonic-gate }
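/*
 * Illustrative sketch (values and flags are hypothetical): how the
 * v_mmap_read/v_mmap_write page counts above move for a mapping of a
 * VREG vnode.
 *
 *	MAP_SHARED, maxprot = PROT_READ|PROT_WRITE:
 *		v_mmap_read  += btopr(len);
 *		v_mmap_write += btopr(len);
 *
 *	MAP_PRIVATE (any maxprot):
 *		v_mmap_read  += btopr(len);	private pages are never
 *						written back, so they are
 *						counted as read-only mappings
 *
 * fop_delmap() below applies the same deltas with the opposite sign, but
 * only once the underlying file system has done the real unmap work
 * (i.e. not on an EAGAIN return).
 */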
39550Sstevel@tonic-gate
39560Sstevel@tonic-gate int
39570Sstevel@tonic-gate fop_delmap(
39580Sstevel@tonic-gate vnode_t *vp,
39590Sstevel@tonic-gate offset_t off,
39600Sstevel@tonic-gate struct as *as,
39610Sstevel@tonic-gate caddr_t addr,
39620Sstevel@tonic-gate size_t len,
39630Sstevel@tonic-gate uint_t prot,
39640Sstevel@tonic-gate uint_t maxprot,
39650Sstevel@tonic-gate uint_t flags,
39665331Samw cred_t *cr,
39675331Samw caller_context_t *ct)
39680Sstevel@tonic-gate {
39690Sstevel@tonic-gate int error;
39700Sstevel@tonic-gate u_longlong_t delta;
39714321Scasper
39724321Scasper VOPXID_MAP_CR(vp, cr);
39734321Scasper
39740Sstevel@tonic-gate error = (*(vp)->v_op->vop_delmap)
39755331Samw (vp, off, as, addr, len, prot, maxprot, flags, cr, ct);
39760Sstevel@tonic-gate
39770Sstevel@tonic-gate /*
39780Sstevel@tonic-gate * NFS calls into delmap twice: the first call simply
39790Sstevel@tonic-gate * establishes a callback mechanism and returns EAGAIN,
39800Sstevel@tonic-gate * while the real work is done upon the second invocation.
39810Sstevel@tonic-gate * We have to detect this here and only decrement the counts
39820Sstevel@tonic-gate * upon the second delmap request.
39830Sstevel@tonic-gate */
39840Sstevel@tonic-gate if ((error != EAGAIN) && (vp->v_type == VREG)) {
39850Sstevel@tonic-gate
39860Sstevel@tonic-gate delta = (u_longlong_t)btopr(len);
39870Sstevel@tonic-gate
39880Sstevel@tonic-gate if (flags & MAP_PRIVATE) {
39890Sstevel@tonic-gate atomic_add_64((uint64_t *)(&(vp->v_mmap_read)),
39904956Spf199842 (int64_t)(-delta));
39910Sstevel@tonic-gate } else {
39920Sstevel@tonic-gate /*
39930Sstevel@tonic-gate * atomic_add_64 forces the fetch of a 64 bit value
39940Sstevel@tonic-gate * to be atomic on 32 bit machines
39950Sstevel@tonic-gate */
39960Sstevel@tonic-gate if (maxprot & PROT_WRITE)
39970Sstevel@tonic-gate atomic_add_64((uint64_t *)(&(vp->v_mmap_write)),
39984956Spf199842 (int64_t)(-delta));
39990Sstevel@tonic-gate if (maxprot & PROT_READ)
40000Sstevel@tonic-gate atomic_add_64((uint64_t *)(&(vp->v_mmap_read)),
40014956Spf199842 (int64_t)(-delta));
40020Sstevel@tonic-gate if (maxprot & PROT_EXEC)
40030Sstevel@tonic-gate atomic_add_64((uint64_t *)(&(vp->v_mmap_read)),
40044956Spf199842 (int64_t)(-delta));
40050Sstevel@tonic-gate }
40060Sstevel@tonic-gate }
40071738Sbmc VOPSTATS_UPDATE(vp, delmap);
40080Sstevel@tonic-gate return (error);
40090Sstevel@tonic-gate }
40100Sstevel@tonic-gate
40110Sstevel@tonic-gate
40120Sstevel@tonic-gate int
40130Sstevel@tonic-gate fop_poll(
40140Sstevel@tonic-gate vnode_t *vp,
40150Sstevel@tonic-gate short events,
40160Sstevel@tonic-gate int anyyet,
40170Sstevel@tonic-gate short *reventsp,
40185331Samw struct pollhead **phpp,
40195331Samw caller_context_t *ct)
40200Sstevel@tonic-gate {
40211488Srsb int err;
40221488Srsb
40235331Samw err = (*(vp)->v_op->vop_poll)(vp, events, anyyet, reventsp, phpp, ct);
40241738Sbmc VOPSTATS_UPDATE(vp, poll);
40251488Srsb return (err);
40260Sstevel@tonic-gate }
40270Sstevel@tonic-gate
40280Sstevel@tonic-gate int
40290Sstevel@tonic-gate fop_dump(
40300Sstevel@tonic-gate vnode_t *vp,
40310Sstevel@tonic-gate caddr_t addr,
40326264Srm15945 offset_t lbdn,
40336264Srm15945 offset_t dblks,
40345331Samw caller_context_t *ct)
40350Sstevel@tonic-gate {
40361488Srsb int err;
40371488Srsb
40386264Srm15945 /* ensure lbdn and dblks can be passed safely to bdev_dump */
40396264Srm15945 if ((lbdn != (daddr_t)lbdn) || (dblks != (int)dblks))
40406264Srm15945 return (EIO);
40416264Srm15945
40425331Samw err = (*(vp)->v_op->vop_dump)(vp, addr, lbdn, dblks, ct);
40431738Sbmc VOPSTATS_UPDATE(vp, dump);
40441488Srsb return (err);
40450Sstevel@tonic-gate }
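/*
 * Illustrative sketch of the narrowing check above (the values are
 * hypothetical): casting to the narrower type and comparing against the
 * original detects block numbers or counts that bdev_dump() could not
 * represent.
 *
 *	offset_t lbdn = 0x1ffffffffLL;
 *	lbdn != (daddr_t)lbdn		true when daddr_t is 32 bits wide,
 *					so the call is rejected with EIO
 *
 *	offset_t lbdn = 1024;
 *	lbdn != (daddr_t)lbdn		false; the value is passed through
 */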
40460Sstevel@tonic-gate
40470Sstevel@tonic-gate int
40480Sstevel@tonic-gate fop_pathconf(
40490Sstevel@tonic-gate vnode_t *vp,
40500Sstevel@tonic-gate int cmd,
40510Sstevel@tonic-gate ulong_t *valp,
40525331Samw cred_t *cr,
40535331Samw caller_context_t *ct)
40540Sstevel@tonic-gate {
40551488Srsb int err;
40561488Srsb
40574321Scasper VOPXID_MAP_CR(vp, cr);
40584321Scasper
40595331Samw err = (*(vp)->v_op->vop_pathconf)(vp, cmd, valp, cr, ct);
40601738Sbmc VOPSTATS_UPDATE(vp, pathconf);
40611488Srsb return (err);
40620Sstevel@tonic-gate }
40630Sstevel@tonic-gate
40640Sstevel@tonic-gate int
40650Sstevel@tonic-gate fop_pageio(
40660Sstevel@tonic-gate vnode_t *vp,
40670Sstevel@tonic-gate struct page *pp,
40680Sstevel@tonic-gate u_offset_t io_off,
40690Sstevel@tonic-gate size_t io_len,
40700Sstevel@tonic-gate int flags,
40715331Samw cred_t *cr,
40725331Samw caller_context_t *ct)
40730Sstevel@tonic-gate {
40741488Srsb int err;
40751488Srsb
40764321Scasper VOPXID_MAP_CR(vp, cr);
40774321Scasper
40785331Samw err = (*(vp)->v_op->vop_pageio)(vp, pp, io_off, io_len, flags, cr, ct);
40791738Sbmc VOPSTATS_UPDATE(vp, pageio);
40801488Srsb return (err);
40810Sstevel@tonic-gate }
40820Sstevel@tonic-gate
40830Sstevel@tonic-gate int
40840Sstevel@tonic-gate fop_dumpctl(
40850Sstevel@tonic-gate vnode_t *vp,
40860Sstevel@tonic-gate int action,
40876264Srm15945 offset_t *blkp,
40885331Samw caller_context_t *ct)
40890Sstevel@tonic-gate {
40901488Srsb int err;
40915331Samw err = (*(vp)->v_op->vop_dumpctl)(vp, action, blkp, ct);
40921738Sbmc VOPSTATS_UPDATE(vp, dumpctl);
40931488Srsb return (err);
40940Sstevel@tonic-gate }
40950Sstevel@tonic-gate
40960Sstevel@tonic-gate void
40970Sstevel@tonic-gate fop_dispose(
40980Sstevel@tonic-gate vnode_t *vp,
40990Sstevel@tonic-gate page_t *pp,
41000Sstevel@tonic-gate int flag,
41010Sstevel@tonic-gate int dn,
41025331Samw cred_t *cr,
41035331Samw caller_context_t *ct)
41040Sstevel@tonic-gate {
41051488Srsb /* Must do stats first since it's possible to lose the vnode */
41061738Sbmc VOPSTATS_UPDATE(vp, dispose);
41074321Scasper
41084321Scasper VOPXID_MAP_CR(vp, cr);
41094321Scasper
41105331Samw (*(vp)->v_op->vop_dispose)(vp, pp, flag, dn, cr, ct);
41110Sstevel@tonic-gate }
41120Sstevel@tonic-gate
41130Sstevel@tonic-gate int
41140Sstevel@tonic-gate fop_setsecattr(
41150Sstevel@tonic-gate vnode_t *vp,
41160Sstevel@tonic-gate vsecattr_t *vsap,
41170Sstevel@tonic-gate int flag,
41185331Samw cred_t *cr,
41195331Samw caller_context_t *ct)
41200Sstevel@tonic-gate {
41211488Srsb int err;
41221488Srsb
41234321Scasper VOPXID_MAP_CR(vp, cr);
41244321Scasper
41255331Samw /*
41265331Samw * We're only allowed to skip the ACL check if we used a 32 bit
41275331Samw * ACE mask with VOP_ACCESS() to determine permissions.
41285331Samw */
41295331Samw if ((flag & ATTR_NOACLCHECK) &&
41305331Samw vfs_has_feature(vp->v_vfsp, VFSFT_ACEMASKONACCESS) == 0) {
41315331Samw return (EINVAL);
41325331Samw }
41335331Samw err = (*(vp)->v_op->vop_setsecattr) (vp, vsap, flag, cr, ct);
41341738Sbmc VOPSTATS_UPDATE(vp, setsecattr);
41351488Srsb return (err);
41360Sstevel@tonic-gate }
41370Sstevel@tonic-gate
41380Sstevel@tonic-gate int
41390Sstevel@tonic-gate fop_getsecattr(
41400Sstevel@tonic-gate vnode_t *vp,
41410Sstevel@tonic-gate vsecattr_t *vsap,
41420Sstevel@tonic-gate int flag,
41435331Samw cred_t *cr,
41445331Samw caller_context_t *ct)
41450Sstevel@tonic-gate {
41461488Srsb int err;
41471488Srsb
41485331Samw /*
41495331Samw * We're only allowed to skip the ACL check if we used a 32 bit
41505331Samw * ACE mask with VOP_ACCESS() to determine permissions.
41515331Samw */
41525331Samw if ((flag & ATTR_NOACLCHECK) &&
41535331Samw vfs_has_feature(vp->v_vfsp, VFSFT_ACEMASKONACCESS) == 0) {
41545331Samw return (EINVAL);
41555331Samw }
41565331Samw
41574321Scasper VOPXID_MAP_CR(vp, cr);
41584321Scasper
41595331Samw err = (*(vp)->v_op->vop_getsecattr) (vp, vsap, flag, cr, ct);
41601738Sbmc VOPSTATS_UPDATE(vp, getsecattr);
41611488Srsb return (err);
41620Sstevel@tonic-gate }
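/*
 * Minimal sketch of how the ATTR_NOACLCHECK gate in fop_setsecattr() and
 * fop_getsecattr() is intended to be used.  The mount-time hook shown is
 * hypothetical; the feature and VOP calls are the ones referenced above.
 *
 *	A file system whose VOP_ACCESS() implementation checks permissions
 *	with a 32-bit ACE mask advertises that fact once, e.g. at mount:
 *
 *		vfs_set_feature(vfsp, VFSFT_ACEMASKONACCESS);
 *
 *	A caller that has already done such an access check may then skip
 *	the redundant ACL check:
 *
 *		error = VOP_SETSECATTR(vp, &vsec, ATTR_NOACLCHECK, cr, ct);
 *
 *	On a file system that never declared the feature, the same call
 *	fails with EINVAL rather than silently bypassing the ACL check.
 */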
41630Sstevel@tonic-gate
41640Sstevel@tonic-gate int
41650Sstevel@tonic-gate fop_shrlock(
41660Sstevel@tonic-gate vnode_t *vp,
41670Sstevel@tonic-gate int cmd,
41680Sstevel@tonic-gate struct shrlock *shr,
41690Sstevel@tonic-gate int flag,
41705331Samw cred_t *cr,
41715331Samw caller_context_t *ct)
41720Sstevel@tonic-gate {
41731488Srsb int err;
41741488Srsb
41754321Scasper VOPXID_MAP_CR(vp, cr);
41764321Scasper
41775331Samw err = (*(vp)->v_op->vop_shrlock)(vp, cmd, shr, flag, cr, ct);
41781738Sbmc VOPSTATS_UPDATE(vp, shrlock);
41791488Srsb return (err);
41800Sstevel@tonic-gate }
41810Sstevel@tonic-gate
41820Sstevel@tonic-gate int
41835331Samw fop_vnevent(vnode_t *vp, vnevent_t vnevent, vnode_t *dvp, char *fnm,
41845331Samw caller_context_t *ct)
41850Sstevel@tonic-gate {
41861488Srsb int err;
41871488Srsb
41885331Samw err = (*(vp)->v_op->vop_vnevent)(vp, vnevent, dvp, fnm, ct);
41891738Sbmc VOPSTATS_UPDATE(vp, vnevent);
41901488Srsb return (err);
41910Sstevel@tonic-gate }
41925050Sjwahlig
419311539SChunli.Zhang@Sun.COM int
419411539SChunli.Zhang@Sun.COM fop_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *uiop, cred_t *cr,
419511539SChunli.Zhang@Sun.COM caller_context_t *ct)
419611539SChunli.Zhang@Sun.COM {
419711539SChunli.Zhang@Sun.COM int err;
419811539SChunli.Zhang@Sun.COM
419911539SChunli.Zhang@Sun.COM if (vfs_has_feature(vp->v_vfsp, VFSFT_ZEROCOPY_SUPPORTED) == 0)
420011539SChunli.Zhang@Sun.COM return (ENOTSUP);
420111539SChunli.Zhang@Sun.COM err = (*(vp)->v_op->vop_reqzcbuf)(vp, ioflag, uiop, cr, ct);
420211539SChunli.Zhang@Sun.COM VOPSTATS_UPDATE(vp, reqzcbuf);
420311539SChunli.Zhang@Sun.COM return (err);
420411539SChunli.Zhang@Sun.COM }
420511539SChunli.Zhang@Sun.COM
420611539SChunli.Zhang@Sun.COM int
420711539SChunli.Zhang@Sun.COM fop_retzcbuf(vnode_t *vp, xuio_t *uiop, cred_t *cr, caller_context_t *ct)
420811539SChunli.Zhang@Sun.COM {
420911539SChunli.Zhang@Sun.COM int err;
421011539SChunli.Zhang@Sun.COM
421111539SChunli.Zhang@Sun.COM if (vfs_has_feature(vp->v_vfsp, VFSFT_ZEROCOPY_SUPPORTED) == 0)
421211539SChunli.Zhang@Sun.COM return (ENOTSUP);
421311539SChunli.Zhang@Sun.COM err = (*(vp)->v_op->vop_retzcbuf)(vp, uiop, cr, ct);
421411539SChunli.Zhang@Sun.COM VOPSTATS_UPDATE(vp, retzcbuf);
421511539SChunli.Zhang@Sun.COM return (err);
421611539SChunli.Zhang@Sun.COM }
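/*
 * Minimal sketch of the zero-copy gate above.  The mount-time hook is
 * hypothetical, and only a file system that actually registers both
 * vop_reqzcbuf and vop_retzcbuf should declare the feature:
 *
 *	vfs_set_feature(vfsp, VFSFT_ZEROCOPY_SUPPORTED);
 *
 * Until the feature is declared, fop_reqzcbuf() and fop_retzcbuf() fail
 * with ENOTSUP, and callers are expected to fall back to the ordinary
 * copying VOP_READ()/VOP_WRITE() path.
 */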
421711539SChunli.Zhang@Sun.COM
42185050Sjwahlig /*
42195050Sjwahlig * Default destructor
42205050Sjwahlig * Needed because a NULL destructor means that the key is unused
42215050Sjwahlig */
42225050Sjwahlig /* ARGSUSED */
42235050Sjwahlig void
42245050Sjwahlig vsd_defaultdestructor(void *value)
42255050Sjwahlig {}
42265050Sjwahlig
42275050Sjwahlig /*
42285050Sjwahlig * Create a key (index into per vnode array)
42295050Sjwahlig * Locks out vsd_create, vsd_destroy, and vsd_free
42305050Sjwahlig * May allocate memory with lock held
42315050Sjwahlig */
42325050Sjwahlig void
42335050Sjwahlig vsd_create(uint_t *keyp, void (*destructor)(void *))
42345050Sjwahlig {
42355050Sjwahlig int i;
42365050Sjwahlig uint_t nkeys;
42375050Sjwahlig
42385050Sjwahlig /*
42395050Sjwahlig * if key is allocated, do nothing
42405050Sjwahlig */
42415050Sjwahlig mutex_enter(&vsd_lock);
42425050Sjwahlig if (*keyp) {
42435050Sjwahlig mutex_exit(&vsd_lock);
42445050Sjwahlig return;
42455050Sjwahlig }
42465050Sjwahlig /*
42475050Sjwahlig * find an unused key
42485050Sjwahlig */
42495050Sjwahlig if (destructor == NULL)
42505050Sjwahlig destructor = vsd_defaultdestructor;
42515050Sjwahlig
42525050Sjwahlig for (i = 0; i < vsd_nkeys; ++i)
42535050Sjwahlig if (vsd_destructor[i] == NULL)
42545050Sjwahlig break;
42555050Sjwahlig
42565050Sjwahlig /*
42575050Sjwahlig * if no unused keys, increase the size of the destructor array
42585050Sjwahlig */
42595050Sjwahlig if (i == vsd_nkeys) {
42605050Sjwahlig if ((nkeys = (vsd_nkeys << 1)) == 0)
42615050Sjwahlig nkeys = 1;
42625050Sjwahlig vsd_destructor =
42635050Sjwahlig (void (**)(void *))vsd_realloc((void *)vsd_destructor,
42645050Sjwahlig (size_t)(vsd_nkeys * sizeof (void (*)(void *))),
42655050Sjwahlig (size_t)(nkeys * sizeof (void (*)(void *))));
42665050Sjwahlig vsd_nkeys = nkeys;
42675050Sjwahlig }
42685050Sjwahlig
42695050Sjwahlig /*
42705050Sjwahlig * allocate the next available unused key
42715050Sjwahlig */
42725050Sjwahlig vsd_destructor[i] = destructor;
42735050Sjwahlig *keyp = i + 1;
42745050Sjwahlig
42755050Sjwahlig /* create vsd_list, if it doesn't exist */
42765050Sjwahlig if (vsd_list == NULL) {
42775050Sjwahlig vsd_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
42785050Sjwahlig list_create(vsd_list, sizeof (struct vsd_node),
42795050Sjwahlig offsetof(struct vsd_node, vs_nodes));
42805050Sjwahlig }
42815050Sjwahlig
42825050Sjwahlig mutex_exit(&vsd_lock);
42835050Sjwahlig }
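/*
 * Minimal usage sketch for the VSD key routines.  The key variable,
 * destructor, and my_data_t type are hypothetical:
 *
 *	static uint_t my_vsd_key;
 *
 *	static void
 *	my_vsd_destructor(void *value)
 *	{
 *		kmem_free(value, sizeof (my_data_t));
 *	}
 *
 *	vsd_create(&my_vsd_key, my_vsd_destructor);	once, e.g. in _init()
 *	...
 *	vsd_destroy(&my_vsd_key);			once, e.g. in _fini()
 *
 * vsd_create() is idempotent for an already-allocated key: a racing
 * caller simply finds *keyp nonzero and returns.
 */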
42845050Sjwahlig
42855050Sjwahlig /*
42865050Sjwahlig * Destroy a key
42875050Sjwahlig *
42885050Sjwahlig * Assumes that the caller is preventing vsd_set and vsd_get
42895050Sjwahlig * Locks out vsd_create, vsd_destroy, and vsd_free
42905050Sjwahlig * May free memory with lock held
42915050Sjwahlig */
42925050Sjwahlig void
42935050Sjwahlig vsd_destroy(uint_t *keyp)
42945050Sjwahlig {
42955050Sjwahlig uint_t key;
42965050Sjwahlig struct vsd_node *vsd;
42975050Sjwahlig
42985050Sjwahlig /*
42995050Sjwahlig * protect the key namespace and our destructor lists
43005050Sjwahlig */
43015050Sjwahlig mutex_enter(&vsd_lock);
43025050Sjwahlig key = *keyp;
43035050Sjwahlig *keyp = 0;
43045050Sjwahlig
43055050Sjwahlig ASSERT(key <= vsd_nkeys);
43065050Sjwahlig
43075050Sjwahlig /*
43085050Sjwahlig * if the key is valid
43095050Sjwahlig */
43105050Sjwahlig if (key != 0) {
43115050Sjwahlig uint_t k = key - 1;
43125050Sjwahlig /*
43135050Sjwahlig * for every vnode with VSD, call key's destructor
43145050Sjwahlig */
43155050Sjwahlig for (vsd = list_head(vsd_list); vsd != NULL;
43165050Sjwahlig vsd = list_next(vsd_list, vsd)) {
43175050Sjwahlig /*
43185050Sjwahlig * no VSD for key in this vnode
43195050Sjwahlig */
43205050Sjwahlig if (key > vsd->vs_nkeys)
43215050Sjwahlig continue;
43225050Sjwahlig /*
43235050Sjwahlig * call destructor for key
43245050Sjwahlig */
43255050Sjwahlig if (vsd->vs_value[k] && vsd_destructor[k])
43265050Sjwahlig (*vsd_destructor[k])(vsd->vs_value[k]);
43275050Sjwahlig /*
43285050Sjwahlig * reset value for key
43295050Sjwahlig */
43305050Sjwahlig vsd->vs_value[k] = NULL;
43315050Sjwahlig }
43325050Sjwahlig /*
43335050Sjwahlig * actually free the key (NULL destructor == unused)
43345050Sjwahlig */
43355050Sjwahlig vsd_destructor[k] = NULL;
43365050Sjwahlig }
43375050Sjwahlig
43385050Sjwahlig mutex_exit(&vsd_lock);
43395050Sjwahlig }
43405050Sjwahlig
43415050Sjwahlig /*
43425050Sjwahlig * Quickly return the per vnode value that was stored with the specified key
43435050Sjwahlig * Assumes the caller is protecting key from vsd_create and vsd_destroy
43449885SRobert.Mastors@Sun.COM * Assumes the caller is holding v_vsd_lock to protect the vsd.
43455050Sjwahlig */
43465050Sjwahlig void *
43475050Sjwahlig vsd_get(vnode_t *vp, uint_t key)
43485050Sjwahlig {
43495050Sjwahlig struct vsd_node *vsd;
43505050Sjwahlig
43515050Sjwahlig ASSERT(vp != NULL);
43529885SRobert.Mastors@Sun.COM ASSERT(mutex_owned(&vp->v_vsd_lock));
43535050Sjwahlig
43545050Sjwahlig vsd = vp->v_vsd;
43555050Sjwahlig
43565050Sjwahlig if (key && vsd != NULL && key <= vsd->vs_nkeys)
43575050Sjwahlig return (vsd->vs_value[key - 1]);
43585050Sjwahlig return (NULL);
43595050Sjwahlig }
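/*
 * Minimal sketch of a lookup under the required lock (my_vsd_key and
 * my_data_t are the hypothetical names from the sketch above):
 *
 *	my_data_t *data;
 *
 *	mutex_enter(&vp->v_vsd_lock);
 *	data = vsd_get(vp, my_vsd_key);
 *	mutex_exit(&vp->v_vsd_lock);
 *
 * The ASSERT above only checks that v_vsd_lock is held; serializing the
 * key itself against vsd_create()/vsd_destroy() is still the caller's job.
 */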
43605050Sjwahlig
43615050Sjwahlig /*
43625050Sjwahlig * Set a per vnode value indexed with the specified key
43639885SRobert.Mastors@Sun.COM * Assumes the caller is holding v_vsd_lock to protect the vsd.
43645050Sjwahlig */
43655050Sjwahlig int
43665050Sjwahlig vsd_set(vnode_t *vp, uint_t key, void *value)
43675050Sjwahlig {
43689885SRobert.Mastors@Sun.COM struct vsd_node *vsd;
43699885SRobert.Mastors@Sun.COM
43709885SRobert.Mastors@Sun.COM ASSERT(vp != NULL);
43719885SRobert.Mastors@Sun.COM ASSERT(mutex_owned(&vp->v_vsd_lock));
43725050Sjwahlig
43735050Sjwahlig if (key == 0)
43745050Sjwahlig return (EINVAL);
43759885SRobert.Mastors@Sun.COM
43769885SRobert.Mastors@Sun.COM vsd = vp->v_vsd;
43775050Sjwahlig if (vsd == NULL)
43785050Sjwahlig vsd = vp->v_vsd = kmem_zalloc(sizeof (*vsd), KM_SLEEP);
43795050Sjwahlig
43805050Sjwahlig /*
43815050Sjwahlig * If the vsd was just allocated, vs_nkeys will be 0, so the following
43825050Sjwahlig * code won't happen and we will continue down and allocate space for
43835050Sjwahlig * the vs_value array.
43845050Sjwahlig * If the caller is replacing one value with another, then it is up
43855050Sjwahlig * to the caller to free/rele/destroy the previous value (if needed).
43865050Sjwahlig */
43875050Sjwahlig if (key <= vsd->vs_nkeys) {
43885050Sjwahlig vsd->vs_value[key - 1] = value;
43895050Sjwahlig return (0);
43905050Sjwahlig }
43915050Sjwahlig
43925050Sjwahlig ASSERT(key <= vsd_nkeys);
43935050Sjwahlig
43945050Sjwahlig if (vsd->vs_nkeys == 0) {
43955050Sjwahlig mutex_enter(&vsd_lock); /* lock out vsd_destroy() */
43965050Sjwahlig /*
43975050Sjwahlig * Link onto list of all VSD nodes.
43985050Sjwahlig */
43995050Sjwahlig list_insert_head(vsd_list, vsd);
44005050Sjwahlig mutex_exit(&vsd_lock);
44015050Sjwahlig }
44025050Sjwahlig
44035050Sjwahlig /*
44045050Sjwahlig * Allocate vnode local storage and set the value for key
44055050Sjwahlig */
44065050Sjwahlig vsd->vs_value = vsd_realloc(vsd->vs_value,
44075050Sjwahlig vsd->vs_nkeys * sizeof (void *),
44085050Sjwahlig key * sizeof (void *));
44095050Sjwahlig vsd->vs_nkeys = key;
44105050Sjwahlig vsd->vs_value[key - 1] = value;
44115050Sjwahlig
44125050Sjwahlig return (0);
44135050Sjwahlig }
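/*
 * Minimal sketch of storing a value (same hypothetical key and data type
 * as the earlier sketches):
 *
 *	my_data_t *data = kmem_zalloc(sizeof (*data), KM_SLEEP);
 *	int error;
 *
 *	mutex_enter(&vp->v_vsd_lock);
 *	error = vsd_set(vp, my_vsd_key, data);
 *	mutex_exit(&vp->v_vsd_lock);
 *
 * If a previous value was stored under the same key, vsd_set() simply
 * overwrites the slot; freeing or releasing the old value is up to the
 * caller, as noted above.
 */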
44145050Sjwahlig
44155050Sjwahlig /*
44165050Sjwahlig * Called from vn_free() to run the destructor function for each vsd
44175050Sjwahlig * Locks out vsd_create and vsd_destroy
44185050Sjwahlig * Assumes that the destructor *DOES NOT* use vsd
44195050Sjwahlig */
44205050Sjwahlig void
44215050Sjwahlig vsd_free(vnode_t *vp)
44225050Sjwahlig {
44235050Sjwahlig int i;
44245050Sjwahlig struct vsd_node *vsd = vp->v_vsd;
44255050Sjwahlig
44265050Sjwahlig if (vsd == NULL)
44275050Sjwahlig return;
44285050Sjwahlig
44295050Sjwahlig if (vsd->vs_nkeys == 0) {
44305050Sjwahlig kmem_free(vsd, sizeof (*vsd));
44315050Sjwahlig vp->v_vsd = NULL;
44325050Sjwahlig return;
44335050Sjwahlig }
44345050Sjwahlig
44355050Sjwahlig /*
44365050Sjwahlig * lock out vsd_create and vsd_destroy, call
44375050Sjwahlig * the destructor, and mark the value as destroyed.
44385050Sjwahlig */
44395050Sjwahlig mutex_enter(&vsd_lock);
44405050Sjwahlig
44415050Sjwahlig for (i = 0; i < vsd->vs_nkeys; i++) {
44425050Sjwahlig if (vsd->vs_value[i] && vsd_destructor[i])
44435050Sjwahlig (*vsd_destructor[i])(vsd->vs_value[i]);
44445050Sjwahlig vsd->vs_value[i] = NULL;
44455050Sjwahlig }
44465050Sjwahlig
44475050Sjwahlig /*
44485050Sjwahlig * remove from linked list of VSD nodes
44495050Sjwahlig */
44505050Sjwahlig list_remove(vsd_list, vsd);
44515050Sjwahlig
44525050Sjwahlig mutex_exit(&vsd_lock);
44535050Sjwahlig
44545050Sjwahlig /*
44555050Sjwahlig * free up the VSD
44565050Sjwahlig */
44575050Sjwahlig kmem_free(vsd->vs_value, vsd->vs_nkeys * sizeof (void *));
44585050Sjwahlig kmem_free(vsd, sizeof (struct vsd_node));
44595050Sjwahlig vp->v_vsd = NULL;
44605050Sjwahlig }
44615050Sjwahlig
44625050Sjwahlig /*
44635050Sjwahlig * realloc
44645050Sjwahlig */
44655050Sjwahlig static void *
44665050Sjwahlig vsd_realloc(void *old, size_t osize, size_t nsize)
44675050Sjwahlig {
44685050Sjwahlig void *new;
44695050Sjwahlig
44705050Sjwahlig new = kmem_zalloc(nsize, KM_SLEEP);
44715050Sjwahlig if (old) {
44725050Sjwahlig bcopy(old, new, osize);
44735050Sjwahlig kmem_free(old, osize);
44745050Sjwahlig }
44755050Sjwahlig return (new);
44765050Sjwahlig }
447710793Sdai.ngo@sun.com
447810793Sdai.ngo@sun.com /*
447910793Sdai.ngo@sun.com * Setup the extensible system attribute for creating a reparse point.
448010793Sdai.ngo@sun.com * The symlink data 'target' is validated for proper reparse-string
448110793Sdai.ngo@sun.com * format, and a check is also made to ensure the symlink data does
448210793Sdai.ngo@sun.com * not point to an existing file.
448310793Sdai.ngo@sun.com *
448410793Sdai.ngo@sun.com * Returns 0 on success, -1 on failure.
448510793Sdai.ngo@sun.com */
448610793Sdai.ngo@sun.com static int
448710793Sdai.ngo@sun.com fs_reparse_mark(char *target, vattr_t *vap, xvattr_t *xvattr)
448810793Sdai.ngo@sun.com {
448910793Sdai.ngo@sun.com xoptattr_t *xoap;
449010793Sdai.ngo@sun.com
449110793Sdai.ngo@sun.com if ((!target) || (!vap) || (!xvattr))
449210793Sdai.ngo@sun.com return (-1);
449310793Sdai.ngo@sun.com
449410793Sdai.ngo@sun.com /* validate reparse string */
449510793Sdai.ngo@sun.com if (reparse_validate((const char *)target))
449610793Sdai.ngo@sun.com return (-1);
449710793Sdai.ngo@sun.com
449810793Sdai.ngo@sun.com xva_init(xvattr);
449910793Sdai.ngo@sun.com xvattr->xva_vattr = *vap;
450010793Sdai.ngo@sun.com xvattr->xva_vattr.va_mask |= AT_XVATTR;
450110793Sdai.ngo@sun.com xoap = xva_getxoptattr(xvattr);
450210793Sdai.ngo@sun.com ASSERT(xoap);
450310793Sdai.ngo@sun.com XVA_SET_REQ(xvattr, XAT_REPARSE);
450410793Sdai.ngo@sun.com xoap->xoa_reparse = 1;
450510793Sdai.ngo@sun.com
450610793Sdai.ngo@sun.com return (0);
450710793Sdai.ngo@sun.com }
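/*
 * Minimal sketch of a hypothetical caller of fs_reparse_mark().  The
 * variable names are illustrative; 'va' holds the ordinary attributes for
 * the new symlink and 'target' the reparse-formatted symlink data that
 * reparse_validate() accepts:
 *
 *	xvattr_t xva;
 *
 *	if (fs_reparse_mark(target, &va, &xva) != 0)
 *		return (EINVAL);
 *
 *	error = VOP_SYMLINK(dvp, name, &xva.xva_vattr, target, cr, ct, 0);
 *
 * fs_reparse_mark() copies 'va' into the xvattr, turns on AT_XVATTR and
 * requests XAT_REPARSE, so the file system creating the symlink tags the
 * new node as a reparse point.
 */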
450811291SRobert.Thurlow@Sun.COM
450911291SRobert.Thurlow@Sun.COM /*
451011291SRobert.Thurlow@Sun.COM * Function to check whether a symlink is a reparse point.
451111291SRobert.Thurlow@Sun.COM * Returns B_TRUE if it is a reparse point, B_FALSE otherwise.
451211291SRobert.Thurlow@Sun.COM */
451311291SRobert.Thurlow@Sun.COM boolean_t
451411291SRobert.Thurlow@Sun.COM vn_is_reparse(vnode_t *vp, cred_t *cr, caller_context_t *ct)
451511291SRobert.Thurlow@Sun.COM {
451611291SRobert.Thurlow@Sun.COM xvattr_t xvattr;
451711291SRobert.Thurlow@Sun.COM xoptattr_t *xoap;
451811291SRobert.Thurlow@Sun.COM
451911291SRobert.Thurlow@Sun.COM if ((vp->v_type != VLNK) ||
452011291SRobert.Thurlow@Sun.COM !(vfs_has_feature(vp->v_vfsp, VFSFT_XVATTR)))
452111291SRobert.Thurlow@Sun.COM return (B_FALSE);
452211291SRobert.Thurlow@Sun.COM
452311291SRobert.Thurlow@Sun.COM xva_init(&xvattr);
452411291SRobert.Thurlow@Sun.COM xoap = xva_getxoptattr(&xvattr);
452511291SRobert.Thurlow@Sun.COM ASSERT(xoap);
452611291SRobert.Thurlow@Sun.COM XVA_SET_REQ(&xvattr, XAT_REPARSE);
452711291SRobert.Thurlow@Sun.COM
452811291SRobert.Thurlow@Sun.COM if (VOP_GETATTR(vp, &xvattr.xva_vattr, 0, cr, ct))
452911291SRobert.Thurlow@Sun.COM return (B_FALSE);
453011291SRobert.Thurlow@Sun.COM
453111291SRobert.Thurlow@Sun.COM if ((!(xvattr.xva_vattr.va_mask & AT_XVATTR)) ||
453211291SRobert.Thurlow@Sun.COM (!(XVA_ISSET_RTN(&xvattr, XAT_REPARSE))))
453311291SRobert.Thurlow@Sun.COM return (B_FALSE);
453411291SRobert.Thurlow@Sun.COM
453511291SRobert.Thurlow@Sun.COM return (xoap->xoa_reparse ? B_TRUE : B_FALSE);
453611291SRobert.Thurlow@Sun.COM }
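/*
 * Minimal sketch of a hypothetical consumer of vn_is_reparse(), e.g. a
 * file-sharing service deciding whether to follow a symlink or hand the
 * reparse data back to its client:
 *
 *	if (vp->v_type == VLNK && vn_is_reparse(vp, cr, ct)) {
 *		read the symlink data and interpret the embedded
 *		service records as reparse information
 *	} else {
 *		treat it as an ordinary symlink
 *	}
 *
 * The helper returns B_FALSE early for non-symlinks and for file systems
 * without xvattr support, so callers need no extra feature checks.
 */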