xref: /dflybsd-src/sys/kern/vfs_default.c (revision 5d6897ab88180d0f34082af35f34d78c6c929e73)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed
6  * to Berkeley by John Heidemann of the UCLA Ficus project.
7  *
8  * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *
39  * $FreeBSD: src/sys/kern/vfs_default.c,v 1.28.2.7 2003/01/10 18:23:26 bde Exp $
40  * $DragonFly: src/sys/kern/vfs_default.c,v 1.20 2004/10/07 10:03:02 dillon Exp $
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/buf.h>
46 #include <sys/conf.h>
47 #include <sys/kernel.h>
48 #include <sys/lock.h>
49 #include <sys/malloc.h>
50 #include <sys/mount.h>
51 #include <sys/unistd.h>
52 #include <sys/vnode.h>
53 #include <sys/namei.h>
54 #include <sys/poll.h>
55 
56 #include <machine/limits.h>
57 
58 #include <vm/vm.h>
59 #include <vm/vm_object.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_pager.h>
62 #include <vm/vnode_pager.h>
63 
64 static int	vop_nolookup (struct vop_lookup_args *);
65 static int	vop_nostrategy (struct vop_strategy_args *);
66 
67 /*
68  * This vnode table stores what we want to do if the filesystem doesn't
69  * implement a particular VOP.
70  *
71  * If there is no specific entry here, we will return EOPNOTSUPP.
72  */
struct vop_ops *default_vnode_vops;
/*
 * Default VOP dispatch table.  Entries whose handlers take the generic
 * struct vop_generic_args directly need no cast; handlers with a typed
 * argument struct are cast through (void *) to fit the table's generic
 * function-pointer slot.  Any VOP not listed here falls through to
 * vop_default_desc and returns EOPNOTSUPP.
 */
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		vop_eopnotsupp },
	{ &vop_advlock_desc,		vop_einval },
	{ &vop_bwrite_desc,		(void *) vop_stdbwrite },
	{ &vop_close_desc,		vop_null },
	{ &vop_createvobject_desc,	(void *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(void *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		vop_null },
	{ &vop_getvobject_desc,		(void *) vop_stdgetvobject },
	{ &vop_ioctl_desc,		vop_enotty },
	{ &vop_islocked_desc,		(void *) vop_stdislocked },
	{ &vop_lease_desc,		vop_null },
	{ &vop_lock_desc,		(void *) vop_stdlock },
	{ &vop_mmap_desc,		vop_einval },
	{ &vop_resolve_desc,		(void *) vop_noresolve },
	{ &vop_lookup_desc,		(void *) vop_nolookup },
	{ &vop_open_desc,		vop_null },
	{ &vop_pathconf_desc,		vop_einval },
	{ &vop_poll_desc,		(void *) vop_nopoll },
	{ &vop_readlink_desc,		vop_einval },
	{ &vop_reallocblks_desc,	vop_eopnotsupp },
	{ &vop_revoke_desc,		(void *) vop_stdrevoke },
	{ &vop_strategy_desc,		(void *) vop_nostrategy },
	{ &vop_unlock_desc,		(void *) vop_stdunlock },
	{ &vop_getacl_desc,		vop_eopnotsupp },
	{ &vop_setacl_desc,		vop_eopnotsupp },
	{ &vop_aclcheck_desc,		vop_eopnotsupp },
	{ &vop_getextattr_desc,		vop_eopnotsupp },
	{ &vop_setextattr_desc,		vop_eopnotsupp },
	{ NULL, NULL }
};
105 
/*
 * Register the default vnode operations vector at boot via the
 * VNODEOP_SET() linker-set machinery.
 */
static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnode_vops, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
110 
/*
 * Generic VOP stub: operation not supported by this filesystem.
 */
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	return (EOPNOTSUPP);
}
116 
/*
 * Generic VOP stub: always fail with EBADF.
 */
int
vop_ebadf(struct vop_generic_args *ap)
{
	return (EBADF);
}
122 
/*
 * Generic VOP stub: always fail with ENOTTY (used for ioctl).
 */
int
vop_enotty(struct vop_generic_args *ap)
{
	return (ENOTTY);
}
128 
/*
 * Generic VOP stub: always fail with EINVAL.
 */
int
vop_einval(struct vop_generic_args *ap)
{
	return (EINVAL);
}
134 
/*
 * Generic VOP stub: succeed without doing anything.
 */
int
vop_null(struct vop_generic_args *ap)
{
	return (0);
}
140 
/*
 * Forward an operation to the default vnode operations vector.
 * Filesystems install this in their own tables to inherit the
 * defaults for VOPs they do not implement.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{
	return (VOCALL(default_vnode_vops, ap));
}
146 
/*
 * Generic VOP stub: panic the system.  Installed for operations a
 * filesystem must never reach; the panic message names the offending
 * VOP via its descriptor.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}
153 
154 /*
155  * vop_noresolve { struct namecache *a_ncp }	XXX STOPGAP FUNCTION
156  *
157  * XXX OLD API ROUTINE!  WHEN ALL VFSs HAVE BEEN CLEANED UP THIS PROCEDURE
158  * WILL BE REMOVED.  This procedure exists for all VFSs which have not
159  * yet implemented vop_resolve().  It converts vop_resolve() into a
160  * vop_lookup() and does appropriate translations.
161  *
162  * Resolve a ncp for VFSs which do not support the VOP.  Eventually all
163  * VFSs will support this VOP and this routine can be removed, since
164  * vop_resolve() is far less complex then the older LOOKUP/CACHEDLOOKUP
165  * API.
166  *
167  * A locked ncp is passed in to be resolved.  The NCP is resolved by
168  * figuring out the vnode (if any) and calling cache_setvp() to attach the
169  * vnode to the entry.  If the entry represents a non-existant node then
170  * cache_setvp() is called with a NULL vnode to resolve the entry into a
171  * negative cache entry.  No vnode locks are retained and the
172  * ncp is left locked on return.
173  *
174  * There is a potential directory and vnode interlock.   The lock order
175  * requirement is: namecache, governing directory, resolved vnode.
176  */
177 int
178 vop_noresolve(struct vop_resolve_args *ap)
179 {
180 	int error;
181 	struct vnode *dvp;
182 	struct vnode *vp;
183 	struct namecache *ncp;
184 	struct componentname cnp;
185 
186 	ncp = ap->a_ncp;	/* locked namecache node */
187 	if (ncp->nc_flag & NCF_MOUNTPT)	/* can't cross a mount point! */
188 		return(EPERM);
189 	if (ncp->nc_parent == NULL)
190 		return(EPERM);
191 	if ((dvp = ncp->nc_parent->nc_vp) == NULL)
192 		return(EPERM);
193 
194 	if ((error = vget(dvp, NULL, LK_EXCLUSIVE, curthread)) != 0) {
195 		printf("[diagnostic] vop_noresolve: EAGAIN on ncp %p %*.*s\n",
196 			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
197 		return(EAGAIN);
198 	}
199 
200 	bzero(&cnp, sizeof(cnp));
201 	cnp.cn_nameiop = NAMEI_LOOKUP;
202 	cnp.cn_flags = CNP_ISLASTCN;
203 	cnp.cn_nameptr = ncp->nc_name;
204 	cnp.cn_namelen = ncp->nc_nlen;
205 	cnp.cn_cred = ap->a_cred;
206 	cnp.cn_td = curthread; /* XXX */
207 
208 	/*
209 	 * vop_lookup() always returns vp locked.  dvp may or may not be
210 	 * left locked depending on CNP_PDIRUNLOCK.
211 	 */
212 	error = vop_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
213 	if (error == 0)
214 		VOP_UNLOCK(vp, NULL, 0, curthread);
215 	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
216 		VOP_UNLOCK(dvp, NULL, 0, curthread);
217 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
218 		/* was resolved by another process while we were unlocked */
219 		if (error == 0)
220 			vrele(vp);
221 	} else if (error == 0) {
222 		KKASSERT(vp != NULL);
223 		cache_setvp(ncp, vp);
224 		vrele(vp);
225 	} else if (error == ENOENT) {
226 		KKASSERT(vp == NULL);
227 		if (cnp.cn_flags & CNP_ISWHITEOUT)
228 			ncp->nc_flag |= NCF_WHITEOUT;
229 		cache_setvp(ncp, NULL);
230 	}
231 	vrele(dvp);
232 	return (error);
233 }
234 
#if 0

/*
 * vop_nonremove { struct namecache *a_ncp }	XXX STOPGAP FUNCTION
 *
 * Remove the file/dir represented by a_ncp.
 *
 * NOTE(review): disabled (#if 0) — retained as a sketch of the new-API
 * remove path; not compiled.
 *
 * XXX ultra difficult.  A number of existing filesystems, including UFS,
 *     assume that the directory will remain locked and the lookup will
 *     store the directory offset and other things in the directory inode
 *     for the later VOP_REMOVE to use.  We have to move all that
 *     functionality into e.g. UFS's VOP_REMOVE itself.
 */
static int
vop_nonremove(struct vop_nremove_args *ap)
{
	struct namecache *ncfile;
	struct namecache *ncdir;
	struct componentname cnd;
	struct vnode *vp;
	struct vnode *vpd;
	thread_t td;
	int error;

	td = curthread;
	ncfile = ap->a_ncp;
	ncdir = ncfile->nc_parent;

	/* acquire locked vnodes for the directory, then the file */
	if ((error = cache_vget(ncdir, ap->a_cred, LK_EXCLUSIVE, &vpd)) != 0)
		return (error);
	if ((error = cache_vget(ncfile, ap->a_cred, LK_EXCLUSIVE, &vp)) != 0) {
		vput(vpd);
		return (error);
	}
	bzero(&cnd, sizeof(cnd));
	cnd.cn_nameiop = NAMEI_DELETE;
	cnd.cn_td = td;
	cnd.cn_cred = ap->a_cred;
	cnd.cn_nameptr = ncfile->nc_name;
	cnd.cn_namelen = ncfile->nc_nlen;
	error = VOP_REMOVE(vpd, NCPNULL, vp, &cnd);
	if (error == 0)
		cache_purge(vp);
	vput(vp);
	vput(vpd);

	/*
	 * Re-resolve the ncp to match the fact that the file has been
	 * deleted from the namespace.  If an error occurred leave the ncp
	 * unresolved (meaning that we have no idea what the correct state
	 * is).
	 */
	if (error == 0) {
		cache_setunresolved(ncfile);
		cache_setvp(ncfile, NULL);
	}
        return (error);
}

#endif
295 
296 
297 static int
298 vop_nolookup(ap)
299 	struct vop_lookup_args /* {
300 		struct vnode *a_dvp;
301 		struct vnode **a_vpp;
302 		struct componentname *a_cnp;
303 	} */ *ap;
304 {
305 
306 	*ap->a_vpp = NULL;
307 	return (ENOTDIR);
308 }
309 
310 /*
311  *	vop_nostrategy:
312  *
313  *	Strategy routine for VFS devices that have none.
314  *
315  *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
316  *	routine.  Typically this is done for a B_READ strategy call.  Typically
317  *	B_INVAL is assumed to already be clear prior to a write and should not
318  *	be cleared manually unless you just made the buffer invalid.  B_ERROR
319  *	should be cleared either way.
320  */
321 
322 static int
323 vop_nostrategy (struct vop_strategy_args *ap)
324 {
325 	printf("No strategy for buffer at %p\n", ap->a_bp);
326 	vprint("", ap->a_vp);
327 	vprint("", ap->a_bp->b_vp);
328 	ap->a_bp->b_flags |= B_ERROR;
329 	ap->a_bp->b_error = EOPNOTSUPP;
330 	biodone(ap->a_bp);
331 	return (EOPNOTSUPP);
332 }
333 
334 int
335 vop_stdpathconf(ap)
336 	struct vop_pathconf_args /* {
337 	struct vnode *a_vp;
338 	int a_name;
339 	int *a_retval;
340 	} */ *ap;
341 {
342 
343 	switch (ap->a_name) {
344 		case _PC_LINK_MAX:
345 			*ap->a_retval = LINK_MAX;
346 			return (0);
347 		case _PC_MAX_CANON:
348 			*ap->a_retval = MAX_CANON;
349 			return (0);
350 		case _PC_MAX_INPUT:
351 			*ap->a_retval = MAX_INPUT;
352 			return (0);
353 		case _PC_PIPE_BUF:
354 			*ap->a_retval = PIPE_BUF;
355 			return (0);
356 		case _PC_CHOWN_RESTRICTED:
357 			*ap->a_retval = 1;
358 			return (0);
359 		case _PC_VDISABLE:
360 			*ap->a_retval = _POSIX_VDISABLE;
361 			return (0);
362 		default:
363 			return (EINVAL);
364 	}
365 	/* NOTREACHED */
366 }
367 
368 /*
369  * Standard lock.  The lock is recursive-capable only if the lock was
370  * initialized with LK_CANRECURSE or that flag is passed in a_flags.
371  */
372 int
373 vop_stdlock(ap)
374 	struct vop_lock_args /* {
375 		struct vnode *a_vp;
376 		lwkt_tokref_t a_vlock;
377 		int a_flags;
378 		struct proc *a_p;
379 	} */ *ap;
380 {
381 	int error;
382 
383 #ifndef	DEBUG_LOCKS
384 	error = lockmgr(&ap->a_vp->v_lock, ap->a_flags,
385 			ap->a_vlock, ap->a_td);
386 #else
387 	error = debuglockmgr(&ap->a_vp->v_lock, ap->a_flags,
388 			ap->a_vlock, ap->a_td,
389 			"vop_stdlock", ap->a_vp->filename, ap->a_vp->line);
390 #endif
391 	return(error);
392 }
393 
394 int
395 vop_stdunlock(ap)
396 	struct vop_unlock_args /* {
397 		struct vnode *a_vp;
398 		lwkt_tokref_t a_vlock;
399 		int a_flags;
400 		struct thread *a_td;
401 	} */ *ap;
402 {
403 	int error;
404 
405 	error = lockmgr(&ap->a_vp->v_lock, ap->a_flags | LK_RELEASE,
406 			ap->a_vlock, ap->a_td);
407 	return(error);
408 }
409 
410 int
411 vop_stdislocked(ap)
412 	struct vop_islocked_args /* {
413 		struct vnode *a_vp;
414 		struct thread *a_td;
415 	} */ *ap;
416 {
417 	return (lockstatus(&ap->a_vp->v_lock, ap->a_td));
418 }
419 
420 /*
421  * Return true for select/poll.
422  */
423 int
424 vop_nopoll(ap)
425 	struct vop_poll_args /* {
426 		struct vnode *a_vp;
427 		int  a_events;
428 		struct ucred *a_cred;
429 		struct proc *a_p;
430 	} */ *ap;
431 {
432 	/*
433 	 * Return true for read/write.  If the user asked for something
434 	 * special, return POLLNVAL, so that clients have a way of
435 	 * determining reliably whether or not the extended
436 	 * functionality is present without hard-coding knowledge
437 	 * of specific filesystem implementations.
438 	 */
439 	if (ap->a_events & ~POLLSTANDARD)
440 		return (POLLNVAL);
441 
442 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
443 }
444 
445 /*
446  * Implement poll for local filesystems that support it.
447  */
448 int
449 vop_stdpoll(ap)
450 	struct vop_poll_args /* {
451 		struct vnode *a_vp;
452 		int  a_events;
453 		struct ucred *a_cred;
454 		struct thread *a_td;
455 	} */ *ap;
456 {
457 	if (ap->a_events & ~POLLSTANDARD)
458 		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
459 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
460 }
461 
462 int
463 vop_stdbwrite(ap)
464 	struct vop_bwrite_args *ap;
465 {
466 	return (bwrite(ap->a_bp));
467 }
468 
/*
 * vop_stdcreatevobject: associate a VM object with a vnode so that it
 * can participate in VMIO.  Regular files and directories get an object
 * sized from their attributes; disk devices get the largest possible
 * object.  If an existing object is dying we wait for it to terminate
 * and retry.  Returns 0 on success or an error from VOP_GETATTR().
 */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct proc *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	/* nothing to do for vnodes that cannot do VMIO */
	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (vp->v_rdev && dev_is_good(vp->v_rdev)) {
			/*
			 * XXX v_rdev uses NULL/non-NULL instead of NODEV
			 *
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			/* object is being torn down; sleep and retry */
			VOP_UNLOCK(vp, NULL, 0, td);
			tsleep(object, 0, "vodead", 0);
			vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}
524 
525 int
526 vop_stddestroyvobject(ap)
527 	struct vop_destroyvobject_args /* {
528 		struct vnode *vp;
529 	} */ *ap;
530 {
531 	struct vnode *vp = ap->a_vp;
532 	vm_object_t obj = vp->v_object;
533 
534 	if (vp->v_object == NULL)
535 		return (0);
536 
537 	if (obj->ref_count == 0) {
538 		/*
539 		 * vclean() may be called twice. The first time
540 		 * removes the primary reference to the object,
541 		 * the second time goes one further and is a
542 		 * special-case to terminate the object.
543 		 *
544 		 * don't double-terminate the object.
545 		 */
546 		if ((obj->flags & OBJ_DEAD) == 0)
547 			vm_object_terminate(obj);
548 	} else {
549 		/*
550 		 * Woe to the process that tries to page now :-).
551 		 */
552 		vm_pager_deallocate(obj);
553 	}
554 	return (0);
555 }
556 
557 /*
558  * Return the underlying VM object.  This routine may be called with or
559  * without the vnode interlock held.  If called without, the returned
560  * object is not guarenteed to be valid.  The syncer typically gets the
561  * object without holding the interlock in order to quickly test whether
562  * it might be dirty before going heavy-weight.  vm_object's use zalloc
563  * and thus stable-storage, so this is safe.
564  */
565 int
566 vop_stdgetvobject(ap)
567 	struct vop_getvobject_args /* {
568 		struct vnode *vp;
569 		struct vm_object **objpp;
570 	} */ *ap;
571 {
572 	struct vnode *vp = ap->a_vp;
573 	struct vm_object **objpp = ap->a_objpp;
574 
575 	if (objpp)
576 		*objpp = vp->v_object;
577 	return (vp->v_object ? 0 : EINVAL);
578 }
579 
580 /*
581  * vfs default ops
582  * used to fill the vfs fucntion table to get reasonable default return values.
583  */
584 int
585 vfs_stdmount(struct mount *mp, char *path, caddr_t data,
586 	struct nameidata *ndp, struct thread *td)
587 {
588 	return (0);
589 }
590 
591 int
592 vfs_stdunmount(struct mount *mp, int mntflags, struct thread *td)
593 {
594 	return (0);
595 }
596 
597 int
598 vfs_stdroot(struct mount *mp, struct vnode **vpp)
599 {
600 	return (EOPNOTSUPP);
601 }
602 
603 int
604 vfs_stdstatfs(struct mount *mp, struct statfs *sbp, struct thread *td)
605 {
606 	return (EOPNOTSUPP);
607 }
608 
609 int
610 vfs_stdvptofh(struct vnode *vp, struct fid *fhp)
611 {
612 	return (EOPNOTSUPP);
613 }
614 
615 int
616 vfs_stdstart(struct mount *mp, int flags, struct thread *td)
617 {
618 	return (0);
619 }
620 
621 int
622 vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid,
623 	caddr_t arg, struct thread *td)
624 {
625 	return (EOPNOTSUPP);
626 }
627 
628 int
629 vfs_stdsync(struct mount *mp, int waitfor, struct thread *td)
630 {
631 	return (0);
632 }
633 
634 int
635 vfs_stdvget(struct mount *mp, ino_t ino, struct vnode **vpp)
636 {
637 	return (EOPNOTSUPP);
638 }
639 
640 int
641 vfs_stdfhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
642 {
643 	return (EOPNOTSUPP);
644 }
645 
646 int
647 vfs_stdcheckexp(struct mount *mp, struct sockaddr *nam, int *extflagsp,
648 	struct ucred **credanonp)
649 {
650 	return (EOPNOTSUPP);
651 }
652 
653 int
654 vfs_stdinit(struct vfsconf *vfsp)
655 {
656 	return (0);
657 }
658 
659 int
660 vfs_stduninit(struct vfsconf *vfsp)
661 {
662 	return(0);
663 }
664 
665 int
666 vfs_stdextattrctl(struct mount *mp, int cmd, const char *attrname,
667 	caddr_t arg, struct thread *td)
668 {
669 	return(EOPNOTSUPP);
670 }
671 
672 /* end of vfs default ops */
673