1 /* $NetBSD: genfs_vnops.c,v 1.220 2023/03/03 10:02:51 hannken Exp $ */
2
3 /*-
4 * Copyright (c) 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. Neither the name of the University nor the names of its contributors
42 * may be used to endorse or promote products derived from this software
43 * without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 *
57 */
58
59 #include <sys/cdefs.h>
60 __KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.220 2023/03/03 10:02:51 hannken Exp $");
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/proc.h>
65 #include <sys/kernel.h>
66 #include <sys/mount.h>
67 #include <sys/fstrans.h>
68 #include <sys/namei.h>
69 #include <sys/vnode_impl.h>
70 #include <sys/fcntl.h>
71 #include <sys/kmem.h>
72 #include <sys/poll.h>
73 #include <sys/mman.h>
74 #include <sys/file.h>
75 #include <sys/kauth.h>
76 #include <sys/stat.h>
77 #include <sys/extattr.h>
78
79 #include <miscfs/genfs/genfs.h>
80 #include <miscfs/genfs/genfs_node.h>
81 #include <miscfs/specfs/specdev.h>
82
83 static void filt_genfsdetach(struct knote *);
84 static int filt_genfsread(struct knote *, long);
85 static int filt_genfsvnode(struct knote *, long);
86
87 /*
88 * Find the end of the first path component in NAME and return its
89 * length.
90 */
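/*
 * Illustrative example (editorial addition, not part of the original
 * source): for NAME = "usr/bin/ls" the first component is "usr" and
 * *a_retval is set to 3; for NAME = "ls" it is 2; for an empty NAME
 * it is 0.
 */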
91 int
genfs_parsepath(void *v)
93 {
94 struct vop_parsepath_args /* {
95 struct vnode *a_dvp;
96 const char *a_name;
		size_t *a_retval;
98 } */ *ap = v;
99 const char *name = ap->a_name;
100 size_t pos;
101
102 (void)ap->a_dvp;
103
104 pos = 0;
105 while (name[pos] != '\0' && name[pos] != '/') {
106 pos++;
107 }
108 *ap->a_retval = pos;
109 return 0;
110 }
111
112 int
genfs_poll(void *v)
114 {
115 struct vop_poll_args /* {
116 struct vnode *a_vp;
117 int a_events;
118 struct lwp *a_l;
119 } */ *ap = v;
120
121 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
122 }
123
124 int
genfs_seek(void *v)
126 {
127 struct vop_seek_args /* {
128 struct vnode *a_vp;
129 off_t a_oldoff;
130 off_t a_newoff;
		kauth_cred_t a_cred;
132 } */ *ap = v;
133
134 if (ap->a_newoff < 0)
135 return (EINVAL);
136
137 return (0);
138 }
139
140 int
genfs_abortop(void *v)
142 {
143 struct vop_abortop_args /* {
144 struct vnode *a_dvp;
145 struct componentname *a_cnp;
146 } */ *ap = v;
147
148 (void)ap;
149
150 return (0);
151 }
152
153 int
genfs_fcntl(void *v)
155 {
156 struct vop_fcntl_args /* {
157 struct vnode *a_vp;
158 u_int a_command;
159 void *a_data;
160 int a_fflag;
161 kauth_cred_t a_cred;
162 struct lwp *a_l;
163 } */ *ap = v;
164
165 if (ap->a_command == F_SETFL)
166 return (0);
167 else
168 return (EOPNOTSUPP);
169 }
170
171 /*ARGSUSED*/
172 int
genfs_badop(void *v)
174 {
175
176 panic("genfs: bad op");
177 }
178
179 /*ARGSUSED*/
180 int
genfs_nullop(void *v)
182 {
183
184 return (0);
185 }
186
187 /*ARGSUSED*/
188 int
genfs_einval(void *v)
190 {
191
192 return (EINVAL);
193 }
194
195 int
genfs_erofs_link(void *v)
197 {
198 /* also for symlink */
199 struct vop_link_v2_args /* {
200 struct vnode *a_dvp;
201 struct vnode **a_vpp;
202 struct componentname *a_cnp;
203 } */ *ap = v;
204
205 VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
206 return EROFS;
207 }
208
209 /*
210 * Called when an fs doesn't support a particular vop.
211 * This takes care to vrele, vput, or vunlock passed in vnodes
212 * and calls VOP_ABORTOP for a componentname (in non-rename VOP).
213 */
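/*
 * Editorial note: the cleanup below is driven by the vnodeop descriptor.
 * vdesc_vp_offsets[] locates each vnode argument and vdesc_flags records,
 * per slot, whether the missing operation would have consumed the
 * reference; such vnodes are vput()/vrele()d here so callers do not leak
 * them, and a repeated vnode (e.g. dvp == vp) is vput() only once and
 * vrele()d on its second appearance.
 */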
214 int
genfs_eopnotsupp(void *v)
216 {
217 struct vop_generic_args /*
218 struct vnodeop_desc *a_desc;
219 / * other random data follows, presumably * /
220 } */ *ap = v;
221 struct vnodeop_desc *desc = ap->a_desc;
222 struct vnode *vp, *vp_last = NULL;
223 int flags, i, j, offset_cnp, offset_vp;
224
225 KASSERT(desc->vdesc_offset != VOP_LOOKUP_DESCOFFSET);
226 KASSERT(desc->vdesc_offset != VOP_ABORTOP_DESCOFFSET);
227
228 /*
229 * Abort any componentname that lookup potentially left state in.
230 *
231 * As is logical, componentnames for VOP_RENAME are handled by
232 * the caller of VOP_RENAME. Yay, rename!
233 */
234 if (desc->vdesc_offset != VOP_RENAME_DESCOFFSET &&
235 (offset_vp = desc->vdesc_vp_offsets[0]) != VDESC_NO_OFFSET &&
236 (offset_cnp = desc->vdesc_componentname_offset) != VDESC_NO_OFFSET){
237 struct componentname *cnp;
238 struct vnode *dvp;
239
240 dvp = *VOPARG_OFFSETTO(struct vnode **, offset_vp, ap);
241 cnp = *VOPARG_OFFSETTO(struct componentname **, offset_cnp, ap);
242
243 VOP_ABORTOP(dvp, cnp);
244 }
245
246 flags = desc->vdesc_flags;
247 for (i = 0; i < VDESC_MAX_VPS; flags >>=1, i++) {
248 if ((offset_vp = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
249 break; /* stop at end of list */
250 if ((j = flags & VDESC_VP0_WILLPUT)) {
251 vp = *VOPARG_OFFSETTO(struct vnode **, offset_vp, ap);
252
253 /* Skip if NULL */
254 if (!vp)
255 continue;
256
257 switch (j) {
258 case VDESC_VP0_WILLPUT:
259 /* Check for dvp == vp cases */
260 if (vp == vp_last)
261 vrele(vp);
262 else {
263 vput(vp);
264 vp_last = vp;
265 }
266 break;
267 case VDESC_VP0_WILLRELE:
268 vrele(vp);
269 break;
270 }
271 }
272 }
273
274 return (EOPNOTSUPP);
275 }
276
277 /*ARGSUSED*/
278 int
genfs_ebadf(void *v)
280 {
281
282 return (EBADF);
283 }
284
285 /* ARGSUSED */
286 int
genfs_enoioctl(void *v)
288 {
289
290 return (EPASSTHROUGH);
291 }
292
293
294 /*
295 * Eliminate all activity associated with the requested vnode
296 * and with all vnodes aliased to the requested vnode.
297 */
298 int
genfs_revoke(void *v)
300 {
301 struct vop_revoke_args /* {
302 struct vnode *a_vp;
303 int a_flags;
304 } */ *ap = v;
305
306 #ifdef DIAGNOSTIC
307 if ((ap->a_flags & REVOKEALL) == 0)
308 panic("genfs_revoke: not revokeall");
309 #endif
310 vrevoke(ap->a_vp);
311 return (0);
312 }
313
314 /*
315 * Lock the node (for deadfs).
316 */
317 int
genfs_deadlock(void *v)
319 {
320 struct vop_lock_args /* {
321 struct vnode *a_vp;
322 int a_flags;
323 } */ *ap = v;
324 vnode_t *vp = ap->a_vp;
325 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
326 int flags = ap->a_flags;
327 krw_t op;
328
329 if (! ISSET(flags, LK_RETRY))
330 return ENOENT;
331
332 if (ISSET(flags, LK_DOWNGRADE)) {
333 rw_downgrade(&vip->vi_lock);
334 } else if (ISSET(flags, LK_UPGRADE)) {
335 KASSERT(ISSET(flags, LK_NOWAIT));
336 if (!rw_tryupgrade(&vip->vi_lock)) {
337 return EBUSY;
338 }
339 } else if ((flags & (LK_EXCLUSIVE | LK_SHARED)) != 0) {
340 op = (ISSET(flags, LK_EXCLUSIVE) ? RW_WRITER : RW_READER);
341 if (ISSET(flags, LK_NOWAIT)) {
342 if (!rw_tryenter(&vip->vi_lock, op))
343 return EBUSY;
344 } else {
345 rw_enter(&vip->vi_lock, op);
346 }
347 }
348 VSTATE_ASSERT_UNLOCKED(vp, VS_RECLAIMED);
349 return 0;
350 }
351
352 /*
353 * Unlock the node (for deadfs).
354 */
355 int
genfs_deadunlock(void *v)
357 {
358 struct vop_unlock_args /* {
359 struct vnode *a_vp;
360 } */ *ap = v;
361 vnode_t *vp = ap->a_vp;
362 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
363
364 rw_exit(&vip->vi_lock);
365
366 return 0;
367 }
368
369 /*
370 * Lock the node.
371 */
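/*
 * Editorial note: the flag handling below follows rw_enter() semantics.
 * LK_DOWNGRADE and LK_UPGRADE convert an already held vnode lock,
 * LK_EXCLUSIVE/LK_SHARED acquire it as RW_WRITER/RW_READER, and
 * LK_NOWAIT turns the acquisition into a try operation that returns
 * EBUSY instead of sleeping.
 */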
372 int
genfs_lock(void *v)
374 {
375 struct vop_lock_args /* {
376 struct vnode *a_vp;
377 int a_flags;
378 } */ *ap = v;
379 vnode_t *vp = ap->a_vp;
380 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
381 int flags = ap->a_flags;
382 krw_t op;
383
384 if (ISSET(flags, LK_DOWNGRADE)) {
385 rw_downgrade(&vip->vi_lock);
386 } else if (ISSET(flags, LK_UPGRADE)) {
387 KASSERT(ISSET(flags, LK_NOWAIT));
388 if (!rw_tryupgrade(&vip->vi_lock)) {
389 return EBUSY;
390 }
391 } else if ((flags & (LK_EXCLUSIVE | LK_SHARED)) != 0) {
392 op = (ISSET(flags, LK_EXCLUSIVE) ? RW_WRITER : RW_READER);
393 if (ISSET(flags, LK_NOWAIT)) {
394 if (!rw_tryenter(&vip->vi_lock, op))
395 return EBUSY;
396 } else {
397 rw_enter(&vip->vi_lock, op);
398 }
399 }
400 VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
401 return 0;
402 }
403
404 /*
405 * Unlock the node.
406 */
407 int
genfs_unlock(void *v)
409 {
410 struct vop_unlock_args /* {
411 struct vnode *a_vp;
412 } */ *ap = v;
413 vnode_t *vp = ap->a_vp;
414 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
415
416 rw_exit(&vip->vi_lock);
417
418 return 0;
419 }
420
421 /*
422 * Return whether or not the node is locked.
423 */
424 int
genfs_islocked(void *v)
426 {
427 struct vop_islocked_args /* {
428 struct vnode *a_vp;
429 } */ *ap = v;
430 vnode_t *vp = ap->a_vp;
431 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
432
433 if (rw_write_held(&vip->vi_lock))
434 return LK_EXCLUSIVE;
435
436 if (rw_read_held(&vip->vi_lock))
437 return LK_SHARED;
438
439 return 0;
440 }
441
442 int
genfs_mmap(void *v)
444 {
445
446 return (0);
447 }
448
449 /*
450 * VOP_PUTPAGES() for vnodes which never have pages.
451 */
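/*
 * Editorial note: VOP_PUTPAGES() is entered with the object's vmobjlock
 * held; since such a vnode never has pages to flush, the only remaining
 * work is to drop that lock and return.
 */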
452
453 int
genfs_null_putpages(void *v)
455 {
456 struct vop_putpages_args /* {
457 struct vnode *a_vp;
458 voff_t a_offlo;
459 voff_t a_offhi;
460 int a_flags;
461 } */ *ap = v;
462 struct vnode *vp = ap->a_vp;
463
464 KASSERT(vp->v_uobj.uo_npages == 0);
465 rw_exit(vp->v_uobj.vmobjlock);
466 return (0);
467 }
468
469 void
genfs_node_init(struct vnode *vp, const struct genfs_ops *ops)
471 {
472 struct genfs_node *gp = VTOG(vp);
473
474 rw_init(&gp->g_glock);
475 gp->g_op = ops;
476 }
477
478 void
genfs_node_destroy(struct vnode *vp)
480 {
481 struct genfs_node *gp = VTOG(vp);
482
483 rw_destroy(&gp->g_glock);
484 }
485
486 void
genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
488 {
489 int bsize;
490
491 bsize = 1 << vp->v_mount->mnt_fs_bshift;
492 *eobp = (size + bsize - 1) & ~(bsize - 1);
493 }
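/*
 * Illustrative example (editorial addition): with mnt_fs_bshift == 13
 * (8 KiB filesystem blocks), a size of 5000 makes genfs_size() round
 * *eobp up to 8192, while an already block-aligned size is returned
 * unchanged.
 */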
494
495 static void
filt_genfsdetach(struct knote *kn)
497 {
498 struct vnode *vp = (struct vnode *)kn->kn_hook;
499
500 vn_knote_detach(vp, kn);
501 }
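/*
 * Editorial note on the filter routines below: a non-zero hint (such as
 * NOTE_REVOKE) arrives with the vnode interlock already held, which the
 * KASSERTs verify, whereas a hint of 0 means the filter is being polled
 * and must take and release v_interlock itself.
 */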
502
503 static int
filt_genfsread(struct knote *kn, long hint)
505 {
506 struct vnode *vp = (struct vnode *)kn->kn_hook;
507 int rv;
508
509 /*
510 * filesystem is gone, so set the EOF flag and schedule
511 * the knote for deletion.
512 */
513 switch (hint) {
514 case NOTE_REVOKE:
515 KASSERT(mutex_owned(vp->v_interlock));
516 knote_set_eof(kn, EV_ONESHOT);
517 return (1);
518 case 0:
519 mutex_enter(vp->v_interlock);
520 kn->kn_data = vp->v_size - ((file_t *)kn->kn_obj)->f_offset;
521 rv = (kn->kn_data != 0);
522 mutex_exit(vp->v_interlock);
523 return rv;
524 default:
525 KASSERT(mutex_owned(vp->v_interlock));
526 kn->kn_data = vp->v_size - ((file_t *)kn->kn_obj)->f_offset;
527 return (kn->kn_data != 0);
528 }
529 }
530
531 static int
filt_genfswrite(struct knote *kn, long hint)
533 {
534 struct vnode *vp = (struct vnode *)kn->kn_hook;
535
536 /*
537 * filesystem is gone, so set the EOF flag and schedule
538 * the knote for deletion.
539 */
540 switch (hint) {
541 case NOTE_REVOKE:
542 KASSERT(mutex_owned(vp->v_interlock));
543 knote_set_eof(kn, EV_ONESHOT);
544 return (1);
545 case 0:
546 mutex_enter(vp->v_interlock);
547 kn->kn_data = 0;
548 mutex_exit(vp->v_interlock);
549 return 1;
550 default:
551 KASSERT(mutex_owned(vp->v_interlock));
552 kn->kn_data = 0;
553 return 1;
554 }
555 }
556
557 static int
filt_genfsvnode(struct knote *kn, long hint)
559 {
560 struct vnode *vp = (struct vnode *)kn->kn_hook;
561 int fflags;
562
563 switch (hint) {
564 case NOTE_REVOKE:
565 KASSERT(mutex_owned(vp->v_interlock));
566 knote_set_eof(kn, 0);
567 if ((kn->kn_sfflags & hint) != 0)
568 kn->kn_fflags |= hint;
569 return (1);
570 case 0:
571 mutex_enter(vp->v_interlock);
572 fflags = kn->kn_fflags;
573 mutex_exit(vp->v_interlock);
574 break;
575 default:
576 KASSERT(mutex_owned(vp->v_interlock));
577 if ((kn->kn_sfflags & hint) != 0)
578 kn->kn_fflags |= hint;
579 fflags = kn->kn_fflags;
580 break;
581 }
582
583 return (fflags != 0);
584 }
585
586 static const struct filterops genfsread_filtops = {
587 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
588 .f_attach = NULL,
589 .f_detach = filt_genfsdetach,
590 .f_event = filt_genfsread,
591 };
592
593 static const struct filterops genfswrite_filtops = {
594 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
595 .f_attach = NULL,
596 .f_detach = filt_genfsdetach,
597 .f_event = filt_genfswrite,
598 };
599
600 static const struct filterops genfsvnode_filtops = {
601 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
602 .f_attach = NULL,
603 .f_detach = filt_genfsdetach,
604 .f_event = filt_genfsvnode,
605 };
606
607 int
genfs_kqfilter(void *v)
609 {
610 struct vop_kqfilter_args /* {
611 struct vnode *a_vp;
612 struct knote *a_kn;
613 } */ *ap = v;
614 struct vnode *vp;
615 struct knote *kn;
616
617 vp = ap->a_vp;
618 kn = ap->a_kn;
619 switch (kn->kn_filter) {
620 case EVFILT_READ:
621 kn->kn_fop = &genfsread_filtops;
622 break;
623 case EVFILT_WRITE:
624 kn->kn_fop = &genfswrite_filtops;
625 break;
626 case EVFILT_VNODE:
627 kn->kn_fop = &genfsvnode_filtops;
628 break;
629 default:
630 return (EINVAL);
631 }
632
633 kn->kn_hook = vp;
634
635 vn_knote_attach(vp, kn);
636
637 return (0);
638 }
639
640 void
genfs_node_wrlock(struct vnode *vp)
642 {
643 struct genfs_node *gp = VTOG(vp);
644
645 rw_enter(&gp->g_glock, RW_WRITER);
646 }
647
648 void
genfs_node_rdlock(struct vnode *vp)
650 {
651 struct genfs_node *gp = VTOG(vp);
652
653 rw_enter(&gp->g_glock, RW_READER);
654 }
655
656 int
genfs_node_rdtrylock(struct vnode *vp)
658 {
659 struct genfs_node *gp = VTOG(vp);
660
661 return rw_tryenter(&gp->g_glock, RW_READER);
662 }
663
664 void
genfs_node_unlock(struct vnode *vp)
666 {
667 struct genfs_node *gp = VTOG(vp);
668
669 rw_exit(&gp->g_glock);
670 }
671
672 int
genfs_node_wrlocked(struct vnode *vp)
674 {
675 struct genfs_node *gp = VTOG(vp);
676
677 return rw_write_held(&gp->g_glock);
678 }
679
680 /*
681 * Common filesystem object access control check routine. Accepts a
682 * vnode, cred, uid, gid, mode, acl, requested access mode.
683 * Returns 0 on success, or an errno on failure.
684 */
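/*
 * Illustrative example (editorial addition): for a file with mode 0644
 * owned by the caller, a request for VREAD|VWRITE is granted through the
 * owner bits, VEXEC alone fails with EACCES, and VEXEC|VADMIN fails with
 * EPERM because VADMIN was among the bits requested.
 */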
685 int
genfs_can_access(vnode_t *vp, kauth_cred_t cred, uid_t file_uid, gid_t file_gid,
687 mode_t file_mode, struct acl *acl, accmode_t accmode)
688 {
689 accmode_t dac_granted;
690 int error;
691
692 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0);
693 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE));
694
695 /*
696 * Look for a normal, non-privileged way to access the file/directory
697 * as requested. If it exists, go with that.
698 */
699
700 dac_granted = 0;
701
702 /* Check the owner. */
703 if (kauth_cred_geteuid(cred) == file_uid) {
704 dac_granted |= VADMIN;
705 if (file_mode & S_IXUSR)
706 dac_granted |= VEXEC;
707 if (file_mode & S_IRUSR)
708 dac_granted |= VREAD;
709 if (file_mode & S_IWUSR)
710 dac_granted |= (VWRITE | VAPPEND);
711
712 goto privchk;
713 }
714
	/* Otherwise, check the groups. */
717 error = kauth_cred_groupmember(cred, file_gid);
718 if (error > 0)
719 return error;
720 if (error == 0) {
721 if (file_mode & S_IXGRP)
722 dac_granted |= VEXEC;
723 if (file_mode & S_IRGRP)
724 dac_granted |= VREAD;
725 if (file_mode & S_IWGRP)
726 dac_granted |= (VWRITE | VAPPEND);
727
728 goto privchk;
729 }
730
731 /* Otherwise, check everyone else. */
732 if (file_mode & S_IXOTH)
733 dac_granted |= VEXEC;
734 if (file_mode & S_IROTH)
735 dac_granted |= VREAD;
736 if (file_mode & S_IWOTH)
737 dac_granted |= (VWRITE | VAPPEND);
738
739 privchk:
740 if ((accmode & dac_granted) == accmode)
741 return 0;
742
743 return (accmode & VADMIN) ? EPERM : EACCES;
744 }
745
746 /*
747 * Implement a version of genfs_can_access() that understands POSIX.1e ACL
748 * semantics;
749 * the access ACL has already been prepared for evaluation by the file system
750 * and is passed via 'uid', 'gid', and 'acl'. Return 0 on success, else an
751 * errno value.
752 */
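/*
 * Editorial summary of the evaluation order implemented below:
 * ACL_USER_OBJ (when the caller owns the file), then ACL_USER entries,
 * then ACL_GROUP_OBJ/ACL_GROUP entries with the granted bits limited by
 * ACL_MASK when one is present, and finally ACL_OTHER, to which the mask
 * is never applied.
 */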
753 int
genfs_can_access_acl_posix1e(vnode_t *vp, kauth_cred_t cred, uid_t file_uid,
755 gid_t file_gid, mode_t file_mode, struct acl *acl, accmode_t accmode)
756 {
757 struct acl_entry *acl_other, *acl_mask;
758 accmode_t dac_granted;
759 accmode_t acl_mask_granted;
760 int group_matched, i;
761 int error;
762
763 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0);
764 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE));
765
766 /*
767 * The owner matches if the effective uid associated with the
768 * credential matches that of the ACL_USER_OBJ entry. While we're
769 * doing the first scan, also cache the location of the ACL_MASK and
770 * ACL_OTHER entries, preventing some future iterations.
771 */
772 acl_mask = acl_other = NULL;
773 for (i = 0; i < acl->acl_cnt; i++) {
774 struct acl_entry *ae = &acl->acl_entry[i];
775 switch (ae->ae_tag) {
776 case ACL_USER_OBJ:
777 if (kauth_cred_geteuid(cred) != file_uid)
778 break;
779 dac_granted = 0;
780 dac_granted |= VADMIN;
781 if (ae->ae_perm & ACL_EXECUTE)
782 dac_granted |= VEXEC;
783 if (ae->ae_perm & ACL_READ)
784 dac_granted |= VREAD;
785 if (ae->ae_perm & ACL_WRITE)
786 dac_granted |= (VWRITE | VAPPEND);
787 goto out;
788
789 case ACL_MASK:
790 acl_mask = ae;
791 break;
792
793 case ACL_OTHER:
794 acl_other = ae;
795 break;
796
797 default:
798 break;
799 }
800 }
801
802 /*
803 * An ACL_OTHER entry should always exist in a valid access ACL. If
804 * it doesn't, then generate a serious failure. For now, this means
805 * a debugging message and EPERM, but in the future should probably
806 * be a panic.
807 */
808 if (acl_other == NULL) {
809 /*
810 * XXX This should never happen
811 */
812 printf("%s: ACL_OTHER missing\n", __func__);
813 return EPERM;
814 }
815
816 /*
817 * Checks against ACL_USER, ACL_GROUP_OBJ, and ACL_GROUP fields are
818 * masked by an ACL_MASK entry, if any. As such, first identify the
819 * ACL_MASK field, then iterate through identifying potential user
820 * matches, then group matches. If there is no ACL_MASK, assume that
821 * the mask allows all requests to succeed.
822 */
823 if (acl_mask != NULL) {
824 acl_mask_granted = 0;
825 if (acl_mask->ae_perm & ACL_EXECUTE)
826 acl_mask_granted |= VEXEC;
827 if (acl_mask->ae_perm & ACL_READ)
828 acl_mask_granted |= VREAD;
829 if (acl_mask->ae_perm & ACL_WRITE)
830 acl_mask_granted |= (VWRITE | VAPPEND);
831 } else
832 acl_mask_granted = VEXEC | VREAD | VWRITE | VAPPEND;
833
834 /*
835 * Check ACL_USER ACL entries. There will either be one or no
	 * matches; if there is one, we accept or reject based on the
837 * match; otherwise, we continue on to groups.
838 */
839 for (i = 0; i < acl->acl_cnt; i++) {
840 struct acl_entry *ae = &acl->acl_entry[i];
841 switch (ae->ae_tag) {
842 case ACL_USER:
843 if (kauth_cred_geteuid(cred) != ae->ae_id)
844 break;
845 dac_granted = 0;
846 if (ae->ae_perm & ACL_EXECUTE)
847 dac_granted |= VEXEC;
848 if (ae->ae_perm & ACL_READ)
849 dac_granted |= VREAD;
850 if (ae->ae_perm & ACL_WRITE)
851 dac_granted |= (VWRITE | VAPPEND);
852 dac_granted &= acl_mask_granted;
853 goto out;
854 }
855 }
856
857 /*
858 * Group match is best-match, not first-match, so find a "best"
859 * match. Iterate across, testing each potential group match. Make
860 * sure we keep track of whether we found a match or not, so that we
861 * know if we should try again with any available privilege, or if we
862 * should move on to ACL_OTHER.
863 */
864 group_matched = 0;
865 for (i = 0; i < acl->acl_cnt; i++) {
866 struct acl_entry *ae = &acl->acl_entry[i];
867 switch (ae->ae_tag) {
868 case ACL_GROUP_OBJ:
869 error = kauth_cred_groupmember(cred, file_gid);
870 if (error > 0)
871 return error;
872 if (error)
873 break;
874 dac_granted = 0;
875 if (ae->ae_perm & ACL_EXECUTE)
876 dac_granted |= VEXEC;
877 if (ae->ae_perm & ACL_READ)
878 dac_granted |= VREAD;
879 if (ae->ae_perm & ACL_WRITE)
880 dac_granted |= (VWRITE | VAPPEND);
881 dac_granted &= acl_mask_granted;
882
883 if ((accmode & dac_granted) == accmode)
884 return 0;
885
886 group_matched = 1;
887 break;
888
889 case ACL_GROUP:
890 error = kauth_cred_groupmember(cred, ae->ae_id);
891 if (error > 0)
892 return error;
893 if (error)
894 break;
895 dac_granted = 0;
896 if (ae->ae_perm & ACL_EXECUTE)
897 dac_granted |= VEXEC;
898 if (ae->ae_perm & ACL_READ)
899 dac_granted |= VREAD;
900 if (ae->ae_perm & ACL_WRITE)
901 dac_granted |= (VWRITE | VAPPEND);
902 dac_granted &= acl_mask_granted;
903
904 if ((accmode & dac_granted) == accmode)
905 return 0;
906
907 group_matched = 1;
908 break;
909
910 default:
911 break;
912 }
913 }
914
915 if (group_matched == 1) {
916 /*
917 * There was a match, but it did not grant rights via pure
918 * DAC. Try again, this time with privilege.
919 */
920 for (i = 0; i < acl->acl_cnt; i++) {
921 struct acl_entry *ae = &acl->acl_entry[i];
922 switch (ae->ae_tag) {
923 case ACL_GROUP_OBJ:
924 error = kauth_cred_groupmember(cred, file_gid);
925 if (error > 0)
926 return error;
927 if (error)
928 break;
929 dac_granted = 0;
930 if (ae->ae_perm & ACL_EXECUTE)
931 dac_granted |= VEXEC;
932 if (ae->ae_perm & ACL_READ)
933 dac_granted |= VREAD;
934 if (ae->ae_perm & ACL_WRITE)
935 dac_granted |= (VWRITE | VAPPEND);
936 dac_granted &= acl_mask_granted;
937 goto out;
938
939 case ACL_GROUP:
940 error = kauth_cred_groupmember(cred, ae->ae_id);
941 if (error > 0)
942 return error;
943 if (error)
944 break;
945 dac_granted = 0;
946 if (ae->ae_perm & ACL_EXECUTE)
947 dac_granted |= VEXEC;
948 if (ae->ae_perm & ACL_READ)
949 dac_granted |= VREAD;
950 if (ae->ae_perm & ACL_WRITE)
951 dac_granted |= (VWRITE | VAPPEND);
952 dac_granted &= acl_mask_granted;
953
954 goto out;
955 default:
956 break;
957 }
958 }
959 /*
960 * Even with privilege, group membership was not sufficient.
961 * Return failure.
962 */
963 dac_granted = 0;
964 goto out;
965 }
966
967 /*
968 * Fall back on ACL_OTHER. ACL_MASK is not applied to ACL_OTHER.
969 */
970 dac_granted = 0;
971 if (acl_other->ae_perm & ACL_EXECUTE)
972 dac_granted |= VEXEC;
973 if (acl_other->ae_perm & ACL_READ)
974 dac_granted |= VREAD;
975 if (acl_other->ae_perm & ACL_WRITE)
976 dac_granted |= (VWRITE | VAPPEND);
977
978 out:
979 if ((accmode & dac_granted) == accmode)
980 return 0;
981 return (accmode & VADMIN) ? EPERM : EACCES;
982 }
983
984 static struct {
985 accmode_t accmode;
986 int mask;
987 } accmode2mask[] = {
988 { VREAD, ACL_READ_DATA },
989 { VWRITE, ACL_WRITE_DATA },
990 { VAPPEND, ACL_APPEND_DATA },
991 { VEXEC, ACL_EXECUTE },
992 { VREAD_NAMED_ATTRS, ACL_READ_NAMED_ATTRS },
993 { VWRITE_NAMED_ATTRS, ACL_WRITE_NAMED_ATTRS },
994 { VDELETE_CHILD, ACL_DELETE_CHILD },
995 { VREAD_ATTRIBUTES, ACL_READ_ATTRIBUTES },
996 { VWRITE_ATTRIBUTES, ACL_WRITE_ATTRIBUTES },
997 { VDELETE, ACL_DELETE },
998 { VREAD_ACL, ACL_READ_ACL },
999 { VWRITE_ACL, ACL_WRITE_ACL },
1000 { VWRITE_OWNER, ACL_WRITE_OWNER },
1001 { VSYNCHRONIZE, ACL_SYNCHRONIZE },
1002 { 0, 0 },
1003 };
1004
1005 static int
_access_mask_from_accmode(accmode_t accmode)
1007 {
1008 int access_mask = 0, i;
1009
1010 for (i = 0; accmode2mask[i].accmode != 0; i++) {
1011 if (accmode & accmode2mask[i].accmode)
1012 access_mask |= accmode2mask[i].mask;
1013 }
1014
1015 /*
1016 * VAPPEND is just a modifier for VWRITE; if the caller asked
1017 * for 'VAPPEND | VWRITE', we want to check for ACL_APPEND_DATA only.
1018 */
1019 if (access_mask & ACL_APPEND_DATA)
1020 access_mask &= ~ACL_WRITE_DATA;
1021
1022 return (access_mask);
1023 }
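/*
 * Illustrative example (editorial addition): VWRITE|VAPPEND maps to
 * ACL_APPEND_DATA only, because ACL_WRITE_DATA is stripped whenever
 * ACL_APPEND_DATA is present, while plain VWRITE maps to ACL_WRITE_DATA.
 */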
1024
1025 /*
 * Return 0 iff access is allowed, 1 otherwise.
1027 */
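/*
 * Editorial note: the loop below walks the NFSv4 ACL in order.  A DENY
 * entry that matches the caller and covers any still-needed bit fails
 * the check immediately (an explicit denial); an ALLOW entry clears the
 * bits it grants, and the check succeeds once no requested bits remain.
 */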
1028 static int
_acl_denies(const struct acl *aclp, int access_mask, kauth_cred_t cred,
1030 int file_uid, int file_gid, int *denied_explicitly)
1031 {
1032 int i, error;
1033 const struct acl_entry *ae;
1034
1035 if (denied_explicitly != NULL)
1036 *denied_explicitly = 0;
1037
1038 KASSERT(aclp->acl_cnt <= ACL_MAX_ENTRIES);
1039
1040 for (i = 0; i < aclp->acl_cnt; i++) {
1041 ae = &(aclp->acl_entry[i]);
1042
1043 if (ae->ae_entry_type != ACL_ENTRY_TYPE_ALLOW &&
1044 ae->ae_entry_type != ACL_ENTRY_TYPE_DENY)
1045 continue;
1046 if (ae->ae_flags & ACL_ENTRY_INHERIT_ONLY)
1047 continue;
1048 switch (ae->ae_tag) {
1049 case ACL_USER_OBJ:
1050 if (kauth_cred_geteuid(cred) != file_uid)
1051 continue;
1052 break;
1053 case ACL_USER:
1054 if (kauth_cred_geteuid(cred) != ae->ae_id)
1055 continue;
1056 break;
1057 case ACL_GROUP_OBJ:
1058 error = kauth_cred_groupmember(cred, file_gid);
1059 if (error > 0)
1060 return error;
1061 if (error != 0)
1062 continue;
1063 break;
1064 case ACL_GROUP:
1065 error = kauth_cred_groupmember(cred, ae->ae_id);
1066 if (error > 0)
1067 return error;
1068 if (error != 0)
1069 continue;
1070 break;
1071 default:
1072 KASSERT(ae->ae_tag == ACL_EVERYONE);
1073 }
1074
1075 if (ae->ae_entry_type == ACL_ENTRY_TYPE_DENY) {
1076 if (ae->ae_perm & access_mask) {
1077 if (denied_explicitly != NULL)
1078 *denied_explicitly = 1;
1079 return (1);
1080 }
1081 }
1082
1083 access_mask &= ~(ae->ae_perm);
1084 if (access_mask == 0)
1085 return (0);
1086 }
1087
1088 if (access_mask == 0)
1089 return (0);
1090
1091 return (1);
1092 }
1093
1094 int
genfs_can_access_acl_nfs4(vnode_t *vp, kauth_cred_t cred, uid_t file_uid,
1096 gid_t file_gid, mode_t file_mode, struct acl *aclp, accmode_t accmode)
1097 {
1098 int denied, explicitly_denied, access_mask, is_directory,
1099 must_be_owner = 0;
1100 file_mode = 0;
1101
1102 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND |
1103 VEXPLICIT_DENY | VREAD_NAMED_ATTRS | VWRITE_NAMED_ATTRS |
1104 VDELETE_CHILD | VREAD_ATTRIBUTES | VWRITE_ATTRIBUTES | VDELETE |
1105 VREAD_ACL | VWRITE_ACL | VWRITE_OWNER | VSYNCHRONIZE)) == 0);
1106 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE));
1107
1108 if (accmode & VADMIN)
1109 must_be_owner = 1;
1110
1111 /*
1112 * Ignore VSYNCHRONIZE permission.
1113 */
1114 accmode &= ~VSYNCHRONIZE;
1115
1116 access_mask = _access_mask_from_accmode(accmode);
1117
1118 if (vp && vp->v_type == VDIR)
1119 is_directory = 1;
1120 else
1121 is_directory = 0;
1122
1123 /*
1124 * File owner is always allowed to read and write the ACL
1125 * and basic attributes. This is to prevent a situation
1126 * where user would change ACL in a way that prevents him
1127 * from undoing the change.
1128 */
1129 if (kauth_cred_geteuid(cred) == file_uid)
1130 access_mask &= ~(ACL_READ_ACL | ACL_WRITE_ACL |
1131 ACL_READ_ATTRIBUTES | ACL_WRITE_ATTRIBUTES);
1132
1133 /*
1134 * Ignore append permission for regular files; use write
1135 * permission instead.
1136 */
1137 if (!is_directory && (access_mask & ACL_APPEND_DATA)) {
1138 access_mask &= ~ACL_APPEND_DATA;
1139 access_mask |= ACL_WRITE_DATA;
1140 }
1141
1142 denied = _acl_denies(aclp, access_mask, cred, file_uid, file_gid,
1143 &explicitly_denied);
1144
1145 if (must_be_owner) {
1146 if (kauth_cred_geteuid(cred) != file_uid)
1147 denied = EPERM;
1148 }
1149
1150 /*
1151 * For VEXEC, ensure that at least one execute bit is set for
1152 * non-directories. We have to check the mode here to stay
1153 * consistent with execve(2). See the test in
1154 * exec_check_permissions().
1155 */
1156 __acl_nfs4_sync_mode_from_acl(&file_mode, aclp);
1157 if (!denied && !is_directory && (accmode & VEXEC) &&
1158 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)
1159 denied = EACCES;
1160
1161 if (!denied)
1162 return (0);
1163
1164 /*
1165 * Access failed. Iff it was not denied explicitly and
1166 * VEXPLICIT_DENY flag was specified, allow access.
1167 */
1168 if ((accmode & VEXPLICIT_DENY) && explicitly_denied == 0)
1169 return (0);
1170
1171 accmode &= ~VEXPLICIT_DENY;
1172
1173 if (accmode & (VADMIN_PERMS | VDELETE_CHILD | VDELETE))
1174 denied = EPERM;
1175 else
1176 denied = EACCES;
1177
1178 return (denied);
1179 }
1180
1181 /*
1182 * Common routine to check if chmod() is allowed.
1183 *
1184 * Policy:
1185 * - You must own the file, and
1186 * - You must not set the "sticky" bit (meaningless, see chmod(2))
1187 * - You must be a member of the group if you're trying to set the
 *   SGID bit
1189 *
1190 * vp - vnode of the file-system object
1191 * cred - credentials of the invoker
1192 * cur_uid, cur_gid - current uid/gid of the file-system object
1193 * new_mode - new mode for the file-system object
1194 *
1195 * Returns 0 if the change is allowed, or an error value otherwise.
1196 */
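/*
 * Illustrative example (editorial addition): an owner who is not a
 * member of the file's group and requests mode 02755 (setgid) is refused
 * with EPERM by the S_ISGID check below; the same request from a group
 * member passes this policy routine.
 */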
1197 int
genfs_can_chmod(vnode_t *vp, kauth_cred_t cred, uid_t cur_uid,
1199 gid_t cur_gid, mode_t new_mode)
1200 {
1201 int error;
1202
1203 /*
1204 * To modify the permissions on a file, must possess VADMIN
1205 * for that file.
1206 */
1207 if ((error = VOP_ACCESSX(vp, VWRITE_ACL, cred)) != 0)
1208 return (error);
1209
1210 /*
1211 * Unprivileged users can't set the sticky bit on files.
1212 */
1213 if ((vp->v_type != VDIR) && (new_mode & S_ISTXT))
1214 return (EFTYPE);
1215
1216 /*
1217 * If the invoker is trying to set the SGID bit on the file,
1218 * check group membership.
1219 */
1220 if (new_mode & S_ISGID) {
1221 int ismember;
1222
1223 error = kauth_cred_ismember_gid(cred, cur_gid,
1224 &ismember);
1225 if (error || !ismember)
1226 return (EPERM);
1227 }
1228
1229 /*
1230 * Deny setting setuid if we are not the file owner.
1231 */
1232 if ((new_mode & S_ISUID) && cur_uid != kauth_cred_geteuid(cred))
1233 return (EPERM);
1234
1235 return (0);
1236 }
1237
1238 /*
1239 * Common routine to check if chown() is allowed.
1240 *
1241 * Policy:
1242 * - You must own the file, and
1243 * - You must not try to change ownership, and
1244 * - You must be member of the new group
1245 *
1246 * vp - vnode
1247 * cred - credentials of the invoker
1248 * cur_uid, cur_gid - current uid/gid of the file-system object
1249 * new_uid, new_gid - target uid/gid of the file-system object
1250 *
1251 * Returns 0 if the change is allowed, or an error value otherwise.
1252 */
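/*
 * Illustrative example (editorial addition): an unprivileged owner may
 * keep the uid and switch the gid to their effective gid or to any group
 * they are a member of; a request that changes the uid is answered with
 * EPERM here, and privileged overrides, if any, are handled by the caller.
 */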
1253 int
genfs_can_chown(vnode_t *vp, kauth_cred_t cred, uid_t cur_uid,
1255 gid_t cur_gid, uid_t new_uid, gid_t new_gid)
1256 {
1257 int error, ismember;
1258
1259 /*
1260 * To modify the ownership of a file, must possess VADMIN for that
1261 * file.
1262 */
1263 if ((error = VOP_ACCESSX(vp, VWRITE_OWNER, cred)) != 0)
1264 return (error);
1265
1266 /*
1267 * You can only change ownership of a file if:
1268 * You own the file and...
1269 */
1270 if (kauth_cred_geteuid(cred) == cur_uid) {
1271 /*
1272 * You don't try to change ownership, and...
1273 */
1274 if (new_uid != cur_uid)
1275 return (EPERM);
1276
1277 /*
1278 * You don't try to change group (no-op), or...
1279 */
1280 if (new_gid == cur_gid)
1281 return (0);
1282
1283 /*
1284 * Your effective gid is the new gid, or...
1285 */
1286 if (kauth_cred_getegid(cred) == new_gid)
1287 return (0);
1288
1289 /*
1290 * The new gid is one you're a member of.
1291 */
1292 ismember = 0;
1293 error = kauth_cred_ismember_gid(cred, new_gid,
1294 &ismember);
1295 if (!error && ismember)
1296 return (0);
1297 }
1298
1299 return (EPERM);
1300 }
1301
1302 int
genfs_can_chtimes(vnode_t *vp, kauth_cred_t cred, uid_t owner_uid,
1304 u_int vaflags)
1305 {
1306 int error;
1307 /*
1308 * Grant permission if the caller is the owner of the file, or
1309 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on
	 * the file.  If the time pointer is null, then write
1311 * permission on the file is also sufficient.
1312 *
1313 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
1314 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES
1315 * will be allowed to set the times [..] to the current
1316 * server time.
1317 */
1318 error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred);
1319 if (error != 0 && (vaflags & VA_UTIMES_NULL) != 0)
1320 error = VOP_ACCESS(vp, VWRITE, cred);
1321
1322 if (error)
1323 return (vaflags & VA_UTIMES_NULL) == 0 ? EPERM : EACCES;
1324
1325 return 0;
1326 }
1327
1328 /*
1329 * Common routine to check if chflags() is allowed.
1330 *
1331 * Policy:
1332 * - You must own the file, and
1333 * - You must not change system flags, and
1334 * - You must not change flags on character/block devices.
1335 *
1336 * vp - vnode
1337 * cred - credentials of the invoker
1338 * owner_uid - uid of the file-system object
1339 * changing_sysflags - true if the invoker wants to change system flags
1340 */
1341 int
genfs_can_chflags(vnode_t *vp, kauth_cred_t cred,
1343 uid_t owner_uid, bool changing_sysflags)
1344 {
1345
1346 /* The user must own the file. */
1347 if (kauth_cred_geteuid(cred) != owner_uid) {
1348 return EPERM;
1349 }
1350
1351 if (changing_sysflags) {
1352 return EPERM;
1353 }
1354
1355 /*
1356 * Unprivileged users cannot change the flags on devices, even if they
1357 * own them.
1358 */
1359 if (vp->v_type == VCHR || vp->v_type == VBLK) {
1360 return EPERM;
1361 }
1362
1363 return 0;
1364 }
1365
1366 /*
1367 * Common "sticky" policy.
1368 *
1369 * When a directory is "sticky" (as determined by the caller), this
1370 * function may help implementing the following policy:
1371 * - Renaming a file in it is only possible if the user owns the directory
1372 * or the file being renamed.
1373 * - Deleting a file from it is only possible if the user owns the
1374 * directory or the file being deleted.
1375 */
1376 int
genfs_can_sticky(vnode_t *vp, kauth_cred_t cred, uid_t dir_uid, uid_t file_uid)
1378 {
1379 if (kauth_cred_geteuid(cred) != dir_uid &&
1380 kauth_cred_geteuid(cred) != file_uid)
1381 return EPERM;
1382
1383 return 0;
1384 }
1385
1386 int
genfs_can_extattr(vnode_t *vp, kauth_cred_t cred, accmode_t accmode,
1388 int attrnamespace)
1389 {
1390 /*
1391 * Kernel-invoked always succeeds.
1392 */
1393 if (cred == NOCRED)
1394 return 0;
1395
1396 switch (attrnamespace) {
1397 case EXTATTR_NAMESPACE_SYSTEM:
1398 return kauth_authorize_system(cred, KAUTH_SYSTEM_FS_EXTATTR,
1399 0, vp->v_mount, NULL, NULL);
1400 case EXTATTR_NAMESPACE_USER:
1401 return VOP_ACCESS(vp, accmode, cred);
1402 default:
1403 return EPERM;
1404 }
1405 }
1406
1407 int
genfs_access(void *v)
1409 {
1410 struct vop_access_args *ap = v;
1411
1412 KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
1413 VAPPEND)) == 0);
1414
1415 return VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred);
1416 }
1417
1418 int
genfs_accessx(void *v)
1420 {
1421 struct vop_accessx_args *ap = v;
1422 int error;
1423 accmode_t accmode = ap->a_accmode;
1424 error = vfs_unixify_accmode(&accmode);
1425 if (error != 0)
1426 return error;
1427
1428 if (accmode == 0)
1429 return 0;
1430
1431 return VOP_ACCESS(ap->a_vp, accmode, ap->a_cred);
1432 }
1433
1434 /*
1435 * genfs_pathconf:
1436 *
1437 * Standard implementation of POSIX pathconf, to get information about limits
1438 * for a filesystem.
1439 * Override per filesystem for the case where the filesystem has smaller
1440 * limits.
1441 */
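/*
 * Illustrative use (editorial addition; "examplefs" is hypothetical): a
 * filesystem with no tighter limits can point the operation at this
 * routine in its vnodeop entry table,
 *
 *	{ &vop_pathconf_desc, genfs_pathconf },
 *
 * and only needs a private implementation when it must report smaller
 * values.
 */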
1442 int
genfs_pathconf(void *v)
1444 {
1445 struct vop_pathconf_args *ap = v;
1446
1447 switch (ap->a_name) {
1448 case _PC_PATH_MAX:
1449 *ap->a_retval = PATH_MAX;
1450 return 0;
1451 case _PC_ACL_EXTENDED:
1452 case _PC_ACL_NFS4:
1453 *ap->a_retval = 0;
1454 return 0;
1455 default:
1456 return EINVAL;
1457 }
1458 }
1459