1 /*	$NetBSD: ulfs_quota2.c,v 1.35 2022/05/28 22:08:46 andvar Exp $	*/
2 /*  from NetBSD: ufs_quota2.c,v 1.40 2015/03/28 19:24:05 maxv Exp Exp  */
3 /*  from NetBSD: ffs_quota2.c,v 1.5 2015/02/22 14:12:48 maxv Exp  */
4 
5 /*-
6   * Copyright (c) 2010 Manuel Bouyer
7   * All rights reserved.
8   *
9   * Redistribution and use in source and binary forms, with or without
10   * modification, are permitted provided that the following conditions
11   * are met:
12   * 1. Redistributions of source code must retain the above copyright
13   *    notice, this list of conditions and the following disclaimer.
14   * 2. Redistributions in binary form must reproduce the above copyright
15   *    notice, this list of conditions and the following disclaimer in the
16   *    documentation and/or other materials provided with the distribution.
17   *
18   * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19   * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20   * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21   * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22   * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23   * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24   * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25   * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26   * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28   * POSSIBILITY OF SUCH DAMAGE.
29   */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.35 2022/05/28 22:08:46 andvar Exp $");
33 
34 #include <sys/buf.h>
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/namei.h>
39 #include <sys/file.h>
40 #include <sys/proc.h>
41 #include <sys/vnode.h>
42 #include <sys/mount.h>
43 #include <sys/kauth.h>
44 #include <sys/quota.h>
45 #include <sys/quotactl.h>
46 
47 #include <ufs/lfs/lfs.h>
48 #include <ufs/lfs/lfs_accessors.h>
49 #include <ufs/lfs/lfs_extern.h>
50 
51 #include <ufs/lfs/ulfs_quota2.h>
52 #include <ufs/lfs/ulfs_inode.h>
53 #include <ufs/lfs/ulfsmount.h>
54 #include <ufs/lfs/ulfs_bswap.h>
55 #include <ufs/lfs/ulfs_extern.h>
56 #include <ufs/lfs/ulfs_quota.h>
57 
58 /*
59  * LOCKING:
60  * Data in the entries are protected by the associated struct dquot's
61  * dq_interlock (this means we can't read or change a quota entry without
62  * grabbing a dquot for it).
63  * The header and lists (including pointers in the data entries, and q2e_uid)
64  * are protected by the global dqlock.
65  * The locking order is dq_interlock -> dqlock.
66  */
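/*
 * A minimal sketch of the usual update pattern implied by the ordering
 * above (error handling omitted; quota2_q2ealloc() and getq2e() are the
 * helpers defined below):
 *
 *	mutex_enter(&dq->dq_interlock);		// protects the entry's data
 *	if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
 *		mutex_enter(&lfs_dqlock);	// header and list manipulation
 *		error = quota2_q2ealloc(ump, type, id, dq);
 *		mutex_exit(&lfs_dqlock);
 *	}
 *	error = getq2e(ump, type, dq->dq2_lblkno, dq->dq2_blkoff,
 *	    &bp, &q2e, B_MODIFY);
 *	// ... modify *q2e ...
 *	quota2_bwrite(ump->um_mountp, bp);
 *	mutex_exit(&dq->dq_interlock);
 */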
67 
68 static int quota2_bwrite(struct mount *, struct buf *);
69 static int getinoquota2(struct inode *, bool, bool, struct buf **,
70     struct quota2_entry **);
71 static int getq2h(struct ulfsmount *, int, struct buf **,
72     struct quota2_header **, int);
73 static int getq2e(struct ulfsmount *, int, daddr_t, int, struct buf **,
74     struct quota2_entry **, int);
75 static int quota2_walk_list(struct ulfsmount *, struct buf *, int,
76     uint64_t *, int, void *,
77     int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *,
78       uint64_t, void *));
79 
80 static const char *limnames[] = INITQLNAMES;
81 
82 static void
83 quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
84     struct quota2_entry *q2e)
85 {
86 	/* make sure we can index q2e_val[] by the fs-independent objtype */
87 	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
88 	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
89 
90 	q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
91 	q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
92 	q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
93 }
94 
95 /*
96  * Convert internal representation to FS-independent representation.
97  * (Note that while the two types are currently identical, the
98  * internal representation is an on-disk struct and the FS-independent
99  * representation is not, and they might diverge in the future.)
100  */
101 static void
102 q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
103 {
104 	qv->qv_softlimit = q2v->q2v_softlimit;
105 	qv->qv_hardlimit = q2v->q2v_hardlimit;
106 	qv->qv_usage = q2v->q2v_cur;
107 	qv->qv_expiretime = q2v->q2v_time;
108 	qv->qv_grace = q2v->q2v_grace;
109 }
110 
111 /*
112  * Convert a quota2entry and default-flag to the FS-independent
113  * representation.
114  */
115 static void
116 q2e_to_quotaval(struct quota2_entry *q2e, int def,
117 	       id_t *id, int objtype, struct quotaval *ret)
118 {
119 	if (def) {
120 		*id = QUOTA_DEFAULTID;
121 	} else {
122 		*id = q2e->q2e_uid;
123 	}
124 
125 	KASSERT(objtype >= 0 && objtype < N_QL);
126 	q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
127 }
128 
129 
130 static int
131 quota2_bwrite(struct mount *mp, struct buf *bp)
132 {
133 	if (mp->mnt_flag & MNT_SYNCHRONOUS)
134 		return bwrite(bp);
135 	else {
136 		bdwrite(bp);
137 		return 0;
138 	}
139 }
140 
141 static int
142 getq2h(struct ulfsmount *ump, int type,
143     struct buf **bpp, struct quota2_header **q2hp, int flags)
144 {
145 	struct lfs *fs = ump->um_lfs;
146 	const int needswap = ULFS_MPNEEDSWAP(fs);
147 	int error;
148 	struct buf *bp;
149 	struct quota2_header *q2h;
150 
151 	KASSERT(mutex_owned(&lfs_dqlock));
152 	error = bread(ump->um_quotas[type], 0, ump->umq2_bsize, flags, &bp);
153 	if (error)
154 		return error;
155 	if (bp->b_resid != 0)
156 		panic("dq2get: %s quota file truncated", lfs_quotatypes[type]);
157 
158 	q2h = (void *)bp->b_data;
159 	if (ulfs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
160 	    q2h->q2h_type != type)
161 		panic("dq2get: corrupted %s quota header", lfs_quotatypes[type]);
162 	*bpp = bp;
163 	*q2hp = q2h;
164 	return 0;
165 }
166 
167 static int
168 getq2e(struct ulfsmount *ump, int type, daddr_t lblkno, int blkoffset,
169     struct buf **bpp, struct quota2_entry **q2ep, int flags)
170 {
171 	int error;
172 	struct buf *bp;
173 
174 	if (blkoffset & (sizeof(uint64_t) - 1)) {
175 		panic("dq2get: %s quota file corrupted",
176 		    lfs_quotatypes[type]);
177 	}
178 	error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize, flags, &bp);
179 	if (error)
180 		return error;
181 	if (bp->b_resid != 0) {
182 		panic("dq2get: %s quota file corrupted",
183 		    lfs_quotatypes[type]);
184 	}
185 	*q2ep = (void *)((char *)bp->b_data + blkoffset);
186 	*bpp = bp;
187 	return 0;
188 }
189 
190 /* walk a quota entry list, calling the callback for each entry */
191 #define Q2WL_ABORT 0x10000000
192 
193 static int
194 quota2_walk_list(struct ulfsmount *ump, struct buf *hbp, int type,
195     uint64_t *offp, int flags, void *a,
196     int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
197 {
198 	struct lfs *fs = ump->um_lfs;
199 	const int needswap = ULFS_MPNEEDSWAP(fs);
200 	daddr_t off = ulfs_rw64(*offp, needswap);
201 	struct buf *bp, *obp = hbp;
202 	int ret = 0, ret2 = 0;
203 	struct quota2_entry *q2e;
204 	daddr_t lblkno, blkoff, olblkno = 0;
205 
206 	KASSERT(mutex_owned(&lfs_dqlock));
207 
208 	while (off != 0) {
209 		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
210 		blkoff = (off & ump->umq2_bmask);
211 		if (lblkno == 0) {
212 			/* in the header block */
213 			bp = hbp;
214 		} else if (lblkno == olblkno) {
215 			/* still in the same buf */
216 			bp = obp;
217 		} else {
218 			ret = bread(ump->um_quotas[type], lblkno,
219 			    ump->umq2_bsize, flags, &bp);
220 			if (ret)
221 				return ret;
222 			if (bp->b_resid != 0) {
223 				panic("quota2_walk_list: %s quota file corrupted",
224 				    lfs_quotatypes[type]);
225 			}
226 		}
227 		q2e = (void *)((char *)(bp->b_data) + blkoff);
228 		ret = (*func)(ump, offp, q2e, off, a);
229 		if (off != ulfs_rw64(*offp, needswap)) {
230 			/* callback changed parent's pointer, redo */
231 			off = ulfs_rw64(*offp, needswap);
232 			if (bp != hbp && bp != obp)
233 				ret2 = bwrite(bp);
234 		} else {
235 			/* parent is now current */
236 			if (obp != bp && obp != hbp) {
237 				if (flags & B_MODIFY)
238 					ret2 = bwrite(obp);
239 				else
240 					brelse(obp, 0);
241 			}
242 			obp = bp;
243 			olblkno = lblkno;
244 			offp = &(q2e->q2e_next);
245 			off = ulfs_rw64(*offp, needswap);
246 		}
247 		if (ret)
248 			break;
249 		if (ret2) {
250 			ret = ret2;
251 			break;
252 		}
253 	}
254 	if (obp != hbp) {
255 		if (flags & B_MODIFY)
256 			ret2 = bwrite(obp);
257 		else
258 			brelse(obp, 0);
259 	}
260 	if (ret & Q2WL_ABORT)
261 		return 0;
262 	if (ret == 0)
263 		return ret2;
264 	return ret;
265 }
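/*
 * Callback contract for quota2_walk_list(), as exercised by
 * dq2get_callback() and dq2clear_callback() below: return 0 to keep
 * walking the chain, or Q2WL_ABORT to stop early (the walker then
 * reports success); any other non-zero value aborts the walk with that
 * error.  If the callback rewrites *offp (the on-disk pointer that led
 * to the current entry, e.g. to unlink it), the walker re-reads that
 * pointer and continues from its new value instead of advancing to
 * q2e_next.  A hypothetical callback that just looks for one id might
 * read:
 *
 *	static int
 *	example_callback(struct ulfsmount *ump, uint64_t *offp,
 *	    struct quota2_entry *q2e, uint64_t off, void *v)
 *	{
 *		uid_t *idp = v;
 *
 *		if (ulfs_rw32(q2e->q2e_uid, ULFS_MPNEEDSWAP(ump->um_lfs))
 *		    == *idp)
 *			return Q2WL_ABORT;	// found it, stop the walk
 *		return 0;			// keep walking this chain
 *	}
 */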
266 
267 int
268 lfsquota2_umount(struct mount *mp, int flags)
269 {
270 	int i, error;
271 	struct ulfsmount *ump = VFSTOULFS(mp);
272 	struct lfs *fs = ump->um_lfs;
273 
274 	if ((fs->um_flags & ULFS_QUOTA2) == 0)
275 		return 0;
276 
277 	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
278 		if (ump->um_quotas[i] != NULLVP) {
279 			error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
280 			    ump->um_cred[i]);
281 			if (error) {
282 				printf("quota2_umount failed: close(%p) %d\n",
283 				    ump->um_quotas[i], error);
284 				return error;
285 			}
286 		}
287 		ump->um_quotas[i] = NULLVP;
288 	}
289 	return 0;
290 }
291 
292 static int
293 quota2_q2ealloc(struct ulfsmount *ump, int type, uid_t uid, struct dquot *dq)
294 {
295 	int error, error2;
296 	struct buf *hbp, *bp;
297 	struct quota2_header *q2h;
298 	struct quota2_entry *q2e;
299 	daddr_t offset;
300 	u_long hash_mask;
301 	struct lfs *fs = ump->um_lfs;
302 	const int needswap = ULFS_MPNEEDSWAP(fs);
303 
304 	KASSERT(mutex_owned(&dq->dq_interlock));
305 	KASSERT(mutex_owned(&lfs_dqlock));
306 	error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
307 	if (error)
308 		return error;
309 	offset = ulfs_rw64(q2h->q2h_free, needswap);
310 	if (offset == 0) {
311 		struct vnode *vp = ump->um_quotas[type];
312 		struct inode *ip = VTOI(vp);
313 		uint64_t size = ip->i_size;
314 		/* need to allocate a new disk block */
315 		error = lfs_balloc(vp, size, ump->umq2_bsize,
316 		    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
317 		if (error) {
318 			brelse(hbp, 0);
319 			return error;
320 		}
321 		KASSERT((ip->i_size % ump->umq2_bsize) == 0);
322 		ip->i_size += ump->umq2_bsize;
323 		DIP_ASSIGN(ip, size, ip->i_size);
324 		ip->i_state |= IN_CHANGE | IN_UPDATE;
325 		uvm_vnp_setsize(vp, ip->i_size);
326 		lfsquota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
327 		    needswap);
328 		error = bwrite(bp);
329 		error2 = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
330 		if (error || error2) {
331 			brelse(hbp, 0);
332 			if (error)
333 				return error;
334 			return error2;
335 		}
336 		offset = ulfs_rw64(q2h->q2h_free, needswap);
337 		KASSERT(offset != 0);
338 	}
339 	dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
340 	dq->dq2_blkoff = (offset & ump->umq2_bmask);
341 	if (dq->dq2_lblkno == 0) {
342 		bp = hbp;
343 		q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
344 	} else {
345 		error = getq2e(ump, type, dq->dq2_lblkno,
346 		    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
347 		if (error) {
348 			brelse(hbp, 0);
349 			return error;
350 		}
351 	}
352 	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
353 	/* remove from free list */
354 	q2h->q2h_free = q2e->q2e_next;
355 
356 	memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
357 	q2e->q2e_uid = ulfs_rw32(uid, needswap);
358 	/* insert in hash list */
359 	q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
360 	q2h->q2h_entries[uid & hash_mask] = ulfs_rw64(offset, needswap);
361 	if (hbp != bp) {
362 		bwrite(hbp);
363 	}
364 	bwrite(bp);
365 	return 0;
366 }
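/*
 * For example, with a hypothetical q2h_hash_shift of 6 the header has
 * 64 hash chains and uid 1000 hashes to bucket 1000 & 63 == 40; the new
 * entry is pushed onto the front of that chain, with the previous chain
 * head becoming its q2e_next.
 */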
367 
368 static int
369 getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
370     struct quota2_entry **q2ep)
371 {
372 	int error;
373 	int i;
374 	struct dquot *dq;
375 	struct ulfsmount *ump = ip->i_ump;
376 	u_int32_t ino_ids[ULFS_MAXQUOTAS];
377 
378 	error = lfs_getinoquota(ip);
379 	if (error)
380 		return error;
381 
382         ino_ids[ULFS_USRQUOTA] = ip->i_uid;
383         ino_ids[ULFS_GRPQUOTA] = ip->i_gid;
384 	/* first get the interlock for all dquot */
385 	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
386 		dq = ip->i_dquot[i];
387 		if (dq == NODQUOT)
388 			continue;
389 		mutex_enter(&dq->dq_interlock);
390 	}
391 	/* now get the corresponding quota entry */
392 	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
393 		bpp[i] = NULL;
394 		q2ep[i] = NULL;
395 		dq = ip->i_dquot[i];
396 		if (dq == NODQUOT)
397 			continue;
398 		if (__predict_false(ump->um_quotas[i] == NULL)) {
399 			/*
400 			 * quotas have been turned off. This can happen
401 			 * at umount time.
402 			 */
403 			mutex_exit(&dq->dq_interlock);
404 			lfs_dqrele(NULLVP, dq);
405 			ip->i_dquot[i] = NULL;
406 			continue;
407 		}
408 
409 		if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
410 			if (!alloc) {
411 				continue;
412 			}
413 			/* need to allocate a new on-disk quota entry */
414 			mutex_enter(&lfs_dqlock);
415 			error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
416 			mutex_exit(&lfs_dqlock);
417 			if (error)
418 				return error;
419 		}
420 		KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
421 		error = getq2e(ump, i, dq->dq2_lblkno,
422 		    dq->dq2_blkoff, &bpp[i], &q2ep[i],
423 		    modify ? B_MODIFY : 0);
424 		if (error)
425 			return error;
426 	}
427 	return 0;
428 }
429 
430 __inline static int __unused
431 lfsquota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
432 {
433 	return lfsquota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
434 	    q2v->q2v_hardlimit, q2v->q2v_time, now);
435 }
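/*
 * As used by quota2_check() below: QL_S_DENY_HARD and QL_S_DENY_GRACE
 * make the allocation fail with EDQUOT (the latter roughly meaning the
 * soft limit is exceeded and its grace period has run out),
 * QL_S_ALLOW_SOFT only triggers a warning, and QL_F_CROSS indicates the
 * soft limit has just been crossed, so the grace timer q2v_time is
 * (re)started.
 */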
436 
437 static int
438 quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
439     int flags)
440 {
441 	int error;
442 	struct buf *bp[ULFS_MAXQUOTAS];
443 	struct quota2_entry *q2e[ULFS_MAXQUOTAS];
444 	struct quota2_val *q2vp;
445 	struct dquot *dq;
446 	uint64_t ncurblks;
447 	struct ulfsmount *ump = ip->i_ump;
448 	struct lfs *fs = ip->i_lfs;
449 	struct mount *mp = ump->um_mountp;
450 	const int needswap = ULFS_MPNEEDSWAP(fs);
451 	int i;
452 
453 	if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
454 		return error;
455 	if (change == 0) {
456 		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
457 			dq = ip->i_dquot[i];
458 			if (dq == NODQUOT)
459 				continue;
460 			if (bp[i])
461 				brelse(bp[i], 0);
462 			mutex_exit(&dq->dq_interlock);
463 		}
464 		return 0;
465 	}
466 	if (change < 0) {
467 		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
468 			dq = ip->i_dquot[i];
469 			if (dq == NODQUOT)
470 				continue;
471 			if (q2e[i] == NULL) {
472 				mutex_exit(&dq->dq_interlock);
473 				continue;
474 			}
475 			q2vp = &q2e[i]->q2e_val[vtype];
476 			ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
477 			if (ncurblks < -change)
478 				ncurblks = 0;
479 			else
480 				ncurblks += change;
481 			q2vp->q2v_cur = ulfs_rw64(ncurblks, needswap);
482 			quota2_bwrite(mp, bp[i]);
483 			mutex_exit(&dq->dq_interlock);
484 		}
485 		return 0;
486 	}
487 	/* see if the allocation is allowed */
488 	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
489 		struct quota2_val q2v;
490 		int ql_stat;
491 		dq = ip->i_dquot[i];
492 		if (dq == NODQUOT)
493 			continue;
494 		KASSERT(q2e[i] != NULL);
495 		lfsquota2_ulfs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
496 		ql_stat = lfsquota2_check_limit(&q2v, change, time_second);
497 
498 		if ((flags & FORCE) == 0 &&
499 		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
500 		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
501 		    KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
502 			/* enforce this limit */
503 			switch(QL_STATUS(ql_stat)) {
504 			case QL_S_DENY_HARD:
505 				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
506 					uprintf("\n%s: write failed, %s %s "
507 					    "limit reached\n",
508 					    mp->mnt_stat.f_mntonname,
509 					    lfs_quotatypes[i], limnames[vtype]);
510 					dq->dq_flags |= DQ_WARN(vtype);
511 				}
512 				error = EDQUOT;
513 				break;
514 			case QL_S_DENY_GRACE:
515 				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
516 					uprintf("\n%s: write failed, %s %s "
517 					    "limit reached\n",
518 					    mp->mnt_stat.f_mntonname,
519 					    lfs_quotatypes[i], limnames[vtype]);
520 					dq->dq_flags |= DQ_WARN(vtype);
521 				}
522 				error = EDQUOT;
523 				break;
524 			case QL_S_ALLOW_SOFT:
525 				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
526 					uprintf("\n%s: warning, %s %s "
527 					    "quota exceeded\n",
528 					    mp->mnt_stat.f_mntonname,
529 					    lfs_quotatypes[i], limnames[vtype]);
530 					dq->dq_flags |= DQ_WARN(vtype);
531 				}
532 				break;
533 			}
534 		}
535 		/*
536 		 * Always do this; we don't know if the allocation will
537 		 * succeed or not in the end.  If we don't do the allocation,
538 		 * q2v_time will be ignored anyway.
539 		 */
540 		if (ql_stat & QL_F_CROSS) {
541 			q2v.q2v_time = time_second + q2v.q2v_grace;
542 			lfsquota2_ulfs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
543 			    needswap);
544 		}
545 	}
546 
547 	/* now do the allocation if allowed */
548 	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
549 		dq = ip->i_dquot[i];
550 		if (dq == NODQUOT)
551 			continue;
552 		KASSERT(q2e[i] != NULL);
553 		if (error == 0) {
554 			q2vp = &q2e[i]->q2e_val[vtype];
555 			ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
556 			q2vp->q2v_cur = ulfs_rw64(ncurblks + change, needswap);
557 			quota2_bwrite(mp, bp[i]);
558 		} else
559 			brelse(bp[i], 0);
560 		mutex_exit(&dq->dq_interlock);
561 	}
562 	return error;
563 }
564 
565 int
566 lfs_chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
567 {
568 	return quota2_check(ip, QL_BLOCK, change, cred, flags);
569 }
570 
571 int
572 lfs_chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
573 {
574 	return quota2_check(ip, QL_FILE, change, cred, flags);
575 }
576 
577 int
578 lfsquota2_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
579     const struct quotaval *val)
580 {
581 	int error;
582 	struct dquot *dq;
583 	struct quota2_header *q2h;
584 	struct quota2_entry q2e, *q2ep;
585 	struct buf *bp;
586 	struct lfs *fs = ump->um_lfs;
587 	const int needswap = ULFS_MPNEEDSWAP(fs);
588 
589 	/* make sure we can index by the fs-independent idtype */
590 	CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
591 	CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
592 
593 	if (ump->um_quotas[key->qk_idtype] == NULLVP)
594 		return ENODEV;
595 
596 	if (key->qk_id == QUOTA_DEFAULTID) {
597 		mutex_enter(&lfs_dqlock);
598 		error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
599 		if (error) {
600 			mutex_exit(&lfs_dqlock);
601 			goto out_error;
602 		}
603 		lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
604 		quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
605 		lfsquota2_ulfs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
606 		mutex_exit(&lfs_dqlock);
607 		quota2_bwrite(ump->um_mountp, bp);
608 		goto out_error;
609 	}
610 
611 	error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
612 	if (error)
613 		goto out_error;
614 
615 	mutex_enter(&dq->dq_interlock);
616 	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
617 		/* need to allocate a new on-disk quota entry */
618 		mutex_enter(&lfs_dqlock);
619 		error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
620 		mutex_exit(&lfs_dqlock);
621 		if (error)
622 			goto out_il;
623 	}
624 	KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
625 	error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
626 	    dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
627 	if (error)
628 		goto out_il;
629 
630 	lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
631 	/*
632 	 * Reset time limit if previously had no soft limit or were
633 	 * under it, but now have a soft limit and are over it.
634 	 */
635 	if (val->qv_softlimit &&
636 	    q2e.q2e_val[key->qk_objtype].q2v_cur >= val->qv_softlimit &&
637 	    (q2e.q2e_val[key->qk_objtype].q2v_softlimit == 0 ||
638 	     q2e.q2e_val[key->qk_objtype].q2v_cur < q2e.q2e_val[key->qk_objtype].q2v_softlimit))
639 		q2e.q2e_val[key->qk_objtype].q2v_time = time_second + val->qv_grace;
640 	quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
641 	lfsquota2_ulfs_rwq2e(&q2e, q2ep, needswap);
642 	quota2_bwrite(ump->um_mountp, bp);
643 
644 out_il:
645 	mutex_exit(&dq->dq_interlock);
646 	lfs_dqrele(NULLVP, dq);
647 out_error:
648 	return error;
649 }
650 
651 struct dq2clear_callback {
652 	uid_t id;
653 	struct dquot *dq;
654 	struct quota2_header *q2h;
655 };
656 
657 static int
658 dq2clear_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
659     uint64_t off, void *v)
660 {
661 	struct dq2clear_callback *c = v;
662 	struct lfs *fs = ump->um_lfs;
663 	const int needswap = ULFS_MPNEEDSWAP(fs);
664 	uint64_t myoff;
665 
666 	if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
667 		KASSERT(mutex_owned(&c->dq->dq_interlock));
668 		c->dq->dq2_lblkno = 0;
669 		c->dq->dq2_blkoff = 0;
670 		myoff = *offp;
671 		/* remove from hash list */
672 		*offp = q2e->q2e_next;
673 		/* add to free list */
674 		q2e->q2e_next = c->q2h->q2h_free;
675 		c->q2h->q2h_free = myoff;
676 		return Q2WL_ABORT;
677 	}
678 	return 0;
679 }
680 int
681 lfsquota2_handle_cmd_del(struct ulfsmount *ump, const struct quotakey *qk)
682 {
683 	int idtype;
684 	id_t id;
685 	int objtype;
686 	int error, i, canfree;
687 	struct dquot *dq;
688 	struct quota2_header *q2h;
689 	struct quota2_entry q2e, *q2ep;
690 	struct buf *hbp, *bp;
691 	u_long hash_mask;
692 	struct dq2clear_callback c;
693 
694 	idtype = qk->qk_idtype;
695 	id = qk->qk_id;
696 	objtype = qk->qk_objtype;
697 
698 	if (ump->um_quotas[idtype] == NULLVP)
699 		return ENODEV;
700 	if (id == QUOTA_DEFAULTID)
701 		return EOPNOTSUPP;
702 
703 	/* get the default entry before locking the entry's buffer */
704 	mutex_enter(&lfs_dqlock);
705 	error = getq2h(ump, idtype, &hbp, &q2h, 0);
706 	if (error) {
707 		mutex_exit(&lfs_dqlock);
708 		return error;
709 	}
710 	/* we'll copy to another disk entry, so no need to swap */
711 	memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
712 	mutex_exit(&lfs_dqlock);
713 	brelse(hbp, 0);
714 
715 	error = lfs_dqget(NULLVP, id, ump, idtype, &dq);
716 	if (error)
717 		return error;
718 
719 	mutex_enter(&dq->dq_interlock);
720 	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
721 		/* already clear, nothing to do */
722 		error = ENOENT;
723 		goto out_il;
724 	}
725 
726 	error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
727 	    &bp, &q2ep, B_MODIFY);
728 	if (error)
729 		goto out_error;
730 
731 	/* make sure we can index by the objtype passed in */
732 	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
733 	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
734 
735 	/* clear the requested objtype by copying from the default entry */
736 	q2ep->q2e_val[objtype].q2v_softlimit =
737 		q2e.q2e_val[objtype].q2v_softlimit;
738 	q2ep->q2e_val[objtype].q2v_hardlimit =
739 		q2e.q2e_val[objtype].q2v_hardlimit;
740 	q2ep->q2e_val[objtype].q2v_grace =
741 		q2e.q2e_val[objtype].q2v_grace;
742 	q2ep->q2e_val[objtype].q2v_time = 0;
743 
744 	/* if this entry now contains no information, we can free it */
745 	canfree = 1;
746 	for (i = 0; i < N_QL; i++) {
747 		if (q2ep->q2e_val[i].q2v_cur != 0 ||
748 		    (q2ep->q2e_val[i].q2v_softlimit !=
749 		     q2e.q2e_val[i].q2v_softlimit) ||
750 		    (q2ep->q2e_val[i].q2v_hardlimit !=
751 		     q2e.q2e_val[i].q2v_hardlimit) ||
752 		    (q2ep->q2e_val[i].q2v_grace !=
753 		     q2e.q2e_val[i].q2v_grace)) {
754 			canfree = 0;
755 			break;
756 		}
757 		/* note: do not need to check q2v_time */
758 	}
759 
760 	if (canfree == 0) {
761 		quota2_bwrite(ump->um_mountp, bp);
762 		goto out_error;
763 	}
764 	/* we can free it. release bp so we can walk the list */
765 	brelse(bp, 0);
766 	mutex_enter(&lfs_dqlock);
767 	error = getq2h(ump, idtype, &hbp, &q2h, 0);
768 	if (error)
769 		goto out_dqlock;
770 
771 	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
772 	c.dq = dq;
773 	c.id = id;
774 	c.q2h = q2h;
775 	error = quota2_walk_list(ump, hbp, idtype,
776 	    &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
777 	    dq2clear_callback);
778 
779 	bwrite(hbp);
780 
781 out_dqlock:
782 	mutex_exit(&lfs_dqlock);
783 out_error:
784 out_il:
785 	mutex_exit(&dq->dq_interlock);
786 	lfs_dqrele(NULLVP, dq);
787 	return error;
788 }
789 
790 static int
791 quota2_fetch_q2e(struct ulfsmount *ump, const struct quotakey *qk,
792     struct quota2_entry *ret)
793 {
794 	struct dquot *dq;
795 	int error;
796 	struct quota2_entry *q2ep;
797 	struct buf *bp;
798 	struct lfs *fs = ump->um_lfs;
799 	const int needswap = ULFS_MPNEEDSWAP(fs);
800 
801 	error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
802 	if (error)
803 		return error;
804 
805 	mutex_enter(&dq->dq_interlock);
806 	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
807 		mutex_exit(&dq->dq_interlock);
808 		lfs_dqrele(NULLVP, dq);
809 		return ENOENT;
810 	}
811 	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
812 	    &bp, &q2ep, 0);
813 	if (error) {
814 		mutex_exit(&dq->dq_interlock);
815 		lfs_dqrele(NULLVP, dq);
816 		return error;
817 	}
818 	lfsquota2_ulfs_rwq2e(q2ep, ret, needswap);
819 	brelse(bp, 0);
820 	mutex_exit(&dq->dq_interlock);
821 	lfs_dqrele(NULLVP, dq);
822 
823 	return 0;
824 }
825 
826 static int
827 quota2_fetch_quotaval(struct ulfsmount *ump, const struct quotakey *qk,
828     struct quotaval *ret)
829 {
830 	struct dquot *dq;
831 	int error;
832 	struct quota2_entry *q2ep, q2e;
833 	struct buf  *bp;
834 	struct lfs *fs = ump->um_lfs;
835 	const int needswap = ULFS_MPNEEDSWAP(fs);
836 	id_t id2;
837 
838 	error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
839 	if (error)
840 		return error;
841 
842 	mutex_enter(&dq->dq_interlock);
843 	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
844 		mutex_exit(&dq->dq_interlock);
845 		lfs_dqrele(NULLVP, dq);
846 		return ENOENT;
847 	}
848 	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
849 	    &bp, &q2ep, 0);
850 	if (error) {
851 		mutex_exit(&dq->dq_interlock);
852 		lfs_dqrele(NULLVP, dq);
853 		return error;
854 	}
855 	lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
856 	brelse(bp, 0);
857 	mutex_exit(&dq->dq_interlock);
858 	lfs_dqrele(NULLVP, dq);
859 
860 	q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
861 	KASSERT(id2 == qk->qk_id);
862 	return 0;
863 }
864 
865 int
866 lfsquota2_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
867     struct quotaval *qv)
868 {
869 	int error;
870 	struct quota2_header *q2h;
871 	struct quota2_entry q2e;
872 	struct buf *bp;
873 	struct lfs *fs = ump->um_lfs;
874 	const int needswap = ULFS_MPNEEDSWAP(fs);
875 	id_t id2;
876 
877 	/*
878 	 * Make sure the FS-independent codes match the internal ones,
879 	 * so we can use the passed-in objtype without having to
880 	 * convert it explicitly to QL_BLOCK/QL_FILE.
881 	 */
882 	CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
883 	CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
884 	CTASSERT(N_QL == 2);
885 
886 	if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
887 		return EINVAL;
888 	}
889 
890 	if (ump->um_quotas[qk->qk_idtype] == NULLVP)
891 		return ENODEV;
892 	if (qk->qk_id == QUOTA_DEFAULTID) {
893 		mutex_enter(&lfs_dqlock);
894 		error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
895 		if (error) {
896 			mutex_exit(&lfs_dqlock);
897 			return error;
898 		}
899 		lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
900 		mutex_exit(&lfs_dqlock);
901 		brelse(bp, 0);
902 		q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
903 				qk->qk_objtype, qv);
904 		(void)id2;
905 	} else
906 		error = quota2_fetch_quotaval(ump, qk, qv);
907 
908 	return error;
909 }
910 
911 /*
912  * Cursor structure we use.
913  *
914  * This will get stored in userland between calls so we must not assume
915  * it isn't arbitrarily corrupted.
916  */
917 struct ulfsq2_cursor {
918 	uint32_t q2c_magic;	/* magic number */
919 	int q2c_hashsize;	/* size of hash table at last go */
920 
921 	int q2c_users_done;	/* true if we've returned all user data */
922 	int q2c_groups_done;	/* true if we've returned all group data */
923 	int q2c_defaults_done;	/* true if we've returned the default values */
924 	int q2c_hashpos;	/* slot to start at in hash table */
925 	int q2c_uidpos;		/* number of ids we've handled */
926 	int q2c_blocks_done;	/* true if we've returned the blocks value */
927 };
928 
929 /*
930  * State of a single cursorget call, or at least the part of it that
931  * needs to be passed around.
932  */
933 struct q2cursor_state {
934 	/* data return pointers */
935 	struct quotakey *keys;
936 	struct quotaval *vals;
937 
938 	/* key/value counters */
939 	unsigned maxkeyvals;
940 	unsigned numkeys;	/* number of keys assigned */
941 
942 	/* ID to key/value conversion state */
943 	int skipfirst;		/* if true skip first key/value */
944 	int skiplast;		/* if true skip last key/value */
945 
946 	/* ID counters */
947 	unsigned maxids;	/* maximum number of IDs to handle */
948 	unsigned numids;	/* number of IDs handled */
949 };
950 
951 /*
952  * Additional structure for getids callback.
953  */
954 struct q2cursor_getids {
955 	struct q2cursor_state *state;
956 	int idtype;
957 	unsigned skip;		/* number of ids to skip over */
958 	unsigned new_skip;	/* number of ids to skip over next time */
959 	unsigned skipped;	/* number skipped so far */
960 	int stopped;		/* true if we stopped quota_walk_list early */
961 };
962 
963 /*
964  * Cursor-related functions
965  */
966 
967 /* magic number */
968 #define Q2C_MAGIC (0xbeebe111)
969 
970 /* extract cursor from caller form */
971 #define Q2CURSOR(qkc) ((struct ulfsq2_cursor *)&qkc->u.qkc_space[0])
972 
973 /*
974  * Check that a cursor we're handed is something like valid. If
975  * someone munges it and it still passes these checks, they'll get
976  * partial or odd results back but won't break anything.
977  */
978 static int
979 q2cursor_check(struct ulfsq2_cursor *cursor)
980 {
981 	if (cursor->q2c_magic != Q2C_MAGIC) {
982 		return EINVAL;
983 	}
984 	if (cursor->q2c_hashsize < 0) {
985 		return EINVAL;
986 	}
987 
988 	if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
989 		return EINVAL;
990 	}
991 	if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
992 		return EINVAL;
993 	}
994 	if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
995 		return EINVAL;
996 	}
997 	if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
998 		return EINVAL;
999 	}
1000 	if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
1001 		return EINVAL;
1002 	}
1003 	return 0;
1004 }
1005 
1006 /*
1007  * Set up the q2cursor state.
1008  */
1009 static void
1010 q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
1011     struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
1012 {
1013 	state->keys = keys;
1014 	state->vals = vals;
1015 
1016 	state->maxkeyvals = maxkeyvals;
1017 	state->numkeys = 0;
1018 
1019 	/*
1020 	 * For each ID there are two quotavals to return. If the
1021 	 * maximum number of entries to return is odd, we might want
1022 	 * to skip the first quotaval of the first ID, or the last
1023 	 * quotaval of the last ID, but not both. So the number of IDs
1024 	 * we want is (up to) half the number of return slots we have,
1025 	 * rounded up.
1026 	 */
1027 
1028 	state->maxids = (state->maxkeyvals + 1) / 2;
1029 	state->numids = 0;
1030 	if (state->maxkeyvals % 2) {
1031 		if (blocks_done) {
1032 			state->skipfirst = 1;
1033 			state->skiplast = 0;
1034 		} else {
1035 			state->skipfirst = 0;
1036 			state->skiplast = 1;
1037 		}
1038 	} else {
1039 		state->skipfirst = 0;
1040 		state->skiplast = 0;
1041 	}
1042 }
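/*
 * Worked example: with maxkeyvals == 5 we get maxids == 3.  On a pass
 * with blocks_done clear, skiplast is set, so the first two ids each
 * return a blocks and a files key while the third returns only its
 * blocks key; the next pass starts with blocks_done set, skipfirst
 * drops the would-be duplicate blocks slot, and the third id's files
 * key is returned first.
 */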
1043 
1044 /*
1045  * Choose which idtype we're going to work on. If doing a full
1046  * iteration, we do users first, then groups, but either might be
1047  * disabled or marked to skip via cursorsetidtype(), so don't make
1048  * silly assumptions.
1049  */
1050 static int
1051 q2cursor_pickidtype(struct ulfsq2_cursor *cursor, int *idtype_ret)
1052 {
1053 	if (cursor->q2c_users_done == 0) {
1054 		*idtype_ret = QUOTA_IDTYPE_USER;
1055 	} else if (cursor->q2c_groups_done == 0) {
1056 		*idtype_ret = QUOTA_IDTYPE_GROUP;
1057 	} else {
1058 		return EAGAIN;
1059 	}
1060 	return 0;
1061 }
1062 
1063 /*
1064  * Add an ID to the current state. Sets up either one or two keys to
1065  * refer to it, depending on whether it's first/last and the setting
1066  * of skipfirst. (skiplast does not need to be explicitly tested)
1067  */
1068 static void
1069 q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
1070 {
1071 	KASSERT(state->numids < state->maxids);
1072 	KASSERT(state->numkeys < state->maxkeyvals);
1073 
1074 	if (!state->skipfirst || state->numkeys > 0) {
1075 		state->keys[state->numkeys].qk_idtype = idtype;
1076 		state->keys[state->numkeys].qk_id = id;
1077 		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
1078 		state->numkeys++;
1079 	}
1080 	if (state->numkeys < state->maxkeyvals) {
1081 		state->keys[state->numkeys].qk_idtype = idtype;
1082 		state->keys[state->numkeys].qk_id = id;
1083 		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
1084 		state->numkeys++;
1085 	} else {
1086 		KASSERT(state->skiplast);
1087 	}
1088 	state->numids++;
1089 }
1090 
1091 /*
1092  * Callback function for getting IDs. Update counting and call addid.
1093  */
1094 static int
1095 q2cursor_getids_callback(struct ulfsmount *ump, uint64_t *offp,
1096     struct quota2_entry *q2ep, uint64_t off, void *v)
1097 {
1098 	struct q2cursor_getids *gi = v;
1099 	id_t id;
1100 	struct lfs *fs = ump->um_lfs;
1101 	const int needswap = ULFS_MPNEEDSWAP(fs);
1102 
1103 	if (gi->skipped < gi->skip) {
1104 		gi->skipped++;
1105 		return 0;
1106 	}
1107 	id = ulfs_rw32(q2ep->q2e_uid, needswap);
1108 	q2cursor_addid(gi->state, gi->idtype, id);
1109 	gi->new_skip++;
1110 	if (gi->state->numids >= gi->state->maxids) {
1111 		/* got enough ids, stop now */
1112 		gi->stopped = 1;
1113 		return Q2WL_ABORT;
1114 	}
1115 	return 0;
1116 }
1117 
1118 /*
1119  * Fill in a batch of quotakeys by scanning one or more hash chains.
1120  */
1121 static int
1122 q2cursor_getkeys(struct ulfsmount *ump, int idtype, struct ulfsq2_cursor *cursor,
1123     struct q2cursor_state *state,
1124     int *hashsize_ret, struct quota2_entry *default_q2e_ret)
1125 {
1126 	struct lfs *fs = ump->um_lfs;
1127 	const int needswap = ULFS_MPNEEDSWAP(fs);
1128 	struct buf *hbp;
1129 	struct quota2_header *q2h;
1130 	int quota2_hash_size;
1131 	struct q2cursor_getids gi;
1132 	uint64_t offset;
1133 	int error;
1134 
1135 	/*
1136 	 * Read the header block.
1137 	 */
1138 
1139 	mutex_enter(&lfs_dqlock);
1140 	error = getq2h(ump, idtype, &hbp, &q2h, 0);
1141 	if (error) {
1142 		mutex_exit(&lfs_dqlock);
1143 		return error;
1144 	}
1145 
1146 	/* if the table size has changed, make the caller start over */
1147 	quota2_hash_size = ulfs_rw16(q2h->q2h_hash_size, needswap);
1148 	if (cursor->q2c_hashsize == 0) {
1149 		cursor->q2c_hashsize = quota2_hash_size;
1150 	} else if (cursor->q2c_hashsize != quota2_hash_size) {
1151 		error = EDEADLK;
1152 		goto scanfail;
1153 	}
1154 
1155 	/* grab the entry with the default values out of the header */
1156 	lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);
1157 
1158 	/* If we haven't done the defaults yet, that goes first. */
1159 	if (cursor->q2c_defaults_done == 0) {
1160 		q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
1161 		/* if we read both halves, mark it done */
1162 		if (state->numids < state->maxids || !state->skiplast) {
1163 			cursor->q2c_defaults_done = 1;
1164 		}
1165 	}
1166 
1167 	gi.state = state;
1168 	gi.idtype = idtype;
1169 
1170 	while (state->numids < state->maxids) {
1171 		if (cursor->q2c_hashpos >= quota2_hash_size) {
1172 			/* nothing more left */
1173 			break;
1174 		}
1175 
1176 		/* scan this hash chain */
1177 		gi.skip = cursor->q2c_uidpos;
1178 		gi.new_skip = gi.skip;
1179 		gi.skipped = 0;
1180 		gi.stopped = 0;
1181 		offset = q2h->q2h_entries[cursor->q2c_hashpos];
1182 
1183 		error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
1184 		    q2cursor_getids_callback);
1185 		KASSERT(error != Q2WL_ABORT);
1186 		if (error) {
1187 			break;
1188 		}
1189 		if (gi.stopped) {
1190 			/* callback stopped before reading whole chain */
1191 			cursor->q2c_uidpos = gi.new_skip;
1192 			/* if we didn't get both halves, back up */
1193 			if (state->numids == state->maxids && state->skiplast){
1194 				KASSERT(cursor->q2c_uidpos > 0);
1195 				cursor->q2c_uidpos--;
1196 			}
1197 		} else {
1198 			/* read whole chain */
1199 			/* if we got both halves of the last id, advance */
1200 			if (state->numids < state->maxids || !state->skiplast){
1201 				cursor->q2c_uidpos = 0;
1202 				cursor->q2c_hashpos++;
1203 			}
1204 		}
1205 	}
1206 
1207 scanfail:
1208 	mutex_exit(&lfs_dqlock);
1209 	brelse(hbp, 0);
1210 	if (error)
1211 		return error;
1212 
1213 	*hashsize_ret = quota2_hash_size;
1214 	return 0;
1215 }
1216 
1217 /*
1218  * Fetch the quotavals for the quotakeys.
1219  */
1220 static int
1221 q2cursor_getvals(struct ulfsmount *ump, struct q2cursor_state *state,
1222     const struct quota2_entry *default_q2e)
1223 {
1224 	int hasid;
1225 	id_t loadedid, id;
1226 	unsigned pos;
1227 	struct quota2_entry q2e;
1228 	int objtype;
1229 	int error;
1230 
1231 	hasid = 0;
1232 	loadedid = 0;
1233 	for (pos = 0; pos < state->numkeys; pos++) {
1234 		id = state->keys[pos].qk_id;
1235 		if (!hasid || id != loadedid) {
1236 			hasid = 1;
1237 			loadedid = id;
1238 			if (id == QUOTA_DEFAULTID) {
1239 				q2e = *default_q2e;
1240 			} else {
1241 				error = quota2_fetch_q2e(ump,
1242 							 &state->keys[pos],
1243 							 &q2e);
1244 				if (error == ENOENT) {
1245 					/* something changed - start over */
1246 					error = EDEADLK;
1247 				}
1248 				if (error) {
1249 					return error;
1250 				}
1251  			}
1252 		}
1253 
1254 
1255 		objtype = state->keys[pos].qk_objtype;
1256 		KASSERT(objtype >= 0 && objtype < N_QL);
1257 		q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
1258 	}
1259 
1260 	return 0;
1261 }
1262 
1263 /*
1264  * Handle cursorget.
1265  *
1266  * We can't just read keys and values directly, because we can't walk
1267  * the list with dqlock and grab dq_interlock to read the entries at
1268  * the same time. So we're going to do two passes: one to figure out
1269  * which IDs we want and fill in the keys, and then a second to use
1270  * the keys to fetch the values.
1271  */
1272 int
1273 lfsquota2_handle_cmd_cursorget(struct ulfsmount *ump, struct quotakcursor *qkc,
1274     struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
1275     unsigned *ret)
1276 {
1277 	int error;
1278 	struct ulfsq2_cursor *cursor;
1279 	struct ulfsq2_cursor newcursor;
1280 	struct q2cursor_state state;
1281 	struct quota2_entry default_q2e;
1282 	int idtype;
1283 	int quota2_hash_size = 0; /* XXXuninit */
1284 
1285 	/*
1286 	 * Convert and validate the cursor.
1287 	 */
1288 	cursor = Q2CURSOR(qkc);
1289 	error = q2cursor_check(cursor);
1290 	if (error) {
1291 		return error;
1292 	}
1293 
1294 	/*
1295 	 * Make sure our on-disk codes match the values of the
1296 	 * FS-independent ones. This avoids the need for explicit
1297 	 * conversion (which would be a NOP anyway and thus easily
1298 	 * left out or called in the wrong places...)
1299 	 */
1300 	CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
1301 	CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
1302 	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
1303 	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
1304 
1305 	/*
1306 	 * If some of the idtypes aren't configured/enabled, arrange
1307 	 * to skip over them.
1308 	 */
1309 	if (cursor->q2c_users_done == 0 &&
1310 	    ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1311 		cursor->q2c_users_done = 1;
1312 	}
1313 	if (cursor->q2c_groups_done == 0 &&
1314 	    ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1315 		cursor->q2c_groups_done = 1;
1316 	}
1317 
1318 	/* Loop over, potentially, both idtypes */
1319 	while (1) {
1320 
1321 		/* Choose id type */
1322 		error = q2cursor_pickidtype(cursor, &idtype);
1323 		if (error == EAGAIN) {
1324 			/* nothing more to do, return 0 */
1325 			*ret = 0;
1326 			return 0;
1327 		}
1328 		KASSERT(ump->um_quotas[idtype] != NULLVP);
1329 
1330 		/*
1331 		 * Initialize the per-call iteration state. Copy the
1332 		 * cursor state so we can update it in place but back
1333 		 * out on error.
1334 		 */
1335 		q2cursor_initstate(&state, keys, vals, maxreturn,
1336 				   cursor->q2c_blocks_done);
1337 		newcursor = *cursor;
1338 
1339 		/* Assign keys */
1340 		error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
1341 					 &quota2_hash_size, &default_q2e);
1342 		if (error) {
1343 			return error;
1344 		}
1345 
1346 		/* Now fill in the values. */
1347 		error = q2cursor_getvals(ump, &state, &default_q2e);
1348 		if (error) {
1349 			return error;
1350 		}
1351 
1352 		/*
1353 		 * Now that we aren't going to fail and lose what we
1354 		 * did so far, we can update the cursor state.
1355 		 */
1356 
1357 		if (newcursor.q2c_hashpos >= quota2_hash_size) {
1358 			if (idtype == QUOTA_IDTYPE_USER)
1359 				cursor->q2c_users_done = 1;
1360 			else
1361 				cursor->q2c_groups_done = 1;
1362 
1363 			/* start over on another id type */
1364 			cursor->q2c_hashsize = 0;
1365 			cursor->q2c_defaults_done = 0;
1366 			cursor->q2c_hashpos = 0;
1367 			cursor->q2c_uidpos = 0;
1368 			cursor->q2c_blocks_done = 0;
1369 		} else {
1370 			*cursor = newcursor;
1371 			cursor->q2c_blocks_done = state.skiplast;
1372 		}
1373 
1374 		/*
1375 		 * If we have something to return, return it.
1376 		 * Otherwise, continue to the other idtype, if any,
1377 		 * and only return zero at end of iteration.
1378 		 */
1379 		if (state.numkeys > 0) {
1380 			break;
1381 		}
1382 	}
1383 
1384 	*ret = state.numkeys;
1385 	return 0;
1386 }
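/*
 * A minimal sketch of how a caller might drive the cursor interface
 * (assumption: in practice these handlers are reached through the ulfs
 * quotactl dispatch rather than called directly):
 *
 *	struct quotakcursor qkc;
 *	struct quotakey keys[8];
 *	struct quotaval vals[8];
 *	unsigned n;
 *	int atend = 0, error;
 *
 *	error = lfsquota2_handle_cmd_cursoropen(ump, &qkc);
 *	while (error == 0 && !atend) {
 *		error = lfsquota2_handle_cmd_cursorget(ump, &qkc, keys,
 *		    vals, 8, &n);
 *		if (error || n == 0)
 *			break;
 *		// ... consume n key/value pairs ...
 *		error = lfsquota2_handle_cmd_cursoratend(ump, &qkc, &atend);
 *	}
 *	(void)lfsquota2_handle_cmd_cursorclose(ump, &qkc);
 */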
1387 
1388 int
1389 lfsquota2_handle_cmd_cursoropen(struct ulfsmount *ump, struct quotakcursor *qkc)
1390 {
1391 	struct ulfsq2_cursor *cursor;
1392 
1393 	CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
1394 	cursor = Q2CURSOR(qkc);
1395 
1396 	cursor->q2c_magic = Q2C_MAGIC;
1397 	cursor->q2c_hashsize = 0;
1398 
1399 	cursor->q2c_users_done = 0;
1400 	cursor->q2c_groups_done = 0;
1401 	cursor->q2c_defaults_done = 0;
1402 	cursor->q2c_hashpos = 0;
1403 	cursor->q2c_uidpos = 0;
1404 	cursor->q2c_blocks_done = 0;
1405 	return 0;
1406 }
1407 
1408 int
1409 lfsquota2_handle_cmd_cursorclose(struct ulfsmount *ump, struct quotakcursor *qkc)
1410 {
1411 	struct ulfsq2_cursor *cursor;
1412 	int error;
1413 
1414 	cursor = Q2CURSOR(qkc);
1415 	error = q2cursor_check(cursor);
1416 	if (error) {
1417 		return error;
1418 	}
1419 
1420 	/* nothing to do */
1421 
1422 	return 0;
1423 }
1424 
1425 int
1426 lfsquota2_handle_cmd_cursorskipidtype(struct ulfsmount *ump,
1427     struct quotakcursor *qkc, int idtype)
1428 {
1429 	struct ulfsq2_cursor *cursor;
1430 	int error;
1431 
1432 	cursor = Q2CURSOR(qkc);
1433 	error = q2cursor_check(cursor);
1434 	if (error) {
1435 		return error;
1436 	}
1437 
1438 	switch (idtype) {
1439 	    case QUOTA_IDTYPE_USER:
1440 		cursor->q2c_users_done = 1;
1441 		break;
1442 	    case QUOTA_IDTYPE_GROUP:
1443 		cursor->q2c_groups_done = 1;
1444 		break;
1445 	    default:
1446 		return EINVAL;
1447 	}
1448 
1449 	return 0;
1450 }
1451 
1452 int
1453 lfsquota2_handle_cmd_cursoratend(struct ulfsmount *ump, struct quotakcursor *qkc,
1454     int *ret)
1455 {
1456 	struct ulfsq2_cursor *cursor;
1457 	int error;
1458 
1459 	cursor = Q2CURSOR(qkc);
1460 	error = q2cursor_check(cursor);
1461 	if (error) {
1462 		return error;
1463 	}
1464 
1465 	*ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
1466 	return 0;
1467 }
1468 
1469 int
1470 lfsquota2_handle_cmd_cursorrewind(struct ulfsmount *ump, struct quotakcursor *qkc)
1471 {
1472 	struct ulfsq2_cursor *cursor;
1473 	int error;
1474 
1475 	cursor = Q2CURSOR(qkc);
1476 	error = q2cursor_check(cursor);
1477 	if (error) {
1478 		return error;
1479 	}
1480 
1481 	cursor->q2c_hashsize = 0;
1482 
1483 	cursor->q2c_users_done = 0;
1484 	cursor->q2c_groups_done = 0;
1485 	cursor->q2c_defaults_done = 0;
1486 	cursor->q2c_hashpos = 0;
1487 	cursor->q2c_uidpos = 0;
1488 	cursor->q2c_blocks_done = 0;
1489 
1490 	return 0;
1491 }
1492 
1493 int
1494 lfs_q2sync(struct mount *mp)
1495 {
1496 	return 0;
1497 }
1498 
1499 struct dq2get_callback {
1500 	uid_t id;
1501 	struct dquot *dq;
1502 };
1503 
1504 static int
1505 dq2get_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
1506     uint64_t off, void *v)
1507 {
1508 	struct dq2get_callback *c = v;
1509 	daddr_t lblkno;
1510 	int blkoff;
1511 	struct lfs *fs = ump->um_lfs;
1512 	const int needswap = ULFS_MPNEEDSWAP(fs);
1513 
1514 	if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
1515 		KASSERT(mutex_owned(&c->dq->dq_interlock));
1516 		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
1517 		blkoff = (off & ump->umq2_bmask);
1518 		c->dq->dq2_lblkno = lblkno;
1519 		c->dq->dq2_blkoff = blkoff;
1520 		return Q2WL_ABORT;
1521 	}
1522 	return 0;
1523 }
1524 
1525 int
1526 lfs_dq2get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
1527     struct dquot *dq)
1528 {
1529 	struct buf *bp;
1530 	struct quota2_header *q2h;
1531 	int error;
1532 	daddr_t offset;
1533 	u_long hash_mask;
1534 	struct dq2get_callback c = {
1535 		.id = id,
1536 		.dq = dq
1537 	};
1538 
1539 	KASSERT(mutex_owned(&dq->dq_interlock));
1540 	mutex_enter(&lfs_dqlock);
1541 	error = getq2h(ump, type, &bp, &q2h, 0);
1542 	if (error)
1543 		goto out_mutex;
1544 	/* look for our entry */
1545 	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
1546 	offset = q2h->q2h_entries[id & hash_mask];
1547 	error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
1548 	    dq2get_callback);
1549 	brelse(bp, 0);
1550 out_mutex:
1551 	mutex_exit(&lfs_dqlock);
1552 	return error;
1553 }
1554 
1555 int
1556 lfs_dq2sync(struct vnode *vp, struct dquot *dq)
1557 {
1558 	return 0;
1559 }
1560 
1561 int
1562 lfs_quota2_mount(struct mount *mp)
1563 {
1564 	struct ulfsmount *ump = VFSTOULFS(mp);
1565 	struct lfs *fs = ump->um_lfs;
1566 	int error;
1567 	struct vnode *vp;
1568 	struct lwp *l = curlwp;
1569 
1570 	if ((fs->lfs_use_quota2) == 0)
1571 		return 0;
1572 
1573 	fs->um_flags |= ULFS_QUOTA2;
1574 	ump->umq2_bsize = lfs_sb_getbsize(fs);
1575 	ump->umq2_bmask = lfs_sb_getbmask(fs);
1576 	if (fs->lfs_quota_magic != Q2_HEAD_MAGIC) {
1577 		printf("%s: Invalid quota magic number\n",
1578 		    mp->mnt_stat.f_mntonname);
1579 		return EINVAL;
1580 	}
1581 
1582 	error = 0;
1583         if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA)) &&
1584             fs->lfs_quotaino[ULFS_USRQUOTA] == 0) {
1585                 printf("%s: No user quota inode\n",
1586 		    mp->mnt_stat.f_mntonname);
1587                 error = EINVAL;
1588         }
1589         if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA)) &&
1590             fs->lfs_quotaino[ULFS_GRPQUOTA] == 0) {
1591                 printf("%s: No group quota inode\n",
1592 		    mp->mnt_stat.f_mntonname);
1593                 error = EINVAL;
1594         }
1595 	if (error)
1596 		return error;
1597 
1598         if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA) &&
1599 	    ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1600 		error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_USRQUOTA],
1601 		    LK_EXCLUSIVE, &vp);
1602 		if (error) {
1603 			printf("%s: can't vget() user quota inode: %d\n",
1604 			    mp->mnt_stat.f_mntonname, error);
1605 			return error;
1606 		}
1607 		ump->um_quotas[ULFS_USRQUOTA] = vp;
1608 		ump->um_cred[ULFS_USRQUOTA] = l->l_cred;
1609 		mutex_enter(vp->v_interlock);
1610 		vp->v_writecount++;
1611 		mutex_exit(vp->v_interlock);
1612 		VOP_UNLOCK(vp);
1613 	}
1614         if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA) &&
1615 	    ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1616 		error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_GRPQUOTA],
1617 		    LK_EXCLUSIVE, &vp);
1618 		if (error) {
1619 			vn_close(ump->um_quotas[ULFS_USRQUOTA],
1620 			    FREAD|FWRITE, l->l_cred);
1621 			printf("%s: can't vget() group quota inode: %d\n",
1622 			    mp->mnt_stat.f_mntonname, error);
1623 			return error;
1624 		}
1625 		ump->um_quotas[ULFS_GRPQUOTA] = vp;
1626 		ump->um_cred[ULFS_GRPQUOTA] = l->l_cred;
1627 		mutex_enter(vp->v_interlock);
1628 		vp->v_vflag |= VV_SYSTEM;
1629 		vp->v_writecount++;
1630 		mutex_exit(vp->v_interlock);
1631 		VOP_UNLOCK(vp);
1632 	}
1633 
1634 	mp->mnt_flag |= MNT_QUOTA;
1635 	return 0;
1636 }
1637