1 /* $NetBSD: ulfs_quota2.c,v 1.21 2015/07/28 05:09:35 dholland Exp $ */
2 /* from NetBSD: ufs_quota2.c,v 1.35 2012/09/27 07:47:56 bouyer Exp */
3 /* from NetBSD: ffs_quota2.c,v 1.4 2011/06/12 03:36:00 rmind Exp */
4
5 /*-
6 * Copyright (c) 2010 Manuel Bouyer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: ulfs_quota2.c,v 1.21 2015/07/28 05:09:35 dholland Exp $");
33
34 #include <sys/buf.h>
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/namei.h>
39 #include <sys/file.h>
40 #include <sys/proc.h>
41 #include <sys/vnode.h>
42 #include <sys/mount.h>
43 #include <sys/fstrans.h>
44 #include <sys/kauth.h>
45 #include <sys/wapbl.h>
46 #include <sys/quota.h>
47 #include <sys/quotactl.h>
48
49 #include <ufs/lfs/lfs.h>
50 #include <ufs/lfs/lfs_accessors.h>
51 #include <ufs/lfs/lfs_extern.h>
52
53 #include <ufs/lfs/ulfs_quota2.h>
54 #include <ufs/lfs/ulfs_inode.h>
55 #include <ufs/lfs/ulfsmount.h>
56 #include <ufs/lfs/ulfs_bswap.h>
57 #include <ufs/lfs/ulfs_extern.h>
58 #include <ufs/lfs/ulfs_quota.h>
59
60 /*
61 * LOCKING:
62 * Data in the entries are protected by the associated struct dquot's
63 * dq_interlock (this means we can't read or change a quota entry without
64  * grabbing a dquot for it).
65 * The header and lists (including pointers in the data entries, and q2e_uid)
66  * are protected by the global lfs_dqlock.
67  * The locking order is dq_interlock -> lfs_dqlock.
68 */
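/*
 * A minimal sketch of the resulting acquisition pattern, as seen in
 * getinoquota2() and quota2_q2ealloc() below (illustrative only, not a
 * new code path):
 *
 *	mutex_enter(&dq->dq_interlock);		(per-entry data)
 *	mutex_enter(&lfs_dqlock);		(header and list pointers)
 *	...allocate or relink the on-disk entry...
 *	mutex_exit(&lfs_dqlock);
 *	mutex_exit(&dq->dq_interlock);
 */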
69
70 static int quota2_bwrite(struct mount *, struct buf *);
71 static int getinoquota2(struct inode *, bool, bool, struct buf **,
72 struct quota2_entry **);
73 static int getq2h(struct ulfsmount *, int, struct buf **,
74 struct quota2_header **, int);
75 static int getq2e(struct ulfsmount *, int, daddr_t, int, struct buf **,
76 struct quota2_entry **, int);
77 static int quota2_walk_list(struct ulfsmount *, struct buf *, int,
78 uint64_t *, int, void *,
79 int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *,
80 uint64_t, void *));
81
82 static const char *limnames[] = INITQLNAMES;
83
84 static void
85 quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
86 struct quota2_entry *q2e)
87 {
88 /* make sure we can index q2e_val[] by the fs-independent objtype */
89 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
90 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
91
92 q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
93 q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
94 q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
95 }
96
97 /*
98 * Convert internal representation to FS-independent representation.
99 * (Note that while the two types are currently identical, the
100 * internal representation is an on-disk struct and the FS-independent
101 * representation is not, and they might diverge in the future.)
102 */
103 static void
104 q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
105 {
106 qv->qv_softlimit = q2v->q2v_softlimit;
107 qv->qv_hardlimit = q2v->q2v_hardlimit;
108 qv->qv_usage = q2v->q2v_cur;
109 qv->qv_expiretime = q2v->q2v_time;
110 qv->qv_grace = q2v->q2v_grace;
111 }
112
113 /*
114 * Convert a quota2entry and default-flag to the FS-independent
115 * representation.
116 */
117 static void
118 q2e_to_quotaval(struct quota2_entry *q2e, int def,
119 id_t *id, int objtype, struct quotaval *ret)
120 {
121 if (def) {
122 *id = QUOTA_DEFAULTID;
123 } else {
124 *id = q2e->q2e_uid;
125 }
126
127 KASSERT(objtype >= 0 && objtype < N_QL);
128 q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
129 }
130
131
132 static int
133 quota2_bwrite(struct mount *mp, struct buf *bp)
134 {
135 if (mp->mnt_flag & MNT_SYNCHRONOUS)
136 return bwrite(bp);
137 else {
138 bdwrite(bp);
139 return 0;
140 }
141 }
142
143 static int
144 getq2h(struct ulfsmount *ump, int type,
145 struct buf **bpp, struct quota2_header **q2hp, int flags)
146 {
147 struct lfs *fs = ump->um_lfs;
148 const int needswap = ULFS_MPNEEDSWAP(fs);
149 int error;
150 struct buf *bp;
151 struct quota2_header *q2h;
152
153 KASSERT(mutex_owned(&lfs_dqlock));
154 error = bread(ump->um_quotas[type], 0, ump->umq2_bsize, flags, &bp);
155 if (error)
156 return error;
157 if (bp->b_resid != 0)
158 panic("dq2get: %s quota file truncated", lfs_quotatypes[type]);
159
160 q2h = (void *)bp->b_data;
161 if (ulfs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
162 q2h->q2h_type != type)
163 panic("dq2get: corrupted %s quota header", lfs_quotatypes[type]);
164 *bpp = bp;
165 *q2hp = q2h;
166 return 0;
167 }
168
169 static int
170 getq2e(struct ulfsmount *ump, int type, daddr_t lblkno, int blkoffset,
171 struct buf **bpp, struct quota2_entry **q2ep, int flags)
172 {
173 int error;
174 struct buf *bp;
175
176 if (blkoffset & (sizeof(uint64_t) - 1)) {
177 panic("dq2get: %s quota file corrupted",
178 lfs_quotatypes[type]);
179 }
180 error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize, flags, &bp);
181 if (error)
182 return error;
183 if (bp->b_resid != 0) {
184 panic("dq2get: %s quota file corrupted",
185 lfs_quotatypes[type]);
186 }
187 *q2ep = (void *)((char *)bp->b_data + blkoffset);
188 *bpp = bp;
189 return 0;
190 }
191
192 /* walk a quota entry list, calling the callback for each entry */
193 #define Q2WL_ABORT 0x10000000
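/*
 * Summary of the callback contract (derived from the callbacks below,
 * e.g. dq2get_callback() and dq2clear_callback()): the callback gets in
 * offp a pointer to the field that led to this entry (a header hash slot
 * or the previous entry's q2e_next), plus the entry itself and its file
 * offset.  Return 0 to keep walking, an errno to stop with that error,
 * or Q2WL_ABORT to stop successfully; quota2_walk_list() masks
 * Q2WL_ABORT out of its return value.  A callback may also rewrite
 * *offp to unlink the current entry (as dq2clear_callback() does); the
 * walker notices the change and resumes from the new offset.
 */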
194
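/*
 * Typical caller pattern, a sketch modeled on lfs_dq2get() below ("c"
 * and "my_callback" are placeholders, not real symbols in this file):
 *
 *	mutex_enter(&lfs_dqlock);
 *	error = getq2h(ump, type, &hbp, &q2h, 0);
 *	if (error == 0) {
 *		hash_mask = (1 << q2h->q2h_hash_shift) - 1;
 *		error = quota2_walk_list(ump, hbp, type,
 *		    &q2h->q2h_entries[id & hash_mask], 0, &c, my_callback);
 *		brelse(hbp, 0);
 *	}
 *	mutex_exit(&lfs_dqlock);
 */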
195 static int
196 quota2_walk_list(struct ulfsmount *ump, struct buf *hbp, int type,
197 uint64_t *offp, int flags, void *a,
198 int (*func)(struct ulfsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
199 {
200 struct lfs *fs = ump->um_lfs;
201 const int needswap = ULFS_MPNEEDSWAP(fs);
202 daddr_t off = ulfs_rw64(*offp, needswap);
203 struct buf *bp, *obp = hbp;
204 int ret = 0, ret2 = 0;
205 struct quota2_entry *q2e;
206 daddr_t lblkno, blkoff, olblkno = 0;
207
208 	KASSERT(mutex_owned(&lfs_dqlock));
209
210 while (off != 0) {
211 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
212 blkoff = (off & ump->umq2_bmask);
213 if (lblkno == 0) {
214 /* in the header block */
215 bp = hbp;
216 } else if (lblkno == olblkno) {
217 /* still in the same buf */
218 bp = obp;
219 } else {
220 ret = bread(ump->um_quotas[type], lblkno,
221 ump->umq2_bsize, flags, &bp);
222 if (ret)
223 return ret;
224 if (bp->b_resid != 0) {
225 panic("quota2_walk_list: %s quota file corrupted",
226 lfs_quotatypes[type]);
227 }
228 }
229 q2e = (void *)((char *)(bp->b_data) + blkoff);
230 ret = (*func)(ump, offp, q2e, off, a);
231 if (off != ulfs_rw64(*offp, needswap)) {
232 /* callback changed parent's pointer, redo */
233 off = ulfs_rw64(*offp, needswap);
234 if (bp != hbp && bp != obp)
235 ret2 = bwrite(bp);
236 } else {
237 			/* parent is now current */
238 if (obp != bp && obp != hbp) {
239 if (flags & B_MODIFY)
240 ret2 = bwrite(obp);
241 else
242 brelse(obp, 0);
243 }
244 obp = bp;
245 olblkno = lblkno;
246 offp = &(q2e->q2e_next);
247 off = ulfs_rw64(*offp, needswap);
248 }
249 if (ret)
250 break;
251 if (ret2) {
252 ret = ret2;
253 break;
254 }
255 }
256 if (obp != hbp) {
257 if (flags & B_MODIFY)
258 ret2 = bwrite(obp);
259 else
260 brelse(obp, 0);
261 }
262 if (ret & Q2WL_ABORT)
263 return 0;
264 if (ret == 0)
265 return ret2;
266 return ret;
267 }
268
269 int
270 lfsquota2_umount(struct mount *mp, int flags)
271 {
272 int i, error;
273 struct ulfsmount *ump = VFSTOULFS(mp);
274 struct lfs *fs = ump->um_lfs;
275
276 if ((fs->um_flags & ULFS_QUOTA2) == 0)
277 return 0;
278
279 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
280 if (ump->um_quotas[i] != NULLVP) {
281 error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
282 ump->um_cred[i]);
283 if (error) {
284 printf("quota2_umount failed: close(%p) %d\n",
285 ump->um_quotas[i], error);
286 return error;
287 }
288 }
289 ump->um_quotas[i] = NULLVP;
290 }
291 return 0;
292 }
293
294 static int
295 quota2_q2ealloc(struct ulfsmount *ump, int type, uid_t uid, struct dquot *dq)
296 {
297 int error, error2;
298 struct buf *hbp, *bp;
299 struct quota2_header *q2h;
300 struct quota2_entry *q2e;
301 daddr_t offset;
302 u_long hash_mask;
303 struct lfs *fs = ump->um_lfs;
304 const int needswap = ULFS_MPNEEDSWAP(fs);
305
306 KASSERT(mutex_owned(&dq->dq_interlock));
307 KASSERT(mutex_owned(&lfs_dqlock));
308 error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
309 if (error)
310 return error;
311 offset = ulfs_rw64(q2h->q2h_free, needswap);
312 if (offset == 0) {
313 struct vnode *vp = ump->um_quotas[type];
314 struct inode *ip = VTOI(vp);
315 uint64_t size = ip->i_size;
316 		/* need to allocate a new disk block */
317 error = lfs_balloc(vp, size, ump->umq2_bsize,
318 ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
319 if (error) {
320 brelse(hbp, 0);
321 return error;
322 }
323 KASSERT((ip->i_size % ump->umq2_bsize) == 0);
324 ip->i_size += ump->umq2_bsize;
325 DIP_ASSIGN(ip, size, ip->i_size);
326 ip->i_flag |= IN_CHANGE | IN_UPDATE;
327 uvm_vnp_setsize(vp, ip->i_size);
328 lfsquota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
329 needswap);
330 error = bwrite(bp);
331 error2 = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
332 if (error || error2) {
333 brelse(hbp, 0);
334 if (error)
335 return error;
336 return error2;
337 }
338 offset = ulfs_rw64(q2h->q2h_free, needswap);
339 KASSERT(offset != 0);
340 }
341 dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
342 dq->dq2_blkoff = (offset & ump->umq2_bmask);
343 if (dq->dq2_lblkno == 0) {
344 bp = hbp;
345 q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
346 } else {
347 error = getq2e(ump, type, dq->dq2_lblkno,
348 dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
349 if (error) {
350 brelse(hbp, 0);
351 return error;
352 }
353 }
354 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
355 /* remove from free list */
356 q2h->q2h_free = q2e->q2e_next;
357
358 memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
359 q2e->q2e_uid = ulfs_rw32(uid, needswap);
360 /* insert in hash list */
361 q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
362 q2h->q2h_entries[uid & hash_mask] = ulfs_rw64(offset, needswap);
363 if (hbp != bp) {
364 bwrite(hbp);
365 }
366 bwrite(bp);
367 return 0;
368 }
369
370 static int
371 getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
372 struct quota2_entry **q2ep)
373 {
374 int error;
375 int i;
376 struct dquot *dq;
377 struct ulfsmount *ump = ip->i_ump;
378 u_int32_t ino_ids[ULFS_MAXQUOTAS];
379
380 error = lfs_getinoquota(ip);
381 if (error)
382 return error;
383
384 ino_ids[ULFS_USRQUOTA] = ip->i_uid;
385 ino_ids[ULFS_GRPQUOTA] = ip->i_gid;
386 	/* first get the interlock for all dquots */
387 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
388 dq = ip->i_dquot[i];
389 if (dq == NODQUOT)
390 continue;
391 mutex_enter(&dq->dq_interlock);
392 }
393 /* now get the corresponding quota entry */
394 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
395 bpp[i] = NULL;
396 q2ep[i] = NULL;
397 dq = ip->i_dquot[i];
398 if (dq == NODQUOT)
399 continue;
400 if (__predict_false(ump->um_quotas[i] == NULL)) {
401 /*
402 * quotas have been turned off. This can happen
403 * at umount time.
404 */
405 mutex_exit(&dq->dq_interlock);
406 lfs_dqrele(NULLVP, dq);
407 ip->i_dquot[i] = NULL;
408 continue;
409 }
410
411 if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
412 if (!alloc) {
413 continue;
414 }
415 			/* need to allocate a new on-disk quota entry */
416 mutex_enter(&lfs_dqlock);
417 error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
418 mutex_exit(&lfs_dqlock);
419 if (error)
420 return error;
421 }
422 KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
423 error = getq2e(ump, i, dq->dq2_lblkno,
424 dq->dq2_blkoff, &bpp[i], &q2ep[i],
425 modify ? B_MODIFY : 0);
426 if (error)
427 return error;
428 }
429 return 0;
430 }
431
432 __inline static int __unused
433 lfsquota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
434 {
435 return lfsquota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
436 q2v->q2v_hardlimit, q2v->q2v_time, now);
437 }
438
439 static int
440 quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
441 int flags)
442 {
443 int error;
444 struct buf *bp[ULFS_MAXQUOTAS];
445 struct quota2_entry *q2e[ULFS_MAXQUOTAS];
446 struct quota2_val *q2vp;
447 struct dquot *dq;
448 uint64_t ncurblks;
449 struct ulfsmount *ump = ip->i_ump;
450 struct lfs *fs = ip->i_lfs;
451 struct mount *mp = ump->um_mountp;
452 const int needswap = ULFS_MPNEEDSWAP(fs);
453 int i;
454
455 if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
456 return error;
457 if (change == 0) {
458 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
459 dq = ip->i_dquot[i];
460 if (dq == NODQUOT)
461 continue;
462 if (bp[i])
463 brelse(bp[i], 0);
464 mutex_exit(&dq->dq_interlock);
465 }
466 return 0;
467 }
468 if (change < 0) {
469 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
470 dq = ip->i_dquot[i];
471 if (dq == NODQUOT)
472 continue;
473 if (q2e[i] == NULL) {
474 mutex_exit(&dq->dq_interlock);
475 continue;
476 }
477 q2vp = &q2e[i]->q2e_val[vtype];
478 ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
479 if (ncurblks < -change)
480 ncurblks = 0;
481 else
482 ncurblks += change;
483 q2vp->q2v_cur = ulfs_rw64(ncurblks, needswap);
484 quota2_bwrite(mp, bp[i]);
485 mutex_exit(&dq->dq_interlock);
486 }
487 return 0;
488 }
489 /* see if the allocation is allowed */
490 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
491 struct quota2_val q2v;
492 int ql_stat;
493 dq = ip->i_dquot[i];
494 if (dq == NODQUOT)
495 continue;
496 KASSERT(q2e[i] != NULL);
497 lfsquota2_ulfs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
498 ql_stat = lfsquota2_check_limit(&q2v, change, time_second);
499
500 if ((flags & FORCE) == 0 &&
501 kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
502 KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
503 KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
504 /* enforce this limit */
505 switch(QL_STATUS(ql_stat)) {
506 case QL_S_DENY_HARD:
507 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
508 uprintf("\n%s: write failed, %s %s "
509 "limit reached\n",
510 mp->mnt_stat.f_mntonname,
511 lfs_quotatypes[i], limnames[vtype]);
512 dq->dq_flags |= DQ_WARN(vtype);
513 }
514 error = EDQUOT;
515 break;
516 case QL_S_DENY_GRACE:
517 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
518 uprintf("\n%s: write failed, %s %s "
519 "limit reached\n",
520 mp->mnt_stat.f_mntonname,
521 lfs_quotatypes[i], limnames[vtype]);
522 dq->dq_flags |= DQ_WARN(vtype);
523 }
524 error = EDQUOT;
525 break;
526 case QL_S_ALLOW_SOFT:
527 if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
528 uprintf("\n%s: warning, %s %s "
529 "quota exceeded\n",
530 mp->mnt_stat.f_mntonname,
531 lfs_quotatypes[i], limnames[vtype]);
532 dq->dq_flags |= DQ_WARN(vtype);
533 }
534 break;
535 }
536 }
537 /*
538 		 * Always do this; we don't know if the allocation will
539 		 * succeed or not in the end. If we don't do the allocation,
540 		 * q2v_time will be ignored anyway.
541 */
542 if (ql_stat & QL_F_CROSS) {
543 q2v.q2v_time = time_second + q2v.q2v_grace;
544 lfsquota2_ulfs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
545 needswap);
546 }
547 }
548
549 /* now do the allocation if allowed */
550 for (i = 0; i < ULFS_MAXQUOTAS; i++) {
551 dq = ip->i_dquot[i];
552 if (dq == NODQUOT)
553 continue;
554 KASSERT(q2e[i] != NULL);
555 if (error == 0) {
556 q2vp = &q2e[i]->q2e_val[vtype];
557 ncurblks = ulfs_rw64(q2vp->q2v_cur, needswap);
558 q2vp->q2v_cur = ulfs_rw64(ncurblks + change, needswap);
559 quota2_bwrite(mp, bp[i]);
560 } else
561 brelse(bp[i], 0);
562 mutex_exit(&dq->dq_interlock);
563 }
564 return error;
565 }
566
567 int
568 lfs_chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
569 {
570 return quota2_check(ip, QL_BLOCK, change, cred, flags);
571 }
572
573 int
574 lfs_chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
575 {
576 return quota2_check(ip, QL_FILE, change, cred, flags);
577 }
578
579 int
580 lfsquota2_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
581 const struct quotaval *val)
582 {
583 int error;
584 struct dquot *dq;
585 struct quota2_header *q2h;
586 struct quota2_entry q2e, *q2ep;
587 struct buf *bp;
588 struct lfs *fs = ump->um_lfs;
589 const int needswap = ULFS_MPNEEDSWAP(fs);
590
591 /* make sure we can index by the fs-independent idtype */
592 CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
593 CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
594
595 if (ump->um_quotas[key->qk_idtype] == NULLVP)
596 return ENODEV;
597
598 if (key->qk_id == QUOTA_DEFAULTID) {
599 mutex_enter(&lfs_dqlock);
600 error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
601 if (error) {
602 mutex_exit(&lfs_dqlock);
603 goto out_wapbl;
604 }
605 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
606 quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
607 lfsquota2_ulfs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
608 mutex_exit(&lfs_dqlock);
609 quota2_bwrite(ump->um_mountp, bp);
610 goto out_wapbl;
611 }
612
613 error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
614 if (error)
615 goto out_wapbl;
616
617 mutex_enter(&dq->dq_interlock);
618 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
619 		/* need to allocate a new on-disk quota entry */
620 mutex_enter(&lfs_dqlock);
621 error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
622 mutex_exit(&lfs_dqlock);
623 if (error)
624 goto out_il;
625 }
626 KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
627 error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
628 dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
629 if (error)
630 goto out_il;
631
632 lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
633 quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
634 lfsquota2_ulfs_rwq2e(&q2e, q2ep, needswap);
635 quota2_bwrite(ump->um_mountp, bp);
636
637 out_il:
638 mutex_exit(&dq->dq_interlock);
639 lfs_dqrele(NULLVP, dq);
640 out_wapbl:
641 return error;
642 }
643
644 struct dq2clear_callback {
645 uid_t id;
646 struct dquot *dq;
647 struct quota2_header *q2h;
648 };
649
650 static int
651 dq2clear_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
652 uint64_t off, void *v)
653 {
654 struct dq2clear_callback *c = v;
655 struct lfs *fs = ump->um_lfs;
656 const int needswap = ULFS_MPNEEDSWAP(fs);
657 uint64_t myoff;
658
659 if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
660 KASSERT(mutex_owned(&c->dq->dq_interlock));
661 c->dq->dq2_lblkno = 0;
662 c->dq->dq2_blkoff = 0;
663 myoff = *offp;
664 /* remove from hash list */
665 *offp = q2e->q2e_next;
666 /* add to free list */
667 q2e->q2e_next = c->q2h->q2h_free;
668 c->q2h->q2h_free = myoff;
669 return Q2WL_ABORT;
670 }
671 return 0;
672 }
673 int
674 lfsquota2_handle_cmd_del(struct ulfsmount *ump, const struct quotakey *qk)
675 {
676 int idtype;
677 id_t id;
678 int objtype;
679 int error, i, canfree;
680 struct dquot *dq;
681 struct quota2_header *q2h;
682 struct quota2_entry q2e, *q2ep;
683 struct buf *hbp, *bp;
684 u_long hash_mask;
685 struct dq2clear_callback c;
686
687 idtype = qk->qk_idtype;
688 id = qk->qk_id;
689 objtype = qk->qk_objtype;
690
691 if (ump->um_quotas[idtype] == NULLVP)
692 return ENODEV;
693 if (id == QUOTA_DEFAULTID)
694 return EOPNOTSUPP;
695
696 /* get the default entry before locking the entry's buffer */
697 mutex_enter(&lfs_dqlock);
698 error = getq2h(ump, idtype, &hbp, &q2h, 0);
699 if (error) {
700 mutex_exit(&lfs_dqlock);
701 return error;
702 }
703 /* we'll copy to another disk entry, so no need to swap */
704 memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
705 mutex_exit(&lfs_dqlock);
706 brelse(hbp, 0);
707
708 error = lfs_dqget(NULLVP, id, ump, idtype, &dq);
709 if (error)
710 return error;
711
712 mutex_enter(&dq->dq_interlock);
713 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
714 /* already clear, nothing to do */
715 error = ENOENT;
716 goto out_il;
717 }
718
719 error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
720 &bp, &q2ep, B_MODIFY);
721 if (error)
722 goto out_wapbl;
723
724 /* make sure we can index by the objtype passed in */
725 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
726 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
727
728 /* clear the requested objtype by copying from the default entry */
729 q2ep->q2e_val[objtype].q2v_softlimit =
730 q2e.q2e_val[objtype].q2v_softlimit;
731 q2ep->q2e_val[objtype].q2v_hardlimit =
732 q2e.q2e_val[objtype].q2v_hardlimit;
733 q2ep->q2e_val[objtype].q2v_grace =
734 q2e.q2e_val[objtype].q2v_grace;
735 q2ep->q2e_val[objtype].q2v_time = 0;
736
737 /* if this entry now contains no information, we can free it */
738 canfree = 1;
739 for (i = 0; i < N_QL; i++) {
740 if (q2ep->q2e_val[i].q2v_cur != 0 ||
741 (q2ep->q2e_val[i].q2v_softlimit !=
742 q2e.q2e_val[i].q2v_softlimit) ||
743 (q2ep->q2e_val[i].q2v_hardlimit !=
744 q2e.q2e_val[i].q2v_hardlimit) ||
745 (q2ep->q2e_val[i].q2v_grace !=
746 q2e.q2e_val[i].q2v_grace)) {
747 canfree = 0;
748 break;
749 }
750 /* note: do not need to check q2v_time */
751 }
752
753 if (canfree == 0) {
754 quota2_bwrite(ump->um_mountp, bp);
755 goto out_wapbl;
756 }
757 /* we can free it. release bp so we can walk the list */
758 brelse(bp, 0);
759 mutex_enter(&lfs_dqlock);
760 error = getq2h(ump, idtype, &hbp, &q2h, 0);
761 if (error)
762 goto out_dqlock;
763
764 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
765 c.dq = dq;
766 c.id = id;
767 c.q2h = q2h;
768 error = quota2_walk_list(ump, hbp, idtype,
769 &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
770 dq2clear_callback);
771
772 bwrite(hbp);
773
774 out_dqlock:
775 mutex_exit(&lfs_dqlock);
776 out_wapbl:
777 out_il:
778 mutex_exit(&dq->dq_interlock);
779 lfs_dqrele(NULLVP, dq);
780 return error;
781 }
782
783 static int
784 quota2_fetch_q2e(struct ulfsmount *ump, const struct quotakey *qk,
785 struct quota2_entry *ret)
786 {
787 struct dquot *dq;
788 int error;
789 struct quota2_entry *q2ep;
790 struct buf *bp;
791 struct lfs *fs = ump->um_lfs;
792 const int needswap = ULFS_MPNEEDSWAP(fs);
793
794 error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
795 if (error)
796 return error;
797
798 mutex_enter(&dq->dq_interlock);
799 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
800 mutex_exit(&dq->dq_interlock);
801 lfs_dqrele(NULLVP, dq);
802 return ENOENT;
803 }
804 error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
805 &bp, &q2ep, 0);
806 if (error) {
807 mutex_exit(&dq->dq_interlock);
808 lfs_dqrele(NULLVP, dq);
809 return error;
810 }
811 lfsquota2_ulfs_rwq2e(q2ep, ret, needswap);
812 brelse(bp, 0);
813 mutex_exit(&dq->dq_interlock);
814 lfs_dqrele(NULLVP, dq);
815
816 return 0;
817 }
818
819 static int
820 quota2_fetch_quotaval(struct ulfsmount *ump, const struct quotakey *qk,
821 struct quotaval *ret)
822 {
823 struct dquot *dq;
824 int error;
825 struct quota2_entry *q2ep, q2e;
826 struct buf *bp;
827 struct lfs *fs = ump->um_lfs;
828 const int needswap = ULFS_MPNEEDSWAP(fs);
829 id_t id2;
830
831 error = lfs_dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
832 if (error)
833 return error;
834
835 mutex_enter(&dq->dq_interlock);
836 if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
837 mutex_exit(&dq->dq_interlock);
838 lfs_dqrele(NULLVP, dq);
839 return ENOENT;
840 }
841 error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
842 &bp, &q2ep, 0);
843 if (error) {
844 mutex_exit(&dq->dq_interlock);
845 lfs_dqrele(NULLVP, dq);
846 return error;
847 }
848 lfsquota2_ulfs_rwq2e(q2ep, &q2e, needswap);
849 brelse(bp, 0);
850 mutex_exit(&dq->dq_interlock);
851 lfs_dqrele(NULLVP, dq);
852
853 q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
854 KASSERT(id2 == qk->qk_id);
855 return 0;
856 }
857
858 int
859 lfsquota2_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
860 struct quotaval *qv)
861 {
862 int error;
863 struct quota2_header *q2h;
864 struct quota2_entry q2e;
865 struct buf *bp;
866 struct lfs *fs = ump->um_lfs;
867 const int needswap = ULFS_MPNEEDSWAP(fs);
868 id_t id2;
869
870 /*
871 * Make sure the FS-independent codes match the internal ones,
872 * so we can use the passed-in objtype without having to
873 * convert it explicitly to QL_BLOCK/QL_FILE.
874 */
875 CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
876 CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
877 CTASSERT(N_QL == 2);
878
879 if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
880 return EINVAL;
881 }
882
883 if (ump->um_quotas[qk->qk_idtype] == NULLVP)
884 return ENODEV;
885 if (qk->qk_id == QUOTA_DEFAULTID) {
886 mutex_enter(&lfs_dqlock);
887 error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
888 if (error) {
889 mutex_exit(&lfs_dqlock);
890 return error;
891 }
892 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
893 mutex_exit(&lfs_dqlock);
894 brelse(bp, 0);
895 q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
896 qk->qk_objtype, qv);
897 (void)id2;
898 } else
899 error = quota2_fetch_quotaval(ump, qk, qv);
900
901 return error;
902 }
903
904 /*
905  * Cursor structure we use.
906 *
907 * This will get stored in userland between calls so we must not assume
908 * it isn't arbitrarily corrupted.
909 */
910 struct ulfsq2_cursor {
911 uint32_t q2c_magic; /* magic number */
912 int q2c_hashsize; /* size of hash table at last go */
913
914 int q2c_users_done; /* true if we've returned all user data */
915 int q2c_groups_done; /* true if we've returned all group data */
916 int q2c_defaults_done; /* true if we've returned the default values */
917 int q2c_hashpos; /* slot to start at in hash table */
918 int q2c_uidpos; /* number of ids we've handled */
919 int q2c_blocks_done; /* true if we've returned the blocks value */
920 };
921
922 /*
923 * State of a single cursorget call, or at least the part of it that
924 * needs to be passed around.
925 */
926 struct q2cursor_state {
927 /* data return pointers */
928 struct quotakey *keys;
929 struct quotaval *vals;
930
931 /* key/value counters */
932 unsigned maxkeyvals;
933 unsigned numkeys; /* number of keys assigned */
934
935 /* ID to key/value conversion state */
936 int skipfirst; /* if true skip first key/value */
937 int skiplast; /* if true skip last key/value */
938
939 /* ID counters */
940 unsigned maxids; /* maximum number of IDs to handle */
941 unsigned numids; /* number of IDs handled */
942 };
943
944 /*
945 * Additional structure for getids callback.
946 */
947 struct q2cursor_getids {
948 struct q2cursor_state *state;
949 int idtype;
950 unsigned skip; /* number of ids to skip over */
951 unsigned new_skip; /* number of ids to skip over next time */
952 unsigned skipped; /* number skipped so far */
953 int stopped; /* true if we stopped quota_walk_list early */
954 };
955
956 /*
957 * Cursor-related functions
958 */
959
960 /* magic number */
961 #define Q2C_MAGIC (0xbeebe111)
962
963 /* extract cursor from caller form */
964 #define Q2CURSOR(qkc) ((struct ulfsq2_cursor *)&qkc->u.qkc_space[0])
965
966 /*
967 * Check that a cursor we're handed is something like valid. If
968 * someone munges it and it still passes these checks, they'll get
969 * partial or odd results back but won't break anything.
970 */
971 static int
972 q2cursor_check(struct ulfsq2_cursor *cursor)
973 {
974 if (cursor->q2c_magic != Q2C_MAGIC) {
975 return EINVAL;
976 }
977 if (cursor->q2c_hashsize < 0) {
978 return EINVAL;
979 }
980
981 if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
982 return EINVAL;
983 }
984 if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
985 return EINVAL;
986 }
987 if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
988 return EINVAL;
989 }
990 if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
991 return EINVAL;
992 }
993 if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
994 return EINVAL;
995 }
996 return 0;
997 }
998
999 /*
1000 * Set up the q2cursor state.
1001 */
1002 static void
1003 q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
1004 struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
1005 {
1006 state->keys = keys;
1007 state->vals = vals;
1008
1009 state->maxkeyvals = maxkeyvals;
1010 state->numkeys = 0;
1011
1012 /*
1013 * For each ID there are two quotavals to return. If the
1014 * maximum number of entries to return is odd, we might want
1015 * to skip the first quotaval of the first ID, or the last
1016 * quotaval of the last ID, but not both. So the number of IDs
1017 * we want is (up to) half the number of return slots we have,
1018 * rounded up.
1019 */
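	/*
	 * Worked example (illustrative): maxkeyvals == 5 gives maxids == 3.
	 * If the previous call already returned the BLOCKS half of its
	 * last ID (blocks_done), we skip the first (BLOCKS) slot of the
	 * first ID here; otherwise we emit both halves of the first two
	 * IDs and only the BLOCKS half of the third, and set skiplast so
	 * the cursor logic can revisit that ID on the next call.
	 */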
1020
1021 state->maxids = (state->maxkeyvals + 1) / 2;
1022 state->numids = 0;
1023 if (state->maxkeyvals % 2) {
1024 if (blocks_done) {
1025 state->skipfirst = 1;
1026 state->skiplast = 0;
1027 } else {
1028 state->skipfirst = 0;
1029 state->skiplast = 1;
1030 }
1031 } else {
1032 state->skipfirst = 0;
1033 state->skiplast = 0;
1034 }
1035 }
1036
1037 /*
1038 * Choose which idtype we're going to work on. If doing a full
1039 * iteration, we do users first, then groups, but either might be
1040 * disabled or marked to skip via cursorsetidtype(), so don't make
1041 * silly assumptions.
1042 */
1043 static int
1044 q2cursor_pickidtype(struct ulfsq2_cursor *cursor, int *idtype_ret)
1045 {
1046 if (cursor->q2c_users_done == 0) {
1047 *idtype_ret = QUOTA_IDTYPE_USER;
1048 } else if (cursor->q2c_groups_done == 0) {
1049 *idtype_ret = QUOTA_IDTYPE_GROUP;
1050 } else {
1051 return EAGAIN;
1052 }
1053 return 0;
1054 }
1055
1056 /*
1057 * Add an ID to the current state. Sets up either one or two keys to
1058 * refer to it, depending on whether it's first/last and the setting
1059 * of skipfirst. (skiplast does not need to be explicitly tested)
1060 */
1061 static void
1062 q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
1063 {
1064 KASSERT(state->numids < state->maxids);
1065 KASSERT(state->numkeys < state->maxkeyvals);
1066
1067 if (!state->skipfirst || state->numkeys > 0) {
1068 state->keys[state->numkeys].qk_idtype = idtype;
1069 state->keys[state->numkeys].qk_id = id;
1070 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
1071 state->numkeys++;
1072 }
1073 if (state->numkeys < state->maxkeyvals) {
1074 state->keys[state->numkeys].qk_idtype = idtype;
1075 state->keys[state->numkeys].qk_id = id;
1076 state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
1077 state->numkeys++;
1078 } else {
1079 KASSERT(state->skiplast);
1080 }
1081 state->numids++;
1082 }
1083
1084 /*
1085 * Callback function for getting IDs. Update counting and call addid.
1086 */
1087 static int
1088 q2cursor_getids_callback(struct ulfsmount *ump, uint64_t *offp,
1089 struct quota2_entry *q2ep, uint64_t off, void *v)
1090 {
1091 struct q2cursor_getids *gi = v;
1092 id_t id;
1093 struct lfs *fs = ump->um_lfs;
1094 const int needswap = ULFS_MPNEEDSWAP(fs);
1095
1096 if (gi->skipped < gi->skip) {
1097 gi->skipped++;
1098 return 0;
1099 }
1100 id = ulfs_rw32(q2ep->q2e_uid, needswap);
1101 q2cursor_addid(gi->state, gi->idtype, id);
1102 gi->new_skip++;
1103 if (gi->state->numids >= gi->state->maxids) {
1104 /* got enough ids, stop now */
1105 gi->stopped = 1;
1106 return Q2WL_ABORT;
1107 }
1108 return 0;
1109 }
1110
1111 /*
1112 * Fill in a batch of quotakeys by scanning one or more hash chains.
1113 */
1114 static int
1115 q2cursor_getkeys(struct ulfsmount *ump, int idtype, struct ulfsq2_cursor *cursor,
1116 struct q2cursor_state *state,
1117 int *hashsize_ret, struct quota2_entry *default_q2e_ret)
1118 {
1119 struct lfs *fs = ump->um_lfs;
1120 const int needswap = ULFS_MPNEEDSWAP(fs);
1121 struct buf *hbp;
1122 struct quota2_header *q2h;
1123 int quota2_hash_size;
1124 struct q2cursor_getids gi;
1125 uint64_t offset;
1126 int error;
1127
1128 /*
1129 * Read the header block.
1130 */
1131
1132 mutex_enter(&lfs_dqlock);
1133 error = getq2h(ump, idtype, &hbp, &q2h, 0);
1134 if (error) {
1135 mutex_exit(&lfs_dqlock);
1136 return error;
1137 }
1138
1139 /* if the table size has changed, make the caller start over */
1140 quota2_hash_size = ulfs_rw16(q2h->q2h_hash_size, needswap);
1141 if (cursor->q2c_hashsize == 0) {
1142 cursor->q2c_hashsize = quota2_hash_size;
1143 } else if (cursor->q2c_hashsize != quota2_hash_size) {
1144 error = EDEADLK;
1145 goto scanfail;
1146 }
1147
1148 /* grab the entry with the default values out of the header */
1149 lfsquota2_ulfs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);
1150
1151 /* If we haven't done the defaults yet, that goes first. */
1152 if (cursor->q2c_defaults_done == 0) {
1153 q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
1154 /* if we read both halves, mark it done */
1155 if (state->numids < state->maxids || !state->skiplast) {
1156 cursor->q2c_defaults_done = 1;
1157 }
1158 }
1159
1160 gi.state = state;
1161 gi.idtype = idtype;
1162
1163 while (state->numids < state->maxids) {
1164 if (cursor->q2c_hashpos >= quota2_hash_size) {
1165 /* nothing more left */
1166 break;
1167 }
1168
1169 /* scan this hash chain */
1170 gi.skip = cursor->q2c_uidpos;
1171 gi.new_skip = gi.skip;
1172 gi.skipped = 0;
1173 gi.stopped = 0;
1174 offset = q2h->q2h_entries[cursor->q2c_hashpos];
1175
1176 error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
1177 q2cursor_getids_callback);
1178 KASSERT(error != Q2WL_ABORT);
1179 if (error) {
1180 break;
1181 }
1182 if (gi.stopped) {
1183 /* callback stopped before reading whole chain */
1184 cursor->q2c_uidpos = gi.new_skip;
1185 /* if we didn't get both halves, back up */
1186 if (state->numids == state->maxids && state->skiplast){
1187 KASSERT(cursor->q2c_uidpos > 0);
1188 cursor->q2c_uidpos--;
1189 }
1190 } else {
1191 /* read whole chain */
1192 /* if we got both halves of the last id, advance */
1193 if (state->numids < state->maxids || !state->skiplast){
1194 cursor->q2c_uidpos = 0;
1195 cursor->q2c_hashpos++;
1196 }
1197 }
1198 }
1199
1200 scanfail:
1201 mutex_exit(&lfs_dqlock);
1202 brelse(hbp, 0);
1203 if (error)
1204 return error;
1205
1206 *hashsize_ret = quota2_hash_size;
1207 return 0;
1208 }
1209
1210 /*
1211 * Fetch the quotavals for the quotakeys.
1212 */
1213 static int
1214 q2cursor_getvals(struct ulfsmount *ump, struct q2cursor_state *state,
1215 const struct quota2_entry *default_q2e)
1216 {
1217 int hasid;
1218 id_t loadedid, id;
1219 unsigned pos;
1220 struct quota2_entry q2e;
1221 int objtype;
1222 int error;
1223
1224 hasid = 0;
1225 loadedid = 0;
1226 for (pos = 0; pos < state->numkeys; pos++) {
1227 id = state->keys[pos].qk_id;
1228 if (!hasid || id != loadedid) {
1229 hasid = 1;
1230 loadedid = id;
1231 if (id == QUOTA_DEFAULTID) {
1232 q2e = *default_q2e;
1233 } else {
1234 error = quota2_fetch_q2e(ump,
1235 &state->keys[pos],
1236 &q2e);
1237 if (error == ENOENT) {
1238 /* something changed - start over */
1239 error = EDEADLK;
1240 }
1241 if (error) {
1242 return error;
1243 }
1244 }
1245 }
1246
1247
1248 objtype = state->keys[pos].qk_objtype;
1249 KASSERT(objtype >= 0 && objtype < N_QL);
1250 q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
1251 }
1252
1253 return 0;
1254 }
1255
1256 /*
1257 * Handle cursorget.
1258 *
1259 * We can't just read keys and values directly, because we can't walk
1260  * the list with lfs_dqlock and grab dq_interlock to read the entries at
1261 * the same time. So we're going to do two passes: one to figure out
1262 * which IDs we want and fill in the keys, and then a second to use
1263 * the keys to fetch the values.
1264 */
1265 int
1266 lfsquota2_handle_cmd_cursorget(struct ulfsmount *ump, struct quotakcursor *qkc,
1267 struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
1268 unsigned *ret)
1269 {
1270 int error;
1271 struct ulfsq2_cursor *cursor;
1272 struct ulfsq2_cursor newcursor;
1273 struct q2cursor_state state;
1274 struct quota2_entry default_q2e;
1275 int idtype;
1276 int quota2_hash_size = 0; /* XXXuninit */
1277
1278 /*
1279 * Convert and validate the cursor.
1280 */
1281 cursor = Q2CURSOR(qkc);
1282 error = q2cursor_check(cursor);
1283 if (error) {
1284 return error;
1285 }
1286
1287 /*
1288 * Make sure our on-disk codes match the values of the
1289 * FS-independent ones. This avoids the need for explicit
1290 * conversion (which would be a NOP anyway and thus easily
1291 * left out or called in the wrong places...)
1292 */
1293 CTASSERT(QUOTA_IDTYPE_USER == ULFS_USRQUOTA);
1294 CTASSERT(QUOTA_IDTYPE_GROUP == ULFS_GRPQUOTA);
1295 CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
1296 CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);
1297
1298 /*
1299 * If some of the idtypes aren't configured/enabled, arrange
1300 * to skip over them.
1301 */
1302 if (cursor->q2c_users_done == 0 &&
1303 ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1304 cursor->q2c_users_done = 1;
1305 }
1306 if (cursor->q2c_groups_done == 0 &&
1307 ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1308 cursor->q2c_groups_done = 1;
1309 }
1310
1311 /* Loop over, potentially, both idtypes */
1312 while (1) {
1313
1314 /* Choose id type */
1315 error = q2cursor_pickidtype(cursor, &idtype);
1316 if (error == EAGAIN) {
1317 /* nothing more to do, return 0 */
1318 *ret = 0;
1319 return 0;
1320 }
1321 KASSERT(ump->um_quotas[idtype] != NULLVP);
1322
1323 /*
1324 * Initialize the per-call iteration state. Copy the
1325 * cursor state so we can update it in place but back
1326 * out on error.
1327 */
1328 q2cursor_initstate(&state, keys, vals, maxreturn,
1329 cursor->q2c_blocks_done);
1330 newcursor = *cursor;
1331
1332 /* Assign keys */
1333 error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
1334 			    &quota2_hash_size, &default_q2e);
1335 if (error) {
1336 return error;
1337 }
1338
1339 /* Now fill in the values. */
1340 error = q2cursor_getvals(ump, &state, &default_q2e);
1341 if (error) {
1342 return error;
1343 }
1344
1345 /*
1346 * Now that we aren't going to fail and lose what we
1347 * did so far, we can update the cursor state.
1348 */
1349
1350 if (newcursor.q2c_hashpos >= quota2_hash_size) {
1351 if (idtype == QUOTA_IDTYPE_USER)
1352 cursor->q2c_users_done = 1;
1353 else
1354 cursor->q2c_groups_done = 1;
1355
1356 /* start over on another id type */
1357 cursor->q2c_hashsize = 0;
1358 cursor->q2c_defaults_done = 0;
1359 cursor->q2c_hashpos = 0;
1360 cursor->q2c_uidpos = 0;
1361 cursor->q2c_blocks_done = 0;
1362 } else {
1363 *cursor = newcursor;
1364 cursor->q2c_blocks_done = state.skiplast;
1365 }
1366
1367 /*
1368 * If we have something to return, return it.
1369 * Otherwise, continue to the other idtype, if any,
1370 * and only return zero at end of iteration.
1371 */
1372 if (state.numkeys > 0) {
1373 break;
1374 }
1375 }
1376
1377 *ret = state.numkeys;
1378 return 0;
1379 }
1380
1381 int
1382 lfsquota2_handle_cmd_cursoropen(struct ulfsmount *ump, struct quotakcursor *qkc)
1383 {
1384 struct ulfsq2_cursor *cursor;
1385
1386 CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
1387 cursor = Q2CURSOR(qkc);
1388
1389 cursor->q2c_magic = Q2C_MAGIC;
1390 cursor->q2c_hashsize = 0;
1391
1392 cursor->q2c_users_done = 0;
1393 cursor->q2c_groups_done = 0;
1394 cursor->q2c_defaults_done = 0;
1395 cursor->q2c_hashpos = 0;
1396 cursor->q2c_uidpos = 0;
1397 cursor->q2c_blocks_done = 0;
1398 return 0;
1399 }
1400
1401 int
1402 lfsquota2_handle_cmd_cursorclose(struct ulfsmount *ump, struct quotakcursor *qkc)
1403 {
1404 struct ulfsq2_cursor *cursor;
1405 int error;
1406
1407 cursor = Q2CURSOR(qkc);
1408 error = q2cursor_check(cursor);
1409 if (error) {
1410 return error;
1411 }
1412
1413 /* nothing to do */
1414
1415 return 0;
1416 }
1417
1418 int
1419 lfsquota2_handle_cmd_cursorskipidtype(struct ulfsmount *ump,
1420 struct quotakcursor *qkc, int idtype)
1421 {
1422 struct ulfsq2_cursor *cursor;
1423 int error;
1424
1425 cursor = Q2CURSOR(qkc);
1426 error = q2cursor_check(cursor);
1427 if (error) {
1428 return error;
1429 }
1430
1431 switch (idtype) {
1432 case QUOTA_IDTYPE_USER:
1433 cursor->q2c_users_done = 1;
1434 break;
1435 case QUOTA_IDTYPE_GROUP:
1436 cursor->q2c_groups_done = 1;
1437 break;
1438 default:
1439 return EINVAL;
1440 }
1441
1442 return 0;
1443 }
1444
1445 int
1446 lfsquota2_handle_cmd_cursoratend(struct ulfsmount *ump, struct quotakcursor *qkc,
1447 int *ret)
1448 {
1449 struct ulfsq2_cursor *cursor;
1450 int error;
1451
1452 cursor = Q2CURSOR(qkc);
1453 error = q2cursor_check(cursor);
1454 if (error) {
1455 return error;
1456 }
1457
1458 *ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
1459 return 0;
1460 }
1461
1462 int
1463 lfsquota2_handle_cmd_cursorrewind(struct ulfsmount *ump, struct quotakcursor *qkc)
1464 {
1465 struct ulfsq2_cursor *cursor;
1466 int error;
1467
1468 cursor = Q2CURSOR(qkc);
1469 error = q2cursor_check(cursor);
1470 if (error) {
1471 return error;
1472 }
1473
1474 cursor->q2c_hashsize = 0;
1475
1476 cursor->q2c_users_done = 0;
1477 cursor->q2c_groups_done = 0;
1478 cursor->q2c_defaults_done = 0;
1479 cursor->q2c_hashpos = 0;
1480 cursor->q2c_uidpos = 0;
1481 cursor->q2c_blocks_done = 0;
1482
1483 return 0;
1484 }
1485
1486 int
1487 lfs_q2sync(struct mount *mp)
1488 {
1489 return 0;
1490 }
1491
1492 struct dq2get_callback {
1493 uid_t id;
1494 struct dquot *dq;
1495 };
1496
1497 static int
1498 dq2get_callback(struct ulfsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
1499 uint64_t off, void *v)
1500 {
1501 struct dq2get_callback *c = v;
1502 daddr_t lblkno;
1503 int blkoff;
1504 struct lfs *fs = ump->um_lfs;
1505 const int needswap = ULFS_MPNEEDSWAP(fs);
1506
1507 if (ulfs_rw32(q2e->q2e_uid, needswap) == c->id) {
1508 KASSERT(mutex_owned(&c->dq->dq_interlock));
1509 lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
1510 blkoff = (off & ump->umq2_bmask);
1511 c->dq->dq2_lblkno = lblkno;
1512 c->dq->dq2_blkoff = blkoff;
1513 return Q2WL_ABORT;
1514 }
1515 return 0;
1516 }
1517
1518 int
1519 lfs_dq2get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
1520 struct dquot *dq)
1521 {
1522 struct buf *bp;
1523 struct quota2_header *q2h;
1524 int error;
1525 daddr_t offset;
1526 u_long hash_mask;
1527 struct dq2get_callback c = {
1528 .id = id,
1529 .dq = dq
1530 };
1531
1532 KASSERT(mutex_owned(&dq->dq_interlock));
1533 mutex_enter(&lfs_dqlock);
1534 error = getq2h(ump, type, &bp, &q2h, 0);
1535 if (error)
1536 goto out_mutex;
1537 /* look for our entry */
1538 hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
1539 offset = q2h->q2h_entries[id & hash_mask];
1540 error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
1541 dq2get_callback);
1542 brelse(bp, 0);
1543 out_mutex:
1544 mutex_exit(&lfs_dqlock);
1545 return error;
1546 }
1547
1548 int
1549 lfs_dq2sync(struct vnode *vp, struct dquot *dq)
1550 {
1551 return 0;
1552 }
1553
1554 int
1555 lfs_quota2_mount(struct mount *mp)
1556 {
1557 struct ulfsmount *ump = VFSTOULFS(mp);
1558 struct lfs *fs = ump->um_lfs;
1559 int error = 0;
1560 struct vnode *vp;
1561 struct lwp *l = curlwp;
1562
1563 if ((fs->lfs_use_quota2) == 0)
1564 return 0;
1565
1566 fs->um_flags |= ULFS_QUOTA2;
1567 ump->umq2_bsize = lfs_sb_getbsize(fs);
1568 ump->umq2_bmask = lfs_sb_getbmask(fs);
1569 if (fs->lfs_quota_magic != Q2_HEAD_MAGIC) {
1570 printf("%s: Invalid quota magic number\n",
1571 mp->mnt_stat.f_mntonname);
1572 return EINVAL;
1573 }
1574 if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA)) &&
1575 fs->lfs_quotaino[ULFS_USRQUOTA] == 0) {
1576 printf("%s: no user quota inode\n",
1577 mp->mnt_stat.f_mntonname);
1578 error = EINVAL;
1579 }
1580 if ((fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA)) &&
1581 fs->lfs_quotaino[ULFS_GRPQUOTA] == 0) {
1582 printf("%s: no group quota inode\n",
1583 mp->mnt_stat.f_mntonname);
1584 error = EINVAL;
1585 }
1586 if (error)
1587 return error;
1588
1589 if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_USRQUOTA) &&
1590 ump->um_quotas[ULFS_USRQUOTA] == NULLVP) {
1591 error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_USRQUOTA], &vp);
1592 if (error) {
1593 printf("%s: can't vget() user quota inode: %d\n",
1594 mp->mnt_stat.f_mntonname, error);
1595 return error;
1596 }
1597 ump->um_quotas[ULFS_USRQUOTA] = vp;
1598 ump->um_cred[ULFS_USRQUOTA] = l->l_cred;
1599 mutex_enter(vp->v_interlock);
1600 vp->v_writecount++;
1601 mutex_exit(vp->v_interlock);
1602 VOP_UNLOCK(vp);
1603 }
1604 if (fs->lfs_quota_flags & FS_Q2_DO_TYPE(ULFS_GRPQUOTA) &&
1605 ump->um_quotas[ULFS_GRPQUOTA] == NULLVP) {
1606 error = VFS_VGET(mp, fs->lfs_quotaino[ULFS_GRPQUOTA], &vp);
1607 if (error) {
1608 vn_close(ump->um_quotas[ULFS_USRQUOTA],
1609 FREAD|FWRITE, l->l_cred);
1610 printf("%s: can't vget() group quota inode: %d\n",
1611 mp->mnt_stat.f_mntonname, error);
1612 return error;
1613 }
1614 ump->um_quotas[ULFS_GRPQUOTA] = vp;
1615 ump->um_cred[ULFS_GRPQUOTA] = l->l_cred;
1616 mutex_enter(vp->v_interlock);
1617 vp->v_vflag |= VV_SYSTEM;
1618 vp->v_writecount++;
1619 mutex_exit(vp->v_interlock);
1620 VOP_UNLOCK(vp);
1621 }
1622 mp->mnt_flag |= MNT_QUOTA;
1623 return 0;
1624 }
1625