/*	$NetBSD: ulfs_quota1.c,v 1.12 2021/06/29 22:40:54 dholland Exp $	*/
/*  from NetBSD: ufs_quota1.c,v 1.22 2016/06/20 00:52:04 dholland Exp  */

/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_quota1.c,v 1.12 2021/06/29 22:40:54 dholland Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kauth.h>

#include <ufs/lfs/ulfs_quota1.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>
#include <ufs/lfs/ulfs_quota.h>

static int chkdqchg(struct inode *, int64_t, kauth_cred_t, int);
static int chkiqchg(struct inode *, int32_t, kauth_cred_t, int);
/*
 * Update disk usage, and take corrective action.
 */
int
lfs_chkdq1(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
{
	struct dquot *dq;
	int i;
	int ncurblocks, error;

	if ((error = lfs_getinoquota(ip)) != 0)
		return error;
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			mutex_enter(&dq->dq_interlock);
			ncurblocks = dq->dq_curblocks + change;
			if (ncurblocks >= 0)
				dq->dq_curblocks = ncurblocks;
			else
				dq->dq_curblocks = 0;
			dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
			dq->dq_flags |= DQ_MOD;
			mutex_exit(&dq->dq_interlock);
		}
		return (0);
	}
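	/*
	 * Allocation: unless the change is forced or the caller holds
	 * the no-limit privilege, verify that charging it would not
	 * push any of the inode's quotas past its limits.
	 */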
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		if ((flags & FORCE) == 0 &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT, KAUTH_ARG(i),
		    KAUTH_ARG(QL_BLOCK), NULL) != 0) {
			mutex_enter(&dq->dq_interlock);
			error = chkdqchg(ip, change, cred, i);
			mutex_exit(&dq->dq_interlock);
			if (error != 0)
				return (error);
		}
	}
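	/*
	 * All checks passed (or were bypassed); charge the blocks to
	 * every quota attached to the inode.
	 */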
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		mutex_enter(&dq->dq_interlock);
		dq->dq_curblocks += change;
		dq->dq_flags |= DQ_MOD;
		mutex_exit(&dq->dq_interlock);
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
static int
chkdqchg(struct inode *ip, int64_t change, kauth_cred_t cred, int type)
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurblocks = dq->dq_curblocks + change;

	KASSERT(mutex_owned(&dq->dq_interlock));
	/*
	 * If user would exceed their hard limit, disallow space allocation.
	 */
	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
		if ((dq->dq_flags & DQ_WARN(QL_BLOCK)) == 0 &&
		    ip->i_uid == kauth_cred_geteuid(cred)) {
			uprintf("\n%s: write failed, %s disk limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    lfs_quotatypes[type]);
			dq->dq_flags |= DQ_WARN(QL_BLOCK);
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow space
	 * allocation.  Reset time limit as they cross their soft limit.
	 */
	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
			dq->dq_btime =
			    time_second + ip->i_ump->umq1_btime[type];
			if (ip->i_uid == kauth_cred_geteuid(cred))
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    lfs_quotatypes[type], "disk quota exceeded");
			return (0);
		}
		if (time_second > dq->dq_btime) {
			if ((dq->dq_flags & DQ_WARN(QL_BLOCK)) == 0 &&
			    ip->i_uid == kauth_cred_geteuid(cred)) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    lfs_quotatypes[type],
				    "disk quota exceeded for too long");
				dq->dq_flags |= DQ_WARN(QL_BLOCK);
			}
			return (EDQUOT);
		}
	}
	return (0);
}

/*
 * Check the inode limit, applying corrective action.
 */
int
lfs_chkiq1(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
{
	struct dquot *dq;
	int i;
	int ncurinodes, error;

	if ((error = lfs_getinoquota(ip)) != 0)
		return error;
	if (change == 0)
		return (0);
	if (change < 0) {
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			mutex_enter(&dq->dq_interlock);
			ncurinodes = dq->dq_curinodes + change;
			if (ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			dq->dq_flags &= ~DQ_WARN(QL_FILE);
			dq->dq_flags |= DQ_MOD;
			mutex_exit(&dq->dq_interlock);
		}
		return (0);
	}
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		if ((flags & FORCE) == 0 && kauth_authorize_system(cred,
		    KAUTH_SYSTEM_FS_QUOTA, KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
		    KAUTH_ARG(i), KAUTH_ARG(QL_FILE), NULL) != 0) {
			mutex_enter(&dq->dq_interlock);
			error = chkiqchg(ip, change, cred, i);
			mutex_exit(&dq->dq_interlock);
			if (error != 0)
				return (error);
		}
	}
	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		mutex_enter(&dq->dq_interlock);
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
		mutex_exit(&dq->dq_interlock);
	}
	return (0);
}

/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
static int
chkiqchg(struct inode *ip, int32_t change, kauth_cred_t cred, int type)
{
	struct dquot *dq = ip->i_dquot[type];
	long ncurinodes = dq->dq_curinodes + change;

	KASSERT(mutex_owned(&dq->dq_interlock));
	/*
	 * If user would exceed their hard limit, disallow inode allocation.
	 */
	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
		if ((dq->dq_flags & DQ_WARN(QL_FILE)) == 0 &&
		    ip->i_uid == kauth_cred_geteuid(cred)) {
			uprintf("\n%s: write failed, %s inode limit reached\n",
			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
			    lfs_quotatypes[type]);
			dq->dq_flags |= DQ_WARN(QL_FILE);
		}
		return (EDQUOT);
	}
	/*
	 * If user is over their soft limit for too long, disallow inode
	 * allocation.  Reset time limit as they cross their soft limit.
	 */
	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
		if (dq->dq_curinodes < dq->dq_isoftlimit) {
			dq->dq_itime =
			    time_second + ip->i_ump->umq1_itime[type];
			if (ip->i_uid == kauth_cred_geteuid(cred))
				uprintf("\n%s: warning, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    lfs_quotatypes[type], "inode quota exceeded");
			return (0);
		}
		if (time_second > dq->dq_itime) {
			if ((dq->dq_flags & DQ_WARN(QL_FILE)) == 0 &&
			    ip->i_uid == kauth_cred_geteuid(cred)) {
				uprintf("\n%s: write failed, %s %s\n",
				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
				    lfs_quotatypes[type],
				    "inode quota exceeded for too long");
				dq->dq_flags |= DQ_WARN(QL_FILE);
			}
			return (EDQUOT);
		}
	}
	return (0);
}

int
lfsquota1_umount(struct mount *mp, int flags)
{
	int i, error;
	struct ulfsmount *ump = VFSTOULFS(mp);
	struct lfs *fs = ump->um_lfs;
	struct lwp *l = curlwp;

	if ((fs->um_flags & ULFS_QUOTA) == 0)
		return 0;

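	/*
	 * Flush all non-system vnodes; the quota files themselves are
	 * marked VV_SYSTEM, so they are skipped here and closed below.
	 */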
	if ((error = vflush(mp, NULLVP, SKIPSYSTEM | flags)) != 0)
		return (error);

	for (i = 0; i < ULFS_MAXQUOTAS; i++) {
		if (ump->um_quotas[i] != NULLVP) {
			lfsquota1_handle_cmd_quotaoff(l, ump, i);
		}
	}
	return 0;
}

/*
 * Code to process quotactl commands.
 */

/*
 * set up a quota file for a particular file system.
 */
int
lfsquota1_handle_cmd_quotaon(struct lwp *l, struct ulfsmount *ump, int type,
    const char *fname)
{
	struct mount *mp = ump->um_mountp;
	struct lfs *fs = ump->um_lfs;
	struct vnode *vp, **vpp;
	struct vnode_iterator *marker;
	struct dquot *dq;
	int error;
	struct pathbuf *pb;

	if (fs->um_flags & ULFS_QUOTA2) {
		uprintf("%s: quotas v2 already enabled\n",
		    mp->mnt_stat.f_mntonname);
		return (EBUSY);
	}

	vpp = &ump->um_quotas[type];

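	/*
	 * Open the named quota file for reading and writing.
	 */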
	pb = pathbuf_create(fname);
	if (pb == NULL) {
		return ENOMEM;
	}
	error = vn_open(NULL, pb, 0, FREAD|FWRITE, 0, &vp, NULL, NULL);
	if (error != 0) {
		pathbuf_destroy(pb);
		return error;
	}
	pathbuf_destroy(pb);

	VOP_UNLOCK(vp);
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, l->l_cred);
		return (EACCES);
	}
	if (*vpp != vp)
		lfsquota1_handle_cmd_quotaoff(l, ump, type);
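	/*
	 * Serialize against any quotaon/quotaoff already in progress
	 * on this quota type.
	 */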
	mutex_enter(&lfs_dqlock);
	while ((ump->umq1_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
		cv_wait(&lfs_dqcv, &lfs_dqlock);
	ump->umq1_qflags[type] |= QTF_OPENING;
	mutex_exit(&lfs_dqlock);
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_vflag |= VV_SYSTEM;	/* XXXSMP */
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	kauth_cred_hold(l->l_cred);
	ump->um_cred[type] = l->l_cred;
	ump->umq1_btime[type] = MAX_DQ_TIME;
	ump->umq1_itime[type] = MAX_IQ_TIME;
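	/*
	 * The dquot for id 0 holds the per-filesystem grace times, if
	 * any have been set; use them in place of the defaults above.
	 */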
	if (lfs_dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->umq1_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->umq1_itime[type] = dq->dq_itime;
		lfs_dqrele(NULLVP, dq);
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
	vfs_vnode_iterator_init(mp, &marker);
	while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error) {
			vrele(vp);
			continue;
		}
		mutex_enter(vp->v_interlock);
		if (VTOI(vp) == NULL || vp->v_type == VNON ||
		    vp->v_writecount == 0) {
			mutex_exit(vp->v_interlock);
			vput(vp);
			continue;
		}
		mutex_exit(vp->v_interlock);
		if ((error = lfs_getinoquota(VTOI(vp))) != 0) {
			vput(vp);
			break;
		}
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);

	mutex_enter(&lfs_dqlock);
	ump->umq1_qflags[type] &= ~QTF_OPENING;
	cv_broadcast(&lfs_dqcv);
	if (error == 0)
		fs->um_flags |= ULFS_QUOTA;
	mutex_exit(&lfs_dqlock);
	if (error)
		lfsquota1_handle_cmd_quotaoff(l, ump, type);
	return (error);
}

/*
 * turn off disk quotas for a filesystem.
 */
int
lfsquota1_handle_cmd_quotaoff(struct lwp *l, struct ulfsmount *ump, int type)
{
	struct mount *mp = ump->um_mountp;
	struct lfs *fs = ump->um_lfs;
	struct vnode *vp;
	struct vnode *qvp;
	struct vnode_iterator *marker;
	struct dquot *dq;
	struct inode *ip;
	kauth_cred_t cred;
	int i, error;

	mutex_enter(&lfs_dqlock);
	while ((ump->umq1_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
		cv_wait(&lfs_dqcv, &lfs_dqlock);
	if ((qvp = ump->um_quotas[type]) == NULLVP) {
		mutex_exit(&lfs_dqlock);
		return (0);
	}
	ump->umq1_qflags[type] |= QTF_CLOSING;
	fs->um_flags &= ~ULFS_QUOTA;
	mutex_exit(&lfs_dqlock);
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
	vfs_vnode_iterator_init(mp, &marker);
	while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error) {
			vrele(vp);
			continue;
		}
		ip = VTOI(vp);
		if (ip == NULL || vp->v_type == VNON) {
			vput(vp);
			continue;
		}
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		lfs_dqrele(vp, dq);
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);
#ifdef DIAGNOSTIC
	lfs_dqflush(qvp);
#endif
	qvp->v_vflag &= ~VV_SYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, l->l_cred);
	mutex_enter(&lfs_dqlock);
	ump->um_quotas[type] = NULLVP;
	cred = ump->um_cred[type];
	ump->um_cred[type] = NOCRED;
	for (i = 0; i < ULFS_MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	ump->umq1_qflags[type] &= ~QTF_CLOSING;
	cv_broadcast(&lfs_dqcv);
	mutex_exit(&lfs_dqlock);
	kauth_cred_free(cred);
	if (i == ULFS_MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}

int
lfsquota1_handle_cmd_get(struct ulfsmount *ump, const struct quotakey *qk,
    struct quotaval *qv)
{
	struct dquot *dq;
	int error;
	struct quotaval blocks, files;
	int idtype;
	id_t id;

	idtype = qk->qk_idtype;
	id = qk->qk_id;

	if (ump->um_quotas[idtype] == NULLVP)
		return ENODEV;

	if (id == QUOTA_DEFAULTID) { /* we want the grace period of id 0 */
		if ((error = lfs_dqget(NULLVP, 0, ump, idtype, &dq)) != 0)
			return error;

	} else {
		if ((error = lfs_dqget(NULLVP, id, ump, idtype, &dq)) != 0)
			return error;
	}
	lfs_dqblk_to_quotavals(&dq->dq_un.dq1_dqb, &blocks, &files);
	lfs_dqrele(NULLVP, dq);
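	/*
	 * For the default id, the stored expire times of id 0 are the
	 * grace periods; fall back to MAX_DQ_TIME where they are unset.
	 */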
	if (id == QUOTA_DEFAULTID) {
		if (blocks.qv_expiretime > 0)
			blocks.qv_grace = blocks.qv_expiretime;
		else
			blocks.qv_grace = MAX_DQ_TIME;
		if (files.qv_expiretime > 0)
			files.qv_grace = files.qv_expiretime;
		else
			files.qv_grace = MAX_DQ_TIME;
	}

	switch (qk->qk_objtype) {
	case QUOTA_OBJTYPE_BLOCKS:
		*qv = blocks;
		break;
	case QUOTA_OBJTYPE_FILES:
		*qv = files;
		break;
	default:
		return EINVAL;
	}

	return 0;
}

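/*
 * Convert a libquota limit value to the 32-bit on-disk quota1
 * representation, in which 0 means "no limit".
 */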
static uint32_t
quota1_encode_limit(uint64_t lim)
{
	if (lim == QUOTA_NOLIMIT || lim >= 0xffffffff) {
		return 0;
	}
	return lim;
}

int
lfsquota1_handle_cmd_put(struct ulfsmount *ump, const struct quotakey *key,
    const struct quotaval *val)
{
	struct dquot *dq;
	struct dqblk dqb;
	int error;

	switch (key->qk_idtype) {
	case QUOTA_IDTYPE_USER:
	case QUOTA_IDTYPE_GROUP:
		break;
	default:
		return EINVAL;
	}

	switch (key->qk_objtype) {
	case QUOTA_OBJTYPE_BLOCKS:
	case QUOTA_OBJTYPE_FILES:
		break;
	default:
		return EINVAL;
	}

	if (ump->um_quotas[key->qk_idtype] == NULLVP)
		return ENODEV;

	if (key->qk_id == QUOTA_DEFAULTID) {
		/* just update grace times */
		id_t id = 0;

		if ((error = lfs_dqget(NULLVP, id, ump, key->qk_idtype, &dq)) != 0)
			return error;
		mutex_enter(&dq->dq_interlock);
		if (val->qv_grace != QUOTA_NOTIME) {
			if (key->qk_objtype == QUOTA_OBJTYPE_BLOCKS)
				ump->umq1_btime[key->qk_idtype] = dq->dq_btime =
				    val->qv_grace;
			if (key->qk_objtype == QUOTA_OBJTYPE_FILES)
				ump->umq1_itime[key->qk_idtype] = dq->dq_itime =
				    val->qv_grace;
		}
		dq->dq_flags |= DQ_MOD;
		mutex_exit(&dq->dq_interlock);
		lfs_dqrele(NULLVP, dq);
		return 0;
	}

	if ((error = lfs_dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq)) != 0)
		return (error);
	mutex_enter(&dq->dq_interlock);
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	dqb.dqb_curblocks = dq->dq_curblocks;
	dqb.dqb_curinodes = dq->dq_curinodes;
	dqb.dqb_btime = dq->dq_btime;
	dqb.dqb_itime = dq->dq_itime;
	if (key->qk_objtype == QUOTA_OBJTYPE_BLOCKS) {
		dqb.dqb_bsoftlimit = quota1_encode_limit(val->qv_softlimit);
		dqb.dqb_bhardlimit = quota1_encode_limit(val->qv_hardlimit);
		dqb.dqb_isoftlimit = dq->dq_isoftlimit;
		dqb.dqb_ihardlimit = dq->dq_ihardlimit;
	} else {
		KASSERT(key->qk_objtype == QUOTA_OBJTYPE_FILES);
		dqb.dqb_bsoftlimit = dq->dq_bsoftlimit;
		dqb.dqb_bhardlimit = dq->dq_bhardlimit;
		dqb.dqb_isoftlimit = quota1_encode_limit(val->qv_softlimit);
		dqb.dqb_ihardlimit = quota1_encode_limit(val->qv_hardlimit);
	}
	if (dq->dq_id == 0 && val->qv_grace != QUOTA_NOTIME) {
		/* also update grace time if available */
		if (key->qk_objtype == QUOTA_OBJTYPE_BLOCKS) {
			ump->umq1_btime[key->qk_idtype] = dqb.dqb_btime =
			    val->qv_grace;
		}
		if (key->qk_objtype == QUOTA_OBJTYPE_FILES) {
			ump->umq1_itime[key->qk_idtype] = dqb.dqb_itime =
			    val->qv_grace;
		}
	}
	if (dqb.dqb_bsoftlimit &&
	    dq->dq_curblocks >= dqb.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		dqb.dqb_btime = time_second + ump->umq1_btime[key->qk_idtype];
	if (dqb.dqb_isoftlimit &&
	    dq->dq_curinodes >= dqb.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		dqb.dqb_itime = time_second + ump->umq1_itime[key->qk_idtype];
	dq->dq_un.dq1_dqb = dqb;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_FILE);
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
	return (0);
}


#if 0
/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
int
setquota1(struct mount *mp, u_long id, int type, struct dqblk *dqb)
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ulfsmount *ump = VFSTOULFS(mp);
	int error;

	if ((error = lfs_dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	mutex_enter(&dq->dq_interlock);
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	dqb->dqb_curblocks = dq->dq_curblocks;
	dqb->dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		dqb->dqb_btime = dq->dq_btime;
		dqb->dqb_itime = dq->dq_itime;
	}
	if (dqb->dqb_bsoftlimit &&
	    dq->dq_curblocks >= dqb->dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		dqb->dqb_btime = time_second + ump->umq1_btime[type];
	if (dqb->dqb_isoftlimit &&
	    dq->dq_curinodes >= dqb->dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		dqb->dqb_itime = time_second + ump->umq1_itime[type];
	dq->dq_un.dq1_dqb = *dqb;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_FILE);
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
	return (0);
}

/*
 * Q_SETUSE - set current inode and block usage.
 */
int
setuse(struct mount *mp, u_long id, int type, void *addr)
{
	struct dquot *dq;
	struct ulfsmount *ump = VFSTOULFS(mp);
	struct dquot *ndq;
	struct dqblk usage;
	int error;

	error = copyin(addr, (void *)&usage, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = lfs_dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	mutex_enter(&dq->dq_interlock);
	/*
	 * Reset time limit if have a soft limit and were
	 * previously under it, but are now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time_second + ump->umq1_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time_second + ump->umq1_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_WARN(QL_FILE);
	dq->dq_flags |= DQ_MOD;
	mutex_exit(&dq->dq_interlock);
	lfs_dqrele(NULLVP, dq);
	return (0);
}
#endif

/*
 * Q_SYNC - sync quota files to disk.
 */
int
lfs_q1sync(struct mount *mp)
{
	struct ulfsmount *ump = VFSTOULFS(mp);
	struct vnode *vp;
	struct vnode_iterator *marker;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < ULFS_MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == ULFS_MAXQUOTAS)
		return (0);

	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
	vfs_vnode_iterator_init(mp, &marker);
	while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error) {
			vrele(vp);
			continue;
		}
		if (VTOI(vp) == NULL || vp->v_type == VNON) {
			vput(vp);
			continue;
		}
		for (i = 0; i < ULFS_MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			mutex_enter(&dq->dq_interlock);
			if (dq->dq_flags & DQ_MOD)
				lfs_dq1sync(vp, dq);
			mutex_exit(&dq->dq_interlock);
		}
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);
	return (0);
}

/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
int
lfs_dq1get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
    struct dquot *dq)
{
	struct iovec aiov;
	struct uio auio;
	int error;

	KASSERT(mutex_owned(&dq->dq_interlock));
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_un.dq1_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)id * sizeof (struct dqblk);
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
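	/*
	 * A read past the end of the quota file returns no data; treat
	 * that as an all-zero (absent) entry.
	 */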
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((void *)&dq->dq_un.dq1_dqb, 0, sizeof(struct dqblk));
	VOP_UNLOCK(dqvp);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error)
		return (error);
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time_second + ump->umq1_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time_second + ump->umq1_itime[type];
	}
	return (0);
}

/*
 * Update the disk quota in the quota file.
 */
int
lfs_dq1sync(struct vnode *vp, struct dquot *dq)
{
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dq1sync: dquot");
	KASSERT(mutex_owned(&dq->dq_interlock));
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dq1sync: file");
	KASSERT(dqvp != vp);
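	/*
	 * Write the dqblk back to its slot in the quota file.
	 */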
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_un.dq1_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)dq->dq_id * sizeof (struct dqblk);
	auio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	dq->dq_flags &= ~DQ_MOD;
	VOP_UNLOCK(dqvp);
	return (error);
}