xref: /netbsd-src/sys/ufs/ufs/ufs_quota1.c (revision fb591c4d97dd040b9aff6d6abee9e25e3f67945d)
1 /*	$NetBSD: ufs_quota1.c,v 1.26 2023/02/22 21:49:45 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) 1982, 1986, 1990, 1993, 1995
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Robert Elz at The University of Melbourne.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: ufs_quota1.c,v 1.26 2023/02/22 21:49:45 riastradh Exp $");
39 
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/systm.h>
43 #include <sys/namei.h>
44 #include <sys/file.h>
45 #include <sys/proc.h>
46 #include <sys/vnode.h>
47 #include <sys/mount.h>
48 #include <sys/kauth.h>
49 
50 #include <ufs/ufs/quota1.h>
51 #include <ufs/ufs/inode.h>
52 #include <ufs/ufs/ufsmount.h>
53 #include <ufs/ufs/ufs_extern.h>
54 #include <ufs/ufs/ufs_quota.h>
55 
56 static int chkdqchg(struct inode *, int64_t, kauth_cred_t, int);
57 static int chkiqchg(struct inode *, int32_t, kauth_cred_t, int);
58 
59 /*
60  * Update disk usage, and take corrective action.
61  */
62 int
63 chkdq1(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
64 {
65 	struct dquot *dq;
66 	int i;
67 	int ncurblocks, error;
68 
69 	if ((error = getinoquota(ip)) != 0)
70 		return error;
71 	if (change == 0)
72 		return (0);
73 	if (change < 0) {
74 		for (i = 0; i < MAXQUOTAS; i++) {
75 			if ((dq = ip->i_dquot[i]) == NODQUOT)
76 				continue;
77 			mutex_enter(&dq->dq_interlock);
78 			ncurblocks = dq->dq_curblocks + change;
79 			if (ncurblocks >= 0)
80 				dq->dq_curblocks = ncurblocks;
81 			else
82 				dq->dq_curblocks = 0;
83 			dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
84 			dq->dq_flags |= DQ_MOD;
85 			mutex_exit(&dq->dq_interlock);
86 		}
87 		return (0);
88 	}
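	/*
	 * Allocating blocks: verify that each applicable quota permits the
	 * change, unless the caller forces it or holds the no-limit privilege.
	 */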
89 	for (i = 0; i < MAXQUOTAS; i++) {
90 		if ((dq = ip->i_dquot[i]) == NODQUOT)
91 			continue;
92 		if ((flags & FORCE) == 0 &&
93 		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
94 			KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT, KAUTH_ARG(i),
95 			KAUTH_ARG(QL_BLOCK), NULL) != 0) {
96 			mutex_enter(&dq->dq_interlock);
97 			error = chkdqchg(ip, change, cred, i);
98 			mutex_exit(&dq->dq_interlock);
99 			if (error != 0)
100 				return (error);
101 		}
102 	}
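	/* All limit checks passed; charge the blocks against each quota. */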
103 	for (i = 0; i < MAXQUOTAS; i++) {
104 		if ((dq = ip->i_dquot[i]) == NODQUOT)
105 			continue;
106 		mutex_enter(&dq->dq_interlock);
107 		dq->dq_curblocks += change;
108 		dq->dq_flags |= DQ_MOD;
109 		mutex_exit(&dq->dq_interlock);
110 	}
111 	return (0);
112 }
113 
114 /*
115  * Check for a valid change to a user's allocation.
116  * Issue an error message if appropriate.
117  */
118 static int
119 chkdqchg(struct inode *ip, int64_t change, kauth_cred_t cred, int type)
120 {
121 	struct dquot *dq = ip->i_dquot[type];
122 	long ncurblocks = dq->dq_curblocks + change;
123 
124 	KASSERT(mutex_owned(&dq->dq_interlock));
125 	/*
126 	 * If user would exceed their hard limit, disallow space allocation.
127 	 */
128 	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
129 		if ((dq->dq_flags & DQ_WARN(QL_BLOCK)) == 0 &&
130 		    ip->i_uid == kauth_cred_geteuid(cred)) {
131 			uprintf("\n%s: write failed, %s disk limit reached\n",
132 			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
133 			    quotatypes[type]);
134 			dq->dq_flags |= DQ_WARN(QL_BLOCK);
135 		}
136 		return (EDQUOT);
137 	}
138 	/*
139 	 * If user is over their soft limit for too long, disallow space
140 	 * allocation. Reset time limit as they cross their soft limit.
141 	 */
142 	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
143 		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
144 			dq->dq_btime =
145 			    time_second + ip->i_ump->umq1_btime[type];
146 			if (ip->i_uid == kauth_cred_geteuid(cred))
147 				uprintf("\n%s: warning, %s %s\n",
148 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
149 				    quotatypes[type], "disk quota exceeded");
150 			return (0);
151 		}
152 		if (time_second > dq->dq_btime) {
153 			if ((dq->dq_flags & DQ_WARN(QL_BLOCK)) == 0 &&
154 			    ip->i_uid == kauth_cred_geteuid(cred)) {
155 				uprintf("\n%s: write failed, %s %s\n",
156 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
157 				    quotatypes[type],
158 				    "disk quota exceeded for too long");
159 				dq->dq_flags |= DQ_WARN(QL_BLOCK);
160 			}
161 			return (EDQUOT);
162 		}
163 	}
164 	return (0);
165 }
166 
167 /*
168  * Check the inode limit, applying corrective action.
169  */
170 int
171 chkiq1(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
172 {
173 	struct dquot *dq;
174 	int i;
175 	int ncurinodes, error;
176 
177 	if ((error = getinoquota(ip)) != 0)
178 		return error;
179 	if (change == 0)
180 		return (0);
181 	if (change < 0) {
182 		for (i = 0; i < MAXQUOTAS; i++) {
183 			if ((dq = ip->i_dquot[i]) == NODQUOT)
184 				continue;
185 			mutex_enter(&dq->dq_interlock);
186 			ncurinodes = dq->dq_curinodes + change;
187 			if (ncurinodes >= 0)
188 				dq->dq_curinodes = ncurinodes;
189 			else
190 				dq->dq_curinodes = 0;
191 			dq->dq_flags &= ~DQ_WARN(QL_FILE);
192 			dq->dq_flags |= DQ_MOD;
193 			mutex_exit(&dq->dq_interlock);
194 		}
195 		return (0);
196 	}
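	/*
	 * Allocating inodes: verify that each applicable quota permits the
	 * change, unless the caller forces it or holds the no-limit privilege.
	 */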
197 	for (i = 0; i < MAXQUOTAS; i++) {
198 		if ((dq = ip->i_dquot[i]) == NODQUOT)
199 			continue;
200 		if ((flags & FORCE) == 0 &&
201 		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
202 			KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
203 			KAUTH_ARG(i), KAUTH_ARG(QL_FILE), NULL) != 0) {
204 			mutex_enter(&dq->dq_interlock);
205 			error = chkiqchg(ip, change, cred, i);
206 			mutex_exit(&dq->dq_interlock);
207 			if (error != 0)
208 				return (error);
209 		}
210 	}
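	/* All limit checks passed; charge the inodes against each quota. */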
211 	for (i = 0; i < MAXQUOTAS; i++) {
212 		if ((dq = ip->i_dquot[i]) == NODQUOT)
213 			continue;
214 		mutex_enter(&dq->dq_interlock);
215 		dq->dq_curinodes += change;
216 		dq->dq_flags |= DQ_MOD;
217 		mutex_exit(&dq->dq_interlock);
218 	}
219 	return (0);
220 }
221 
222 /*
223  * Check for a valid change to a user's allocation.
224  * Issue an error message if appropriate.
225  */
226 static int
227 chkiqchg(struct inode *ip, int32_t change, kauth_cred_t cred, int type)
228 {
229 	struct dquot *dq = ip->i_dquot[type];
230 	long ncurinodes = dq->dq_curinodes + change;
231 
232 	KASSERT(mutex_owned(&dq->dq_interlock));
233 	/*
234 	 * If user would exceed their hard limit, disallow inode allocation.
235 	 */
236 	if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
237 		if ((dq->dq_flags & DQ_WARN(QL_FILE)) == 0 &&
238 		    ip->i_uid == kauth_cred_geteuid(cred)) {
239 			uprintf("\n%s: write failed, %s inode limit reached\n",
240 			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
241 			    quotatypes[type]);
242 			dq->dq_flags |= DQ_WARN(QL_FILE);
243 		}
244 		return (EDQUOT);
245 	}
246 	/*
247 	 * If user is over their soft limit for too long, disallow inode
248 	 * allocation. Reset time limit as they cross their soft limit.
249 	 */
250 	if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
251 		if (dq->dq_curinodes < dq->dq_isoftlimit) {
252 			dq->dq_itime =
253 			    time_second + ip->i_ump->umq1_itime[type];
254 			if (ip->i_uid == kauth_cred_geteuid(cred))
255 				uprintf("\n%s: warning, %s %s\n",
256 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
257 				    quotatypes[type], "inode quota exceeded");
258 			return (0);
259 		}
260 		if (time_second > dq->dq_itime) {
261 			if ((dq->dq_flags & DQ_WARN(QL_FILE)) == 0 &&
262 			    ip->i_uid == kauth_cred_geteuid(cred)) {
263 				uprintf("\n%s: write failed, %s %s\n",
264 				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
265 				    quotatypes[type],
266 				    "inode quota exceeded for too long");
267 				dq->dq_flags |= DQ_WARN(QL_FILE);
268 			}
269 			return (EDQUOT);
270 		}
271 	}
272 	return (0);
273 }
274 
275 int
276 quota1_umount(struct mount *mp, int flags)
277 {
278 	int i, error;
279 	struct ufsmount *ump = VFSTOUFS(mp);
280 	struct lwp *l = curlwp;
281 
282 	if ((ump->um_flags & UFS_QUOTA) == 0)
283 		return 0;
284 
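	/*
	 * Flush all non-system vnodes; the quota files themselves are marked
	 * VV_SYSTEM and are skipped here, then closed by quotaoff below.
	 */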
285 	if ((error = vflush(mp, NULLVP, SKIPSYSTEM | flags)) != 0)
286 		return (error);
287 
288 	for (i = 0; i < MAXQUOTAS; i++) {
289 		if (ump->um_quotas[i] != NULLVP) {
290 			quota1_handle_cmd_quotaoff(l, ump, i);
291 		}
292 	}
293 	return 0;
294 }
295 
296 /*
297  * Code to process quotactl commands.
298  */
299 
300 /*
301  * set up a quota file for a particular file system.
302  */
303 int
304 quota1_handle_cmd_quotaon(struct lwp *l, struct ufsmount *ump, int type,
305     const char *fname)
306 {
307 	struct mount *mp = ump->um_mountp;
308 	struct vnode *vp, **vpp;
309 	struct vnode_iterator *marker;
310 	struct dquot *dq;
311 	int error;
312 	struct pathbuf *pb;
313 
314 	if (type < 0 || type >= MAXQUOTAS)
315 		return EINVAL;
316 
317 	if (ump->um_flags & UFS_QUOTA2) {
318 		uprintf("%s: quotas v2 already enabled\n",
319 		    mp->mnt_stat.f_mntonname);
320 		return (EBUSY);
321 	}
322 
323 	if (mp->mnt_wapbl != NULL) {
324 		printf("%s: quota v1 cannot be used with -o log\n",
325 		    mp->mnt_stat.f_mntonname);
326 		return (EOPNOTSUPP);
327 	}
328 
329 	vpp = &ump->um_quotas[type];
330 
331 	pb = pathbuf_create(fname);
332 	if (pb == NULL) {
333 		return ENOMEM;
334 	}
335 	error = vn_open(NULL, pb, 0, FREAD|FWRITE, 0, &vp, NULL, NULL);
336 	if (error != 0) {
337 		pathbuf_destroy(pb);
338 		return error;
339 	}
340 	pathbuf_destroy(pb);
341 
342 	VOP_UNLOCK(vp);
343 	if (vp->v_type != VREG) {
344 		(void) vn_close(vp, FREAD|FWRITE, l->l_cred);
345 		return (EACCES);
346 	}
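	/*
	 * If a different quota file was already open for this type,
	 * close it before installing the new one.
	 */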
347 	if (*vpp != vp)
348 		quota1_handle_cmd_quotaoff(l, ump, type);
349 	mutex_enter(&dqlock);
350 	while ((ump->umq1_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
351 		cv_wait(&dqcv, &dqlock);
352 	ump->umq1_qflags[type] |= QTF_OPENING;
353 	mutex_exit(&dqlock);
354 	mp->mnt_flag |= MNT_QUOTA;
355 	vp->v_vflag |= VV_SYSTEM;	/* XXXSMP */
356 	*vpp = vp;
357 	/*
358 	 * Save the credential of the process that turned on quotas.
359 	 * Set up the time limits for this quota.
360 	 */
361 	kauth_cred_hold(l->l_cred);
362 	ump->um_cred[type] = l->l_cred;
363 	ump->umq1_btime[type] = MAX_DQ_TIME;
364 	ump->umq1_itime[type] = MAX_IQ_TIME;
365 	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
366 		if (dq->dq_btime > 0)
367 			ump->umq1_btime[type] = dq->dq_btime;
368 		if (dq->dq_itime > 0)
369 			ump->umq1_itime[type] = dq->dq_itime;
370 		dqrele(NULLVP, dq);
371 	}
372 	/*
373 	 * Search vnodes associated with this mount point,
374 	 * adding references to quota file being opened.
375 	 * NB: only need to add dquot's for inodes being modified.
376 	 */
377 	vfs_vnode_iterator_init(mp, &marker);
378 	while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
379 		error = vn_lock(vp, LK_EXCLUSIVE);
380 		if (error) {
381 			vrele(vp);
382 			continue;
383 		}
384 		mutex_enter(vp->v_interlock);
385 		if (VTOI(vp) == NULL || vp->v_type == VNON ||
386 		    vp->v_writecount == 0) {
387 			mutex_exit(vp->v_interlock);
388 			vput(vp);
389 			continue;
390 		}
391 		mutex_exit(vp->v_interlock);
392 		if ((error = getinoquota(VTOI(vp))) != 0) {
393 			vput(vp);
394 			break;
395 		}
396 		vput(vp);
397 	}
398 	vfs_vnode_iterator_destroy(marker);
399 
400 	mutex_enter(&dqlock);
401 	ump->umq1_qflags[type] &= ~QTF_OPENING;
402 	cv_broadcast(&dqcv);
403 	if (error == 0)
404 		ump->um_flags |= UFS_QUOTA;
405 	mutex_exit(&dqlock);
406 	if (error)
407 		quota1_handle_cmd_quotaoff(l, ump, type);
408 	return (error);
409 }
410 
411 /*
412  * turn off disk quotas for a filesystem.
413  */
414 int
415 quota1_handle_cmd_quotaoff(struct lwp *l, struct ufsmount *ump, int type)
416 {
417 	struct mount *mp = ump->um_mountp;
418 	struct vnode *vp;
419 	struct vnode *qvp;
420 	struct vnode_iterator *marker;
421 	struct dquot *dq;
422 	struct inode *ip;
423 	kauth_cred_t cred;
424 	int i, error;
425 
426 	if (type < 0 || type >= MAXQUOTAS)
427 		return EINVAL;
428 
429 	mutex_enter(&dqlock);
430 	while ((ump->umq1_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
431 		cv_wait(&dqcv, &dqlock);
432 	if ((qvp = ump->um_quotas[type]) == NULLVP) {
433 		mutex_exit(&dqlock);
434 		return (0);
435 	}
436 	ump->umq1_qflags[type] |= QTF_CLOSING;
437 	mutex_exit(&dqlock);
438 	/*
439 	 * Search vnodes associated with this mount point,
440 	 * deleting any references to quota file being closed.
441 	 */
442 	vfs_vnode_iterator_init(mp, &marker);
443 	while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
444 		error = vn_lock(vp, LK_EXCLUSIVE);
445 		if (error) {
446 			vrele(vp);
447 			continue;
448 		}
449 		ip = VTOI(vp);
450 		if (ip == NULL || vp->v_type == VNON) {
451 			vput(vp);
452 			continue;
453 		}
454 		dq = ip->i_dquot[type];
455 		ip->i_dquot[type] = NODQUOT;
456 		dqrele(vp, dq);
457 		vput(vp);
458 	}
459 	vfs_vnode_iterator_destroy(marker);
460 #ifdef DIAGNOSTIC
461 	dqflush(qvp);
462 #endif
463 	qvp->v_vflag &= ~VV_SYSTEM;
464 	error = vn_close(qvp, FREAD|FWRITE, l->l_cred);
465 	mutex_enter(&dqlock);
466 	ump->um_quotas[type] = NULLVP;
467 	cred = ump->um_cred[type];
468 	ump->um_cred[type] = NOCRED;
469 	for (i = 0; i < MAXQUOTAS; i++)
470 		if (ump->um_quotas[i] != NULLVP)
471 			break;
472 	ump->umq1_qflags[type] &= ~QTF_CLOSING;
473 	if (i == MAXQUOTAS)
474 		ump->um_flags &= ~UFS_QUOTA;
475 	cv_broadcast(&dqcv);
476 	mutex_exit(&dqlock);
477 	kauth_cred_free(cred);
478 	if (i == MAXQUOTAS)
479 		mp->mnt_flag &= ~MNT_QUOTA;
480 	return (error);
481 }
482 
483 int
484 quota1_handle_cmd_get(struct ufsmount *ump, const struct quotakey *qk,
485     struct quotaval *qv)
486 {
487 	struct dquot *dq;
488 	int error;
489 	struct quotaval blocks, files;
490 	int idtype;
491 	id_t id;
492 
493 	idtype = qk->qk_idtype;
494 	id = qk->qk_id;
495 
496 	if (ump->um_quotas[idtype] == NULLVP)
497 		return ENODEV;
498 
499 	if (id == QUOTA_DEFAULTID) { /* we want the grace period of id 0 */
500 		if ((error = dqget(NULLVP, 0, ump, idtype, &dq)) != 0)
501 			return error;
502 
503 	} else {
504 		if ((error = dqget(NULLVP, id, ump, idtype, &dq)) != 0)
505 			return error;
506 	}
507 	dqblk_to_quotavals(&dq->dq_un.dq1_dqb, &blocks, &files);
508 	dqrele(NULLVP, dq);
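	/*
	 * For the default id the on-disk time fields hold the grace periods
	 * themselves, not expiration times; report them as such.
	 */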
509 	if (id == QUOTA_DEFAULTID) {
510 		if (blocks.qv_expiretime > 0)
511 			blocks.qv_grace = blocks.qv_expiretime;
512 		else
513 			blocks.qv_grace = MAX_DQ_TIME;
514 		if (files.qv_expiretime > 0)
515 			files.qv_grace = files.qv_expiretime;
516 		else
517 			files.qv_grace = MAX_DQ_TIME;
518 	}
519 
520 	switch (qk->qk_objtype) {
521 	case QUOTA_OBJTYPE_BLOCKS:
522 		*qv = blocks;
523 		break;
524 	case QUOTA_OBJTYPE_FILES:
525 		*qv = files;
526 		break;
527 	default:
528 		return EINVAL;
529 	}
530 
531 	return 0;
532 }
533 
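/*
 * Squeeze a 64-bit limit into the 32-bit on-disk quota1 field.
 * QUOTA_NOLIMIT and values too large to fit are encoded as 0, i.e. no limit.
 */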
534 static uint32_t
535 quota1_encode_limit(uint64_t lim)
536 {
537 	if (lim == QUOTA_NOLIMIT || lim >= 0xffffffff) {
538 		return 0;
539 	}
540 	return lim;
541 }
542 
543 int
544 quota1_handle_cmd_put(struct ufsmount *ump, const struct quotakey *key,
545     const struct quotaval *val)
546 {
547 	struct dquot *dq;
548 	struct dqblk dqb;
549 	int error;
550 
551 	switch (key->qk_idtype) {
552 	case QUOTA_IDTYPE_USER:
553 	case QUOTA_IDTYPE_GROUP:
554 		break;
555 	default:
556 		return EINVAL;
557 	}
558 
559 	switch (key->qk_objtype) {
560 	case QUOTA_OBJTYPE_BLOCKS:
561 	case QUOTA_OBJTYPE_FILES:
562 		break;
563 	default:
564 		return EINVAL;
565 	}
566 
567 	if (ump->um_quotas[key->qk_idtype] == NULLVP)
568 		return ENODEV;
569 
570 	if (key->qk_id == QUOTA_DEFAULTID) {
571 		/* just update grace times */
572 		id_t id = 0;
573 
574 		if ((error = dqget(NULLVP, id, ump, key->qk_idtype, &dq)) != 0)
575 			return error;
576 		mutex_enter(&dq->dq_interlock);
577 		if (val->qv_grace != QUOTA_NOTIME) {
578 			if (key->qk_objtype == QUOTA_OBJTYPE_BLOCKS)
579 				ump->umq1_btime[key->qk_idtype] = dq->dq_btime =
580 					val->qv_grace;
581 			if (key->qk_objtype == QUOTA_OBJTYPE_FILES)
582 				ump->umq1_itime[key->qk_idtype] = dq->dq_itime =
583 					val->qv_grace;
584 		}
585 		dq->dq_flags |= DQ_MOD;
586 		mutex_exit(&dq->dq_interlock);
587 		dqrele(NULLVP, dq);
588 		return 0;
589 	}
590 
591 	if ((error = dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq)) != 0)
592 		return (error);
593 	mutex_enter(&dq->dq_interlock);
594 	/*
595 	 * Copy all but the current values.
596 	 * Reset time limit if previously had no soft limit or were
597 	 * under it, but now have a soft limit and are over it.
598 	 */
599 	dqb.dqb_curblocks = dq->dq_curblocks;
600 	dqb.dqb_curinodes = dq->dq_curinodes;
601 	dqb.dqb_btime = dq->dq_btime;
602 	dqb.dqb_itime = dq->dq_itime;
603 	if (key->qk_objtype == QUOTA_OBJTYPE_BLOCKS) {
604 		dqb.dqb_bsoftlimit = quota1_encode_limit(val->qv_softlimit);
605 		dqb.dqb_bhardlimit = quota1_encode_limit(val->qv_hardlimit);
606 		dqb.dqb_isoftlimit = dq->dq_isoftlimit;
607 		dqb.dqb_ihardlimit = dq->dq_ihardlimit;
608 	} else {
609 		KASSERT(key->qk_objtype == QUOTA_OBJTYPE_FILES);
610 		dqb.dqb_bsoftlimit = dq->dq_bsoftlimit;
611 		dqb.dqb_bhardlimit = dq->dq_bhardlimit;
612 		dqb.dqb_isoftlimit = quota1_encode_limit(val->qv_softlimit);
613 		dqb.dqb_ihardlimit = quota1_encode_limit(val->qv_hardlimit);
614 	}
615 	if (dq->dq_id == 0 && val->qv_grace != QUOTA_NOTIME) {
616 		/* also update grace time if available */
617 		if (key->qk_objtype == QUOTA_OBJTYPE_BLOCKS) {
618 			ump->umq1_btime[key->qk_idtype] = dqb.dqb_btime =
619 				val->qv_grace;
620 		}
621 		if (key->qk_objtype == QUOTA_OBJTYPE_FILES) {
622 			ump->umq1_itime[key->qk_idtype] = dqb.dqb_itime =
623 				val->qv_grace;
624 		}
625 	}
626 	if (dqb.dqb_bsoftlimit &&
627 	    dq->dq_curblocks >= dqb.dqb_bsoftlimit &&
628 	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
629 		dqb.dqb_btime = time_second + ump->umq1_btime[key->qk_idtype];
630 	if (dqb.dqb_isoftlimit &&
631 	    dq->dq_curinodes >= dqb.dqb_isoftlimit &&
632 	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
633 		dqb.dqb_itime = time_second + ump->umq1_itime[key->qk_idtype];
634 	dq->dq_un.dq1_dqb = dqb;
635 	if (dq->dq_curblocks < dq->dq_bsoftlimit)
636 		dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
637 	if (dq->dq_curinodes < dq->dq_isoftlimit)
638 		dq->dq_flags &= ~DQ_WARN(QL_FILE);
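	/* An entry with no limits at all only tracks usage; mark it fake. */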
639 	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
640 	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
641 		dq->dq_flags |= DQ_FAKE;
642 	else
643 		dq->dq_flags &= ~DQ_FAKE;
644 	dq->dq_flags |= DQ_MOD;
645 	mutex_exit(&dq->dq_interlock);
646 	dqrele(NULLVP, dq);
647 	return (0);
648 }
649 
650 
651 #if 0
652 /*
653  * Q_SETQUOTA - assign an entire dqblk structure.
654  */
655 int
656 setquota1(struct mount *mp, u_long id, int type, struct dqblk *dqb)
657 {
658 	struct dquot *dq;
659 	struct dquot *ndq;
660 	struct ufsmount *ump = VFSTOUFS(mp);
661 	int error;
662 
663 	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
664 		return (error);
665 	dq = ndq;
666 	mutex_enter(&dq->dq_interlock);
667 	/*
668 	 * Copy all but the current values.
669 	 * Reset time limit if previously had no soft limit or were
670 	 * under it, but now have a soft limit and are over it.
671 	 */
672 	dqb->dqb_curblocks = dq->dq_curblocks;
673 	dqb->dqb_curinodes = dq->dq_curinodes;
674 	if (dq->dq_id != 0) {
675 		dqb->dqb_btime = dq->dq_btime;
676 		dqb->dqb_itime = dq->dq_itime;
677 	}
678 	if (dqb->dqb_bsoftlimit &&
679 	    dq->dq_curblocks >= dqb->dqb_bsoftlimit &&
680 	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
681 		dqb->dqb_btime = time_second + ump->umq1_btime[type];
682 	if (dqb->dqb_isoftlimit &&
683 	    dq->dq_curinodes >= dqb->dqb_isoftlimit &&
684 	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
685 		dqb->dqb_itime = time_second + ump->umq1_itime[type];
686 	dq->dq_un.dq1_dqb = *dqb;
687 	if (dq->dq_curblocks < dq->dq_bsoftlimit)
688 		dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
689 	if (dq->dq_curinodes < dq->dq_isoftlimit)
690 		dq->dq_flags &= ~DQ_WARN(QL_FILE);
691 	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
692 	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
693 		dq->dq_flags |= DQ_FAKE;
694 	else
695 		dq->dq_flags &= ~DQ_FAKE;
696 	dq->dq_flags |= DQ_MOD;
697 	mutex_exit(&dq->dq_interlock);
698 	dqrele(NULLVP, dq);
699 	return (0);
700 }
701 
702 /*
703  * Q_SETUSE - set current inode and block usage.
704  */
705 int
706 setuse(struct mount *mp, u_long id, int type, void *addr)
707 {
708 	struct dquot *dq;
709 	struct ufsmount *ump = VFSTOUFS(mp);
710 	struct dquot *ndq;
711 	struct dqblk usage;
712 	int error;
713 
714 	error = copyin(addr, (void *)&usage, sizeof (struct dqblk));
715 	if (error)
716 		return (error);
717 	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
718 		return (error);
719 	dq = ndq;
720 	mutex_enter(&dq->dq_interlock);
721 	/*
722 	 * Reset time limit if have a soft limit and were
723 	 * previously under it, but are now over it.
724 	 */
725 	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
726 	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
727 		dq->dq_btime = time_second + ump->umq1_btime[type];
728 	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
729 	    usage.dqb_curinodes >= dq->dq_isoftlimit)
730 		dq->dq_itime = time_second + ump->umq1_itime[type];
731 	dq->dq_curblocks = usage.dqb_curblocks;
732 	dq->dq_curinodes = usage.dqb_curinodes;
733 	if (dq->dq_curblocks < dq->dq_bsoftlimit)
734 		dq->dq_flags &= ~DQ_WARN(QL_BLOCK);
735 	if (dq->dq_curinodes < dq->dq_isoftlimit)
736 		dq->dq_flags &= ~DQ_WARN(QL_FILE);
737 	dq->dq_flags |= DQ_MOD;
738 	mutex_exit(&dq->dq_interlock);
739 	dqrele(NULLVP, dq);
740 	return (0);
741 }
742 #endif
743 
744 /*
745  * Q_SYNC - sync quota files to disk.
746  */
747 int
748 q1sync(struct mount *mp)
749 {
750 	struct ufsmount *ump = VFSTOUFS(mp);
751 	struct vnode *vp;
752 	struct vnode_iterator *marker;
753 	struct dquot *dq;
754 	int i, error;
755 
756 	/*
757 	 * Check if the mount point has any quotas.
758 	 * If not, simply return.
759 	 */
760 	for (i = 0; i < MAXQUOTAS; i++)
761 		if (ump->um_quotas[i] != NULLVP)
762 			break;
763 	if (i == MAXQUOTAS)
764 		return (0);
765 
766 	/*
767 	 * Search vnodes associated with this mount point,
768 	 * synchronizing any modified dquot structures.
769 	 */
770 	vfs_vnode_iterator_init(mp, &marker);
771 	while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
772 		error = vn_lock(vp, LK_EXCLUSIVE);
773 		if (error) {
774 			vrele(vp);
775 			continue;
776 		}
777 		if (VTOI(vp) == NULL || vp->v_type == VNON) {
778 			vput(vp);
779 			continue;
780 		}
781 		for (i = 0; i < MAXQUOTAS; i++) {
782 			dq = VTOI(vp)->i_dquot[i];
783 			if (dq == NODQUOT)
784 				continue;
785 			mutex_enter(&dq->dq_interlock);
786 			if (dq->dq_flags & DQ_MOD)
787 				dq1sync(vp, dq);
788 			mutex_exit(&dq->dq_interlock);
789 		}
790 		vput(vp);
791 	}
792 	vfs_vnode_iterator_destroy(marker);
793 	return (0);
794 }
795 
796 /*
797  * Obtain a dquot structure for the specified identifier and quota file
798  * reading the information from the file if necessary.
799  */
800 int
801 dq1get(struct vnode *dqvp, u_long id, struct ufsmount *ump, int type,
802     struct dquot *dq)
803 {
804 	struct iovec aiov;
805 	struct uio auio;
806 	int error;
807 
808 	KASSERT(mutex_owned(&dq->dq_interlock));
809 	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
810 	auio.uio_iov = &aiov;
811 	auio.uio_iovcnt = 1;
812 	aiov.iov_base = (void *)&dq->dq_un.dq1_dqb;
813 	aiov.iov_len = sizeof (struct dqblk);
814 	auio.uio_resid = sizeof (struct dqblk);
815 	auio.uio_offset = (off_t)id * sizeof (struct dqblk);
816 	auio.uio_rw = UIO_READ;
817 	UIO_SETUP_SYSSPACE(&auio);
818 	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
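	/*
	 * A read entirely beyond the end of the quota file returns no data;
	 * treat such an id as an all-zero (unused) entry.
	 */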
819 	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
820 		memset((void *)&dq->dq_un.dq1_dqb, 0, sizeof(struct dqblk));
821 	VOP_UNLOCK(dqvp);
822 	/*
823 	 * I/O error in reading quota file, release
824 	 * quota structure and reflect problem to caller.
825 	 */
826 	if (error)
827 		return (error);
828 	/*
829 	 * Check for no limit to enforce.
830 	 * Initialize time values if necessary.
831 	 */
832 	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
833 	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
834 		dq->dq_flags |= DQ_FAKE;
835 	if (dq->dq_id != 0) {
836 		if (dq->dq_btime == 0)
837 			dq->dq_btime = time_second + ump->umq1_btime[type];
838 		if (dq->dq_itime == 0)
839 			dq->dq_itime = time_second + ump->umq1_itime[type];
840 	}
841 	return (0);
842 }
843 
844 /*
845  * Update the disk quota in the quota file.
846  */
847 int
848 dq1sync(struct vnode *vp, struct dquot *dq)
849 {
850 	struct vnode *dqvp;
851 	struct iovec aiov;
852 	struct uio auio;
853 	int error;
854 
855 	if (dq == NODQUOT)
856 		panic("dq1sync: dquot");
857 	KASSERT(mutex_owned(&dq->dq_interlock));
858 	if ((dq->dq_flags & DQ_MOD) == 0)
859 		return (0);
860 	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
861 		panic("dq1sync: file");
862 	KASSERT(dqvp != vp);
863 	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
864 	auio.uio_iov = &aiov;
865 	auio.uio_iovcnt = 1;
866 	aiov.iov_base = (void *)&dq->dq_un.dq1_dqb;
867 	aiov.iov_len = sizeof (struct dqblk);
868 	auio.uio_resid = sizeof (struct dqblk);
869 	auio.uio_offset = (off_t)dq->dq_id * sizeof (struct dqblk);
870 	auio.uio_rw = UIO_WRITE;
871 	UIO_SETUP_SYSSPACE(&auio);
872 	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
873 	if (auio.uio_resid && error == 0)
874 		error = EIO;
875 	dq->dq_flags &= ~DQ_MOD;
876 	VOP_UNLOCK(dqvp);
877 	return (error);
878 }
879