/*	$NetBSD: nfs_clnfsiod.c,v 1.2 2016/12/13 22:17:33 pgoyette Exp $	*/
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from nfs_syscalls.c	8.5 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
/* __FBSDID("FreeBSD: head/sys/fs/nfsclient/nfs_clnfsiod.c 249630 2013-04-18 23:20:16Z rmacklem "); */
__RCSID("$NetBSD: nfs_clnfsiod.c,v 1.2 2016/12/13 22:17:33 pgoyette Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/namei.h>
#include <sys/unistd.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <fs/nfs/common/nfsport.h>
#include <fs/nfs/client/nfsmount.h>
#include <fs/nfs/client/nfs.h>
#include <fs/nfs/client/nfsnode.h>

extern struct mtx ncl_iod_mutex;
extern struct task ncl_nfsiodnew_task;

int ncl_numasync;
enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];

static void nfssvc_iod(void *);

static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];

SYSCTL_DECL(_vfs_nfs);

/* Maximum number of seconds an nfsiod kthread will sleep before exiting */
static unsigned int nfs_iodmaxidle = 120;
SYSCTL_UINT(_vfs_nfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &nfs_iodmaxidle, 0,
    "Max number of seconds an nfsiod kthread will sleep before exiting");

/* Maximum number of nfsiod kthreads */
unsigned int ncl_iodmax = 20;

/* Minimum number of nfsiod kthreads to keep as spares */
static unsigned int nfs_iodmin = 0;

static int nfs_nfsiodnew_sync(void);

/*
 * Sysctl handler for vfs.nfs.iodmin: raising the minimum spawns enough
 * nfsiods to reach the new floor.
 */
static int
sysctl_iodmin(SYSCTL_HANDLER_ARGS)
{
    int error, i;
    int newmin;

    newmin = nfs_iodmin;
    error = sysctl_handle_int(oidp, &newmin, 0, req);
    if (error || (req->newptr == NULL))
        return (error);
    mtx_lock(&ncl_iod_mutex);
    if (newmin > ncl_iodmax) {
        error = EINVAL;
        goto out;
    }
    nfs_iodmin = newmin;
    if (ncl_numasync >= nfs_iodmin)
        goto out;
    /*
     * If the current number of nfsiods is lower
     * than the new minimum, create some more.
     */
    for (i = nfs_iodmin - ncl_numasync; i > 0; i--)
        nfs_nfsiodnew_sync();
out:
    mtx_unlock(&ncl_iod_mutex);
    return (error);
}
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0,
    sizeof (nfs_iodmin), sysctl_iodmin, "IU",
    "Min number of nfsiod kthreads to keep as spares");

/*
 * Sysctl handler for vfs.nfs.iodmax: lowering the maximum wakes any surplus
 * idle nfsiods so that they can exit.
 */
static int
sysctl_iodmax(SYSCTL_HANDLER_ARGS)
{
    int error, i;
    int iod, newmax;

    newmax = ncl_iodmax;
    error = sysctl_handle_int(oidp, &newmax, 0, req);
    if (error || (req->newptr == NULL))
        return (error);
    if (newmax > NFS_MAXASYNCDAEMON)
        return (EINVAL);
    mtx_lock(&ncl_iod_mutex);
    ncl_iodmax = newmax;
    if (ncl_numasync <= ncl_iodmax)
        goto out;
    /*
     * If there are some asleep nfsiods that should
     * exit, wake them up so that they check ncl_iodmax
     * and exit.  Those that are active will exit as
     * soon as they finish I/O.
     */
    iod = ncl_numasync - 1;
    for (i = 0; i < ncl_numasync - ncl_iodmax; i++) {
        if (ncl_iodwant[iod] == NFSIOD_AVAILABLE)
            wakeup(&ncl_iodwant[iod]);
        iod--;
    }
out:
    mtx_unlock(&ncl_iod_mutex);
    return (0);
}
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0,
    sizeof (ncl_iodmax), sysctl_iodmax, "IU",
    "Max number of nfsiod kthreads");

/*
 * Create one new nfsiod kthread if a slot is free.  Called with
 * ncl_iod_mutex held; the mutex is dropped across kproc_create() and
 * then reacquired.
 */
static int
nfs_nfsiodnew_sync(void)
{
    int error, i;

    mtx_assert(&ncl_iod_mutex, MA_OWNED);
    for (i = 0; i < ncl_iodmax; i++) {
        if (nfs_asyncdaemon[i] == 0) {
            nfs_asyncdaemon[i] = 1;
            break;
        }
    }
    if (i == ncl_iodmax)
        return (0);
    mtx_unlock(&ncl_iod_mutex);
    error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL,
        RFHIGHPID, 0, "newnfs %d", i);
    mtx_lock(&ncl_iod_mutex);
    if (error == 0) {
        ncl_numasync++;
        ncl_iodwant[i] = NFSIOD_AVAILABLE;
    } else
        nfs_asyncdaemon[i] = 0;
    return (error);
}

/* Taskqueue callback: create the nfsiods requested via ncl_nfsiodnew(). */
void
ncl_nfsiodnew_tq(__unused void *arg, int pending)
{

    mtx_lock(&ncl_iod_mutex);
    while (pending > 0) {
        pending--;
        nfs_nfsiodnew_sync();
    }
    mtx_unlock(&ncl_iod_mutex);
}

/*
 * Request a new nfsiod.  The caller holds ncl_iod_mutex, so the actual
 * thread creation is deferred to a taskqueue thread.
 */
void
ncl_nfsiodnew(void)
{

    mtx_assert(&ncl_iod_mutex, MA_OWNED);
    taskqueue_enqueue(taskqueue_thread, &ncl_nfsiodnew_task);
}

/* Boot-time setup: honour the vfs.nfs.iodmin tunable and start the initial nfsiods. */
static void
nfsiod_setup(void *dummy)
{
    int error;

    TUNABLE_INT_FETCH("vfs.nfs.iodmin", &nfs_iodmin);
    nfscl_init();
    mtx_lock(&ncl_iod_mutex);
    /* Silently limit the start number of nfsiods */
    if (nfs_iodmin > NFS_MAXASYNCDAEMON)
        nfs_iodmin = NFS_MAXASYNCDAEMON;

    while (ncl_numasync < nfs_iodmin) {
        error = nfs_nfsiodnew_sync();
        if (error == -1)
            panic("nfsiod_setup: nfs_nfsiodnew failed");
    }
    mtx_unlock(&ncl_iod_mutex);
}
SYSINIT(newnfsiod, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, nfsiod_setup, NULL);

static int nfs_defect = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0,
    "Allow nfsiods to migrate serving different mounts");

/*
 * Asynchronous I/O daemons for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Returns if we hit the timeout defined by the iodmaxidle sysctl.
 */
static void
nfssvc_iod(void *instance)
{
    struct buf *bp;
    struct nfsmount *nmp;
    int myiod, timo;
    int error = 0;

    mtx_lock(&ncl_iod_mutex);
    myiod = (int *)instance - nfs_asyncdaemon;
    /*
     * Main loop
     */
    for (;;) {
        while (((nmp = ncl_iodmount[myiod]) == NULL)
            || !TAILQ_FIRST(&nmp->nm_bufq)) {
            if (myiod >= ncl_iodmax)
                goto finish;
            if (nmp)
                nmp->nm_bufqiods--;
            if (ncl_iodwant[myiod] == NFSIOD_NOT_AVAILABLE)
                ncl_iodwant[myiod] = NFSIOD_AVAILABLE;
            ncl_iodmount[myiod] = NULL;
            /*
             * Always keep at least nfs_iodmin kthreads.
             */
            timo = (myiod < nfs_iodmin) ?
                0 : nfs_iodmaxidle * hz;
            error = msleep(&ncl_iodwant[myiod], &ncl_iod_mutex,
                PWAIT | PCATCH, "-", timo);
            if (error) {
                nmp = ncl_iodmount[myiod];
                /*
                 * Rechecking the nm_bufq closes a rare race where the
                 * nfsiod is woken up at the exact time the idle timeout
                 * fires.
                 */
                if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
                    error = 0;
                break;
            }
        }
        if (error)
            break;
        while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
            /* Take one off the front of the list */
            TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
            nmp->nm_bufqlen--;
            if (nmp->nm_bufqwant && nmp->nm_bufqlen <= ncl_numasync) {
                nmp->nm_bufqwant = 0;
                wakeup(&nmp->nm_bufq);
            }
            mtx_unlock(&ncl_iod_mutex);
            if (bp->b_flags & B_DIRECT) {
                KASSERT((bp->b_iocmd == BIO_WRITE),
                    ("nfssvc_iod: BIO_WRITE not set"));
                (void)ncl_doio_directwrite(bp);
            } else {
                if (bp->b_iocmd == BIO_READ)
                    (void) ncl_doio(bp->b_vp, bp, bp->b_rcred,
                        NULL, 0);
                else
                    (void) ncl_doio(bp->b_vp, bp, bp->b_wcred,
                        NULL, 0);
            }
            mtx_lock(&ncl_iod_mutex);
            /*
             * Make sure the nmp hasn't been dismounted as soon as
             * ncl_doio() completes for the last buffer.
             */
            nmp = ncl_iodmount[myiod];
            if (nmp == NULL)
                break;

            /*
             * If there is more than one iod on this mount, then defect
             * so that the iods can be shared out fairly between the
             * mounts.
             */
            if (nfs_defect && nmp->nm_bufqiods > 1) {
                NFS_DPF(ASYNCIO,
                    ("nfssvc_iod: iod %d defecting from mount %p\n",
                    myiod, nmp));
                ncl_iodmount[myiod] = NULL;
                nmp->nm_bufqiods--;
                break;
            }
        }
    }
finish:
    nfs_asyncdaemon[myiod] = 0;
    if (nmp)
        nmp->nm_bufqiods--;
    ncl_iodwant[myiod] = NFSIOD_NOT_AVAILABLE;
    ncl_iodmount[myiod] = NULL;
    /* Someone may be waiting for the last nfsiod to terminate. */
    if (--ncl_numasync == 0)
        wakeup(&ncl_numasync);
    mtx_unlock(&ncl_iod_mutex);
    if ((error == 0) || (error == EWOULDBLOCK))
        kproc_exit(0);
    /* Abnormal termination */
    kproc_exit(1);
}
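
/*
 * Illustrative sketch (not part of this file): the producer side that the
 * consumer loop in nfssvc_iod() expects.  This is a minimal, assumption-laden
 * outline, not the real ncl_asyncio() code; the fields and wakeup channel
 * (nm_bufq, nm_bufqlen, nm_bufqiods, ncl_iodwant, ncl_iodmount) are inferred
 * from how they are consumed above.
 *
 *	mtx_lock(&ncl_iod_mutex);
 *	// pick an idle daemon i whose ncl_iodwant[i] == NFSIOD_AVAILABLE,
 *	// or request another one with ncl_nfsiodnew() if none is free
 *	ncl_iodmount[i] = nmp;
 *	nmp->nm_bufqiods++;
 *	TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
 *	nmp->nm_bufqlen++;
 *	ncl_iodwant[i] = NFSIOD_NOT_AVAILABLE;
 *	wakeup(&ncl_iodwant[i]);
 *	mtx_unlock(&ncl_iod_mutex);
 */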