/*	$OpenBSD: nfs_kq.c,v 1.15 2009/01/19 23:40:36 thib Exp $ */
/*	$NetBSD: nfs_kq.c,v 1.7 2003/10/30 01:43:10 simonb Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jaromir Dolecek.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

void	nfs_kqpoll(void *);

void	filt_nfsdetach(struct knote *);
int	filt_nfsread(struct knote *, long);
int	filt_nfsvnode(struct knote *, long);

struct kevq {
	SLIST_ENTRY(kevq) kev_link;
	struct vnode	*vp;
	u_int		usecount;
	u_int		flags;
#define KEVQ_BUSY	0x01	/* currently being processed */
#define KEVQ_WANT	0x02	/* want to change this entry */
	struct timespec	omtime;	/* old modification time */
	struct timespec	octime;	/* old change time */
	nlink_t		onlink;	/* old number of references to file */
};
SLIST_HEAD(kevqlist, kevq);

struct rwlock nfskevq_lock = RWLOCK_INITIALIZER("nfskqlk");
struct proc *pnfskq;
struct kevqlist kevlist = SLIST_HEAD_INITIALIZER(kevlist);

/*
 * This simplistic routine checks all watched files for server-side
 * changes every NFS_MINATTRTIMO/2 seconds.  Only changes in size,
 * modification time, change time and nlink are checked; everything
 * else is ignored.
 * The routine only calls VOP_GETATTR() when it's likely to return
 * new data, i.e. when the vnode has expired from the attrcache.  This
 * should give the same result as periodically running stat(2) from
 * userland, while keeping CPU/network usage low and still providing
 * proper kevent semantics.
 * The poller thread is created when the first vnode is added to the
 * watch list, and exits when the watch list becomes empty.  Neither
 * the overhead of thread creation nor the speed of knote attach and
 * detach really matters.  An illustrative userland usage sketch
 * appears at the end of this file.
 */
/* ARGSUSED */
void
nfs_kqpoll(void *arg)
{
	struct kevq *ke;
	struct vattr attr;
	struct proc *p = pnfskq;
	u_quad_t osize;
	int error;

	for (;;) {
		rw_enter_write(&nfskevq_lock);
		SLIST_FOREACH(ke, &kevlist, kev_link) {
			struct nfsnode *np = VTONFS(ke->vp);

#ifdef DEBUG
			printf("nfs_kqpoll on: ");
			VOP_PRINT(ke->vp);
#endif
			/* skip if still in attrcache */
			if (nfs_getattrcache(ke->vp, &attr) != ENOENT)
				continue;

			/*
			 * Mark entry busy, release lock and check
			 * for changes.
			 */
			ke->flags |= KEVQ_BUSY;
			rw_exit_write(&nfskevq_lock);

			/* save v_size, nfs_getattr() updates it */
			osize = np->n_size;

			error = VOP_GETATTR(ke->vp, &attr, p->p_ucred, p);
			if (error == ESTALE) {
				NFS_INVALIDATE_ATTRCACHE(np);
				VN_KNOTE(ke->vp, NOTE_DELETE);
				goto next;
			}

			/*
			 * The following is a bit fragile, but it's about
			 * the best we can do.
			 */
			if (attr.va_size != osize) {
				int flags = NOTE_WRITE;

				if (attr.va_size > osize)
					flags |= NOTE_EXTEND;
				else
					flags |= NOTE_TRUNCATE;

				VN_KNOTE(ke->vp, flags);
				ke->omtime = attr.va_mtime;
			} else if (attr.va_mtime.tv_sec != ke->omtime.tv_sec
			    || attr.va_mtime.tv_nsec != ke->omtime.tv_nsec) {
				VN_KNOTE(ke->vp, NOTE_WRITE);
				ke->omtime = attr.va_mtime;
			}

			if (attr.va_ctime.tv_sec != ke->octime.tv_sec
			    || attr.va_ctime.tv_nsec != ke->octime.tv_nsec) {
				VN_KNOTE(ke->vp, NOTE_ATTRIB);
				ke->octime = attr.va_ctime;
			}

			if (attr.va_nlink != ke->onlink) {
				VN_KNOTE(ke->vp, NOTE_LINK);
				ke->onlink = attr.va_nlink;
			}

next:
			rw_enter_write(&nfskevq_lock);
			ke->flags &= ~KEVQ_BUSY;
			if (ke->flags & KEVQ_WANT) {
				ke->flags &= ~KEVQ_WANT;
				wakeup(ke);
			}
		}

		if (SLIST_EMPTY(&kevlist)) {
			/* Nothing more to watch, exit */
			pnfskq = NULL;
			rw_exit_write(&nfskevq_lock);
			kthread_exit(0);
		}
		rw_exit_write(&nfskevq_lock);

		/* wait a while before checking for changes again */
		tsleep(pnfskq, PSOCK, "nfskqpw", NFS_MINATTRTIMO * hz / 2);
	}
}

void
filt_nfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	struct kevq *ke;

	SLIST_REMOVE(&vp->v_selectinfo.si_note, kn, knote, kn_selnext);

	/* Remove the vnode from the watch list */
	rw_enter_write(&nfskevq_lock);
	SLIST_FOREACH(ke, &kevlist, kev_link) {
		if (ke->vp == vp) {
			while (ke->flags & KEVQ_BUSY) {
				ke->flags |= KEVQ_WANT;
				rw_exit_write(&nfskevq_lock);
				(void) tsleep(ke, PSOCK, "nfskqdet", 0);
				rw_enter_write(&nfskevq_lock);
			}

			if (ke->usecount > 1) {
				/* keep, other kevents need this */
				ke->usecount--;
			} else {
				/* last user, g/c */
				SLIST_REMOVE(&kevlist, ke, kevq, kev_link);
				free(ke, M_KEVENT);
			}
			break;
		}
	}
	rw_exit_write(&nfskevq_lock);
}

int
filt_nfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	struct nfsnode *np = VTONFS(vp);

	/*
	 * The filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	kn->kn_data = np->n_size - kn->kn_fp->f_offset;
#ifdef DEBUG
	printf("nfsread event. %lld\n", (long long)kn->kn_data);
#endif
	return (kn->kn_data != 0);
}

int
filt_nfsvnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_fflags != 0);
}

static const struct filterops nfsread_filtops =
	{ 1, NULL, filt_nfsdetach, filt_nfsread };
static const struct filterops nfsvnode_filtops =
	{ 1, NULL, filt_nfsdetach, filt_nfsvnode };

int
nfs_kqfilter(void *v)
{
	struct vop_kqfilter_args *ap = v;
	struct vnode *vp;
	struct knote *kn;
	struct kevq *ke;
	int error = 0;
	struct vattr attr;
	struct proc *p = curproc;	/* XXX */

	vp = ap->a_vp;
	kn = ap->a_kn;

#ifdef DEBUG
	printf("nfs_kqfilter(%d) on: ", kn->kn_filter);
	VOP_PRINT(vp);
#endif

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &nfsread_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &nfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = vp;

	/*
	 * Put the vnode on the watched list.
	 */

	/*
	 * Fetch the current attributes.  They are only needed when the
	 * vnode is not yet watched, but we have to do this without the
	 * lock held.  This is likely cheap due to the attrcache, so do
	 * it now.
	 */
	memset(&attr, 0, sizeof(attr));
	(void) VOP_GETATTR(vp, &attr, p->p_ucred, p);

	rw_enter_write(&nfskevq_lock);

	/* ensure the poller is running */
	if (!pnfskq) {
		error = kthread_create(nfs_kqpoll, NULL, &pnfskq,
		    "nfskqpoll");
		if (error)
			goto out;
	}

	SLIST_FOREACH(ke, &kevlist, kev_link)
		if (ke->vp == vp)
			break;

	if (ke) {
		/* already watched, so just bump usecount */
		ke->usecount++;
	} else {
		/* need a new one */
		ke = malloc(sizeof(struct kevq), M_KEVENT, M_WAITOK);
		ke->vp = vp;
		ke->usecount = 1;
		ke->flags = 0;
		ke->omtime = attr.va_mtime;
		ke->octime = attr.va_ctime;
		ke->onlink = attr.va_nlink;
		SLIST_INSERT_HEAD(&kevlist, ke, kev_link);
	}

	/* kick the poller */
	wakeup(pnfskq);

	SLIST_INSERT_HEAD(&vp->v_selectinfo.si_note, kn, kn_selnext);

out:
	rw_exit_write(&nfskevq_lock);
	return (error);
}
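
/*
 * Illustrative userland sketch (not compiled into the kernel): it shows
 * the kevent semantics the poller above emulates for files on an NFS
 * mount.  A process registers an EVFILT_VNODE filter on an open file
 * descriptor and then waits for NOTE_WRITE/NOTE_ATTRIB/NOTE_LINK/
 * NOTE_DELETE notifications, which this file delivers via VN_KNOTE()
 * from nfs_kqpoll().  The path used below is a hypothetical placeholder,
 * not something this code assumes.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int fd, kq;

	/* hypothetical path on an NFS mount */
	fd = open("/mnt/nfs/file", O_RDONLY);
	if (fd == -1)
		err(1, "open");
	if ((kq = kqueue()) == -1)
		err(1, "kqueue");

	/* watch the file for writes, attribute, link and delete changes */
	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	    NOTE_WRITE | NOTE_ATTRIB | NOTE_LINK | NOTE_DELETE, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: register");

	/* block until the NFS poller reports a change, then print it */
	for (;;) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
			err(1, "kevent: wait");
		printf("fflags 0x%x\n", (unsigned int)kev.fflags);
	}
}
#endif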