/*	$NetBSD: nfs_lock.c,v 1.1.1.1 2013/09/30 07:19:33 dholland Exp $	*/
/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI nfs_lock.c,v 2.4 1998/12/14 23:49:56 jch Exp
 */

#include <sys/cdefs.h>
/* __FBSDID("FreeBSD: head/sys/nfs/nfs_lock.c 227293 2011-11-07 06:44:47Z ed "); */
__RCSID("$NetBSD: nfs_lock.c,v 1.1.1.1 2013/09/30 07:19:33 dholland Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>		/* for hz */
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/lockf.h>		/* Must come after sys/malloc.h */
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/unistd.h>
#include <sys/vnode.h>

#include <net/if.h>

#include <nfs/nfsproto.h>
#include <nfs/nfs_lock.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>
#include <nfsclient/nlminfo.h>

extern void (*nlminfo_release_p)(struct proc *p);

vop_advlock_t	*nfs_advlock_p = nfs_dolock;
vop_reclaim_t	*nfs_reclaim_p = NULL;

static MALLOC_DEFINE(M_NFSLOCK, "nfsclient_lock", "NFS lock request");
static MALLOC_DEFINE(M_NLMINFO, "nfsclient_nlminfo",
    "NFS lock process structure");

static int	nfslockdans(struct thread *td, struct lockd_ans *ansp);
static void	nlminfo_release(struct proc *p);

/*
 * --------------------------------------------------------------------
 * A miniature device driver which the userland uses to talk to us.
 */
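/*
 * For orientation, a minimal sketch of how a userland lock daemon such as
 * rpc.lockd is expected to drive this device.  This is illustrative only
 * and not part of this file; the record layouts are assumed to be the
 * LOCKD_MSG and struct lockd_ans definitions exported via <nfs/nfs_lock.h>:
 *
 *	int fd = open(_PATH_NFSLCKDEV, O_RDWR);
 *	LOCKD_MSG msg;
 *	struct lockd_ans ans;
 *
 *	for (;;) {
 *		read(fd, &msg, sizeof msg);	   blocks until a request queues
 *		... forward the request to the NLM server, collect result ...
 *		ans.la_vers = LOCKD_ANS_VERSION;
 *		ans.la_msg_ident.pid = msg.lm_msg_ident.pid;
 *		ans.la_msg_ident.pid_start = msg.lm_msg_ident.pid_start;
 *		ans.la_msg_ident.msg_seq = msg.lm_msg_ident.msg_seq;
 *		ans.la_errno = result;
 *		write(fd, &ans, sizeof ans);	   wakes the waiting process
 *	}
 *
 * nfslock_read() below insists that each read() consume exactly one queued
 * request, and nfslock_write() that each write() deliver exactly one
 * struct lockd_ans.
 */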
static struct cdev *nfslock_dev;
static struct mtx nfslock_mtx;
static int nfslock_isopen;
static TAILQ_HEAD(,__lock_msg) nfslock_list;

static int
nfslock_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	int error;

	error = priv_check(td, PRIV_NFS_LOCKD);
	if (error)
		return (error);

	mtx_lock(&nfslock_mtx);
	if (!nfslock_isopen) {
		error = 0;
		nfslock_isopen = 1;
	} else {
		error = EOPNOTSUPP;
	}
	mtx_unlock(&nfslock_mtx);

	return (error);
}

static int
nfslock_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct __lock_msg *lm;

	mtx_lock(&nfslock_mtx);
	nfslock_isopen = 0;
	while (!TAILQ_EMPTY(&nfslock_list)) {
		lm = TAILQ_FIRST(&nfslock_list);
		/* XXX: answer request */
		TAILQ_REMOVE(&nfslock_list, lm, lm_link);
		free(lm, M_NFSLOCK);
	}
	mtx_unlock(&nfslock_mtx);
	return (0);
}

static int
nfslock_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int error;
	struct __lock_msg *lm;

	if (uio->uio_resid != sizeof *lm)
		return (EOPNOTSUPP);
	lm = NULL;
	error = 0;
	mtx_lock(&nfslock_mtx);
	while (TAILQ_EMPTY(&nfslock_list)) {
		error = msleep(&nfslock_list, &nfslock_mtx, PSOCK | PCATCH,
		    "nfslockd", 0);
		if (error)
			break;
	}
	if (!error) {
		lm = TAILQ_FIRST(&nfslock_list);
		TAILQ_REMOVE(&nfslock_list, lm, lm_link);
	}
	mtx_unlock(&nfslock_mtx);
	if (!error) {
		error = uiomove(lm, sizeof *lm, uio);
		free(lm, M_NFSLOCK);
	}
	return (error);
}

static int
nfslock_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct lockd_ans la;
	int error;

	if (uio->uio_resid != sizeof la)
		return (EOPNOTSUPP);
	error = uiomove(&la, sizeof la, uio);
	if (!error)
		error = nfslockdans(curthread, &la);
	return (error);
}

static int
nfslock_send(struct __lock_msg *lm)
{
	struct __lock_msg *lm2;
	int error;

	error = 0;
	lm2 = malloc(sizeof *lm2, M_NFSLOCK, M_WAITOK);
	mtx_lock(&nfslock_mtx);
	if (nfslock_isopen) {
		memcpy(lm2, lm, sizeof *lm2);
		TAILQ_INSERT_TAIL(&nfslock_list, lm2, lm_link);
		wakeup(&nfslock_list);
	} else {
		error = EOPNOTSUPP;
	}
	mtx_unlock(&nfslock_mtx);
	if (error)
		free(lm2, M_NFSLOCK);
	return (error);
}

static struct cdevsw nfslock_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	nfslock_open,
	.d_close =	nfslock_close,
	.d_read =	nfslock_read,
	.d_write =	nfslock_write,
	.d_name =	"nfslock"
};

static int
nfslock_modevent(module_t mod __unused, int type, void *data __unused)
{

	switch (type) {
	case MOD_LOAD:
		if (bootverbose)
			printf("nfslock: pseudo-device\n");
		mtx_init(&nfslock_mtx, "nfslock", NULL, MTX_DEF);
		TAILQ_INIT(&nfslock_list);
		nlminfo_release_p = nlminfo_release;
		nfslock_dev = make_dev(&nfslock_cdevsw, 0,
		    UID_ROOT, GID_KMEM, 0600, _PATH_NFSLCKDEV);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

DEV_MODULE(nfslock, nfslock_modevent, NULL);
MODULE_VERSION(nfslock, 1);

/*
 * XXX
 * We have to let the process know whether the call succeeded.  We use an
 * extra field in the p_nlminfo structure hung off the proc, since that
 * structure already exists for lockd state.
 */
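/*
 * For reference (an illustrative sketch, not code in this file): the
 * request path begins with an ordinary POSIX byte-range lock taken by an
 * application on an NFS-mounted file, e.g.
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *	    .l_start = 0, .l_len = 0 };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * which the VFS turns into a VOP_ADVLOCK() call on the NFS vnode and thus
 * into nfs_dolock() below.  The answer travels back through nfslock_write()
 * and nfslockdans(), which store the result in p->p_nlminfo and wake the
 * sleeping process.
 */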
/*
 * nfs_advlock --
 *	NFS advisory byte-level locks.
 *
 * The vnode must be (shared) locked on entry; it is unconditionally
 * unlocked on return.
 */
int
nfs_dolock(struct vop_advlock_args *ap)
{
	LOCKD_MSG msg;
	struct thread *td;
	struct vnode *vp;
	int error;
	struct flock *fl;
	struct proc *p;
	struct nfsmount *nmp;

	td = curthread;
	p = td->td_proc;

	vp = ap->a_vp;
	fl = ap->a_fl;
	nmp = VFSTONFS(vp->v_mount);

	ASSERT_VOP_LOCKED(vp, "nfs_dolock");

	nmp->nm_getinfo(vp, msg.lm_fh, &msg.lm_fh_len, &msg.lm_addr,
	    &msg.lm_nfsv3, NULL, NULL);
	VOP_UNLOCK(vp, 0);

	/*
	 * The NLM protocol doesn't allow the server to return an error
	 * for bad ranges, so we validate them here.
	 */
	if (fl->l_whence != SEEK_END) {
		if ((fl->l_whence != SEEK_CUR && fl->l_whence != SEEK_SET) ||
		    fl->l_start < 0 ||
		    (fl->l_len < 0 &&
		     (fl->l_start == 0 || fl->l_start + fl->l_len < 0)))
			return (EINVAL);
		if (fl->l_len > 0 &&
		    (fl->l_len - 1 > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
	}

	/*
	 * Fill in the information structure.
	 */
	msg.lm_version = LOCKD_MSG_VERSION;
	msg.lm_msg_ident.pid = p->p_pid;

	mtx_lock(&Giant);
	/*
	 * If there is no nfsowner table yet, allocate one.
	 */
	if (p->p_nlminfo == NULL) {
		p->p_nlminfo = malloc(sizeof(struct nlminfo),
		    M_NLMINFO, M_WAITOK | M_ZERO);
		p->p_nlminfo->pid_start = p->p_stats->p_start;
		timevaladd(&p->p_nlminfo->pid_start, &boottime);
	}
	msg.lm_msg_ident.pid_start = p->p_nlminfo->pid_start;
	msg.lm_msg_ident.msg_seq = ++(p->p_nlminfo->msg_seq);

	msg.lm_fl = *fl;
	msg.lm_wait = ap->a_flags & F_WAIT;
	msg.lm_getlk = ap->a_op == F_GETLK;
	cru2x(td->td_ucred, &msg.lm_cred);

	for (;;) {
		error = nfslock_send(&msg);
		if (error)
			goto out;

		/* Unlocks succeed immediately. */
		if (fl->l_type == F_UNLCK)
			goto out;

		/*
		 * Retry after 20 seconds if we haven't gotten a response yet.
		 * This number was picked out of thin air... but is longer
		 * than even a reasonably loaded system should take (at least
		 * on a local network).  XXX Probably should use a back-off
		 * scheme.
		 *
		 * XXX: No PCATCH here since we currently have no useful
		 * way to signal to the userland rpc.lockd that the request
		 * has been aborted.  Once the rpc.lockd implementation
		 * can handle aborts, and we report them properly,
		 * PCATCH can be put back.  In the meantime, if we did
		 * permit aborting, the lock attempt would "get lost"
		 * and the lock would get stuck in the locked state.
		 */
		error = tsleep(p->p_nlminfo, PUSER, "lockd", 20*hz);
		if (error != 0) {
			if (error == EWOULDBLOCK) {
				/*
				 * We timed out, so we rewrite the request
				 * to the fifo.
				 */
				continue;
			}

			break;
		}

		if (msg.lm_getlk && p->p_nlminfo->retcode == 0) {
			if (p->p_nlminfo->set_getlk_pid) {
				fl->l_sysid = 0; /* XXX */
				fl->l_pid = p->p_nlminfo->getlk_pid;
			} else {
				fl->l_type = F_UNLCK;
			}
		}
		error = p->p_nlminfo->retcode;
		break;
	}
out:
	mtx_unlock(&Giant);
	return (error);
}
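/*
 * Worked example for the range validation in nfs_dolock() above
 * (illustrative arithmetic only): with l_len > 0 the last byte covered by
 * the lock is l_start + l_len - 1, and the check
 *
 *	l_len - 1 > OFF_MAX - l_start
 *
 * rejects any range whose last byte would overflow off_t.  For instance,
 * l_start = OFF_MAX - 10 with l_len = 11 covers bytes up to exactly OFF_MAX
 * and is accepted, while l_len = 12 would need byte OFF_MAX + 1 and fails
 * with EOVERFLOW.  A negative l_len describes a range ending just before
 * l_start, so l_start == 0 or l_start + l_len < 0 (a range reaching below
 * offset zero) fails with EINVAL.
 */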
/*
 * nfslockdans --
 *	NFS advisory byte-level locks answer from the lock daemon.
 */
static int
nfslockdans(struct thread *td, struct lockd_ans *ansp)
{
	struct proc *targetp;

	/* The version should match, or we're out of sync. */
	if (ansp->la_vers != LOCKD_ANS_VERSION)
		return (EINVAL);

	/* Find the process, set its return errno and wake it up. */
	if ((targetp = pfind(ansp->la_msg_ident.pid)) == NULL)
		return (ESRCH);

	/*
	 * Verify that the pid hasn't been reused (if we can) and that it
	 * isn't waiting for an answer from a more recent request.  We
	 * return EPIPE if the match fails, because we've already used
	 * ESRCH above, and this is sort of like writing on a pipe after
	 * the reader has closed it.
	 */
	if (targetp->p_nlminfo == NULL ||
	    ((ansp->la_msg_ident.msg_seq != -1) &&
	     (timevalcmp(&targetp->p_nlminfo->pid_start,
	     &ansp->la_msg_ident.pid_start, !=) ||
	     targetp->p_nlminfo->msg_seq != ansp->la_msg_ident.msg_seq))) {
		PROC_UNLOCK(targetp);
		return (EPIPE);
	}

	targetp->p_nlminfo->retcode = ansp->la_errno;
	targetp->p_nlminfo->set_getlk_pid = ansp->la_set_getlk_pid;
	targetp->p_nlminfo->getlk_pid = ansp->la_getlk_pid;

	wakeup(targetp->p_nlminfo);

	PROC_UNLOCK(targetp);
	return (0);
}

/*
 * Free nlminfo attached to process.
 */
static void
nlminfo_release(struct proc *p)
{

	free(p->p_nlminfo, M_NLMINFO);
	p->p_nlminfo = NULL;
}
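/*
 * To summarize the delivery rules enforced by nfslockdans() above
 * (an illustrative restatement, derived from the checks in the code):
 * an answer written by the daemon reaches a waiting process only if
 *
 *	- la_vers equals LOCKD_ANS_VERSION (otherwise EINVAL),
 *	- la_msg_ident.pid names a live process (otherwise ESRCH), and
 *	- unless la_msg_ident.msg_seq is -1, both pid_start and msg_seq
 *	  match the process's p_nlminfo (otherwise EPIPE).
 *
 * Setting msg_seq to -1 lets the daemon answer whatever request the
 * process is currently waiting on.
 */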