/* This file contains the procedures that manipulate file descriptors.
 *
 * The entry points into this file are
 *   get_fd:          look for a free file descriptor and a free filp slot
 *   get_filp:        look up the filp entry for a given file descriptor
 *   find_filp:       find a filp slot that points to a given vnode
 *   invalidate_filp: invalidate a filp and associated fd's, only let
 *                    close() happen on it
 *   do_copyfd:       copy a file descriptor from or to another endpoint
 */

#include <sys/select.h>
#include <minix/callnr.h>
#include <minix/u64.h>
#include <assert.h>
#include <sys/stat.h>
#include "fs.h"
#include "file.h"
#include "vnode.h"


#if LOCK_DEBUG
/*===========================================================================*
 *                         check_filp_locks_by_me                            *
 *===========================================================================*/
void check_filp_locks_by_me(void)
{
/* Check whether this thread still has filp locks held */
  struct filp *f;
  int r;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
        r = mutex_trylock(&f->filp_lock);
        if (r == -EDEADLK)
                panic("Thread %d still holds filp lock on filp %p call_nr=%d\n",
                      mthread_self(), f, job_call_nr);
        else if (r == 0) {
                /* We just obtained the lock, release it */
                mutex_unlock(&f->filp_lock);
        }
  }
}
#endif

/*===========================================================================*
 *                            check_filp_locks                               *
 *===========================================================================*/
void check_filp_locks(void)
{
  struct filp *f;
  int r, count = 0;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
        r = mutex_trylock(&f->filp_lock);
        if (r == -EBUSY) {
                /* Mutex is still locked */
                count++;
        } else if (r == 0) {
                /* We just obtained a lock, don't want it */
                mutex_unlock(&f->filp_lock);
        } else
                panic("filp_lock weird state");
  }
  if (count) panic("locked filps");
#if 0
  else printf("check_filp_locks OK\n");
#endif
}

/*===========================================================================*
 *                               init_filps                                  *
 *===========================================================================*/
void init_filps(void)
{
/* Initialize filps */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
        if (mutex_init(&f->filp_lock, NULL) != 0)
                panic("Failed to initialize filp mutex");
  }
}

/*===========================================================================*
 *                                get_fd                                     *
 *===========================================================================*/
int get_fd(struct fproc *rfp, int start, mode_t bits, int *k, struct filp **fpt)
{
/* Look for a free file descriptor and a free filp slot. Fill in the mode word
 * in the latter, but don't claim either one yet, since the open() or creat()
 * may yet fail.
 */
  register struct filp *f;
  register int i;

  /* Search the fproc fp_filp table for a free file descriptor. */
  for (i = start; i < OPEN_MAX; i++) {
        if (rfp->fp_filp[i] == NULL) {
                /* A file descriptor has been located. */
                *k = i;
                break;
        }
  }

  /* Check to see if a file descriptor has been found. */
  if (i >= OPEN_MAX) return(EMFILE);

  /* If we don't care about a filp, return now. */
  if (fpt == NULL) return(OK);

  /* Now that a file descriptor has been found, look for a free filp slot. */
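  /* A slot is free when its filp_count is zero. The mutex_trylock() guards
   * against another worker thread claiming the same slot at the same time;
   * note that a successfully claimed slot is returned with its mutex still
   * held, so it is up to the caller to release it later (e.g. through
   * unlock_filp()).
   */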
  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
        assert(f->filp_count >= 0);
        if (f->filp_count == 0 && mutex_trylock(&f->filp_lock) == 0) {
                f->filp_mode = bits;
                f->filp_pos = 0;
                f->filp_selectors = 0;
                f->filp_select_ops = 0;
                f->filp_pipe_select_ops = 0;
                f->filp_flags = 0;
                f->filp_select_flags = 0;
                f->filp_softlock = NULL;
                f->filp_ioctl_fp = NULL;
                *fpt = f;
                return(OK);
        }
  }

  /* If control passes here, the filp table must be full. Report that back. */
  return(ENFILE);
}

/*===========================================================================*
 *                               get_filp                                    *
 *===========================================================================*/
struct filp *get_filp(int fild, tll_access_t locktype)
{
/* See if 'fild' refers to a valid file descriptor. If so, return its filp
 * pointer.
 */
  return get_filp2(fp, fild, locktype);
}

/*===========================================================================*
 *                               get_filp2                                   *
 *===========================================================================*/
struct filp *get_filp2(register struct fproc *rfp, int fild,
        tll_access_t locktype)
{
/* See if 'fild' refers to a valid file descriptor. If so, return its filp
 * pointer. On failure, set err_code and return NULL.
 */
  struct filp *filp;

  filp = NULL;
  if (fild < 0 || fild >= OPEN_MAX)
        err_code = EBADF;
  else if (locktype != VNODE_OPCL && rfp->fp_filp[fild] != NULL &&
                rfp->fp_filp[fild]->filp_mode == FILP_CLOSED)
        err_code = EIO; /* disallow all use except close(2) */
  else if ((filp = rfp->fp_filp[fild]) == NULL)
        err_code = EBADF;
  else if (locktype != VNODE_NONE)      /* Only lock the filp if requested */
        lock_filp(filp, locktype);      /* All is fine */

  return(filp); /* may also be NULL */
}

/*===========================================================================*
 *                               find_filp                                   *
 *===========================================================================*/
struct filp *find_filp(struct vnode *vp, mode_t bits)
{
/* Find a filp slot that refers to the vnode 'vp' in a way as described
 * by the mode bit 'bits'. Used for determining whether somebody is still
 * interested in either end of a pipe. Also used when opening a FIFO to
 * find partners to share a filp field with (to share the file position).
 * Like 'get_fd' it performs its job by linear search through the filp table.
 */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
        if (f->filp_count != 0 && f->filp_vno == vp && (f->filp_mode & bits)) {
                return(f);
        }
  }

  /* If control passes here, the filp wasn't there. Report that back. */
  return(NULL);
}
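/* Invalidation, below, marks a filp as FILP_CLOSED without touching its
 * reference count or its vnode. get_filp2() above then refuses all further
 * use of such a filp with EIO, except for VNODE_OPCL accesses, so that the
 * owning process can still close(2) the descriptor.
 */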
/*===========================================================================*
 *                            invalidate_filp                                *
 *===========================================================================*/
void invalidate_filp(struct filp *rfilp)
{
/* Invalidate filp. */

  rfilp->filp_mode = FILP_CLOSED;
}

/*===========================================================================*
 *                      invalidate_filp_by_char_major                        *
 *===========================================================================*/
void invalidate_filp_by_char_major(int major)
{
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
        if (f->filp_count != 0 && f->filp_vno != NULL) {
                if (major(f->filp_vno->v_sdev) == major &&
                    S_ISCHR(f->filp_vno->v_mode)) {
                        invalidate_filp(f);
                }
        }
  }
}

/*===========================================================================*
 *                        invalidate_filp_by_endpt                           *
 *===========================================================================*/
void invalidate_filp_by_endpt(endpoint_t proc_e)
{
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
        if (f->filp_count != 0 && f->filp_vno != NULL) {
                if (f->filp_vno->v_fs_e == proc_e)
                        invalidate_filp(f);
        }
  }
}

/*===========================================================================*
 *                               lock_filp                                   *
 *===========================================================================*/
void lock_filp(struct filp *filp, tll_access_t locktype)
{
  struct worker_thread *org_self;
  struct vnode *vp;

  assert(filp->filp_count > 0);
  vp = filp->filp_vno;
  assert(vp != NULL);

  /* Lock vnode only if we haven't already locked it. If already locked by
   * us, we're allowed to have one additional 'soft' lock.
   */
  if (tll_locked_by_me(&vp->v_lock)) {
        assert(filp->filp_softlock == NULL);
        filp->filp_softlock = fp;
  } else {
        /* We have to make an exception for vnodes belonging to pipes. Even
         * read(2) operations on pipes change the vnode and therefore require
         * exclusive access.
         */
        if (S_ISFIFO(vp->v_mode) && locktype == VNODE_READ)
                locktype = VNODE_WRITE;
        lock_vnode(vp, locktype);
  }

  assert(vp->v_ref_count > 0);  /* vnode still in use? */
  assert(filp->filp_vno == vp); /* vnode still what we think it is? */
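  /* The slow path below blocks on the filp mutex; worker_suspend() and
   * worker_resume() bracket the wait so that the current job's worker
   * state is saved and restored while this thread sleeps.
   */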
  /* First try to get the filp lock right off the bat. */
  if (mutex_trylock(&filp->filp_lock) != 0) {
        /* Already in use, let's wait for our turn */
        org_self = worker_suspend();

        if (mutex_lock(&filp->filp_lock) != 0)
                panic("unable to obtain lock on filp");

        worker_resume(org_self);
  }
}

/*===========================================================================*
 *                              unlock_filp                                  *
 *===========================================================================*/
void unlock_filp(struct filp *filp)
{
  /* If this filp holds a soft lock on the vnode, we must be the owner */
  if (filp->filp_softlock != NULL)
        assert(filp->filp_softlock == fp);

  if (filp->filp_count > 0) {
        /* Only unlock the vnode if the filp is still in use and if we don't
         * hold a soft lock on the vnode.
         */
        if (filp->filp_softlock == NULL) {
                assert(tll_islocked(&(filp->filp_vno->v_lock)));
                unlock_vnode(filp->filp_vno);
        }
  }

  filp->filp_softlock = NULL;
  if (mutex_unlock(&filp->filp_lock) != 0)
        panic("unable to release lock on filp");
}

/*===========================================================================*
 *                              unlock_filps                                 *
 *===========================================================================*/
void unlock_filps(struct filp *filp1, struct filp *filp2)
{
/* Unlock two filps that are tied to the same vnode. As a thread can lock a
 * vnode only once, unlocking the vnode twice would result in an error.
 */

  /* No NULL pointers and not equal */
  assert(filp1);
  assert(filp2);
  assert(filp1 != filp2);

  /* Must be tied to the same vnode and not NULL */
  assert(filp1->filp_vno == filp2->filp_vno);
  assert(filp1->filp_vno != NULL);

  if (filp1->filp_count > 0 && filp2->filp_count > 0) {
        /* Only unlock the vnode if the filps are still in use */
        unlock_vnode(filp1->filp_vno);
  }

  filp1->filp_softlock = NULL;
  filp2->filp_softlock = NULL;
  if (mutex_unlock(&filp2->filp_lock) != 0)
        panic("unable to release filp lock on filp2");
  if (mutex_unlock(&filp1->filp_lock) != 0)
        panic("unable to release filp lock on filp1");
}

/*===========================================================================*
 *                               close_filp                                  *
 *===========================================================================*/
void close_filp(struct filp *f)
{
/* Close a file. Will also unlock the filp when done. */

  int rw;
  dev_t dev;
  struct vnode *vp;

  /* Must be locked */
  assert(mutex_trylock(&f->filp_lock) == -EDEADLK);
  assert(tll_islocked(&f->filp_vno->v_lock));

  vp = f->filp_vno;

  if (f->filp_count - 1 == 0 && f->filp_mode != FILP_CLOSED) {
        /* Check to see if the file is special. */
        if (S_ISCHR(vp->v_mode) || S_ISBLK(vp->v_mode)) {
                dev = vp->v_sdev;
                if (S_ISBLK(vp->v_mode)) {
                        lock_bsf();
                        if (vp->v_bfs_e == ROOT_FS_E) {
                                /* Invalidate the cache unless the special is
                                 * mounted. Assume that the root file system's
                                 * block special files are open only for fsck.
                                 */
                                req_flush(vp->v_bfs_e, dev);
                        }
                        unlock_bsf();

                        (void) bdev_close(dev); /* Ignore errors */
                } else {
                        (void) cdev_close(dev); /* Ignore errors */
                }

                f->filp_mode = FILP_CLOSED;
        }
  }
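  /* Note that a closing reader (R_BIT set) releases processes suspended on
   * the write end (VFS_WRITE), and a closing writer releases suspended
   * readers: it is the opposite end of the pipe that may be blocked on us.
   */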
  /* If the inode being closed is a pipe, release everyone hanging on it. */
  if (S_ISFIFO(vp->v_mode)) {
        rw = (f->filp_mode & R_BIT ? VFS_WRITE : VFS_READ);
        release(vp, rw, susp_count);
  }

  if (--f->filp_count == 0) {
        if (S_ISFIFO(vp->v_mode)) {
                /* Last reader or writer is going. Tell PFS about the latest
                 * pipe size.
                 */
                truncate_vnode(vp, vp->v_size);
        }

        unlock_vnode(f->filp_vno);
        put_vnode(f->filp_vno);
        f->filp_vno = NULL;
        f->filp_mode = FILP_CLOSED;
        f->filp_count = 0;
  } else if (f->filp_count < 0) {
        panic("VFS: invalid filp count: %d ino %llx/%llu", f->filp_count,
              vp->v_dev, vp->v_inode_nr);
  } else {
        unlock_vnode(f->filp_vno);
  }

  mutex_unlock(&f->filp_lock);
}

/*===========================================================================*
 *                               do_copyfd                                   *
 *===========================================================================*/
int do_copyfd(void)
{
/* Copy a file descriptor between processes, or close a remote file
 * descriptor. This call is used as a back-call by device drivers (UDS,
 * VND), and is expected to be used in response to an IOCTL to such device
 * drivers.
 */
  struct fproc *rfp;
  struct filp *rfilp;
  endpoint_t endpt;
  int r, fd, what, slot;

  /* This should be replaced with an ACL check. */
  if (!super_user) return(EPERM);

  endpt = job_m_in.m_lsys_vfs_copyfd.endpt;
  fd = job_m_in.m_lsys_vfs_copyfd.fd;
  what = job_m_in.m_lsys_vfs_copyfd.what;

  if (isokendpt(endpt, &slot) != OK) return(EINVAL);
  rfp = &fproc[slot];

  /* FIXME: we should now check that the user process is indeed blocked on an
   * IOCTL call, so that we can safely mess with its file descriptors. We
   * currently do not have the necessary state to verify this, so we assume
   * that the call is always used in the right way.
   */

  /* Depending on the operation, get the file descriptor from the caller or
   * the user process. Do not lock the filp yet: we first need to make sure
   * that locking it will not result in a deadlock.
   */
  rfilp = get_filp2((what == COPYFD_TO) ? fp : rfp, fd, VNODE_NONE);
  if (rfilp == NULL)
        return(err_code);

  /* If the filp is involved in an IOCTL by the user process, locking the
   * filp here would result in a deadlock. This would happen if a user
   * process passes in the file descriptor to the device node on which it is
   * performing the IOCTL. We do not allow manipulation of such device
   * nodes. In practice, this only applies to block-special files (and thus
   * VND), because character-special files (as used by UDS) are unlocked
   * during the IOCTL.
   */
  if (rfilp->filp_ioctl_fp == rfp)
        return(EBADF);

  /* Now we can safely lock the filp, copy or close it, and unlock it again. */
  lock_filp(rfilp, VNODE_READ);
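  /* Result convention for the switch below: COPYFD_FROM and COPYFD_TO
   * return the newly allocated file descriptor in the destination process
   * on success, while COPYFD_CLOSE returns OK; failures are reported as
   * error codes.
   */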
  switch (what) {
  case COPYFD_FROM:
        rfp = fp;

        /* FALLTHROUGH */
  case COPYFD_TO:
        /* Find a free file descriptor slot in the local or remote process. */
        for (fd = 0; fd < OPEN_MAX; fd++)
                if (rfp->fp_filp[fd] == NULL)
                        break;

        /* If found, fill the slot and return the slot number. */
        if (fd < OPEN_MAX) {
                rfp->fp_filp[fd] = rfilp;
                rfilp->filp_count++;
                r = fd;
        } else
                r = EMFILE;

        break;

  case COPYFD_CLOSE:
        /* This should be used ONLY to revert a successful copy-to operation,
         * and assumes that the filp is still in use by the caller as well.
         */
        if (rfilp->filp_count > 1) {
                rfilp->filp_count--;
                rfp->fp_filp[fd] = NULL;
                r = OK;
        } else
                r = EBADF;

        break;

  default:
        r = EINVAL;
  }

  unlock_filp(rfilp);

  return(r);
}