/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $FreeBSD: src/sys/kern/vfs_aio.c,v 1.70.2.28 2003/05/29 06:15:35 alc Exp $
 * $DragonFly: src/sys/kern/vfs_aio.c,v 1.15 2005/06/06 15:02:28 dillon Exp $
 */

/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/event.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <sys/aio.h>
#include <sys/file2.h>
#include <sys/buf2.h>

#include <machine/limits.h>
#include "opt_vfs_aio.h"

#ifdef VFS_AIO

/*
 * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
 * overflow.
 */
static long jobrefid;

#define JOBST_NULL		0x0
#define JOBST_JOBQGLOBAL	0x2
#define JOBST_JOBRUNNING	0x3
#define JOBST_JOBFINISHED	0x4
#define JOBST_JOBQBUF		0x5
#define JOBST_JOBBFINISHED	0x6

#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256	/* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024	/* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif

SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");

static int max_aio_procs = MAX_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0,
	"Maximum number of kernel threads to use for handling async IO");

static int num_aio_procs = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0,
	"Number of presently active kernel threads for async IO");

/*
 * The code will adjust the actual number of AIO processes towards this
 * number when it gets a chance.
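 *
 * Editor's note (illustrative values, not from the original comment): the
 * daemon-pool knobs declared here are exported under vfs.aio and can be
 * inspected or tuned at run time with sysctl(8), e.g.
 *
 *	sysctl vfs.aio.target_aio_procs=8
 *	sysctl vfs.aio.max_aio_procs=64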
 */
static int target_aio_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
	0, "Preferred number of ready kernel threads for async IO");

static int max_queue_count = MAX_AIO_QUEUE;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
	"Maximum number of aio requests to queue, globally");

static int num_queue_count = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
	"Number of queued aio requests");

static int num_buf_aio = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
	"Number of aio requests presently handled by the buf subsystem");

/* Number of async I/O threads in the process of being started */
/* XXX This should be local to _aio_aqueue() */
static int num_aio_resv_start = 0;

static int aiod_timeout;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
	"Timeout value for synchronous aio operations");

static int aiod_lifetime;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
	"Maximum lifetime for idle aiod");

static int max_aio_per_proc = MAX_AIO_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
	0, "Maximum active aio requests per process (stored in the process)");

static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
	&max_aio_queue_per_proc, 0,
	"Maximum queued aio requests per process (stored in the process)");

static int max_buf_aio = MAX_BUF_AIO;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
	"Maximum buf aio requests per process (stored in the process)");

/*
 * AIO process info
 */
#define AIOP_FREE	0x1		/* proc on free queue */
#define AIOP_SCHED	0x2		/* proc explicitly scheduled */

struct aioproclist {
	int aioprocflags;			/* AIO proc flags */
	TAILQ_ENTRY(aioproclist) list;		/* List of processes */
	struct proc *aioproc;			/* The AIO thread */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int	lioj_flags;
	int	lioj_buffer_count;
	int	lioj_buffer_finished_count;
	int	lioj_queue_count;
	int	lioj_queue_finished_count;
	struct	sigevent lioj_signal;	/* signal on all I/O done */
	TAILQ_ENTRY(aio_liojob) lioj_list;
	struct	kaioinfo *lioj_ki;
};
#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */

/*
 * per process aio data structure
 */
struct kaioinfo {
	int	kaio_flags;		/* per process kaio flags */
	int	kaio_maxactive_count;	/* maximum number of AIOs */
	int	kaio_active_count;	/* number of currently used AIOs */
	int	kaio_qallowed_count;	/* maximum size of AIO queue */
	int	kaio_queue_count;	/* size of AIO queue */
	int	kaio_ballowed_count;	/* maximum number of buffers */
	int	kaio_queue_finished_count; /* number of daemon jobs finished */
	int	kaio_buffer_count;	/* number of physio buffers */
	int	kaio_buffer_finished_count; /* count of I/O done */
	struct	proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD(,aiocblist)
	    kaio_jobdone;		/* done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */

static TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;			/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;		/* Phys I/O job list */
static TAILQ_HEAD(,aiocblist) aio_freejobs;		/* Pool of free jobs */

static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void *);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct aiocb *job, int type);
static void	aio_physwakeup(struct buf *bp);
static int	aio_fphysio(struct aiocblist *aiocbe);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	aio_daemon(void *uproc);
static void	process_signal(void *aioj);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);

/*
 * Zones for:
 *	kaio	Per process async io info
 *	aiop	async io thread data
 *	aiocb	async io jobs
 *	aiol	list io job pointer - internal to aio_suspend XXX
 *	aiolio	list io jobs
 */
static vm_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;

/*
 * Startup initialization
 */
static void
aio_onceonly(void *na)
{
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);
	TAILQ_INIT(&aio_freejobs);
	kaio_zone = zinit("AIO", sizeof(struct kaioinfo), 0, 0, 1);
	aiop_zone = zinit("AIOP", sizeof(struct aioproclist), 0, 0, 1);
	aiocb_zone = zinit("AIOCB", sizeof(struct aiocblist), 0, 0, 1);
	aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof(intptr_t), 0, 0, 1);
	aiolio_zone = zinit("AIOLIO", sizeof(struct aio_liojob), 0, 0, 1);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;
}

/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 */
static void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		ki = zalloc(kaio_zone);
		p->p_aioinfo = ki;
		ki->kaio_flags = 0;
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
		TAILQ_INIT(&ki->kaio_sockqueue);
	}

	while (num_aio_procs < target_aio_procs)
		aio_newproc();
}

/*
 * Free a job entry.  Wait for completion if it is currently active, but don't
 * delay forever.  If we delay, we return a flag that says that we have to
 * restart the queue scan.
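 *
 * A caller scanning a queue therefore restarts its scan whenever this
 * function returns non-zero; aio_proc_rundown() below does exactly that:
 *
 *	if (aio_free_entry(aiocbe))
 *		goto restart1;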
 */
static int
aio_free_entry(struct aiocblist *aiocbe)
{
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	struct proc *p;
	int error;

	if (aiocbe->jobstate == JOBST_NULL)
		panic("aio_free_entry: freeing already free job");

	p = aiocbe->userproc;
	ki = p->p_aioinfo;
	lj = aiocbe->lio;
	if (ki == NULL)
		panic("aio_free_entry: missing p->p_aioinfo");

	while (aiocbe->jobstate == JOBST_JOBRUNNING) {
		aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
		tsleep(aiocbe, 0, "jobwai", 0);
	}
	if (aiocbe->bp == NULL) {
		if (ki->kaio_queue_count <= 0)
			panic("aio_free_entry: process queue size <= 0");
		if (num_queue_count <= 0)
			panic("aio_free_entry: system wide queue size <= 0");

		if (lj) {
			lj->lioj_queue_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_queue_finished_count--;
		}
		ki->kaio_queue_count--;
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_queue_finished_count--;
		num_queue_count--;
	} else {
		if (lj) {
			lj->lioj_buffer_count--;
			if (aiocbe->jobflags & AIOCBLIST_DONE)
				lj->lioj_buffer_finished_count--;
		}
		if (aiocbe->jobflags & AIOCBLIST_DONE)
			ki->kaio_buffer_finished_count--;
		ki->kaio_buffer_count--;
		num_buf_aio--;
	}

	/* aiocbe is going away, we need to destroy any knotes */
	knote_remove(p->p_thread, &aiocbe->klist);

	if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
	    && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(p);
	}

	if (aiocbe->jobstate == JOBST_JOBQBUF) {
		if ((error = aio_fphysio(aiocbe)) != 0)
			return error;
		if (aiocbe->jobstate != JOBST_JOBBFINISHED)
			panic("aio_free_entry: invalid physio finish-up state");
		crit_enter();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		crit_exit();
	} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
		crit_enter();
		TAILQ_REMOVE(&aio_jobs, aiocbe, list);
		TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
		crit_exit();
	} else if (aiocbe->jobstate == JOBST_JOBFINISHED) {
		TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
	} else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
		crit_enter();
		TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
		crit_exit();
		if (aiocbe->bp) {
			vunmapbuf(aiocbe->bp);
			relpbuf(aiocbe->bp, NULL);
			aiocbe->bp = NULL;
		}
	}
	if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
		TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
		zfree(aiolio_zone, lj);
	}
	aiocbe->jobstate = JOBST_NULL;
	callout_stop(&aiocbe->timeout);
	fdrop(aiocbe->fd_file, curthread);
	TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
	return 0;
}
#endif /* VFS_AIO */

/*
 * Rundown the jobs for a given process.
 */
void
aio_proc_rundown(struct proc *p)
{
#ifndef VFS_AIO
	return;
#else
	struct kaioinfo *ki;
	struct aio_liojob *lj, *ljn;
	struct aiocblist *aiocbe, *aiocbn;
	struct file *fp;
	struct socket *so;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
	while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
	    ki->kaio_buffer_finished_count)) {
		ki->kaio_flags |= KAIO_RUNDOWN;
		if (tsleep(p, 0, "kaiowt", aiod_timeout))
			break;
	}

	/*
	 * Move any aio ops that are waiting on socket I/O to the normal job
	 * queues so they are cleaned up with any others.
	 */
	crit_enter();
	for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe;
	    aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		fp = aiocbe->fd_file;
		if (fp != NULL) {
			so = (struct socket *)fp->f_data;
			TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
			if (TAILQ_EMPTY(&so->so_aiojobq)) {
				so->so_snd.sb_flags &= ~SB_AIO;
				so->so_rcv.sb_flags &= ~SB_AIO;
			}
		}
		TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
		TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
		TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
	}
	crit_exit();

restart1:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart1;
	}

restart2:
	for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe;
	    aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe))
			goto restart2;
	}

restart3:
	crit_enter();
	while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		tsleep(p, 0, "aioprn", 0);
		crit_exit();
		goto restart3;
	}
	crit_exit();

restart4:
	crit_enter();
	for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
		aiocbn = TAILQ_NEXT(aiocbe, plist);
		if (aio_free_entry(aiocbe)) {
			crit_exit();
			goto restart4;
		}
	}
	crit_exit();

	/*
	 * If we've slept, jobs might have moved from one queue to another.
	 * Retry rundown if we didn't manage to empty the queues.
	 */
	if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL ||
	    TAILQ_FIRST(&ki->kaio_jobqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufqueue) != NULL ||
	    TAILQ_FIRST(&ki->kaio_bufdone) != NULL)
		goto restart1;

	for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
		ljn = TAILQ_NEXT(lj, lioj_list);
		if ((lj->lioj_buffer_count == 0) &&
		    (lj->lioj_queue_count == 0)) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			zfree(aiolio_zone, lj);
		} else {
#ifdef DIAGNOSTIC
			printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
			    "QF:%d\n", lj->lioj_buffer_count,
			    lj->lioj_buffer_finished_count,
			    lj->lioj_queue_count,
			    lj->lioj_queue_finished_count);
#endif
		}
	}

	zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
#endif /* VFS_AIO */
}

#ifdef VFS_AIO
/*
 * Select a job to run (called by an AIO daemon).
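 *
 * A job is skipped while its owning process already has
 * kaio_maxactive_count requests active; the first job whose process is
 * under its limit is removed from the global aio_jobs list and returned.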
 */
static struct aiocblist *
aio_selectjob(struct aioproclist *aiop)
{
	struct aiocblist *aiocbe;
	struct kaioinfo *ki;
	struct proc *userp;

	crit_enter();
	for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe;
	    aiocbe = TAILQ_NEXT(aiocbe, list)) {
		userp = aiocbe->userproc;
		ki = userp->p_aioinfo;

		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
			crit_exit();
			return aiocbe;
		}
	}
	crit_exit();

	return NULL;
}

/*
 * The AIO processing activity.  This is the code that does the I/O request
 * for the non-physio version of the operations.  The normal vn operations
 * are used, and this code should work in all instances for every type of
 * file, including pipes, sockets, fifos, and regular files.
 */
static void
aio_process(struct aiocblist *aiocbe)
{
	struct thread *mytd;
	struct aiocb *cb;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	int cnt;
	int error;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	mytd = curthread;
	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = mytd;

	inblock_st = mytd->td_proc->p_stats->p_ru.ru_inblock;
	oublock_st = mytd->td_proc->p_stats->p_ru.ru_oublock;
	/*
	 * _aio_aqueue() acquires a reference to the file that is
	 * released in aio_free_entry().
	 */
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, mytd);
	} else {
		auio.uio_rw = UIO_WRITE;
		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, mytd);
	}
	inblock_end = mytd->td_proc->p_stats->p_ru.ru_inblock;
	oublock_end = mytd->td_proc->p_stats->p_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	if ((error) && (auio.uio_resid != cnt)) {
		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
			error = 0;
		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE))
			psignal(aiocbe->userproc, SIGPIPE);
	}

	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;
}

/*
 * The AIO daemon, most of the actual work is done in aio_process,
 * but the setup (and address space mgmt) is done in this routine.
 *
 * The MP lock is held on entry.
 */
static void
aio_daemon(void *uproc)
{
	struct aio_liojob *lj;
	struct aiocb *cb;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;
	struct proc *curcp, *mycp, *userp;
	struct vmspace *myvm, *tmpvm;
	struct ucred *cr;

	/*
	 * Local copies of curproc (cp) and vmspace (myvm)
	 */
	mycp = curproc;
	myvm = mycp->p_vmspace;

	if (mycp->p_textvp) {
		vrele(mycp->p_textvp);
		mycp->p_textvp = NULL;
	}

	/*
	 * Allocate and ready the aio control info.  There is one aiop
	 * structure per daemon.
	 */
	aiop = zalloc(aiop_zone);
	aiop->aioproc = mycp;
	aiop->aioprocflags |= AIOP_FREE;

	crit_enter();

	/*
	 * Place thread (lightweight process) onto the AIO free thread list.
	 */
	if (TAILQ_EMPTY(&aio_freeproc))
		wakeup(&aio_freeproc);
	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);

	crit_exit();

	/* Make up a name for the daemon. */
	strcpy(mycp->p_comm, "aiod");

	/*
	 * Get rid of our current filedescriptors.  AIODs don't need any
	 * filedescriptors, except as temporarily inherited from the client.
	 * Credentials are also cloned, and made equivalent to "root".
	 */
	fdfree(mycp);
	mycp->p_fd = NULL;
	cr = cratom(&mycp->p_ucred);
	cr->cr_uid = 0;
	uireplace(&cr->cr_uidinfo, uifind(0));
	cr->cr_ngroups = 1;
	cr->cr_groups[0] = 1;

	/* The daemon resides in its own pgrp. */
	enterpgrp(mycp, mycp->p_pid, 1);

	/* Mark special process type. */
	mycp->p_flag |= P_SYSTEM | P_KTHREADP;

	/*
	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
	 * and creating too many daemons.)
	 */
	wakeup(mycp);

	for (;;) {
		/*
		 * curcp is the current daemon process context.
		 * userp is the current user process context.
		 */
		curcp = mycp;

		/*
		 * Take daemon off of free queue
		 */
		if (aiop->aioprocflags & AIOP_FREE) {
			crit_enter();
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			crit_exit();
		}
		aiop->aioprocflags &= ~AIOP_SCHED;

		/*
		 * Check for jobs.
		 */
		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
			cb = &aiocbe->uaiocb;
			userp = aiocbe->userproc;

			aiocbe->jobstate = JOBST_JOBRUNNING;

			/*
			 * Connect to process address space for user program.
			 */
			if (userp != curcp) {
				/*
				 * Save the current address space that we are
				 * connected to.
				 */
				tmpvm = mycp->p_vmspace;

				/*
				 * Point to the new user address space, and
				 * refer to it.
				 */
				mycp->p_vmspace = userp->p_vmspace;
				mycp->p_vmspace->vm_refcnt++;

				/* Activate the new mapping. */
				pmap_activate(mycp);

				/*
				 * If the old address space wasn't the
				 * daemon's own address space, then we need
				 * to remove the daemon's reference from the
				 * other process that it was acting on behalf
				 * of.
				 */
				if (tmpvm != myvm) {
					vmspace_free(tmpvm);
				}
				curcp = userp;
			}

			ki = userp->p_aioinfo;
			lj = aiocbe->lio;

			/* Account for currently active jobs. */
			ki->kaio_active_count++;

			/* Do the I/O function. */
			aio_process(aiocbe);

			/* Decrement the active job count. */
			ki->kaio_active_count--;

			/*
			 * Increment the completion count for wakeup/signal
			 * comparisons.
			 */
			aiocbe->jobflags |= AIOCBLIST_DONE;
			ki->kaio_queue_finished_count++;
			if (lj)
				lj->lioj_queue_finished_count++;
			if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
			    & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(userp);
			}

			crit_enter();
			if (lj && (lj->lioj_flags &
			    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
				if ((lj->lioj_queue_finished_count ==
				    lj->lioj_queue_count) &&
				    (lj->lioj_buffer_finished_count ==
				    lj->lioj_buffer_count)) {
					psignal(userp,
					    lj->lioj_signal.sigev_signo);
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
				}
			}
			crit_exit();

			aiocbe->jobstate = JOBST_JOBFINISHED;

			crit_enter();
			TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);
			crit_exit();
			KNOTE(&aiocbe->klist, 0);

			if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
				wakeup(aiocbe);
				aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
			}

			if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
				psignal(userp, cb->aio_sigevent.sigev_signo);
			}
		}

		/*
		 * Disconnect from user address space.
		 */
		if (curcp != mycp) {
			/* Get the user address space to disconnect from. */
			tmpvm = mycp->p_vmspace;

			/* Get original address space for daemon. */
			mycp->p_vmspace = myvm;

			/* Activate the daemon's address space. */
			pmap_activate(mycp);
#ifdef DIAGNOSTIC
			if (tmpvm == myvm) {
				printf("AIOD: vmspace problem -- %d\n",
				    mycp->p_pid);
			}
#endif
			/* Remove our vmspace reference. */
			vmspace_free(tmpvm);

			curcp = mycp;
		}

		/*
		 * If we are the first to be put onto the free queue, wakeup
		 * anyone waiting for a daemon.
		 */
		crit_enter();
		TAILQ_REMOVE(&aio_activeproc, aiop, list);
		if (TAILQ_EMPTY(&aio_freeproc))
			wakeup(&aio_freeproc);
		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
		aiop->aioprocflags |= AIOP_FREE;
		crit_exit();

		/*
		 * If daemon is inactive for a long time, allow it to exit,
		 * thereby freeing resources.
		 */
		if (((aiop->aioprocflags & AIOP_SCHED) == 0) &&
		    tsleep(mycp, 0, "aiordy", aiod_lifetime)) {
			crit_enter();
			if (TAILQ_EMPTY(&aio_jobs)) {
				if ((aiop->aioprocflags & AIOP_FREE) &&
				    (num_aio_procs > target_aio_procs)) {
					TAILQ_REMOVE(&aio_freeproc, aiop, list);
					crit_exit();
					zfree(aiop_zone, aiop);
					num_aio_procs--;
#ifdef DIAGNOSTIC
					if (mycp->p_vmspace->vm_refcnt <= 1) {
						printf("AIOD: bad vm refcnt for"
						    " exiting daemon: %d\n",
						    mycp->p_vmspace->vm_refcnt);
					}
#endif
					exit1(0);
				}
			}
			crit_exit();
		}
	}
}

/*
 * Create a new AIO daemon.  This is mostly a kernel-thread fork routine.  The
 * AIO daemon modifies its environment itself.
 */
static int
aio_newproc(void)
{
	int error;
	struct proc *p, *np;

	p = &proc0;
	error = fork1(p, RFPROC|RFMEM|RFNOWAIT, &np);
	if (error)
		return error;
	cpu_set_fork_handler(np, aio_daemon, curproc);
	start_forked_proc(p, np);

	/*
	 * Wait until daemon is started, but continue on just in case to
	 * handle error conditions.
	 */
	error = tsleep(np, 0, "aiosta", aiod_timeout);
	num_aio_procs++;

	return error;
}

/*
 * Try the high-performance, low-overhead physio method for eligible
 * VCHR devices.
 * This method doesn't use an aio helper thread, and thus has very low
 * overhead.
 *
 * Assumes that the caller, _aio_aqueue(), has incremented the file
 * structure's reference count, preventing its deallocation for the
 * duration of this call.
 */
static int
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
{
	int error;
	struct aiocb *cb;
	struct file *fp;
	struct buf *bp;
	struct vnode *vp;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int notify;

	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	if (fp->f_type != DTYPE_VNODE)
		return (-1);

	vp = (struct vnode *)fp->f_data;

	/*
	 * If it's not a disk, we don't want to return a positive error.
	 * It causes the aio code to not fall through to try the thread
	 * way when you're talking to a regular file.
	 */
	if (!vn_isdisk(vp, &error)) {
		if (error == ENOTBLK)
			return (-1);
		else
			return (error);
	}

	if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
		return (-1);

	if (cb->aio_nbytes >
	    MAXPHYS - (((vm_offset_t)cb->aio_buf) & PAGE_MASK))
		return (-1);

	ki = p->p_aioinfo;
	if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
		return (-1);

	ki->kaio_buffer_count++;

	lj = aiocbe->lio;
	if (lj)
		lj->lioj_buffer_count++;

	/* Create and build a buffer header for a transfer. */
	bp = (struct buf *)getpbuf(NULL);
	BUF_KERNPROC(bp);

	/*
	 * Get a copy of the kva from the physical buffer.
	 */
	bp->b_caller1 = p;
	bp->b_dev = vp->v_rdev;
	error = 0;

	bp->b_bcount = cb->aio_nbytes;
	bp->b_bufsize = cb->aio_nbytes;
	bp->b_flags = B_PHYS | B_CALL | (cb->aio_lio_opcode == LIO_WRITE ?
	    B_WRITE : B_READ);
	bp->b_iodone = aio_physwakeup;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(uintptr_t)cb->aio_buf;
	bp->b_blkno = btodb(cb->aio_offset);

	/* Bring buffer into kernel space. */
	if (vmapbuf(bp) < 0) {
		error = EFAULT;
		goto doerror;
	}

	crit_enter();

	aiocbe->bp = bp;
	bp->b_spc = (void *)aiocbe;
	TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBQBUF;
	cb->_aiocb_private.status = cb->aio_nbytes;
	num_buf_aio++;
	bp->b_error = 0;

	crit_exit();

	/* Perform transfer. */
	BUF_STRATEGY(bp, 0);

	notify = 0;
	crit_enter();

	/*
	 * If we had an error invoking the request, or an error in processing
	 * the request before we have returned, we process it as an error in
	 * transfer.  Note that such an I/O error is not indicated immediately,
	 * but is returned using the aio_error mechanism.  In this case,
	 * aio_suspend will return immediately.
	 */
	if (bp->b_error || (bp->b_flags & B_ERROR)) {
		struct aiocb *job = aiocbe->uuaiocb;

		aiocbe->uaiocb._aiocb_private.status = 0;
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		suword(&job->_aiocb_private.error, bp->b_error);

		ki->kaio_buffer_finished_count++;

		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
			aiocbe->jobstate = JOBST_JOBBFINISHED;
			aiocbe->jobflags |= AIOCBLIST_DONE;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
			notify = 1;
		}
	}
	crit_exit();
	if (notify)
		KNOTE(&aiocbe->klist, 0);
	return 0;

doerror:
	ki->kaio_buffer_count--;
	if (lj)
		lj->lioj_buffer_count--;
	aiocbe->bp = NULL;
	relpbuf(bp, NULL);
	return error;
}

/*
 * This waits/tests physio completion.
 */
static int
aio_fphysio(struct aiocblist *iocb)
{
	struct buf *bp;
	int error;

	bp = iocb->bp;

	crit_enter();
	while ((bp->b_flags & B_DONE) == 0) {
		if (tsleep(bp, 0, "physstr", aiod_timeout)) {
			if ((bp->b_flags & B_DONE) == 0) {
				crit_exit();
				return EINPROGRESS;
			} else
				break;
		}
	}
	crit_exit();

	/* Release mapping into kernel space. */
	vunmapbuf(bp);
	iocb->bp = 0;

	error = 0;

	/* Check for an error. */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	relpbuf(bp, NULL);
	return (error);
}
#endif /* VFS_AIO */

/*
 * Wake up aio requests that may be serviceable now.
 */
void
aio_swake(struct socket *so, struct sockbuf *sb)
{
#ifndef VFS_AIO
	return;
#else
	struct aiocblist *cb, *cbn;
	struct proc *p;
	struct kaioinfo *ki = NULL;
	int opcode, wakecount = 0;
	struct aioproclist *aiop;

	if (sb == &so->so_snd) {
		opcode = LIO_WRITE;
		so->so_snd.sb_flags &= ~SB_AIO;
	} else {
		opcode = LIO_READ;
		so->so_rcv.sb_flags &= ~SB_AIO;
	}

	for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
		cbn = TAILQ_NEXT(cb, list);
		if (opcode == cb->uaiocb.aio_lio_opcode) {
			p = cb->userproc;
			ki = p->p_aioinfo;
			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
			TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
			wakecount++;
			if (cb->jobstate != JOBST_JOBQGLOBAL)
				panic("invalid queue value");
		}
	}

	while (wakecount--) {
		if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			wakeup(aiop->aioproc);
		}
	}
#endif /* VFS_AIO */
}

#ifdef VFS_AIO
/*
 * Queue a new AIO request.  Choosing either the threaded or direct physio VCHR
 * technique is done in this code.
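 *
 * For reference, the userland side of a request that ends up here is
 * roughly the following (illustrative sketch only, not part of this file):
 *
 *	struct aiocb acb;
 *
 *	bzero(&acb, sizeof(acb));
 *	acb.aio_fildes = fd;
 *	acb.aio_buf = buf;
 *	acb.aio_nbytes = nbytes;
 *	acb.aio_offset = offset;
 *	aio_read(&acb);
 *
 * aio_read(2) enters aio_aqueue() with LIO_READ, which in turn calls
 * _aio_aqueue() below.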
 */
static int
_aio_aqueue(struct aiocb *job, struct aio_liojob *lj, int type)
{
	struct proc *p = curproc;
	struct filedesc *fdp;
	struct file *fp;
	unsigned int fd;
	struct socket *so;
	int error;
	int opcode, user_opcode;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;
	struct kevent kev;
	struct kqueue *kq;
	struct file *kq_fp;

	if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL)
		TAILQ_REMOVE(&aio_freejobs, aiocbe, list);
	else
		aiocbe = zalloc(aiocb_zone);

	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;
	callout_init(&aiocbe->timeout);
	SLIST_INIT(&aiocbe->klist);

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
	if (error) {
		suword(&job->_aiocb_private.error, error);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return error;
	}
	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
	    !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return EINVAL;
	}

	/* Save userspace address of the job info. */
	aiocbe->uuaiocb = job;

	/* Get the opcode. */
	user_opcode = aiocbe->uaiocb.aio_lio_opcode;
	if (type != LIO_NOP)
		aiocbe->uaiocb.aio_lio_opcode = type;
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/* Get the fd info for process. */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor.
	 */
	fd = aiocbe->uaiocb.aio_fildes;
	if (fd >= fdp->fd_nfiles) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}

	fp = aiocbe->fd_file = fdp->fd_ofiles[fd];
	if ((fp == NULL) ||
	    ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) == 0))) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}
	fhold(fp);

	if (aiocbe->uaiocb.aio_offset == -1LL) {
		error = EINVAL;
		goto aqueue_fail;
	}
	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
	if (error) {
		error = EINVAL;
		goto aqueue_fail;
	}
	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
	if (jobrefid == LONG_MAX)
		jobrefid = 1;
	else
		jobrefid++;

	if (opcode == LIO_NOP) {
		fdrop(fp, p->p_thread);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, 0);
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.kernelinfo, 0);
		}
		return 0;
	}
	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
		if (type == 0)
			suword(&job->_aiocb_private.status, 0);
		error = EINVAL;
		goto aqueue_fail;
	}

	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) {
		kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
		kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr;
	} else {
		/*
		 * This method for requesting kevent-based notification won't
		 * work on the alpha, since we're passing in a pointer
		 * via aio_lio_opcode, which is an int.  Use the SIGEV_KEVENT-
		 * based method instead.
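		 *
		 * With SIGEV_KEVENT the caller instead fills in the sigevent,
		 * roughly as follows (illustrative sketch only, not part of
		 * this file):
		 *
		 *	acb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
		 *	acb.aio_sigevent.sigev_notify_kqueue = kq_fd;
		 *	acb.aio_sigevent.sigev_value.sigval_ptr = udata;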
		 */
		if (user_opcode == LIO_NOP || user_opcode == LIO_READ ||
		    user_opcode == LIO_WRITE)
			goto no_kqueue;

		error = copyin((struct kevent *)(uintptr_t)user_opcode,
		    &kev, sizeof(kev));
		if (error)
			goto aqueue_fail;
	}
	if ((u_int)kev.ident >= fdp->fd_nfiles ||
	    (kq_fp = fdp->fd_ofiles[kev.ident]) == NULL ||
	    (kq_fp->f_type != DTYPE_KQUEUE)) {
		error = EBADF;
		goto aqueue_fail;
	}
	kq = (struct kqueue *)kq_fp->f_data;
	kev.ident = (uintptr_t)aiocbe->uuaiocb;
	kev.filter = EVFILT_AIO;
	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
	kev.data = (intptr_t)aiocbe;
	error = kqueue_register(kq, &kev, p->p_thread);
aqueue_fail:
	if (error) {
		fdrop(fp, p->p_thread);
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, error);
		goto done;
	}
no_kqueue:

	suword(&job->_aiocb_private.error, EINPROGRESS);
	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
	aiocbe->userproc = p;
	aiocbe->jobflags = 0;
	aiocbe->lio = lj;
	ki = p->p_aioinfo;

	if (fp->f_type == DTYPE_SOCKET) {
		/*
		 * Alternate queueing for socket ops: Reach down into the
		 * descriptor to get the socket data.  Then check to see if the
		 * socket is ready to be read or written (based on the
		 * requested operation).
		 *
		 * If it is not ready for io, then queue the aiocbe on the
		 * socket, and set the flags so we get a call when sbnotify()
		 * happens.
		 */
		so = (struct socket *)fp->f_data;
		crit_enter();
		if (((opcode == LIO_READ) && (!soreadable(so))) ||
		    ((opcode == LIO_WRITE) && (!sowriteable(so)))) {
			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
			TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
			if (opcode == LIO_READ)
				so->so_rcv.sb_flags |= SB_AIO;
			else
				so->so_snd.sb_flags |= SB_AIO;
			aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
			ki->kaio_queue_count++;
			num_queue_count++;
			crit_exit();
			error = 0;
			goto done;
		}
		crit_exit();
	}

	if ((error = aio_qphysio(p, aiocbe)) == 0)
		goto done;
	if (error > 0) {
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = error;
		suword(&job->_aiocb_private.error, error);
		goto done;
	}

	/* No buffer for daemon I/O. */
	aiocbe->bp = NULL;

	ki->kaio_queue_count++;
	if (lj)
		lj->lioj_queue_count++;
	crit_enter();
	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
	crit_exit();
	aiocbe->jobstate = JOBST_JOBQGLOBAL;

	num_queue_count++;
	error = 0;

	/*
	 * If we don't have a free AIO process, and we are below our quota,
	 * then start one.  Otherwise, depend on the subsequent I/O completions
	 * to pick-up this job.  If we don't successfully create the new
	 * process (thread) due to resource issues, we return an error for now
	 * (EAGAIN), which is likely not the correct thing to do.
	 */
	crit_enter();
retryproc:
	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
		TAILQ_REMOVE(&aio_freeproc, aiop, list);
		TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
		aiop->aioprocflags &= ~AIOP_FREE;
		wakeup(aiop->aioproc);
	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
	    ((ki->kaio_active_count + num_aio_resv_start) <
	    ki->kaio_maxactive_count)) {
		num_aio_resv_start++;
		if ((error = aio_newproc()) == 0) {
			num_aio_resv_start--;
			goto retryproc;
		}
		num_aio_resv_start--;
	}
	crit_exit();
done:
	return error;
}

/*
 * This routine queues an AIO request, checking for quotas.
 */
static int
aio_aqueue(struct aiocb *job, int type)
{
	struct proc *p = curproc;
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if (num_queue_count >= max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
		return EAGAIN;

	return _aio_aqueue(job, NULL, type);
}
#endif /* VFS_AIO */

/*
 * Support the aio_return system call.  As a side effect, kernel resources
 * are released.
 */
int
aio_return(struct aio_return_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	long jobref;
	struct aiocblist *cb, *ncb;
	struct aiocb *ujob;
	struct kaioinfo *ki;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	ujob = uap->aiocbp;

	jobref = fuword(&ujob->_aiocb_private.kernelinfo);
	if (jobref == -1 || jobref == 0)
		return EINVAL;

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			if (ujob == cb->uuaiocb) {
				uap->sysmsg_result =
				    cb->uaiocb._aiocb_private.status;
			} else
				uap->sysmsg_result = EFAULT;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				p->p_stats->p_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				p->p_stats->p_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return 0;
		}
	}
	crit_enter();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
		ncb = TAILQ_NEXT(cb, plist);
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			crit_exit();
			if (ujob == cb->uuaiocb) {
				uap->sysmsg_result =
				    cb->uaiocb._aiocb_private.status;
			} else
				uap->sysmsg_result = EFAULT;
			aio_free_entry(cb);
			return 0;
		}
	}
	crit_exit();

	return (EINVAL);
#endif /* VFS_AIO */
}

/*
 * Allow a process to wakeup when any of the I/O requests are completed.
 */
int
aio_suspend(struct aio_suspend_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct timeval atv;
	struct timespec ts;
	struct aiocb *const *cbptr, *cbp;
	struct kaioinfo *ki;
	struct aiocblist *cb;
	int i;
	int njoblist;
	int error, s, timo;
	long *ijoblist;
	struct aiocb **ujoblist;

	if (uap->nent > AIO_LISTIO_MAX)
		return EINVAL;

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
			return error;

		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz_high(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	njoblist = 0;
	ijoblist = zalloc(aiol_zone);
	ujoblist = zalloc(aiol_zone);
	cbptr = uap->aiocbp;

	for (i = 0; i < uap->nent; i++) {
		cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (cbp == 0)
			continue;
		ujoblist[njoblist] = cbp;
		ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
		njoblist++;
	}

	if (njoblist == 0) {
		zfree(aiol_zone, ijoblist);
		zfree(aiol_zone, ujoblist);
		return 0;
	}

	error = 0;
	for (;;) {
		TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		crit_enter();
		for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb;
		    cb = TAILQ_NEXT(cb, plist)) {
			for (i = 0; i < njoblist; i++) {
				if (((intptr_t)
				    cb->uaiocb._aiocb_private.kernelinfo) ==
				    ijoblist[i]) {
					crit_exit();
					if (ujoblist[i] != cb->uuaiocb)
						error = EINVAL;
					zfree(aiol_zone, ijoblist);
					zfree(aiol_zone, ujoblist);
					return error;
				}
			}
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PCATCH, "aiospn", timo);
		crit_exit();

		if (error == ERESTART || error == EINTR) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EINTR;
		} else if (error == EWOULDBLOCK) {
			zfree(aiol_zone, ijoblist);
			zfree(aiol_zone, ujoblist);
			return EAGAIN;
		}
	}

	/* NOTREACHED */
	return EINVAL;
#endif /* VFS_AIO */
}

/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
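 *
 * The result reported through sysmsg_result is AIO_NOTCANCELED if a
 * matching request was already in progress and could not be cancelled
 * (raw-disk physio requests are reported this way without further
 * inspection), AIO_CANCELED if the matching requests were cancelled, and
 * AIO_ALLDONE if nothing was left to cancel.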
 */
int
aio_cancel(struct aio_cancel_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct kaioinfo *ki;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct filedesc *fdp;
	struct socket *so;
	struct proc *po;
	int error;
	int cancelled = 0;
	int notcancelled = 0;
	struct vnode *vp;

	fdp = p->p_fd;
	if ((u_int)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
		return (EBADF);

	if (fp->f_type == DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		if (vn_isdisk(vp, &error)) {
			uap->sysmsg_result = AIO_NOTCANCELED;
			return 0;
		}
	} else if (fp->f_type == DTYPE_SOCKET) {
		so = (struct socket *)fp->f_data;

		crit_enter();

		for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
			cbn = TAILQ_NEXT(cbe, list);
			if ((uap->aiocbp == NULL) ||
			    (uap->aiocbp == cbe->uuaiocb)) {
				po = cbe->userproc;
				ki = po->p_aioinfo;
				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
				TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
				    plist);
				if (ki->kaio_flags & KAIO_WAKEUP) {
					wakeup(po);
				}
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				cancelled++;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					psignal(cbe->userproc,
					    cbe->uaiocb.aio_sigevent.sigev_signo);
				if (uap->aiocbp)
					break;
			}
		}
		crit_exit();

		if ((cancelled) && (uap->aiocbp)) {
			uap->sysmsg_result = AIO_CANCELED;
			return 0;
		}
	}
	ki = p->p_aioinfo;
	if (ki == NULL)
		goto done;
	crit_enter();

	for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
		cbn = TAILQ_NEXT(cbe, plist);

		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
		    ((uap->aiocbp == NULL) ||
		    (uap->aiocbp == cbe->uuaiocb))) {

			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
				TAILQ_REMOVE(&aio_jobs, cbe, list);
				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
				    plist);
				cancelled++;
				ki->kaio_queue_finished_count++;
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					psignal(cbe->userproc,
					    cbe->uaiocb.aio_sigevent.sigev_signo);
			} else {
				notcancelled++;
			}
		}
	}
	crit_exit();
done:
	if (notcancelled) {
		uap->sysmsg_result = AIO_NOTCANCELED;
		return 0;
	}
	if (cancelled) {
		uap->sysmsg_result = AIO_CANCELED;
		return 0;
	}
	uap->sysmsg_result = AIO_ALLDONE;

	return 0;
#endif /* VFS_AIO */
}

/*
 * aio_error is implemented at the kernel level for compatibility purposes
 * only.  For a user mode async implementation, it would be best to do it
 * in a userland subroutine.
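 *
 * A typical userland consumer of this interface polls roughly as in the
 * following sketch (illustrative only, not part of this file):
 *
 *	const struct aiocb *list[1] = { &acb };
 *
 *	while (aio_error(&acb) == EINPROGRESS)
 *		aio_suspend(list, 1, NULL);
 *	if (aio_error(&acb) == 0)
 *		nbytes = aio_return(&acb);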
 */
int
aio_error(struct aio_error_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	long jobref;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
	if ((jobref == -1) || (jobref == 0))
		return EINVAL;

	TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	crit_enter();

	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			crit_exit();
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			crit_exit();
			return 0;
		}
	}
	crit_exit();

	crit_enter();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = cb->uaiocb._aiocb_private.error;
			crit_exit();
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb;
	    cb = TAILQ_NEXT(cb, plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			uap->sysmsg_result = EINPROGRESS;
			crit_exit();
			return 0;
		}
	}
	crit_exit();

#if (0)
	/*
	 * Hack for lio.
	 */
	status = fuword(&uap->aiocbp->_aiocb_private.status);
	if (status == -1)
		return fuword(&uap->aiocbp->_aiocb_private.error);
#endif
	return EINVAL;
#endif /* VFS_AIO */
}

/* syscall - asynchronous read from a file (REALTIME) */
int
aio_read(struct aio_read_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	return aio_aqueue(uap->aiocbp, LIO_READ);
#endif /* VFS_AIO */
}

/* syscall - asynchronous write to a file (REALTIME) */
int
aio_write(struct aio_write_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	return aio_aqueue(uap->aiocbp, LIO_WRITE);
#endif /* VFS_AIO */
}

/* syscall - XXX undocumented */
int
lio_listio(struct lio_listio_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	int nent, nentqueued;
	struct aiocb *iocb, * const *cbptr;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	struct aio_liojob *lj;
	int error, runningcode;
	int nerror;
	int i;

	if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
		return EINVAL;

	nent = uap->nent;
	if (nent > AIO_LISTIO_MAX)
		return EINVAL;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	if ((nent + num_queue_count) > max_queue_count)
		return EAGAIN;

	ki = p->p_aioinfo;
	if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
		return EAGAIN;

	lj = zalloc(aiolio_zone);
	if (!lj)
		return EAGAIN;

	lj->lioj_flags = 0;
	lj->lioj_buffer_count = 0;
	lj->lioj_buffer_finished_count = 0;
	lj->lioj_queue_count = 0;
	lj->lioj_queue_finished_count = 0;
	lj->lioj_ki = ki;

	/*
	 * Setup signal.
	 */
	if (uap->sig && (uap->mode == LIO_NOWAIT)) {
		error = copyin(uap->sig, &lj->lioj_signal,
		    sizeof(lj->lioj_signal));
		if (error) {
			zfree(aiolio_zone, lj);
			return error;
		}
		if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
			zfree(aiolio_zone, lj);
			return EINVAL;
		}
		lj->lioj_flags |= LIOJ_SIGNAL;
		lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
	} else
		lj->lioj_flags &= ~LIOJ_SIGNAL;

	TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
	/*
	 * Get pointers to the list of I/O requests.
	 */
	nerror = 0;
	nentqueued = 0;
	cbptr = uap->acb_list;
	for (i = 0; i < uap->nent; i++) {
		iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
		if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
			error = _aio_aqueue(iocb, lj, 0);
			if (error == 0)
				nentqueued++;
			else
				nerror++;
		}
	}

	/*
	 * If we haven't queued any, then just return error.
	 */
	if (nentqueued == 0)
		return 0;

	/*
	 * Calculate the appropriate error return.
	 */
	runningcode = 0;
	if (nerror)
		runningcode = EIO;

	if (uap->mode == LIO_WAIT) {
		int command, found, jobref;

		for (;;) {
			found = 0;
			for (i = 0; i < uap->nent; i++) {
				/*
				 * Fetch address of the control buf pointer
				 * in user space.
				 */
				iocb = (struct aiocb *)
				    (intptr_t)fuword(&cbptr[i]);
				if (((intptr_t)iocb == -1) ||
				    ((intptr_t)iocb == 0))
					continue;

				/*
				 * Fetch the associated command from user
				 * space.
				 */
				command = fuword(&iocb->aio_lio_opcode);
				if (command == LIO_NOP) {
					found++;
					continue;
				}

				jobref =
				    fuword(&iocb->_aiocb_private.kernelinfo);

				TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						if (cb->uaiocb.aio_lio_opcode
						    == LIO_WRITE) {
							p->p_stats->p_ru.ru_oublock +=
							    cb->outputcharge;
							cb->outputcharge = 0;
						} else if (cb->uaiocb.aio_lio_opcode
						    == LIO_READ) {
							p->p_stats->p_ru.ru_inblock +=
							    cb->inputcharge;
							cb->inputcharge = 0;
						}
						found++;
						break;
					}
				}

				crit_enter();
				TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) {
					if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
					    == jobref) {
						found++;
						break;
					}
				}
				crit_exit();
			}

			/*
			 * If all I/Os have been disposed of, then we can
			 * return.
			 */
			if (found == nentqueued)
				return runningcode;

			ki->kaio_flags |= KAIO_WAKEUP;
			error = tsleep(p, PCATCH, "aiospn", 0);

			if (error == EINTR)
				return EINTR;
			else if (error == EWOULDBLOCK)
				return EAGAIN;
		}
	}

	return runningcode;
#endif /* VFS_AIO */
}

#ifdef VFS_AIO
/*
 * This is a weird hack so that we can post a signal.  It is safe to do so
 * from a timeout routine, but *not* from an interrupt routine.
 */
static void
process_signal(void *aioj)
{
	struct aiocblist *aiocbe = aioj;
	struct aio_liojob *lj = aiocbe->lio;
	struct aiocb *cb = &aiocbe->uaiocb;

	if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
	    (lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
		psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
		lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
	}

	if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL)
		psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
}

/*
 * Interrupt handler for physio, performs the necessary process wakeups, and
 * signals.
 */
static void
aio_physwakeup(struct buf *bp)
{
	struct aiocblist *aiocbe;
	struct proc *p;
	struct kaioinfo *ki;
	struct aio_liojob *lj;

	wakeup(bp);

	aiocbe = (struct aiocblist *)bp->b_spc;
	if (aiocbe) {
		p = bp->b_caller1;

		aiocbe->jobstate = JOBST_JOBBFINISHED;
		aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
		aiocbe->uaiocb._aiocb_private.error = 0;
		aiocbe->jobflags |= AIOCBLIST_DONE;

		if (bp->b_flags & B_ERROR)
			aiocbe->uaiocb._aiocb_private.error = bp->b_error;

		lj = aiocbe->lio;
		if (lj) {
			lj->lioj_buffer_finished_count++;

			/*
			 * wakeup/signal if all of the interrupt jobs are done.
			 */
			if (lj->lioj_buffer_finished_count ==
			    lj->lioj_buffer_count) {
				/*
				 * Post a signal if it is called for.
				 */
				if ((lj->lioj_flags &
				    (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
				    LIOJ_SIGNAL) {
					lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
					callout_reset(&aiocbe->timeout, 0,
					    process_signal, aiocbe);
				}
			}
		}

		ki = p->p_aioinfo;
		if (ki) {
			ki->kaio_buffer_finished_count++;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);

			KNOTE(&aiocbe->klist, 0);
			/* Do the wakeup. */
			if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
				ki->kaio_flags &= ~KAIO_WAKEUP;
				wakeup(p);
			}
		}

		if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
			callout_reset(&aiocbe->timeout, 0,
			    process_signal, aiocbe);
		}
	}
}
#endif /* VFS_AIO */

/* syscall - wait for the next completion of an aio request */
int
aio_waitcomplete(struct aio_waitcomplete_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct proc *p = curproc;
	struct timeval atv;
	struct timespec ts;
	struct kaioinfo *ki;
	struct aiocblist *cb = NULL;
	int error, s, timo;

	suword(uap->aiocbp, (int)NULL);

	timo = 0;
	if (uap->timeout) {
		/* Get timespec struct. */
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return error;

		if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, &ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz_high(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EAGAIN;

	for (;;) {
		if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			uap->sysmsg_result = cb->uaiocb._aiocb_private.status;
			if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
				p->p_stats->p_ru.ru_oublock +=
				    cb->outputcharge;
				cb->outputcharge = 0;
			} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
				p->p_stats->p_ru.ru_inblock += cb->inputcharge;
				cb->inputcharge = 0;
			}
			aio_free_entry(cb);
			return cb->uaiocb._aiocb_private.error;
		}

		crit_enter();
		if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0) {
			crit_exit();
			suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
			uap->sysmsg_result = cb->uaiocb._aiocb_private.status;
			aio_free_entry(cb);
			return cb->uaiocb._aiocb_private.error;
		}

		ki->kaio_flags |= KAIO_WAKEUP;
		error = tsleep(p, PCATCH, "aiowc", timo);
		crit_exit();

		if (error == ERESTART)
			return EINTR;
		else if (error < 0)
			return error;
		else if (error == EINTR)
			return EINTR;
		else if (error == EWOULDBLOCK)
			return EAGAIN;
	}
#endif /* VFS_AIO */
}

#ifndef VFS_AIO
static int
filt_aioattach(struct knote *kn)
{

	return (ENXIO);
}

struct filterops aio_filtops =
	{ 0, filt_aioattach, NULL, NULL };

#else
/* kqueue attach function */
static int
filt_aioattach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	/*
	 * The aiocbe pointer must be validated before using it, so
	 * registration is restricted to the kernel; the user cannot
	 * set EV_FLAG1.
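	 *
	 * (The in-kernel registration path is _aio_aqueue() above, which
	 * builds the kevent with EV_ADD | EV_ENABLE | EV_FLAG1 before
	 * calling kqueue_register().)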
	 */
	if ((kn->kn_flags & EV_FLAG1) == 0)
		return (EPERM);
	kn->kn_flags &= ~EV_FLAG1;

	SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);

	return (0);
}

/* kqueue detach function */
static void
filt_aiodetach(struct knote *kn)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
}

/* kqueue filter function */
/*ARGSUSED*/
static int
filt_aio(struct knote *kn, long hint)
{
	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;

	kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
	if (aiocbe->jobstate != JOBST_JOBFINISHED &&
	    aiocbe->jobstate != JOBST_JOBBFINISHED)
		return (0);
	kn->kn_flags |= EV_EOF;
	return (1);
}

struct filterops aio_filtops =
	{ 0, filt_aioattach, filt_aiodetach, filt_aio };
#endif /* VFS_AIO */