Lines matching defs:job in FreeBSD's POSIX AIO implementation (sys/kern/vfs_aio.c)

269 	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
300 static TAILQ_HEAD(,kaiocb) aio_jobs; /* (c) Async job list */
306 static int aio_free_entry(struct kaiocb *job);
307 static void aio_process_rw(struct kaiocb *job);
308 static void aio_process_sync(struct kaiocb *job);
309 static void aio_process_mlock(struct kaiocb *job);
314 static int aio_queue_file(struct file *fp, struct kaiocb *job);
319 static int aio_qbio(struct proc *p, struct kaiocb *job);
321 static void aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
322 static bool aio_clear_cancel_function_locked(struct kaiocb *job);
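
Taken together, the declarations and prototypes above outline the life of a kernel AIO job (struct kaiocb): aio_aqueue() builds and queues it, an AIO daemon claims it from aio_jobs, one of the aio_process_*() handlers runs it, and the owner reaps it. For reference, a minimal userland program driving that same lifecycle through the POSIX calls these paths implement (the path is illustrative; error handling abbreviated):

#include <aio.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char buf[512];
	struct aiocb cb;
	int fd;

	if ((fd = open("/etc/motd", O_RDONLY)) < 0)
		err(1, "open");
	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_sigevent.sigev_notify = SIGEV_NONE;
	if (aio_read(&cb) != 0)			/* enters aio_aqueue() */
		err(1, "aio_read");
	while (aio_error(&cb) == EINPROGRESS)	/* polls _aiocb_private.error */
		usleep(1000);
	printf("read %zd bytes\n", aio_return(&cb));	/* reaps the kaiocb */
	close(fd);
	return (0);
}
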
478 * Free a job entry. Wait for completion if it is currently active, but don't
483 aio_free_entry(struct kaiocb *job)
489 p = job->userproc;
495 MPASS(job->jobflags & KAIOCB_FINISHED);
502 TAILQ_REMOVE(&ki->kaio_done, job, plist);
503 TAILQ_REMOVE(&ki->kaio_all, job, allist);
505 lj = job->lio;
521 /* job is going away, we need to destroy any knotes */
522 knlist_delete(&job->klist, curthread, 1);
524 sigqueue_take(&job->ksi);
533 * need a thread pointer from the process owning the job that is
538 * a kaiocb from the current process' job list either via a
549 if (job->fd_file)
550 fdrop(job->fd_file, curthread);
551 crfree(job->cred);
552 if (job->uiop != &job->uio)
553 freeuio(job->uiop);
554 uma_zfree(aiocb_zone, job);
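
Read in sequence, the fragments above give the teardown order for a finished job: unlink from both per-process lists, destroy the knotes while the job is still valid, retract any queued signal, then drop held references and free. A condensed paraphrase (not the verbatim function; locking and error paths elided):

static void
aio_free_entry_sketch(struct kaiocb *job)
{
	struct kaioinfo *ki = job->userproc->p_aioinfo;

	MPASS(job->jobflags & KAIOCB_FINISHED);	/* only finished jobs */

	TAILQ_REMOVE(&ki->kaio_done, job, plist);	/* unlink both lists */
	TAILQ_REMOVE(&ki->kaio_all, job, allist);

	knlist_delete(&job->klist, curthread, 1);	/* knotes first... */
	sigqueue_take(&job->ksi);			/* ...then the signal */

	if (job->fd_file)				/* drop held refs */
		fdrop(job->fd_file, curthread);
	crfree(job->cred);
	if (job->uiop != &job->uio)			/* separate uio? */
		freeuio(job->uiop);
	uma_zfree(aiocb_zone, job);			/* back to the zone */
}
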
568 aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
574 if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
576 MPASS((job->jobflags & KAIOCB_CANCELLING) == 0);
577 job->jobflags |= KAIOCB_CANCELLED;
579 func = job->cancel_fn;
582 * If there is no cancel routine, just leave the job marked as
583 * cancelled. The job should be in active use by a caller who
592 * completions of this job. This prevents the job from being
597 job->jobflags |= KAIOCB_CANCELLING;
599 func(job);
601 job->jobflags &= ~KAIOCB_CANCELLING;
602 if (job->jobflags & KAIOCB_FINISHED) {
603 cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
604 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
605 aio_bio_done_notify(p, job);
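
The flag choreography here is the heart of AIO cancellation: KAIOCB_CANCELLED records intent, KAIOCB_CANCELLING brackets the call into the job's cancel_fn so completions raised inside it are deferred, and only a job that finishes with ECANCELED counts as cancelled. A compact restatement (a sketch; locking and the exact return convention are assumptions):

static bool
aio_cancel_job_sketch(struct proc *p, struct kaioinfo *ki, struct kaiocb *job)
{
	aio_cancel_fn_t *func;
	bool cancelled;

	if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED))
		return (false);		/* nothing left to cancel */
	job->jobflags |= KAIOCB_CANCELLED;

	func = job->cancel_fn;
	if (func == NULL)		/* owner notices CANCELLED later */
		return (false);

	job->jobflags |= KAIOCB_CANCELLING;	/* defer completions */
	func(job);
	job->jobflags &= ~KAIOCB_CANCELLING;

	if ((job->jobflags & KAIOCB_FINISHED) == 0)
		return (false);
	cancelled = job->uaiocb._aiocb_private.error == ECANCELED;
	TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
	aio_bio_done_notify(p, job);
	return (cancelled);
}
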
626 struct kaiocb *job, *jobn;
643 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
644 aio_cancel_job(p, ki, job);
655 while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
656 aio_free_entry(job);
667 panic("LIO job not cleaned up: C:%d, FC:%d\n",
680 * Select a job to run (called by an AIO daemon).
685 struct kaiocb *job;
691 TAILQ_FOREACH(job, &aio_jobs, list) {
692 userp = job->userproc;
696 TAILQ_REMOVE(&aio_jobs, job, list);
697 if (!aio_clear_cancel_function(job))
705 return (job);
746 aio_process_rw(struct kaiocb *job)
758 KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
759 job->uaiocb.aio_lio_opcode == LIO_READV ||
760 job->uaiocb.aio_lio_opcode == LIO_WRITE ||
761 job->uaiocb.aio_lio_opcode == LIO_WRITEV,
762 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
764 aio_switch_vmspace(job);
767 td->td_ucred = job->cred;
768 job->uiop->uio_td = td;
769 fp = job->fd_file;
771 opcode = job->uaiocb.aio_lio_opcode;
772 cnt = job->uiop->uio_resid;
784 if (job->uiop->uio_resid == 0)
787 error = fo_read(fp, job->uiop, fp->f_cred,
788 (job->ioflags & KAIOCB_IO_FOFFSET) != 0 ? 0 :
793 error = fo_write(fp, job->uiop, fp->f_cred, (job->ioflags &
801 job->msgrcv = msgrcv_end - msgrcv_st;
802 job->msgsnd = msgsnd_end - msgsnd_st;
803 job->inblock = inblock_end - inblock_st;
804 job->outblock = oublock_end - oublock_st;
806 if (error != 0 && job->uiop->uio_resid != cnt) {
810 PROC_LOCK(job->userproc);
811 kern_psignal(job->userproc, SIGPIPE);
812 PROC_UNLOCK(job->userproc);
816 cnt -= job->uiop->uio_resid;
819 aio_complete(job, -1, error);
821 aio_complete(job, cnt, 0);
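
The arithmetic around lines 806-821 decides what userland eventually sees: the status handed to aio_complete() is how far uio_resid shrank, and a transfer interrupted after moving some data reports the data rather than the error (EPIPE additionally raises SIGPIPE, as the fragment shows). A small standalone model of that accounting (the exact set of forgiven errors is simplified here):

#include <errno.h>
#include <stdio.h>

static void
complete_rw(long cnt, long resid, int error)	/* cnt = resid at start */
{
	if (error != 0 && resid != cnt) {
		/* Partial progress: soft errors are forgiven; the real
		 * code also signals SIGPIPE for EPIPE at this point. */
		if (error == EINTR || error == EWOULDBLOCK)
			error = 0;
	}
	cnt -= resid;		/* bytes actually transferred */
	if (error != 0)
		printf("aio_complete(job, -1, %d)\n", error);
	else
		printf("aio_complete(job, %ld, 0)\n", cnt);
}

int
main(void)
{
	complete_rw(4096, 0, 0);	/* full transfer */
	complete_rw(4096, 1024, EINTR);	/* interrupted after 3072 bytes */
	return (0);
}
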
825 aio_process_sync(struct kaiocb *job)
829 struct file *fp = job->fd_file;
832 KASSERT(job->uaiocb.aio_lio_opcode & LIO_SYNC,
833 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
835 td->td_ucred = job->cred;
838 job->uaiocb.aio_lio_opcode);
842 aio_complete(job, -1, error);
844 aio_complete(job, 0, 0);
848 aio_process_mlock(struct kaiocb *job)
850 struct aiocb *cb = &job->uaiocb;
853 KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
854 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
856 aio_switch_vmspace(job);
857 error = kern_mlock(job->userproc, job->cred,
859 aio_complete(job, error != 0 ? -1 : 0, error);
863 aio_bio_done_notify(struct proc *userp, struct kaiocb *job)
873 lj = job->lio;
880 TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
881 MPASS(job->jobflags & KAIOCB_FINISHED);
886 if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
887 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
888 aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi, true);
890 KNOTE_LOCKED(&job->klist, 1);
908 if (job->jobflags & KAIOCB_CHECKSYNC) {
911 if (job->fd_file != sjob->fd_file ||
912 job->seqno >= sjob->seqno)
936 struct kaiocb *job;
941 job = TAILQ_FIRST(&ki->kaio_syncready);
942 TAILQ_REMOVE(&ki->kaio_syncready, job, list);
944 aio_schedule(job, aio_process_sync);
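
The KAIOCB_CHECKSYNC scan at lines 908-912 is the completion side of the fsync barrier: a finishing job can only unblock LIO_SYNC jobs on the same file that were queued after it, which is what the seqno comparison enforces, and ready sync jobs are drained through kaio_syncready (lines 936-944). Roughly (a paraphrase; the pending-count bookkeeping is inferred from lines 1797-1807):

if (job->jobflags & KAIOCB_CHECKSYNC) {
	TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
		if (job->fd_file != sjob->fd_file ||
		    job->seqno >= sjob->seqno)
			continue;	/* other file, or queued earlier */
		if (--sjob->pending == 0) {
			/* last dependency done: hand the sync job to
			 * the kaio_syncready drain shown above */
			TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list);
			TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list);
		}
	}
}
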
951 aio_cancel_cleared(struct kaiocb *job)
960 return ((job->jobflags & KAIOCB_CLEARED) != 0);
964 aio_clear_cancel_function_locked(struct kaiocb *job)
967 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
968 MPASS(job->cancel_fn != NULL);
969 if (job->jobflags & KAIOCB_CANCELLING) {
970 job->jobflags |= KAIOCB_CLEARED;
973 job->cancel_fn = NULL;
978 aio_clear_cancel_function(struct kaiocb *job)
983 ki = job->userproc->p_aioinfo;
985 ret = aio_clear_cancel_function_locked(job);
991 aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func)
994 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
995 if (job->jobflags & KAIOCB_CANCELLED)
997 job->cancel_fn = func;
1002 aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
1007 ki = job->userproc->p_aioinfo;
1009 ret = aio_set_cancel_function_locked(job, func);
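
These four helpers define the ownership contract every queue in this file follows: install a cancel function before parking a job, clear it (successfully) before taking the job back, and let the cancel routine ask aio_cancel_cleared() whether the job already left the queue. The pattern, with a hypothetical queue and handler for illustration (compare aio_cancel_daemon_job() and aio_cancel_sync() below):

/* Queueing side: if the job was cancelled before we could attach a
 * cancel routine, we must cancel it ourselves. */
if (!aio_set_cancel_function(job, my_cancel_fn))
	aio_cancel(job);
else
	TAILQ_INSERT_TAIL(&my_queue, job, list);	/* hypothetical */

/* Cancel side: KAIOCB_CLEARED means the dequeuer already won the
 * race, so only unlink the job if it is still on our queue. */
static void
my_cancel_fn(struct kaiocb *job)
{
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&my_queue, job, list);
	aio_cancel(job);
}
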
1015 aio_complete(struct kaiocb *job, long status, int error)
1020 job->uaiocb._aiocb_private.error = error;
1021 job->uaiocb._aiocb_private.status = status;
1023 userp = job->userproc;
1027 KASSERT(!(job->jobflags & KAIOCB_FINISHED),
1029 job->jobflags |= KAIOCB_FINISHED;
1030 if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) {
1031 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
1032 aio_bio_done_notify(userp, job);
1038 aio_cancel(struct kaiocb *job)
1041 aio_complete(job, -1, ECANCELED);
1045 aio_switch_vmspace(struct kaiocb *job)
1048 vmspace_switch_aio(job->userproc->p_vmspace);
1058 struct kaiocb *job;
1103 while ((job = aio_selectjob(aiop)) != NULL) {
1106 ki = job->userproc->p_aioinfo;
1107 job->handle_fn(job);
1110 /* Decrement the active job count. */
1123 * no job can be selected.
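
The daemon body reduces to a claim-and-run loop: aio_selectjob() pops the first eligible entry off aio_jobs (the userp check at lines 691-697 skips jobs whose owning process cannot take more work, and jobs whose cancel function cannot be cleared), and the handler stored by aio_schedule() does the rest. In outline (a paraphrase; declarations elided):

for (;;) {
	while ((job = aio_selectjob(aiop)) != NULL) {
		/* handle_fn is aio_process_rw/_sync/_mlock; the rw and
		 * mlock handlers switch into the owner's vmspace first */
		job->handle_fn(job);
	}
	/* nothing selectable: drop the active job count and sleep
	 * until kicked, or time out and let the daemon exit */
}
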
1198 aio_qbio(struct proc *p, struct kaiocb *job)
1213 cb = &job->uaiocb;
1214 fp = job->fd_file;
1230 iovcnt = job->uiop->uio_iovcnt;
1234 if (job->uiop->uio_iov[i].iov_len % vp->v_bufobj.bo_bsize != 0)
1236 if (job->uiop->uio_iov[i].iov_len > maxphys) {
1252 if (job->uiop->uio_resid > dev->si_iosize_max) {
1258 job->error = 0;
1273 refcount_init(&job->nbio, iovcnt);
1281 buf = job->uiop->uio_iov[i].iov_base;
1282 nbytes = job->uiop->uio_iov[i].iov_len;
1304 bp->bio_caller1 = job;
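
aio_qbio() is the direct-to-device path: it bypasses the daemons and maps each iovec onto its own struct bio, with bio_caller1 pointing back at the job. The admission rules visible above, restated (a paraphrase; reading a negative return as "fall back to the generic queueing path" is an assumption about this file's convention):

for (i = 0; i < job->uiop->uio_iovcnt; i++) {
	/* every iovec must be a whole number of device blocks... */
	if (job->uiop->uio_iov[i].iov_len % vp->v_bufobj.bo_bsize != 0)
		return (-1);
	/* ...and small enough for a single physical transfer */
	if (job->uiop->uio_iov[i].iov_len > maxphys)
		return (-1);
}
if (job->uiop->uio_resid > dev->si_iosize_max)
	return (-1);

job->error = 0;
refcount_init(&job->nbio, iovcnt);	/* one bio per iovec; the last
					 * release fires in aio_biowakeup() */
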
1487 struct kaiocb *job;
1509 job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
1510 knlist_init_mtx(&job->klist, AIO_MTX(ki));
1512 error = ops->aio_copyin(ujob, job, type);
1516 if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
1521 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
1522 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
1523 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
1524 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
1529 if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
1530 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
1531 !_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
1538 switch (job->uaiocb.aio_lio_opcode & ~LIO_FOFFSET) {
1544 opcode = job->uaiocb.aio_lio_opcode & ~LIO_FOFFSET;
1545 if ((job->uaiocb.aio_lio_opcode & LIO_FOFFSET) != 0)
1546 job->ioflags |= KAIOCB_IO_FOFFSET;
1553 opcode = job->uaiocb.aio_lio_opcode = type;
1555 ksiginfo_init(&job->ksi);
1557 /* Save userspace address of the job info. */
1558 job->ujob = ujob;
1568 fd = job->uaiocb.aio_fildes;
1600 job->uaiocb.aio_offset < 0 &&
1611 job->fd_file = fp;
1614 job->seqno = jobseqno++;
1618 MPASS(job->uiop == &job->uio || job->uiop == NULL);
1619 uma_zfree(aiocb_zone, job);
1623 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
1625 evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
1630 kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
1632 kev.ident = (uintptr_t)job->ujob;
1635 kev.data = (intptr_t)job;
1636 kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
1644 job->uaiocb._aiocb_private.error = EINPROGRESS;
1645 job->userproc = p;
1646 job->cred = crhold(td->td_ucred);
1647 job->jobflags = KAIOCB_QUEUEING;
1648 job->lio = lj;
1652 MPASS(job->uiop != &job->uio && job->uiop != NULL);
1655 job->iov[0].iov_base = (void *)(uintptr_t)job->uaiocb.aio_buf;
1656 job->iov[0].iov_len = job->uaiocb.aio_nbytes;
1657 job->uio.uio_iov = job->iov;
1658 job->uio.uio_iovcnt = 1;
1659 job->uio.uio_resid = job->uaiocb.aio_nbytes;
1660 job->uio.uio_segflg = UIO_USERSPACE;
1661 job->uiop = &job->uio;
1665 job->uiop->uio_rw = UIO_READ;
1668 job->uiop->uio_rw = UIO_WRITE;
1671 job->uiop->uio_offset = job->uaiocb.aio_offset;
1672 job->uiop->uio_td = td;
1675 aio_schedule(job, aio_process_mlock);
1678 error = aio_queue_file(fp, job);
1680 error = fo_aio_queue(fp, job);
1685 job->jobflags &= ~KAIOCB_QUEUEING;
1686 TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
1691 if (job->jobflags & KAIOCB_FINISHED) {
1697 aio_bio_done_notify(p, job);
1699 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
1704 crfree(job->cred);
1708 knlist_delete(&job->klist, curthread, 0);
1710 if (job->uiop != &job->uio)
1711 freeuio(job->uiop);
1712 uma_zfree(aiocb_zone, job);
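
Lines 1623-1636 are where a SIGEV_KEVENT request is wired to a kqueue: the kevent's ident is the userland aiocb pointer (job->ujob) and udata carries the caller's sival_ptr. A small complete userland example of that notification path on FreeBSD (illustrative path, abbreviated error handling):

#include <sys/types.h>
#include <sys/event.h>
#include <aio.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char buf[512];
	struct aiocb cb;
	struct kevent ev;
	int fd, kq;

	if ((fd = open("/etc/motd", O_RDONLY)) < 0)
		err(1, "open");
	if ((kq = kqueue()) < 0)
		err(1, "kqueue");

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
	cb.aio_sigevent.sigev_notify_kqueue = kq;
	cb.aio_sigevent.sigev_value.sival_ptr = &cb;

	if (aio_read(&cb) != 0)
		err(1, "aio_read");
	/* ev.ident is the aiocb pointer and ev.udata is sival_ptr,
	 * exactly as aio_aqueue() filled them in (lines 1632-1636). */
	if (kevent(kq, NULL, 0, &ev, 1, NULL) != 1)
		err(1, "kevent");
	printf("done: %zd bytes\n", aio_return((struct aiocb *)ev.udata));
	close(kq);
	close(fd);
	return (0);
}
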
1719 aio_cancel_daemon_job(struct kaiocb *job)
1723 if (!aio_cancel_cleared(job))
1724 TAILQ_REMOVE(&aio_jobs, job, list);
1726 aio_cancel(job);
1730 aio_schedule(struct kaiocb *job, aio_handle_fn_t *func)
1734 if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) {
1736 aio_cancel(job);
1739 job->handle_fn = func;
1740 TAILQ_INSERT_TAIL(&aio_jobs, job, list);
1741 aio_kick_nowait(job->userproc);
1746 aio_cancel_sync(struct kaiocb *job)
1750 ki = job->userproc->p_aioinfo;
1752 if (!aio_cancel_cleared(job))
1753 TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
1755 aio_cancel(job);
1759 aio_queue_file(struct file *fp, struct kaiocb *job)
1768 ki = job->userproc->p_aioinfo;
1769 error = aio_qbio(job->userproc, job);
1787 if (job->uaiocb.aio_lio_opcode & (LIO_WRITE | LIO_READ)) {
1788 aio_schedule(job, aio_process_rw);
1790 } else if (job->uaiocb.aio_lio_opcode & LIO_SYNC) {
1793 if (job2->fd_file == job->fd_file &&
1795 job2->seqno < job->seqno) {
1797 job->pending++;
1800 if (job->pending != 0) {
1801 if (!aio_set_cancel_function_locked(job,
1804 aio_cancel(job);
1807 TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
1812 aio_schedule(job, aio_process_sync);
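
This is the enqueue side of the barrier matching the KAIOCB_CHECKSYNC scan at lines 908-912: an aio_fsync() job counts every earlier job on the same file and only runs once that count drains to zero. Slightly condensed (a paraphrase; one condition on job2 at line 1794 is elided in the listing):

if (job->uaiocb.aio_lio_opcode & LIO_SYNC) {
	TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
		if (job2->fd_file == job->fd_file &&
		    job2->seqno < job->seqno)
			job->pending++;		/* one barrier per writer */
	}
	if (job->pending != 0) {
		if (!aio_set_cancel_function_locked(job, aio_cancel_sync))
			aio_cancel(job);	/* lost to a cancel */
		else
			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
	} else
		aio_schedule(job, aio_process_sync);
}
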
1887 struct kaiocb *job;
1895 TAILQ_FOREACH(job, &ki->kaio_done, plist) {
1896 if (job->ujob == ujob)
1899 if (job != NULL) {
1900 MPASS(job->jobflags & KAIOCB_FINISHED);
1901 status = job->uaiocb._aiocb_private.status;
1902 error = job->uaiocb._aiocb_private.error;
1904 td->td_ru.ru_oublock += job->outblock;
1905 td->td_ru.ru_inblock += job->inblock;
1906 td->td_ru.ru_msgsnd += job->msgsnd;
1907 td->td_ru.ru_msgrcv += job->msgrcv;
1908 aio_free_entry(job);
1936 struct kaiocb *firstjob, *job;
1961 TAILQ_FOREACH(job, &ki->kaio_all, allist) {
1963 if (job->ujob == ujoblist[i]) {
1965 firstjob = job;
1966 if (job->jobflags & KAIOCB_FINISHED)
2022 struct kaiocb *job, *jobn;
2048 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
2049 if ((uap->fd == job->uaiocb.aio_fildes) &&
2051 (uap->aiocbp == job->ujob))) {
2052 if (aio_cancel_job(p, ki, job)) {
2097 struct kaiocb *job;
2108 TAILQ_FOREACH(job, &ki->kaio_all, allist) {
2109 if (job->ujob == ujob) {
2110 if (job->jobflags & KAIOCB_FINISHED)
2112 job->uaiocb._aiocb_private.error;
2204 struct aiocb *job;
2284 job = acb_list[i];
2285 if (job != NULL) {
2286 error = aio_aqueue(td, job, lj, LIO_NOP, ops);
2416 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
2427 ki = job->userproc->p_aioinfo;
2443 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
2451 opcode = job->uaiocb.aio_lio_opcode;
2456 atomic_add_acq_long(&job->nbytes, nbytes);
2460 * If multiple bios experienced an error, the job will reflect the
2464 atomic_store_int(&job->error, bio_error);
2466 atomic_add_int(&job->outblock, nblks);
2468 atomic_add_int(&job->inblock, nblks);
2470 if (refcount_release(&job->nbio)) {
2471 bio_error = atomic_load_int(&job->error);
2473 aio_complete(job, -1, bio_error);
2475 aio_complete(job, atomic_load_long(&job->nbytes), 0);
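
aio_biowakeup() shows how a job split across several bios (one per iovec, per aio_qbio() above) is reassembled: byte counts accumulate atomically, any bio error is recorded, and only the thread that releases the last nbio reference reports the job, with the error taking precedence over the byte count. The visible fragments, reassembled as a paraphrase:

atomic_add_acq_long(&job->nbytes, nbytes);	/* this bio's payload */
if (bio_error != 0)
	atomic_store_int(&job->error, bio_error);	/* keep one error */
if (opcode & LIO_WRITE)				/* charge block stats */
	atomic_add_int(&job->outblock, nblks);
else
	atomic_add_int(&job->inblock, nblks);

if (refcount_release(&job->nbio)) {		/* last bio in flight */
	bio_error = atomic_load_int(&job->error);
	if (bio_error != 0)
		aio_complete(job, -1, bio_error);
	else
		aio_complete(job, atomic_load_long(&job->nbytes), 0);
}
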
2487 struct kaiocb *job;
2513 job = NULL;
2515 while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
2529 if (job != NULL) {
2530 MPASS(job->jobflags & KAIOCB_FINISHED);
2531 ujob = job->ujob;
2532 status = job->uaiocb._aiocb_private.status;
2533 error = job->uaiocb._aiocb_private.error;
2535 td->td_ru.ru_oublock += job->outblock;
2536 td->td_ru.ru_inblock += job->inblock;
2537 td->td_ru.ru_msgsnd += job->msgsnd;
2538 td->td_ru.ru_msgrcv += job->msgrcv;
2539 aio_free_entry(job);
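
This loop blocks on kaio_done and reaps in one step, charging the job's I/O statistics to the reaping thread just as the aio_return() path does at lines 1904-1908. FreeBSD exposes it directly as aio_waitcomplete(2); a minimal user of it (illustrative path, abbreviated error handling):

#include <aio.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char buf[512];
	struct aiocb cb, *done;
	ssize_t n;
	int fd;

	if ((fd = open("/etc/motd", O_RDONLY)) < 0)
		err(1, "open");
	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_sigevent.sigev_notify = SIGEV_NONE;
	if (aio_read(&cb) != 0)
		err(1, "aio_read");
	/* Blocks until kaio_done is non-empty, then returns the byte
	 * count and frees the kernel job in a single call. */
	if ((n = aio_waitcomplete(&done, NULL)) < 0)
		err(1, "aio_waitcomplete");
	printf("%zd bytes via aiocb %p\n", n, (void *)done);
	close(fd);
	return (0);
}
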
2599 struct kaiocb *job;
2601 job = (struct kaiocb *)(uintptr_t)kn->kn_sdata;
2604 * The job pointer must be validated before using it, so
2610 kn->kn_ptr.p_aio = job;
2613 knlist_add(&job->klist, kn, 0);
2636 struct kaiocb *job = kn->kn_ptr.p_aio;
2638 kn->kn_data = job->uaiocb._aiocb_private.error;
2639 if (!(job->jobflags & KAIOCB_FINISHED))