Lines matching defs:task in ld_sdmmc.c (the NetBSD sdmmc attachment of the ld(4) logical-disk driver)
71 #define LD_SDMMC_MAXTASKCNT 8 /* number of tasks in task pool */
76 struct sdmmc_task task;
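These matches are the task-pool plumbing: a fixed pool of LD_SDMMC_MAXTASKCNT tasks, each embedding a struct sdmmc_task (line 76). For orientation, a minimal sketch of the structure the later matches imply; the member layout is reconstructed from the fields referenced below (task_entry, task_bp, task_errorp, task_restart_ch, task_sc, task_retries, task_poll) and is not a copy of the real declaration. This and the later sketches assume the usual kernel context (<sys/queue.h>, <sys/callout.h>, <dev/ldvar.h>, <dev/sdmmc/sdmmcvar.h>).

    /* Sketch only: member order and any omitted fields are guesses. */
    struct ld_sdmmc_task {
            struct sdmmc_task task;                 /* embedded sdmmc task (line 76) */
            struct ld_sdmmc_softc *task_sc;         /* back pointer to the softc */
            struct buf *task_bp;                    /* bio being served, or NULL */
            int task_retries;                       /* retries so far for this bio */
            int *task_errorp;                       /* result slot for sync ops, or NULL */
            bool task_poll;                         /* polled (non-sleeping) cachesync */
            struct callout task_restart_ch;         /* delayed-retry callout */
            TAILQ_ENTRY(ld_sdmmc_task) task_entry;  /* sc_freeq/sc_xferq linkage */
    };
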
130 struct ld_sdmmc_task *task;
134 if (sc->sc_dying || (task = TAILQ_FIRST(&sc->sc_freeq)) == NULL)
136 TAILQ_REMOVE(&sc->sc_freeq, task, task_entry);
137 TAILQ_INSERT_TAIL(&sc->sc_xferq, task, task_entry);
138 KASSERT(task->task_bp == NULL);
139 KASSERT(task->task_errorp == NULL);
141 return task;
145 ld_sdmmc_task_put(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
150 TAILQ_REMOVE(&sc->sc_xferq, task, task_entry);
151 TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
152 task->task_bp = NULL;
153 task->task_errorp = NULL;
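Lines 130-153 are the free-list accessors. A sketch of how they plausibly fit together, assuming sc_lock is held by the caller (the bare TAILQ operations and KASSERTs suggest as much, but that is an inference):

    static struct ld_sdmmc_task *
    ld_sdmmc_task_get(struct ld_sdmmc_softc *sc)
    {
            struct ld_sdmmc_task *task;

            /* Refuse new work while detaching, or when the pool is empty. */
            if (sc->sc_dying || (task = TAILQ_FIRST(&sc->sc_freeq)) == NULL)
                    return NULL;

            /* Move the task from the free list to the in-flight list. */
            TAILQ_REMOVE(&sc->sc_freeq, task, task_entry);
            TAILQ_INSERT_TAIL(&sc->sc_xferq, task, task_entry);
            KASSERT(task->task_bp == NULL);
            KASSERT(task->task_errorp == NULL);

            return task;
    }

    static void
    ld_sdmmc_task_put(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
    {
            /* Return the task to the free list and clear its associations. */
            TAILQ_REMOVE(&sc->sc_xferq, task, task_entry);
            TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
            task->task_bp = NULL;
            task->task_errorp = NULL;
    }
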
157 ld_sdmmc_task_cancel(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
166 * Either the callout or the task may be pending, but not both.
169 if (callout_pending(&task->task_restart_ch) ||
170 callout_invoking(&task->task_restart_ch)) {
178 callout_halt(&task->task_restart_ch, &sc->sc_lock);
183 * task is either pending or running. If the task is
187 callout_halt(&task->task_restart_ch, &sc->sc_lock);
188 if (!sdmmc_del_task(sc->sc_sf->sc, &task->task, &sc->sc_lock))
189 return; /* task already started, let it clean up */
197 bp = task->task_bp;
198 errorp = task->task_errorp;
199 ld_sdmmc_task_put(sc, task);
202 * If the task was for an asynchronous I/O xfer, fail the I/O
221 * If the task was for a synchronous operation (cachesync),
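Lines 157-221 are the tricky part: cancelling a task that may currently be owned by the restart callout, by the sdmmc task queue, or by neither. A hedged sketch of the control flow these fragments outline; the comment bodies elided by the search are paraphrased, and the completion details (ENXIO, the lock dance around lddone(), the condvar) are assumptions:

    static void
    ld_sdmmc_task_cancel(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
    {
            struct buf *bp;
            int *errorp;

            /* Either the callout or the task may be pending, but not both. */
            if (callout_pending(&task->task_restart_ch) ||
                callout_invoking(&task->task_restart_ch)) {
                    /* The callout owns the task: stop it before it requeues. */
                    callout_halt(&task->task_restart_ch, &sc->sc_lock);
            } else {
                    /* The task may be on the sdmmc queue or already running. */
                    callout_halt(&task->task_restart_ch, &sc->sc_lock);
                    if (!sdmmc_del_task(sc->sc_sf->sc, &task->task,
                        &sc->sc_lock))
                            return; /* task already started, let it clean up */
            }

            /* Dissociate the task and fail whatever was waiting on it. */
            bp = task->task_bp;
            errorp = task->task_errorp;
            ld_sdmmc_task_put(sc, task);

            if (bp != NULL) {
                    /* Async I/O xfer: fail the buf (details assumed). */
                    bp->b_error = ENXIO;
                    bp->b_resid = bp->b_bcount;
                    mutex_exit(&sc->sc_lock);
                    lddone(&sc->sc_ld, bp);         /* sc_ld embed assumed */
                    mutex_enter(&sc->sc_lock);
            } else if (errorp != NULL) {
                    /* Sync op (cachesync): report and wake the waiter. */
                    *errorp = ENXIO;
                    cv_broadcast(&sc->sc_cv);       /* sc_cv is hypothetical */
            }
    }
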
248 struct ld_sdmmc_task *task;
283 task = &sc->sc_task[i];
284 task->task_sc = sc;
285 callout_init(&task->task_restart_ch, CALLOUT_MPSAFE);
286 TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
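Lines 283-286 initialize the pool at attach time. The enclosing loop is not among the matches, but presumably iterates over the static task array:

    /* Populate the free list from the static pool (loop header assumed). */
    for (i = 0; i < LD_SDMMC_MAXTASKCNT; i++) {
            struct ld_sdmmc_task *task = &sc->sc_task[i];

            task->task_sc = sc;
            /* MPSAFE: the retry callout runs without the kernel lock. */
            callout_init(&task->task_restart_ch, CALLOUT_MPSAFE);
            TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
    }
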
357 struct ld_sdmmc_task *task;
375 while ((task = TAILQ_FIRST(&sc->sc_xferq)) != NULL)
376 ld_sdmmc_task_cancel(sc, task);
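Lines 375-376 drain the in-flight queue on detach. For this loop to terminate, no new task may enter sc_xferq while it runs, so presumably sc_dying is already set and sc_lock is held (both assumptions):

    mutex_enter(&sc->sc_lock);
    sc->sc_dying = true;    /* blocks ld_sdmmc_task_get() from here on */
    while ((task = TAILQ_FIRST(&sc->sc_xferq)) != NULL)
            ld_sdmmc_task_cancel(sc, task);
    mutex_exit(&sc->sc_lock);
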
404 struct ld_sdmmc_task *task;
408 if ((task = ld_sdmmc_task_get(sc)) == NULL) {
413 task->task_bp = bp;
414 task->task_retries = 0;
415 sdmmc_init_task(&task->task, ld_sdmmc_dobio, task);
417 sdmmc_add_task(sc->sc_sf->sc, &task->task);
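Lines 404-417 are the strategy path: grab a task, bind the buf to it, and queue it for the sdmmc thread. A sketch; the container_of() step and the EAGAIN return on pool exhaustion are assumptions (the matches do not show the failure path's return value):

    static int
    ld_sdmmc_start(struct ld_softc *ld, struct buf *bp)
    {
            struct ld_sdmmc_softc *sc =
                container_of(ld, struct ld_sdmmc_softc, sc_ld);
            struct ld_sdmmc_task *task;

            mutex_enter(&sc->sc_lock);
            if ((task = ld_sdmmc_task_get(sc)) == NULL) {
                    /* Pool exhausted: ask ld(4) to retry the bio later. */
                    mutex_exit(&sc->sc_lock);
                    return EAGAIN;
            }
            task->task_bp = bp;
            task->task_retries = 0;
            sdmmc_init_task(&task->task, ld_sdmmc_dobio, task);
            sdmmc_add_task(sc->sc_sf->sc, &task->task);
            mutex_exit(&sc->sc_lock);

            return 0;
    }
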
429 struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
430 struct ld_sdmmc_softc *sc = task->task_sc;
431 struct buf *bp = task->task_bp;
436 callout_ack(&task->task_restart_ch);
438 sdmmc_add_task(sc->sc_sf->sc, &task->task);
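Lines 429-438 are the retry callout. After the recovery delay it acknowledges the callout (so the pending/invoking test in ld_sdmmc_task_cancel() resolves correctly) and requeues the same task. Sketch; the lock and the b_resid reset are assumptions:

    static void
    ld_sdmmc_restart(void *arg)
    {
            struct ld_sdmmc_task *task = arg;
            struct ld_sdmmc_softc *sc = task->task_sc;
            struct buf *bp = task->task_bp;

            mutex_enter(&sc->sc_lock);
            /* Rewind the bio's progress before retrying it (assumed). */
            bp->b_resid = bp->b_bcount;
            /* Mark the callout as having run, then resubmit the task. */
            callout_ack(&task->task_restart_ch);
            sdmmc_add_task(sc->sc_sf->sc, &task->task);
            mutex_exit(&sc->sc_lock);
    }
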
445 struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
446 struct ld_sdmmc_softc *sc = task->task_sc;
447 struct buf *bp = task->task_bp;
476 if (task->task_retries < LD_SDMMC_IORETRIES) {
483 task->task_retries++;
490 callout_reset(&task->task_restart_ch,
491 RECOVERYTIME, ld_sdmmc_restart, task);
503 /* Dissociate the task from the I/O xfer and release it. */
506 ld_sdmmc_task_put(sc, task);
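Lines 445-506 are the I/O worker. On failure it retries up to LD_SDMMC_IORETRIES times, spacing the retries RECOVERYTIME apart via the restart callout; otherwise it completes the buf and releases the task. A condensed sketch; the actual transfer (between lines 447 and 476) is not among the matches and is reduced to a placeholder here:

    static void
    ld_sdmmc_dobio(void *arg)
    {
            struct ld_sdmmc_task *task = arg;
            struct ld_sdmmc_softc *sc = task->task_sc;
            struct buf *bp = task->task_bp;
            int error;

            /*
             * Do the transfer -- presumably sdmmc_mem_read_block() or
             * sdmmc_mem_write_block() depending on bp->b_flags & B_READ;
             * elided because the search shows only the task handling.
             */
            error = 0;      /* placeholder for the transfer result */

            if (error != 0 && task->task_retries < LD_SDMMC_IORETRIES) {
                    /* Transient failure: back off, then retry this bio. */
                    mutex_enter(&sc->sc_lock);
                    task->task_retries++;
                    callout_reset(&task->task_restart_ch,
                        RECOVERYTIME, ld_sdmmc_restart, task);
                    mutex_exit(&sc->sc_lock);
                    return;
            }

            /* Dissociate the task from the I/O xfer and release it. */
            bp->b_error = error;
            bp->b_resid = (error != 0) ? bp->b_bcount : 0;
            mutex_enter(&sc->sc_lock);
            ld_sdmmc_task_put(sc, task);
            mutex_exit(&sc->sc_lock);
            lddone(&sc->sc_ld, bp);         /* sc_ld embed assumed */
    }
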
524 struct ld_sdmmc_task *task = arg;
525 struct ld_sdmmc_softc *sc = task->task_sc;
526 struct buf *bp = task->task_bp;
537 /* Count error or success and release the task. */
543 ld_sdmmc_task_put(sc, task);
556 struct ld_sdmmc_task *task;
561 /* Acquire a free task, or drop the request altogether. */
562 if ((task = ld_sdmmc_task_get(sc)) == NULL) {
568 /* Set up the task and schedule it. */
569 task->task_bp = bp;
570 sdmmc_init_task(&task->task, ld_sdmmc_dodiscard, task);
572 sdmmc_add_task(sc->sc_sf->sc, &task->task);
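Lines 524-572 are the discard pair: ld_sdmmc_discard() (556-572) allocates a task and schedules ld_sdmmc_dodiscard() (524-543), which performs the erase, records the result, and frees the task. Sketch; the erase call itself and the EBUSY failure value are assumptions:

    static int
    ld_sdmmc_discard(struct ld_softc *ld, struct buf *bp)
    {
            struct ld_sdmmc_softc *sc =
                container_of(ld, struct ld_sdmmc_softc, sc_ld);
            struct ld_sdmmc_task *task;

            mutex_enter(&sc->sc_lock);
            /* Acquire a free task, or drop the request altogether. */
            if ((task = ld_sdmmc_task_get(sc)) == NULL) {
                    mutex_exit(&sc->sc_lock);
                    return EBUSY;   /* assumed, by analogy with cachesync */
            }
            /* Set up the task and schedule it. */
            task->task_bp = bp;
            sdmmc_init_task(&task->task, ld_sdmmc_dodiscard, task);
            sdmmc_add_task(sc->sc_sf->sc, &task->task);
            mutex_exit(&sc->sc_lock);

            return 0;
    }

    static void
    ld_sdmmc_dodiscard(void *arg)
    {
            struct ld_sdmmc_task *task = arg;
            struct ld_sdmmc_softc *sc = task->task_sc;
            struct buf *bp = task->task_bp;
            int error;

            /* Issue the erase for bp's block range (helper call elided). */
            error = 0;      /* placeholder for the discard result */

            /* Count error or success and release the task. */
            bp->b_error = error;
            mutex_enter(&sc->sc_lock);
            ld_sdmmc_task_put(sc, task);
            mutex_exit(&sc->sc_lock);
            lddone(&sc->sc_ld, bp);         /* sc_ld embed assumed */
    }
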
584 struct ld_sdmmc_task *task = arg;
585 struct ld_sdmmc_softc *sc = task->task_sc;
589 error = sdmmc_mem_flush_cache(sc->sc_sf, task->task_poll);
594 *task->task_errorp = error;
597 /* Release the task. */
598 ld_sdmmc_task_put(sc, task);
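Lines 584-598 are the cache-flush worker. It calls sdmmc_mem_flush_cache() (polling if requested), hands the result back through task_errorp, and frees the task. The wakeup is not visible in the matches, so the condvar here is hypothetical:

    static void
    ld_sdmmc_docachesync(void *arg)
    {
            struct ld_sdmmc_task *task = arg;
            struct ld_sdmmc_softc *sc = task->task_sc;
            int error;

            /* Flush the device cache, polling if the caller asked for it. */
            error = sdmmc_mem_flush_cache(sc->sc_sf, task->task_poll);

            mutex_enter(&sc->sc_lock);
            /* Hand the result to the thread blocked in the entry point. */
            *task->task_errorp = error;
            cv_broadcast(&sc->sc_cv);       /* sc_cv is hypothetical */

            /* Release the task. */
            ld_sdmmc_task_put(sc, task);
            mutex_exit(&sc->sc_lock);
    }
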
608 struct ld_sdmmc_task *task;
612 * If we come here through the sdmmc discovery task, we can't
613 * wait for a new task because the new task can't even begin
614 * until the sdmmc discovery task has completed.
627 /* Acquire a free task, or fail with EBUSY. */
628 if ((task = ld_sdmmc_task_get(sc)) == NULL) {
634 /* Set up the task and schedule it. */
635 task->task_poll = poll;
636 task->task_errorp = &error;
637 sdmmc_init_task(&task->task, ld_sdmmc_docachesync, task);
639 sdmmc_add_task(sc->sc_sf->sc, &task->task);
642 * Wait for the task to complete. If the device is yanked,
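Lines 608-642 are the synchronous entry point. The comment at 612-614 is the key constraint: when called from the sdmmc discovery task, sleeping for a free task would deadlock, because a newly queued task cannot run until discovery itself completes -- hence the fail-fast EBUSY at 627-628. A sketch of the shape this implies, with the sentinel value and condvar wait as assumptions:

    static int
    ld_sdmmc_cachesync(struct ld_softc *ld, bool poll)
    {
            struct ld_sdmmc_softc *sc =
                container_of(ld, struct ld_sdmmc_softc, sc_ld);
            struct ld_sdmmc_task *task;
            int error = -1;         /* sentinel: not yet completed */

            mutex_enter(&sc->sc_lock);

            /* Acquire a free task, or fail with EBUSY -- never sleep here. */
            if ((task = ld_sdmmc_task_get(sc)) == NULL) {
                    mutex_exit(&sc->sc_lock);
                    return EBUSY;
            }

            /* Set up the task and schedule it. */
            task->task_poll = poll;
            task->task_errorp = &error;
            sdmmc_init_task(&task->task, ld_sdmmc_docachesync, task);
            sdmmc_add_task(sc->sc_sf->sc, &task->task);

            /*
             * Wait for the task to complete.  If the device is yanked,
             * ld_sdmmc_task_cancel() stores the error and wakes us too.
             */
            while (error == -1)
                    cv_wait(&sc->sc_cv, &sc->sc_lock);      /* sc_cv assumed */

            mutex_exit(&sc->sc_lock);
            return error;
    }
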