Lines Matching defs:reqlist
279 * Linked list links used to aggregate requests into a reqlist
327 struct xbb_xen_reqlist *reqlist;
488 struct xbb_xen_reqlist *reqlist, int operation,
828 * \param reqlist The request structure whose kva region will be accessed.
836 xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
838 return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
846 * \param reqlist The request structure whose I/O region will be accessed.
858 xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
860 return (xbb_reqlist_vaddr(reqlist, pagenr, sector));
868 * \param reqlist The request list structure whose pseudo-physical region
881 xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
885 xbb = reqlist->xbb;
888 (uintptr_t)(reqlist->kva - xbb->kva) +
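The address accessors matched above share one calculation: each segment of a request list occupies one page of the list's contiguous kva region, and a sector is a 512-byte slice within that page. A minimal sketch of the arithmetic, with base standing in for reqlist->kva and PAGE_SIZE assumed to be the platform page size (not taken from the driver):

#include <stddef.h>
#include <stdint.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE	4096	/* assumed page size for this sketch */
#endif

/*
 * Illustrative sketch, not part of blkback.c: the virtual address of
 * a given 512-byte sector within a given page of a request list's
 * kva region, mirroring the expression in xbb_reqlist_vaddr().
 */
static inline uint8_t *
reqlist_segment_addr(uint8_t *base, int pagenr, int sector)
{
	/* One page per segment; sectors are 512 (1 << 9) bytes. */
	return (base + ((size_t)pagenr * PAGE_SIZE) + ((size_t)sector << 9));
}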
1009 xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)
1017 for (i = 0; i < reqlist->nr_segments; i++) {
1018 if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
1021 unmap[invcount].host_addr = xbb_get_gntaddr(reqlist, i, 0);
1023 unmap[invcount].handle = reqlist->gnt_handles[i];
1024 reqlist->gnt_handles[i] = GRANT_REF_INVALID;
1044 struct xbb_xen_reqlist *reqlist;
1046 reqlist = NULL;
1050 if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
1052 reqlist->flags = XBB_REQLIST_NONE;
1053 reqlist->kva = NULL;
1054 reqlist->status = BLKIF_RSP_OKAY;
1055 reqlist->residual_512b_sectors = 0;
1056 reqlist->num_children = 0;
1057 reqlist->nr_segments = 0;
1058 STAILQ_INIT(&reqlist->contig_req_list);
1061 return (reqlist);
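xbb_get_reqlist() follows a common free-list pattern: pop a recycled structure off a STAILQ and reset its per-use fields, with the release path (xbb_release_reqlist(), listed below) pushing the structure back on the tail. A stripped-down sketch of that pattern using the standard <sys/queue.h> macros; the struct and field names are stand-ins, not the driver's:

#include <sys/queue.h>
#include <stddef.h>

struct item {
	STAILQ_ENTRY(item)	links;
	int			nr_segments;	/* example per-use field */
};

STAILQ_HEAD(item_list, item);

/* Pop a recycled item off the free list and reset it, or return NULL. */
static struct item *
item_alloc(struct item_list *freelist)
{
	struct item *it;

	if ((it = STAILQ_FIRST(freelist)) != NULL) {
		STAILQ_REMOVE_HEAD(freelist, links);
		it->nr_segments = 0;
	}
	return (it);
}

/* Return an item to the free list for reuse. */
static void
item_free(struct item_list *freelist, struct item *it)
{
	STAILQ_INSERT_TAIL(freelist, it, links);
}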
1069 * \param wakeup If set, wakeup the work thread if freeing this reqlist
1073 xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1084 if (reqlist->kva != NULL)
1085 xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
1087 xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
1089 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
1109 * \param reqlist Pointer to reqlist pointer.
1116 xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
1137 * Allocate a reqlist if the caller doesn't have one already.
1139 if (*reqlist == NULL) {
1152 if (*reqlist == NULL) {
1153 *reqlist = nreqlist;
1162 nreq->reqlist = *reqlist;
1176 STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links);
1177 (*reqlist)->num_children++;
1178 (*reqlist)->nr_segments += ring_req->nr_segments;
1311 * \param reqlist Allocated internal request list structure.
1314 xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1322 if (reqlist->flags & XBB_REQLIST_MAPPED)
1323 xbb_unmap_reqlist(reqlist);
1331 * reqlist right now. However, in order to make sure that no one
1336 STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1340 xbb_queue_response(xbb, nreq, reqlist->status);
1343 if (reqlist->status == BLKIF_RSP_OKAY)
1352 reqlist->ds_tag_type,
1353 reqlist->ds_trans_type,
1363 sectors_sent -= reqlist->residual_512b_sectors;
1369 reqlist->ds_tag_type,
1370 reqlist->ds_trans_type,
1372 /*then*/&reqlist->ds_t0);
1374 xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
1398 struct xbb_xen_reqlist *reqlist;
1400 reqlist = bio->bio_caller1;
1401 xbb = reqlist->xbb;
1403 reqlist->residual_512b_sectors += bio->bio_resid >> 9;
1424 reqlist->status = BLKIF_RSP_ERROR;
1441 if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1)
1442 xbb_complete_reqlist(xbb, reqlist);
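The bio-done handler above uses a last-one-out completion pattern: every outstanding bio decrements a pending count, and the caller whose decrement takes the count from 1 to 0 completes the whole request list. A sketch of the same pattern using C11 atomics in place of the kernel's atomic_fetchadd_int(); complete_all() is a hypothetical placeholder for xbb_complete_reqlist():

#include <stdatomic.h>

static void
complete_all(void)
{
	/* Finish the aggregate request here. */
}

static void
io_done(atomic_int *pendcnt)
{
	/* atomic_fetch_sub() returns the value before the decrement,
	 * so observing 1 means this was the last outstanding I/O. */
	if (atomic_fetch_sub(pendcnt, 1) == 1)
		complete_all();
}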
1452 * \param reqlist Allocated internal request list structure.
1462 xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1478 reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1488 reqlist->kva = NULL;
1489 if (reqlist->nr_segments != 0) {
1490 reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
1491 if (reqlist->kva == NULL) {
1499 binuptime(&reqlist->ds_t0);
1500 devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
1502 switch (reqlist->operation) {
1505 reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1509 reqlist->ds_trans_type = DEVSTAT_WRITE;
1513 reqlist->status = BLKIF_RSP_ERROR;
1519 reqlist->ds_trans_type = DEVSTAT_READ;
1545 reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1546 reqlist->ds_trans_type = DEVSTAT_NO_DATA;
1551 reqlist->operation);
1552 reqlist->status = BLKIF_RSP_ERROR;
1556 reqlist->xbb = xbb;
1561 STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1576 reqlist->status = BLKIF_RSP_ERROR;
1599 reqlist->status = BLKIF_RSP_ERROR;
1604 map->host_addr = xbb_get_gntaddr(reqlist,
1636 reqlist->status = BLKIF_RSP_ERROR;
1642 xbb->maps, reqlist->nr_segments);
1646 reqlist->flags |= XBB_REQLIST_MAPPED;
1648 for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
1657 reqlist->status = BLKIF_RSP_ERROR;
1661 reqlist->gnt_handles[seg_idx] = map->handle;
1663 if (reqlist->starting_sector_number + total_sects >
1668 reqlist->starting_sector_number,
1669 reqlist->starting_sector_number + total_sects,
1671 reqlist->status = BLKIF_RSP_ERROR;
1678 reqlist,
1683 reqlist->status = BLKIF_RSP_ERROR;
1691 xbb_complete_reqlist(xbb, reqlist);
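Before any I/O is issued, the dispatch path checks that the request list's sector range fits within the backing media (the comparison against reqlist->starting_sector_number above). A hedged sketch of such a bounds check; the parameter names are stand-ins, and the overflow-safe form is a choice of the sketch rather than something the excerpt shows:

#include <stdbool.h>
#include <stdint.h>

static bool
request_in_bounds(uint64_t start_sector, uint64_t total_sectors,
    uint64_t media_num_sectors)
{
	/* Written to avoid overflow in start_sector + total_sectors. */
	return (total_sectors <= media_num_sectors &&
	    start_sector <= media_num_sectors - total_sectors);
}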
1733 struct xbb_xen_reqlist *reqlist;
1753 * Initialize reqlist to the last element in the pending
1757 reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
1759 if (reqlist != NULL) {
1760 cur_sector = reqlist->next_contig_sector;
1761 cur_operation = reqlist->operation;
1832 if ((reqlist != NULL)
1837 || ((ring_req->nr_segments + reqlist->nr_segments) >
1839 reqlist = NULL;
1848 retval = xbb_get_resources(xbb, &reqlist, ring_req,
1877 reqlist->next_contig_sector = cur_sector;
1882 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1883 if (reqlist == NULL) {
1897 retval = xbb_dispatch_io(xbb, reqlist);
1907 reqlist, links);
1921 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1923 if (reqlist != NULL)
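The work loop tries to coalesce consecutive ring requests into the request list at the tail of the pending queue; the excerpt above shows the merge target being dropped (reqlist = NULL) when an incoming request does not qualify. A sketch of the qualifying test with stand-in names: the request must continue the same operation at the next contiguous sector and must not push the list past its segment limit:

#include <stdbool.h>
#include <stdint.h>

static bool
can_coalesce(uint64_t next_contig_sector, int cur_operation,
    int cur_nr_segments, uint64_t req_sector, int req_operation,
    int req_nr_segments, int max_reqlist_segments)
{
	return (req_sector == next_contig_sector &&
	    req_operation == cur_operation &&
	    cur_nr_segments + req_nr_segments <= max_reqlist_segments);
}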
1963 * \param reqlist Allocated internal request list structure.
1971 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1986 bio_offset = (off_t)reqlist->starting_sector_number
2006 bio->bio_caller1 = reqlist;
2009 reqlist->pendcnt = 1;
2021 nseg = reqlist->nr_segments;
2063 bio->bio_data = xbb_reqlist_ioaddr(reqlist, seg_idx,
2066 bio->bio_caller1 = reqlist;
2091 reqlist->pendcnt = nbio;
2128 * \param reqlist Allocated internal request list.
2136 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2179 xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number
2185 nseg = reqlist->nr_segments;
2198 xiovec->iov_base = xbb_reqlist_ioaddr(reqlist,
2298 reqlist->status = BLKIF_RSP_ERROR;
2300 xbb_complete_reqlist(xbb, reqlist);
2667 struct xbb_xen_reqlist *reqlist;
2671 for (i = 0, reqlist = xbb->request_lists;
2672 i < xbb->max_requests; i++, reqlist++){
2673 if (reqlist->gnt_handles != NULL) {
2674 free(reqlist->gnt_handles, M_XENBLOCKBACK);
2675 reqlist->gnt_handles = NULL;
3027 struct xbb_xen_reqlist *reqlist;
3047 reqlist = &xbb->request_lists[i];
3049 reqlist->xbb = xbb;
3051 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
3052 sizeof(*reqlist->gnt_handles),
3054 if (reqlist->gnt_handles == NULL) {
3062 reqlist->gnt_handles[seg] = GRANT_REF_INVALID;
3064 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
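At attach time each request list gets its own array of grant handles, one slot per possible segment, with every slot poisoned to an invalid value so that xbb_unmap_reqlist() can skip segments that were never mapped. A sketch of that setup step; INVALID_HANDLE and the helper name are stand-ins for GRANT_REF_INVALID and the driver's inline allocation:

#include <stdint.h>
#include <stdlib.h>

#define INVALID_HANDLE	(~0U)	/* stand-in for GRANT_REF_INVALID */

/* Allocate a handle slot per segment and mark every slot invalid. */
static uint32_t *
alloc_handle_array(unsigned int max_segments)
{
	uint32_t *handles;
	unsigned int seg;

	handles = malloc(max_segments * sizeof(*handles));
	if (handles == NULL)
		return (NULL);
	for (seg = 0; seg < max_segments; seg++)
		handles[seg] = INVALID_HANDLE;
	return (handles);
}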
3151 * We limit the maximum number of reqlist segments to the maximum