Lines Matching defs:xbb
115 printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
154 static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
156 static int xbb_shutdown(struct xbb_softc *xbb);
172 struct xbb_softc *xbb;
266 * request list free pool (xbb->reqlist_free_stailq) and pending
267 * requests waiting for execution (xbb->reqlist_pending_stailq).
470 * Only a single file based request is outstanding per-xbb instance,
487 typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
669 * (e.g. xbb->media_size >> xbb->sector_size_shift).
764 * \param xbb Per-instance xbb configuration structure.
770 xbb_get_req(struct xbb_softc *xbb)
776 mtx_assert(&xbb->lock, MA_OWNED);
778 if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) {
779 STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links);
780 xbb->active_request_count++;
789 * \param xbb Per-instance xbb configuration structure.
793 xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
795 mtx_assert(&xbb->lock, MA_OWNED);
797 STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links);
798 xbb->active_request_count--;
800 KASSERT(xbb->active_request_count >= 0,
807 * \param xbb Per-instance xbb configuration structure.
812 xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list,
815 mtx_assert(&xbb->lock, MA_OWNED);
817 STAILQ_CONCAT(&xbb->request_free_stailq, req_list);
818 xbb->active_request_count -= nreqs;
820 KASSERT(xbb->active_request_count >= 0,
883 struct xbb_softc *xbb;
885 xbb = reqlist->xbb;
887 return ((uintptr_t)(xbb->gnt_base_addr +
888 (uintptr_t)(reqlist->kva - xbb->kva) +
895 * \param xbb Per-instance xbb configuration structure.
898 * \param have_lock If set, xbb lock is already held.
908 xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
920 mtx_lock(&xbb->lock);
925 bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear);
934 for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) {
941 if (bit_test(xbb->kva_free, i)) {
955 bit_nset(xbb->kva_free, first_clear,
958 free_kva = xbb->kva +
961 KASSERT(free_kva >= (uint8_t *)xbb->kva &&
963 (uint8_t *)xbb->ring_config.va,
966 nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
967 (uintmax_t)xbb->ring_config.va));
975 xbb->flags |= XBBF_RESOURCE_SHORTAGE;
976 xbb->kva_shortages++;
979 mtx_unlock(&xbb->lock);
987 * \param xbb Per-instance xbb configuration structure.
992 xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages)
996 mtx_assert(&xbb->lock, MA_OWNED);
998 start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
999 bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1);
1036 * \param xbb Per-instance xbb configuration structure.
1042 xbb_get_reqlist(struct xbb_softc *xbb)
1048 mtx_assert(&xbb->lock, MA_OWNED);
1050 if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
1051 STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
1067 * \param xbb Per-instance xbb configuration structure.
1073 xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1077 mtx_assert(&xbb->lock, MA_OWNED);
1080 wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
1081 xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
1085 xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
1087 xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
1089 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
1091 if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1098 xbb_shutdown(xbb);
1102 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1108 * \param xbb Per-instance xbb configuration structure.
1116 xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
1125 mtx_lock(&xbb->lock);
1131 if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1132 mtx_unlock(&xbb->lock);
1140 nreqlist = xbb_get_reqlist(xbb);
1146 nreq = xbb_get_req(xbb);
1150 mtx_unlock(&xbb->lock);
1157 xbb->sector_size_shift;
1158 STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
1167 if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
1175 devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0);
1189 xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1190 xbb->request_shortages++;
1193 xbb_release_req(xbb, nreq);
1196 xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);
1198 mtx_unlock(&xbb->lock);
1206 * \param xbb Per-instance xbb configuration structure.
1212 xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
1227 mtx_assert(&xbb->lock, MA_OWNED);
1234 switch (xbb->abi) {
1236 resp = RING_GET_RESPONSE(&xbb->rings.native,
1237 xbb->rings.native.rsp_prod_pvt);
1241 RING_GET_RESPONSE(&xbb->rings.x86_32,
1242 xbb->rings.x86_32.rsp_prod_pvt);
1246 RING_GET_RESPONSE(&xbb->rings.x86_64,
1247 xbb->rings.x86_64.rsp_prod_pvt);
1258 xbb->reqs_completed_with_error++;
1260 xbb->rings.common.rsp_prod_pvt++;
1262 xbb->reqs_queued_for_completion++;
1269 * \param xbb Per-instance xbb configuration structure.
1277 xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
1284 mtx_assert(&xbb->lock, MA_OWNED);
1288 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);
1290 if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {
1296 RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do);
1297 } else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) {
1301 xbb->reqs_completed += xbb->reqs_queued_for_completion;
1302 xbb->reqs_queued_for_completion = 0;
1310 * \param xbb Per-instance xbb configuration structure.
1314 xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1325 mtx_lock(&xbb->lock);
1340 xbb_queue_response(xbb, nreq, reqlist->status);
1350 devstat_end_transaction(xbb->xbb_stats_in,
1367 devstat_end_transaction(xbb->xbb_stats,
1374 xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
1376 		xbb_push_responses(xbb, &run_taskqueue, &notify);
1378 mtx_unlock(&xbb->lock);
1381 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1384 xen_intr_signal(xbb->xen_intr_handle);
1397 struct xbb_softc *xbb;
1401 xbb = reqlist->xbb;
1423 bio->bio_error, xbb->dev_name);
1427 && xenbus_get_state(xbb->dev) == XenbusStateConnected) {
1433 xenbus_set_state(xbb->dev, XenbusStateClosing);
1442 xbb_complete_reqlist(xbb, reqlist);
1451 * \param xbb Per-instance xbb configuration structure.
1462 xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1490 reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
1500 devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
1510 if ((xbb->flags & XBBF_READ_ONLY) != 0) {
1512 xbb->dev_name);
1527 if (xbb->disable_flush != 0) {
1537 if (xbb->flush_interval != 0) {
1538 if (++(xbb->flush_count) < xbb->flush_interval) {
1541 xbb->flush_count = 0;
1556 reqlist->xbb = xbb;
1557 xbb_sg = xbb->xbb_sgs;
1558 map = xbb->maps;
1573 || __predict_false(nseg > xbb->max_request_segments)) {
1607 xbb->ring_config.gnt_addr,
1611 (uintmax_t)xbb->ring_config.gnt_addr));
1615 map->dom = xbb->otherend_id;
1626 nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
1630 ((xbb->sector_size >> 9) - 1)) != 0) {
1631 device_printf(xbb->dev, "%s: I/O size (%d) is not "
1635 xbb->sector_size);
1642 xbb->maps, reqlist->nr_segments);
1648 for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
1664 xbb->media_num_sectors) {
1670 xbb->dev_name);
1677 error = xbb->dispatch_io(xbb,
1691 xbb_complete_reqlist(xbb, reqlist);
1728 struct xbb_softc *xbb;
1735 xbb = (struct xbb_softc *)context;
1736 rings = &xbb->rings;
1757 reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
1792 switch (xbb->abi) {
1794 ring_req = RING_GET_REQUEST(&xbb->rings.native,
1802 &xbb->rings.x86_32, rings->common.req_cons);
1812 			ring_req64 = RING_GET_REQUEST(&xbb->rings.x86_64,
1833 && ((xbb->no_coalesce_reqs != 0)
1834 || ((xbb->no_coalesce_reqs == 0)
1838 xbb->max_reqlist_segments))))) {
1848 retval = xbb_get_resources(xbb, &reqlist, ring_req,
1849 xbb->rings.common.req_cons);
1872 xbb->rings.common.req_cons++;
1873 xbb->reqs_received++;
1882 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1895 STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links);
1897 retval = xbb_dispatch_io(xbb, reqlist);
1906 STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq,
1921 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1924 xbb->forced_dispatch++;
1926 xbb->normal_dispatch++;
1928 xbb->total_dispatch++;
1942 struct xbb_softc *xbb;
1945 xbb = (struct xbb_softc *)arg;
1946 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1951 SDT_PROVIDER_DEFINE(xbb);
1952 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int");
1953 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t",
1955 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int",
1962 * \param xbb Per-instance xbb configuration structure.
1971 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1985 dev_data = &xbb->backend.dev;
1987 << xbb->sector_size_shift;
2011 SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush,
2012 device_get_unit(xbb->dev));
2019 xbb_sg = xbb->xbb_sgs;
2030 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2034 __func__, xbb->otherend_id);
2046 			if ((bio_offset & (xbb->sector_size - 1)) != 0) {
2049 xbb->otherend_id);
2067 bio->bio_pblkno = bio_offset >> xbb->sector_size_shift;
2075 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2079 __func__, xbb->otherend_id);
2096 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read,
2097 device_get_unit(xbb->dev),
2101 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write,
2102 device_get_unit(xbb->dev),
2118 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int");
2119 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t",
2121 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int",
2127 * \param xbb Per-instance xbb configuration structure.
2136 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2147 file_data = &xbb->backend.file;
2161 SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush,
2162 device_get_unit(xbb->dev));
2164 (void) vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2166 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2167 error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread);
2168 VOP_UNLOCK(xbb->vn);
2180 << xbb->sector_size_shift;
2184 xbb_sg = xbb->xbb_sgs;
2222 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read,
2223 device_get_unit(xbb->dev), xuio.uio_offset,
2226 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2248 error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2251 VOP_UNLOCK(xbb->vn);
2256 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write,
2257 device_get_unit(xbb->dev), xuio.uio_offset,
2260 (void)vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2262 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2282 error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2284 VOP_UNLOCK(xbb->vn);
2300 xbb_complete_reqlist(xbb, reqlist);
2310 * \param xbb Per-instance xbb configuration structure.
2313 xbb_close_backend(struct xbb_softc *xbb)
2316 DPRINTF("closing dev=%s\n", xbb->dev_name);
2317 if (xbb->vn) {
2320 if ((xbb->flags & XBBF_READ_ONLY) == 0)
2323 switch (xbb->device_type) {
2325 if (xbb->backend.dev.csw) {
2326 dev_relthread(xbb->backend.dev.cdev,
2327 xbb->backend.dev.dev_ref);
2328 xbb->backend.dev.csw = NULL;
2329 xbb->backend.dev.cdev = NULL;
2340 (void)vn_close(xbb->vn, flags, NOCRED, curthread);
2341 xbb->vn = NULL;
2343 switch (xbb->device_type) {
2347 if (xbb->backend.file.cred != NULL) {
2348 crfree(xbb->backend.file.cred);
2349 xbb->backend.file.cred = NULL;
2364 * \param xbb Per-instance xbb configuration structure.
2369 xbb_open_dev(struct xbb_softc *xbb)
2376 xbb->device_type = XBB_TYPE_DISK;
2377 xbb->dispatch_io = xbb_dispatch_dev;
2378 xbb->backend.dev.cdev = xbb->vn->v_rdev;
2379 xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev,
2380 &xbb->backend.dev.dev_ref);
2381 if (xbb->backend.dev.csw == NULL)
2384 error = VOP_GETATTR(xbb->vn, &vattr, NOCRED);
2386 xenbus_dev_fatal(xbb->dev, error, "error getting "
2388 xbb->dev_name);
2392 dev = xbb->vn->v_rdev;
2395 xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for "
2396 "device %s!", xbb->dev_name);
2401 (caddr_t)&xbb->sector_size, FREAD,
2404 xenbus_dev_fatal(xbb->dev, error,
2406 "for device %s", xbb->dev_name);
2411 (caddr_t)&xbb->media_size, FREAD,
2414 xenbus_dev_fatal(xbb->dev, error,
2416 "for device %s", xbb->dev_name);
2426 * \param xbb Per-instance xbb configuration structure.
2431 xbb_open_file(struct xbb_softc *xbb)
2437 file_data = &xbb->backend.file;
2438 xbb->device_type = XBB_TYPE_FILE;
2439 xbb->dispatch_io = xbb_dispatch_file;
2440 error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred);
2442 xenbus_dev_fatal(xbb->dev, error,
2444 "for file %s", xbb->dev_name);
2453 if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) {
2454 vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY);
2455 if (VN_IS_DOOMED(xbb->vn)) {
2457 xenbus_dev_fatal(xbb->dev, error,
2459 xbb->dev_name);
2466 xbb->media_size = vattr.va_size;
2478 xbb->sector_size = vattr.va_blocksize;
2480 xbb->sector_size = 512;
2486 if ((xbb->media_size % xbb->sector_size) != 0) {
2488 xenbus_dev_fatal(xbb->dev, error,
2490 xbb->dev_name,
2491 (uintmax_t)xbb->media_size,
2492 xbb->sector_size);
2500 * \param xbb Per-instance xbb configuration structure.
2505 xbb_open_backend(struct xbb_softc *xbb)
2514 DPRINTF("opening dev=%s\n", xbb->dev_name);
2517 xenbus_dev_fatal(xbb->dev, ENOENT,
2522 if ((xbb->flags & XBBF_READ_ONLY) == 0)
2528 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name);
2537 if (xbb->dev_name[0] != '/') {
2542 dev_name = malloc(strlen(xbb->dev_name)
2547 xbb->dev_name);
2548 free(xbb->dev_name, M_XENBLOCKBACK);
2549 xbb->dev_name = dev_name;
2553 xenbus_dev_fatal(xbb->dev, error, "error opening device %s",
2554 xbb->dev_name);
2560 xbb->vn = nd.ni_vp;
2563 if (vn_isdisk_error(xbb->vn, &error)) {
2564 error = xbb_open_dev(xbb);
2565 } else if (xbb->vn->v_type == VREG) {
2566 error = xbb_open_file(xbb);
2569 xenbus_dev_fatal(xbb->dev, error, "%s is not a disk "
2570 "or file", xbb->dev_name);
2572 VOP_UNLOCK(xbb->vn);
2575 xbb_close_backend(xbb);
2579 xbb->sector_size_shift = fls(xbb->sector_size) - 1;
2580 xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift;
2583 (xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file",
2584 xbb->dev_name, xbb->sector_size, xbb->media_size);
2593 * \param xbb Per-instance xbb configuration structure.
2596 xbb_free_communication_mem(struct xbb_softc *xbb)
2598 if (xbb->kva != 0) {
2599 if (xbb->pseudo_phys_res != NULL) {
2600 xenmem_free(xbb->dev, xbb->pseudo_phys_res_id,
2601 xbb->pseudo_phys_res);
2602 xbb->pseudo_phys_res = NULL;
2605 xbb->kva = 0;
2606 xbb->gnt_base_addr = 0;
2607 if (xbb->kva_free != NULL) {
2608 free(xbb->kva_free, M_XENBLOCKBACK);
2609 xbb->kva_free = NULL;
2616 * \param xbb Per-instance xbb configuration structure.
2619 xbb_disconnect(struct xbb_softc *xbb)
2623 mtx_unlock(&xbb->lock);
2624 xen_intr_unbind(&xbb->xen_intr_handle);
2625 if (xbb->io_taskqueue != NULL)
2626 taskqueue_drain(xbb->io_taskqueue, &xbb->io_task);
2627 mtx_lock(&xbb->lock);
2633 if (xbb->active_request_count != 0)
2636 if (xbb->flags & XBBF_RING_CONNECTED) {
2643 ring_idx < xbb->ring_config.ring_pages;
2645 op->host_addr = xbb->ring_config.gnt_addr
2647 op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx];
2648 op->handle = xbb->ring_config.handle[ring_idx];
2652 xbb->ring_config.ring_pages);
2656 xbb->flags &= ~XBBF_RING_CONNECTED;
2659 xbb_free_communication_mem(xbb);
2661 if (xbb->requests != NULL) {
2662 free(xbb->requests, M_XENBLOCKBACK);
2663 xbb->requests = NULL;
2666 if (xbb->request_lists != NULL) {
2671 for (i = 0, reqlist = xbb->request_lists;
2672 		     i < xbb->max_requests; i++, reqlist++) {
2678 free(xbb->request_lists, M_XENBLOCKBACK);
2679 xbb->request_lists = NULL;
2690 * \param xbb Per-instance xbb configuration structure.
2693 xbb_connect_ring(struct xbb_softc *xbb)
2700 if ((xbb->flags & XBBF_RING_CONNECTED) != 0)
2707 xbb->ring_config.va = xbb->kva
2708 + (xbb->kva_size
2709 - (xbb->ring_config.ring_pages * PAGE_SIZE));
2710 xbb->ring_config.gnt_addr = xbb->gnt_base_addr
2711 + (xbb->kva_size
2712 - (xbb->ring_config.ring_pages * PAGE_SIZE));
2715 ring_idx < xbb->ring_config.ring_pages;
2717 gnt->host_addr = xbb->ring_config.gnt_addr
2720 gnt->ref = xbb->ring_config.ring_ref[ring_idx];
2721 gnt->dom = xbb->otherend_id;
2725 xbb->ring_config.ring_pages);
2730 ring_idx < xbb->ring_config.ring_pages;
2736 xbb->ring_config.va = 0;
2737 xenbus_dev_fatal(xbb->dev, EACCES,
2742 for (i = 0, j = 0; i < xbb->ring_config.ring_pages;
2760 xbb->ring_config.handle[ring_idx] = gnt->handle;
2761 xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr;
2765 switch (xbb->abi) {
2769 sring = (blkif_sring_t *)xbb->ring_config.va;
2770 BACK_RING_INIT(&xbb->rings.native, sring,
2771 xbb->ring_config.ring_pages * PAGE_SIZE);
2777 sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va;
2778 BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32,
2779 xbb->ring_config.ring_pages * PAGE_SIZE);
2785 sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va;
2786 BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64,
2787 xbb->ring_config.ring_pages * PAGE_SIZE);
2794 xbb->flags |= XBBF_RING_CONNECTED;
2796 error = xen_intr_bind_remote_port(xbb->dev,
2797 xbb->otherend_id,
2798 xbb->ring_config.evtchn,
2801 /*arg*/xbb,
2803 &xbb->xen_intr_handle);
2805 xenbus_dev_fatal(xbb->dev, error, "binding event channel");
2819 * \param xbb Per-instance xbb configuration structure.
2825 xbb_alloc_communication_mem(struct xbb_softc *xbb)
2827 xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments;
2828 xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE;
2829 xbb->kva_size = xbb->reqlist_kva_size +
2830 (xbb->ring_config.ring_pages * PAGE_SIZE);
2832 xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT);
2833 if (xbb->kva_free == NULL)
2837 device_get_nameunit(xbb->dev), xbb->kva_size,
2838 xbb->reqlist_kva_size);
2845 xbb->pseudo_phys_res_id = 0;
2846 xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id,
2847 xbb->kva_size);
2848 if (xbb->pseudo_phys_res == NULL) {
2849 xbb->kva = 0;
2852 xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
2853 xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res);
2856 device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,
2857 (uintmax_t)xbb->gnt_base_addr);
2864 * \param xbb Per-instance xbb configuration structure.
2867 xbb_collect_frontend_info(struct xbb_softc *xbb)
2876 otherend_path = xenbus_get_otherend_path(xbb->dev);
2881 xbb->ring_config.ring_pages = 1;
2882 xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2883 xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE;
2890 &xbb->ring_config.evtchn);
2892 xenbus_dev_fatal(xbb->dev, error,
2895 xenbus_get_otherend_path(xbb->dev));
2913 xbb->max_requests = 32;
2918 xbb->ring_config.ring_pages = 1 << ring_page_order;
2919 ring_size = PAGE_SIZE * xbb->ring_config.ring_pages;
2920 xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size);
2922 if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) {
2923 xenbus_dev_fatal(xbb->dev, EINVAL,
2927 xbb->ring_config.ring_pages,
2932 if (xbb->ring_config.ring_pages == 1) {
2935 &xbb->ring_config.ring_ref[0],
2938 xenbus_dev_fatal(xbb->dev, error,
2942 xenbus_get_otherend_path(xbb->dev));
2947 for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages;
2955 &xbb->ring_config.ring_ref[ring_idx]);
2957 xenbus_dev_fatal(xbb->dev, error,
2977 xbb->abi = BLKIF_PROTOCOL_NATIVE;
2979 xbb->abi = BLKIF_PROTOCOL_X86_32;
2981 xbb->abi = BLKIF_PROTOCOL_X86_64;
2983 xenbus_dev_fatal(xbb->dev, EINVAL,
2995 * \param xbb Per-instance xbb configuration structure.
2998 xbb_alloc_requests(struct xbb_softc *xbb)
3006 xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests),
3008 if (xbb->requests == NULL) {
3009 xenbus_dev_fatal(xbb->dev, ENOMEM,
3014 req = xbb->requests;
3015 last_req = &xbb->requests[xbb->max_requests - 1];
3016 STAILQ_INIT(&xbb->request_free_stailq);
3018 STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links);
3025 xbb_alloc_request_lists(struct xbb_softc *xbb)
3034 xbb->request_lists = malloc(xbb->max_requests *
3035 sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3036 if (xbb->request_lists == NULL) {
3037 xenbus_dev_fatal(xbb->dev, ENOMEM,
3042 STAILQ_INIT(&xbb->reqlist_free_stailq);
3043 STAILQ_INIT(&xbb->reqlist_pending_stailq);
3044 for (i = 0; i < xbb->max_requests; i++) {
3047 reqlist = &xbb->request_lists[i];
3049 reqlist->xbb = xbb;
3051 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
3055 xenbus_dev_fatal(xbb->dev, ENOMEM,
3061 for (seg = 0; seg < xbb->max_reqlist_segments; seg++)
3064 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
3073 * \param xbb Per-instance xbb configuration structure.
3076 xbb_publish_backend_info(struct xbb_softc *xbb)
3083 our_path = xenbus_get_node(xbb->dev);
3087 xenbus_dev_fatal(xbb->dev, error,
3099 (uintmax_t)(xbb->media_size >> XBD_SECTOR_SHFT));
3106 xbb->flags & XBBF_READ_ONLY
3113 xbb->sector_size);
3121 xenbus_dev_fatal(xbb->dev, error, "ending transaction");
3126 xenbus_dev_fatal(xbb->dev, error, "writing %s/%s",
3136 * \param xbb Per-instance xbb configuration structure.
3139 xbb_connect(struct xbb_softc *xbb)
3143 if (!xbb->hotplug_done ||
3144 (xenbus_get_state(xbb->dev) != XenbusStateInitWait) ||
3145 (xbb_collect_frontend_info(xbb) != 0))
3148 xbb->flags &= ~XBBF_SHUTDOWN;
3155 xbb->max_reqlist_segments = MIN(xbb->max_request_segments *
3156 xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST);
3162 xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE;
3165 error = xbb_alloc_communication_mem(xbb);
3167 xenbus_dev_fatal(xbb->dev, error,
3172 error = xbb_publish_backend_info(xbb);
3174 xenbus_dev_fatal(xbb->dev, error,
3179 error = xbb_alloc_requests(xbb);
3185 error = xbb_alloc_request_lists(xbb);
3194 error = xbb_connect_ring(xbb);
3201 xenbus_set_state(xbb->dev, XenbusStateConnected);
3208 * \param xbb Per-instance xbb configuration structure.
3216 xbb_shutdown(struct xbb_softc *xbb)
3230 if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0)
3233 xbb->flags |= XBBF_IN_SHUTDOWN;
3234 mtx_unlock(&xbb->lock);
3236 if (xbb->hotplug_watch.node != NULL) {
3237 xs_unregister_watch(&xbb->hotplug_watch);
3238 free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
3239 xbb->hotplug_watch.node = NULL;
3242 if (xenbus_get_state(xbb->dev) < XenbusStateClosing)
3243 xenbus_set_state(xbb->dev, XenbusStateClosing);
3245 frontState = xenbus_get_otherend_state(xbb->dev);
3246 mtx_lock(&xbb->lock);
3247 xbb->flags &= ~XBBF_IN_SHUTDOWN;
3256 xbb->flags |= XBBF_SHUTDOWN;
3259 error = xbb_disconnect(xbb);
3275 wakeup(xbb);
3284 * \param xbb Per-instance xbb configuration structure.
3289 xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...)
3296 xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev),
3299 xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3302 xenbus_dev_vfatal(xbb->dev, err, fmt, ap);
3305 xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3307 mtx_lock(&xbb->lock);
3308 xbb_shutdown(xbb);
3309 mtx_unlock(&xbb->lock);
3347 * \param xbb Xen Block Back softc.
3351 xbb_setup_sysctl(struct xbb_softc *xbb)
3356 sysctl_ctx = device_get_sysctl_ctx(xbb->dev);
3360 sysctl_tree = device_get_sysctl_tree(xbb->dev);
3365 "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0,
3369 "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0,
3373 "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0,
3377 "reqs_received", CTLFLAG_RW, &xbb->reqs_received,
3381 "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed,
3386 &xbb->reqs_queued_for_completion,
3391 &xbb->reqs_completed_with_error,
3395 "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch,
3399 "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch,
3403 "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch,
3407 "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages,
3412 &xbb->request_shortages,
3416 "max_requests", CTLFLAG_RD, &xbb->max_requests, 0,
3421 &xbb->max_request_segments, 0,
3426 &xbb->max_request_size, 0,
3431 &xbb->ring_config.ring_pages, 0,
3438 struct xbb_softc *xbb;
3441 xbb = device_get_softc(dev);
3443 KASSERT(xbb->hotplug_done, ("Missing hotplug execution"));
3446 if (strchr(xbb->dev_mode, 'w') == NULL)
3447 xbb->flags |= XBBF_READ_ONLY;
3453 error = xbb_open_backend(xbb);
3455 xbb_attach_failed(xbb, error, "Unable to open %s",
3456 xbb->dev_name);
3461 xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev),
3462 xbb->sector_size,
3468 xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev),
3469 xbb->sector_size,
3477 xbb_setup_sysctl(xbb);
3483 xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev),
3486 						  /*context*/&xbb->io_taskqueue);
3487 if (xbb->io_taskqueue == NULL) {
3488 xbb_attach_failed(xbb, error, "Unable to create taskqueue");
3492 taskqueue_start_threads(&xbb->io_taskqueue,
3499 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3502 xbb_attach_failed(xbb, error, "writing %s/hotplug-status",
3503 xenbus_get_node(xbb->dev));
3508 if (xenbus_get_otherend_state(xbb->dev) == XenbusStateInitialised)
3509 xbb_connect(xbb);
3516 struct xbb_softc *xbb;
3520 xbb = device_get_softc(dev);
3523 NULL, &xbb->dev_name, NULL);
3530 xbb->hotplug_done = true;
3534 NULL, &xbb->dev_type, NULL);
3536 xbb->dev_type = NULL;
3539 &xbb->dev_mode, NULL);
3541 xbb_attach_failed(xbb, error, "reading backend fields at %s",
3559 struct xbb_softc *xbb;
3571 xbb = device_get_softc(dev);
3572 xbb->dev = dev;
3573 xbb->otherend_id = xenbus_get_otherend_id(dev);
3574 TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb);
3575 mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF);
3581 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3584 xbb_attach_failed(xbb, error, "writing %s/feature-barrier",
3585 xenbus_get_node(xbb->dev));
3589 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3592 xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache",
3593 xenbus_get_node(xbb->dev));
3598 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3601 xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order",
3602 xenbus_get_node(xbb->dev));
3609 if (xbb->hotplug_done) {
3618 watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path");
3619 xbb->hotplug_watch.callback_data = (uintptr_t)dev;
3620 xbb->hotplug_watch.callback = xbb_attach_cb;
3621 KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup"));
3622 xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK);
3628 xbb->hotplug_watch.max_pending = 1;
3630 error = xs_register_watch(&xbb->hotplug_watch);
3632 xbb_attach_failed(xbb, error, "failed to create watch on %s",
3633 xbb->hotplug_watch.node);
3634 free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
3657 struct xbb_softc *xbb;
3661 xbb = device_get_softc(dev);
3662 mtx_lock(&xbb->lock);
3663 while (xbb_shutdown(xbb) == EAGAIN) {
3664 msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0,
3667 mtx_unlock(&xbb->lock);
3671 if (xbb->io_taskqueue != NULL)
3672 taskqueue_free(xbb->io_taskqueue);
3674 if (xbb->xbb_stats != NULL)
3675 devstat_remove_entry(xbb->xbb_stats);
3677 if (xbb->xbb_stats_in != NULL)
3678 devstat_remove_entry(xbb->xbb_stats_in);
3680 xbb_close_backend(xbb);
3682 if (xbb->dev_mode != NULL) {
3683 free(xbb->dev_mode, M_XENSTORE);
3684 xbb->dev_mode = NULL;
3687 if (xbb->dev_type != NULL) {
3688 free(xbb->dev_type, M_XENSTORE);
3689 xbb->dev_type = NULL;
3692 if (xbb->dev_name != NULL) {
3693 free(xbb->dev_name, M_XENSTORE);
3694 xbb->dev_name = NULL;
3697 mtx_destroy(&xbb->lock);
3748 struct xbb_softc *xbb = device_get_softc(dev);
3752 xenbus_strstate(xenbus_get_state(xbb->dev)));
3759 xbb_connect(xbb);
3763 mtx_lock(&xbb->lock);
3764 xbb_shutdown(xbb);
3765 mtx_unlock(&xbb->lock);
3767 xenbus_set_state(xbb->dev, XenbusStateClosed);
3770 xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend",