Lines Matching refs:device
66 /* The request is ready to execute at the block device */
69 /* The request is currently executing at the block device */
72 /* The request finished executing at the block device */
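The three hits above come from the request-state annotations around source lines 66-72. As a minimal sketch (identifier names here are illustrative, not necessarily the file's exact ones), those block-device-facing states could be modeled as a plain enum:

    /* Illustrative sketch only: the states the comments above describe. */
    enum rdma_request_exec_state {
        REQUEST_STATE_READY_TO_EXECUTE, /* ready to execute at the block device */
        REQUEST_STATE_EXECUTING,        /* currently executing at the block device */
        REQUEST_STATE_EXECUTED,         /* finished executing at the block device */
    };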
314 struct spdk_nvmf_rdma_device *device;
335 * recv queue. Should not exceed device->attr.max_queue_depth.
407 struct spdk_nvmf_rdma_device *device;
471 struct spdk_nvmf_rdma_device *device;
511 struct spdk_nvmf_rdma_device *device;
930 /* destroy cm_id last so cma device will not be freed before we destroy the cq. */
939 nvmf_rdma_resize_cq(struct spdk_nvmf_rdma_qpair *rqpair, struct spdk_nvmf_rdma_device *device)
950 num_cqe = spdk_min(num_cqe, device->attr.max_cqe);
954 if (device->context->device->transport_type == IBV_TRANSPORT_IWARP) {
959 if (required_num_wr > device->attr.max_cqe) {
960 SPDK_ERRLOG("RDMA CQE requirement (%d) exceeds device max_cqe limitation (%d)\n",
961 required_num_wr, device->attr.max_cqe);
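The nvmf_rdma_resize_cq() hits above clamp the requested CQ size to device->attr.max_cqe and fail if the required number of WRs cannot fit. A minimal sketch of that clamping, assuming a generic helper (clamp_cq_size is a hypothetical name, and the iWARP special case at source line 954 is omitted):

    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Sketch: keep a requested CQ size within the limits ibv_query_device()
     * reported, and fail when the WRs we must accommodate cannot fit at all. */
    static int
    clamp_cq_size(const struct ibv_device_attr *attr, int requested_num_cqe, int required_num_wr)
    {
        int num_cqe = requested_num_cqe;

        if (num_cqe > attr->max_cqe) {
            num_cqe = attr->max_cqe;   /* never request more CQEs than the device supports */
        }

        if (required_num_wr > attr->max_cqe) {
            fprintf(stderr, "CQE requirement (%d) exceeds device max_cqe (%d)\n",
                    required_num_wr, attr->max_cqe);
            return -1;
        }

        return num_cqe;
    }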
986 struct spdk_nvmf_rdma_device *device;
990 device = rqpair->device;
993 qp_init_attr.pd = device->pd;
1005 qp_init_attr.cap.max_send_sge = spdk_min((uint32_t)device->attr.max_sge, NVMF_DEFAULT_TX_SGE);
1006 qp_init_attr.cap.max_recv_sge = spdk_min((uint32_t)device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
1009 if (rqpair->srq == NULL && nvmf_rdma_resize_cq(rqpair, device) < 0) {
1033 opts.map = device->map;
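The qpair-initialization hits (source lines 986-1033) take the protection domain and memory map from the device and cap the per-WR SGE counts at device->attr.max_sge. A hedged sketch with plain verbs structures (DEFAULT_TX_SGE/DEFAULT_RX_SGE and MIN_U32 are placeholders; the transport itself goes through its SPDK RDMA provider wrapper rather than calling ibv_create_qp() directly):

    #include <stdint.h>
    #include <infiniband/verbs.h>

    #define MIN_U32(a, b) ((a) < (b) ? (a) : (b))
    #define DEFAULT_TX_SGE 16   /* placeholder default */
    #define DEFAULT_RX_SGE 2    /* placeholder default */

    /* Sketch: build a QP whose SGE counts never exceed what the device advertises. */
    static struct ibv_qp *
    create_qp_capped(struct ibv_pd *pd, struct ibv_cq *cq,
                     const struct ibv_device_attr *attr, uint32_t queue_depth)
    {
        struct ibv_qp_init_attr qp_init_attr = {
            .send_cq = cq,
            .recv_cq = cq,
            .qp_type = IBV_QPT_RC,
            .cap = {
                .max_send_wr = queue_depth,
                .max_recv_wr = queue_depth,
                .max_send_sge = MIN_U32((uint32_t)attr->max_sge, DEFAULT_TX_SGE),
                .max_recv_sge = MIN_U32((uint32_t)attr->max_sge, DEFAULT_RX_SGE),
            },
        };

        return ibv_create_qp(pd, &qp_init_attr);
    }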
1305 event->id->verbs->device->name, event->id->verbs->device->dev_name);
1326 port->device->attr.max_qp_wr, port->device->attr.max_qp_rd_atom);
1327 max_queue_depth = spdk_min(max_queue_depth, port->device->attr.max_qp_wr);
1328 max_read_depth = spdk_min(max_read_depth, port->device->attr.max_qp_init_rd_atom);
1375 rqpair->device = port->device;
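At connect time (source lines 1305-1375) the accepted queue depth and RDMA READ depth are the minimum of what the initiator requested and what the device reports. A small sketch of that negotiation (function and parameter names are hypothetical):

    #include <stdint.h>
    #include <infiniband/verbs.h>

    #define MIN_U32(a, b) ((a) < (b) ? (a) : (b))

    /* Sketch: clamp initiator-requested depths to the local device limits. */
    static void
    negotiate_depths(const struct ibv_device_attr *attr,
                     uint32_t requested_queue_depth, uint32_t requested_read_depth,
                     uint32_t *max_queue_depth, uint32_t *max_read_depth)
    {
        *max_queue_depth = MIN_U32(requested_queue_depth, (uint32_t)attr->max_qp_wr);
        *max_read_depth = MIN_U32(requested_read_depth, (uint32_t)attr->max_qp_init_rd_atom);
    }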
1477 nvmf_rdma_fill_wr_sgl(struct spdk_nvmf_rdma_device *device,
1492 rc = spdk_rdma_utils_get_translation(device->map, iov->iov_base, iov->iov_len, &mem_translation);
1525 nvmf_rdma_fill_wr_sgl_with_dif(struct spdk_nvmf_rdma_device *device,
1558 rc = spdk_rdma_utils_get_translation(device->map, iov->iov_base, iov->iov_len, &mem_translation);
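The fill_wr_sgl hits resolve each iovec through the device's memory map (device->map) to obtain a memory key for the work request. A generic sketch of turning one iovec into one scatter-gather element, using a pre-registered ibv_mr in place of the SPDK memory-map lookup:

    #include <stdint.h>
    #include <sys/uio.h>
    #include <infiniband/verbs.h>

    /* Sketch: one iovec becomes one SGE; the lkey comes from whatever
     * registration covers the buffer (here, a single ibv_mr). */
    static void
    fill_sge_from_iov(struct ibv_sge *sge, const struct iovec *iov, const struct ibv_mr *mr)
    {
        sge->addr = (uintptr_t)iov->iov_base;
        sge->length = (uint32_t)iov->iov_len;
        sge->lkey = mr->lkey;
    }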
1641 struct spdk_nvmf_rdma_device *device,
1696 rc = nvmf_rdma_fill_wr_sgl_with_dif(device, rdma_req, wr, length, num_wrs - 1);
1705 rc = nvmf_rdma_fill_wr_sgl(device, rdma_req, wr, length);
1725 struct spdk_nvmf_rdma_device *device,
1806 rc = nvmf_rdma_fill_wr_sgl(device, rdma_req, current_wr, lengths[i]);
1808 rc = nvmf_rdma_fill_wr_sgl_with_dif(device, rdma_req, current_wr,
1826 if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) != 0) {
1846 struct spdk_nvmf_rdma_device *device,
1870 if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) != 0) {
1881 rc = nvmf_rdma_request_fill_iovs(rtransport, device, rdma_req);
1932 rc = nvmf_rdma_request_fill_iovs_multi_sgl(rtransport, device, rdma_req);
2093 struct spdk_nvmf_rdma_device *device;
2104 device = rqpair->device;
2209 rc = nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
2532 nvmf_rdma_is_rxe_device(struct spdk_nvmf_rdma_device *device)
2534 return device->attr.vendor_id == SPDK_RDMA_RXE_VENDOR_ID_OLD ||
2535 device->attr.vendor_id == SPDK_RDMA_RXE_VENDOR_ID_NEW;
2541 struct spdk_nvmf_rdma_device *device);
2547 struct spdk_nvmf_rdma_device *device;
2551 device = calloc(1, sizeof(*device));
2552 if (!device) {
2556 device->context = context;
2557 rc = ibv_query_device(device->context, &device->attr);
2559 SPDK_ERRLOG("Failed to query RDMA device attributes.\n");
2560 free(device);
2565 if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) == 0) {
2567 SPDK_WARNLOG("but the device with vendor ID %u does not.\n", device->attr.vendor_id);
2579 if (nvmf_rdma_is_rxe_device(device)) {
2580 device->attr.device_cap_flags &= ~(IBV_DEVICE_MEM_MGT_EXTENSIONS);
2584 /* set up device context async ev fd as NON_BLOCKING */
2585 flag = fcntl(device->context->async_fd, F_GETFL);
2586 rc = fcntl(device->context->async_fd, F_SETFL, flag | O_NONBLOCK);
2589 free(device);
2593 TAILQ_INSERT_TAIL(&rtransport->devices, device, link);
2594 SPDK_DEBUGLOG(rdma, "New device %p is added to RDMA transport\n", device);
2597 device->pd = g_nvmf_hooks.get_ibv_pd(NULL, device->context);
2599 device->pd = ibv_alloc_pd(device->context);
2602 if (!device->pd) {
2604 destroy_ib_device(rtransport, device);
2608 assert(device->map == NULL);
2610 device->map = spdk_rdma_utils_create_mem_map(device->pd, &g_nvmf_hooks, IBV_ACCESS_LOCAL_WRITE);
2611 if (!device->map) {
2613 destroy_ib_device(rtransport, device);
2617 assert(device->map != NULL);
2618 assert(device->pd != NULL);
2621 *new_device = device;
2623 SPDK_NOTICELOG("Create IB device %s(%p/%p) succeeded.\n", ibv_get_device_name(context->device),
2624 device, context);
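The create_ib_device() hits (source lines 2547-2624) query the device attributes, warn when memory-management extensions are missing, make the async event fd non-blocking, and allocate a protection domain. A condensed, hedged sketch of that sequence using plain verbs and fcntl calls (error handling trimmed, names hypothetical):

    #include <fcntl.h>
    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Sketch of the per-device bring-up shown above. */
    static struct ibv_pd *
    bring_up_device(struct ibv_context *context, struct ibv_device_attr *attr)
    {
        int flag;

        if (ibv_query_device(context, attr)) {
            fprintf(stderr, "Failed to query RDMA device attributes.\n");
            return NULL;
        }

        if ((attr->device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) == 0) {
            fprintf(stderr, "Device with vendor ID %u lacks memory management extensions.\n",
                    attr->vendor_id);
        }

        /* Same F_GETFL/F_SETFL pattern as source lines 2585-2586. */
        flag = fcntl(context->async_fd, F_GETFL);
        if (fcntl(context->async_fd, F_SETFL, flag | O_NONBLOCK) < 0) {
            return NULL;
        }

        return ibv_alloc_pd(context);
    }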
2647 struct spdk_nvmf_rdma_device *device, *tmp;
2649 TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
2664 TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
2665 rtransport->poll_fds[i].fd = device->context->async_fd;
2677 struct spdk_nvmf_rdma_device *device;
2824 rc = create_ib_device(rtransport, contexts[i], &device);
2829 max_device_sge = spdk_min(max_device_sge, device->attr.max_sge);
2830 device->is_ready = true;
2842 SPDK_NOTICELOG("Adjusting the io unit size to fit the device's maximum I/O size. New I/O unit size %u\n",
2869 struct spdk_nvmf_rdma_device *device)
2871 TAILQ_REMOVE(&rtransport->devices, device, link);
2872 spdk_rdma_utils_free_mem_map(&device->map);
2873 if (device->pd) {
2875 ibv_dealloc_pd(device->pd);
2878 SPDK_DEBUGLOG(rdma, "IB device [%p] is destroyed.\n", device);
2879 free(device);
2904 struct spdk_nvmf_rdma_device *device, *device_tmp;
2925 TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, device_tmp) {
2926 destroy_ib_device(rtransport, device);
2959 struct spdk_nvmf_rdma_device *device;
3063 TAILQ_FOREACH(device, &rtransport->devices, link) {
3064 if (device->context == port->id->verbs && device->is_ready) {
3065 port->device = device;
3069 if (!port->device) {
3070 SPDK_ERRLOG("Accepted a connection with verbs %p, but unable to find a corresponding device.\n",
3110 port->device = NULL;
3151 struct spdk_nvmf_rdma_device *device, bool *has_inflight, bool is_add)
3176 if (rpoller->device == device) {
3198 ctx->device = device;
3219 struct spdk_nvmf_rdma_device *device);
3225 struct spdk_nvmf_rdma_device *device, *tmp_device;
3227 TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp_device) {
3228 if (device->need_destroy) {
3232 if (strcmp(device->context->device->dev_name, context->device->dev_name) == 0) {
3233 return device;
3253 * is valid for a device so this context must be invalid and just remove it. */
3264 SPDK_ERRLOG("Failed to create ib device for context: %s(%p)\n",
3265 ibv_get_device_name(context->device), context);
3271 SPDK_ERRLOG("Failed to add poller for device context: %s(%p)\n",
3272 ibv_get_device_name(context->device), context);
3286 struct spdk_nvmf_rdma_device *device;
3294 /* do not rescan while any device is being destroyed, or the context may be freed when
3297 TAILQ_FOREACH(device, &rtransport->devices, link) {
3298 if (device->need_destroy) {
3312 SPDK_WARNLOG("Failed to init ibv device %p, err %d. Skip rescan.\n", ibv_device_list[i], errno);
3316 SPDK_DEBUGLOG(rdma, "Found new verbs-initialized ibv device %p(%s).\n", ibv_device_list[i],
3317 tmp_verbs->device->dev_name);
3365 SPDK_ERRLOG("Found new IB device, but port %s:%s still fails (%d) to listen.\n",
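The rescan hits (source lines 3286-3365) walk the verbs device list and match entries against already-tracked devices by dev_name. A hedged sketch of that enumeration using the standard libibverbs calls (is_known() is a hypothetical lookup standing in for the walk of rtransport->devices):

    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Sketch: enumerate verbs devices and report any we do not track yet. */
    static void
    rescan_devices(int (*is_known)(const char *dev_name))
    {
        int i, num_devices = 0;
        struct ibv_device **list = ibv_get_device_list(&num_devices);

        if (!list) {
            return;
        }

        for (i = 0; i < num_devices; i++) {
            if (!is_known(list[i]->dev_name)) {
                printf("Found new verbs device %p (%s)\n", (void *)list[i], list[i]->dev_name);
            }
        }

        ibv_free_device_list(list);
    }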
3463 nvmf_rdma_device_supports_last_wqe_reached(struct spdk_nvmf_rdma_device *device)
3466 return !nvmf_rdma_is_rxe_device(device) &&
3467 device->context->device->transport_type != IBV_TRANSPORT_IWARP;
3483 /* device is already destroyed and we should force destroy this qpair. */
3502 nvmf_rdma_device_supports_last_wqe_reached(rqpair->device)) {
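A compact sketch of the predicate behind the last_wqe_reached hits above: the LAST_WQE_REACHED async event is only relied on when the device is neither Soft-RoCE (rxe) nor iWARP. The rxe decision comes from the vendor-ID check in nvmf_rdma_is_rxe_device(); it is passed in here as a flag so the snippet stays self-contained:

    #include <stdbool.h>
    #include <infiniband/verbs.h>

    /* Sketch: whether IBV_EVENT_QP_LAST_WQE_REACHED can be relied on. */
    static bool
    supports_last_wqe_reached(const struct ibv_context *context, bool is_rxe)
    {
        return !is_rxe && context->device->transport_type != IBV_TRANSPORT_IWARP;
    }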
3613 struct spdk_nvmf_rdma_device *device)
3619 rc = nvmf_rdma_manage_poller(rtransport, device, &has_inflight, false);
3621 SPDK_ERRLOG("Failed to handle device removal, rc %d\n", rc);
3626 /* no pollers, destroy the device */
3627 device->ready_to_destroy = true;
3632 if (port->device == device) {
3633 SPDK_NOTICELOG("Port %s:%s on device %s is being removed.\n",
3636 ibv_get_device_name(port->device->context->device));
3639 * RDMA transport. When the device comes back we can retry listening
3659 /* if device removal happens while the ctrl qpair is disconnecting, it's possible that we receive
3664 if (port == tmp_port && port->device && !port->device->need_destroy) {
3665 port->device->need_destroy = true;
3666 nvmf_rdma_handle_device_removal(rtransport, port->device);
3735 /* In case of device removal, kernel IB part triggers IBV_EVENT_DEVICE_FATAL
3743 * corresponding qpair. Otherwise the event refers to a listening device. */
3838 nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
3844 rc = ibv_get_async_event(device->context, &event);
3900 SPDK_ERRLOG("Device Fatal event[%s] received on %s. device: %p\n",
3901 ibv_event_type_str(event.event_type), ibv_get_device_name(device->context->device), device);
3902 device->need_destroy = true;
3927 nvmf_process_ib_events(struct spdk_nvmf_rdma_device *device, uint32_t max_events)
3933 rc = nvmf_process_ib_event(device);
3939 SPDK_DEBUGLOG(rdma, "Device %s: %u events processed\n", device->context->device->name, i);
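A hedged sketch of the async-event drain these nvmf_process_ib_event()/nvmf_process_ib_events() hits come from: pull events off the device context, flag the device for teardown on IBV_EVENT_DEVICE_FATAL, and always acknowledge each event (function and parameter names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Sketch: process up to max_events async events; *fatal is set when the
     * device reported IBV_EVENT_DEVICE_FATAL and should be torn down. */
    static unsigned int
    drain_async_events(struct ibv_context *context, unsigned int max_events, bool *fatal)
    {
        struct ibv_async_event event;
        unsigned int i;

        *fatal = false;
        for (i = 0; i < max_events; i++) {
            if (ibv_get_async_event(context, &event)) {
                break;   /* no more events; async_fd is non-blocking here */
            }
            if (event.event_type == IBV_EVENT_DEVICE_FATAL) {
                fprintf(stderr, "Device fatal event on %s\n",
                        ibv_get_device_name(context->device));
                *fatal = true;
            }
            ibv_ack_async_event(&event);
        }
        return i;
    }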
3948 struct spdk_nvmf_rdma_device *device, *tmp;
3973 TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
3976 if (spdk_likely(!device->need_destroy)) {
3977 nvmf_process_ib_events(device, NVMF_RDMA_MAX_EVENTS_PER_POLL);
3978 if (spdk_unlikely(device->need_destroy)) {
3979 nvmf_rdma_handle_device_removal(rtransport, device);
3984 SPDK_ERRLOG("Received unknown revent %x on device %p\n", (int)revents, device);
4034 struct spdk_nvmf_rdma_poll_group *rgroup, struct spdk_nvmf_rdma_device *device,
4048 poller->device = device;
4057 SPDK_DEBUGLOG(rdma, "Create poller %p on device %p in poll group %p.\n", poller, device, rgroup);
4058 if (rtransport->rdma_opts.no_srq == false && device->num_srq < device->attr.max_srq) {
4059 if ((int)rtransport->rdma_opts.max_srq_depth > device->attr.max_srq_wr) {
4061 rtransport->rdma_opts.max_srq_depth, device->context->device->name, device->attr.max_srq_wr);
4063 poller->max_srq_depth = spdk_min((int)rtransport->rdma_opts.max_srq_depth, device->attr.max_srq_wr);
4065 device->num_srq++;
4067 srq_init_attr.pd = device->pd;
4070 srq_init_attr.srq_init_attr.attr.max_sge = spdk_min(device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
4078 opts.map = device->map;
4103 poller->cq = ibv_create_cq(device->context, num_cqe, poller, NULL, 0);
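The poller-creation hits clamp the SRQ depth to device->attr.max_srq_wr and the SGE count to device->attr.max_sge before creating the shared receive queue and completion queue. A sketch with plain verbs objects (the transport actually goes through its SPDK RDMA provider wrapper and opts.map; DEFAULT_RX_SGE and the helper name are placeholders):

    #include <stdint.h>
    #include <infiniband/verbs.h>

    #define MIN_U32(a, b) ((a) < (b) ? (a) : (b))
    #define DEFAULT_RX_SGE 2   /* placeholder default */

    /* Sketch: create an SRQ whose depth never exceeds the device limit,
     * plus a completion queue sized for the poller. */
    static int
    create_poller_queues(struct ibv_context *context, struct ibv_pd *pd,
                         uint32_t wanted_srq_depth, int num_cqe,
                         struct ibv_srq **srq, struct ibv_cq **cq)
    {
        struct ibv_device_attr attr;
        struct ibv_srq_init_attr srq_init_attr = {0};

        if (ibv_query_device(context, &attr)) {
            return -1;
        }

        srq_init_attr.attr.max_wr = MIN_U32(wanted_srq_depth, (uint32_t)attr.max_srq_wr);
        srq_init_attr.attr.max_sge = MIN_U32((uint32_t)attr.max_sge, DEFAULT_RX_SGE);

        *srq = ibv_create_srq(pd, &srq_init_attr);
        if (!*srq) {
            return -1;
        }

        *cq = ibv_create_cq(context, num_cqe, NULL, NULL, 0);
        if (!*cq) {
            ibv_destroy_srq(*srq);
            return -1;
        }
        return 0;
    }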
4117 struct spdk_nvmf_rdma_device *device;
4120 rc = nvmf_rdma_poller_create(ctx->rtransport, ctx->rgroup, ctx->device, &poller);
4125 device = ctx->device;
4127 device->is_ready = true;
4140 struct spdk_nvmf_rdma_device *device;
4157 TAILQ_FOREACH(device, &rtransport->devices, link) {
4158 rc = nvmf_rdma_poller_create(rtransport, rgroup, device, &poller);
4336 struct spdk_nvmf_rdma_device *device;
4343 device = rqpair->device;
4346 if (poller->device == device) {
4352 SPDK_ERRLOG("No poller found for device.\n");
4867 struct spdk_nvmf_rdma_device *device, *device_tmp;
4870 TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, device_tmp) {
4871 if (device->ready_to_destroy) {
4872 destroy_ib_device(rtransport, device);
4880 SPDK_ERRLOG("Failed to generate poll fds after remove ib device.\n");
4889 struct spdk_nvmf_rdma_device *device = ctx->device;
4893 /* destroy device when last poller is destroyed */
4894 device->ready_to_destroy = true;
5168 ibv_get_device_name(rpoller->device->context->device));