Lines Matching full:id
92 struct rdma_cm_id id; member
169 struct rdma_cm_id *id; in ucma_set_af_ib_support() local
173 ret = rdma_create_id(NULL, &id, NULL, RDMA_PS_IB); in ucma_set_af_ib_support()
182 ret = rdma_bind_addr(id, (struct sockaddr *) &sib); in ucma_set_af_ib_support()
185 rdma_destroy_id(id); in ucma_set_af_ib_support()
411 id_priv->id.verbs = cma_dev->verbs; in ucma_get_device()
412 id_priv->id.pd = cma_dev->pd; in ucma_get_device()
470 if (id_priv->id.route.path_rec) in ucma_free_id()
471 free(id_priv->id.route.path_rec); in ucma_free_id()
474 rdma_destroy_event_channel(id_priv->id.channel); in ucma_free_id()
491 id_priv->id.context = context; in ucma_alloc_id()
492 id_priv->id.ps = ps; in ucma_alloc_id()
493 id_priv->id.qp_type = qp_type; in ucma_alloc_id()
497 id_priv->id.channel = rdma_create_event_channel(); in ucma_alloc_id()
498 if (!id_priv->id.channel) in ucma_alloc_id()
502 id_priv->id.channel = channel; in ucma_alloc_id()
517 struct rdma_cm_id **id, void *context, in rdma_create_id2() argument
538 ret = write(id_priv->id.channel->fd, &cmd, sizeof cmd); in rdma_create_id2()
544 id_priv->handle = resp.id; in rdma_create_id2()
546 *id = &id_priv->id; in rdma_create_id2()
554 struct rdma_cm_id **id, void *context, in rdma_create_id() argument
561 return rdma_create_id2(channel, id, context, ps, qp_type); in rdma_create_id()
571 cmd.id = handle; in ucma_destroy_kern_id()
582 int rdma_destroy_id(struct rdma_cm_id *id) in rdma_destroy_id() argument
587 id_priv = container_of(id, struct cma_id_private, id); in rdma_destroy_id()
588 ret = ucma_destroy_kern_id(id->channel->fd, id_priv->handle); in rdma_destroy_id()
592 if (id_priv->id.event) in rdma_destroy_id()
593 rdma_ack_cm_event(id_priv->id.event); in rdma_destroy_id()
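
The hits above appear to come from librdmacm's cma.c in rdma-core (rdma_create_id(), rdma_destroy_id() and their ucma_* helpers). As a rough illustration only, here is a minimal sketch, assuming the standard librdmacm API, that creates an id on a caller-owned event channel and tears it down; RDMA_PS_TCP and the minimal error handling are placeholder choices.

    #include <rdma/rdma_cma.h>

    /* Minimal sketch: allocate a cm_id on an explicit event channel. */
    static int create_and_destroy_id(void)
    {
        struct rdma_event_channel *ch;
        struct rdma_cm_id *id;
        int ret;

        ch = rdma_create_event_channel();
        if (!ch)
            return -1;

        /* RDMA_PS_TCP selects the connected (RC) port space. */
        ret = rdma_create_id(ch, &id, NULL, RDMA_PS_TCP);
        if (ret) {
            rdma_destroy_event_channel(ch);
            return ret;
        }

        /* ... bind/resolve/connect would normally happen here ... */

        rdma_destroy_id(id);
        rdma_destroy_event_channel(ch);
        return 0;
    }
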
621 static int ucma_query_addr(struct rdma_cm_id *id) in ucma_query_addr() argument
629 id_priv = container_of(id, struct cma_id_private, id); in ucma_query_addr()
630 cmd.id = id_priv->handle; in ucma_query_addr()
633 ret = write(id->channel->fd, &cmd, sizeof cmd); in ucma_query_addr()
639 memcpy(&id->route.addr.src_addr, &resp.src_addr, resp.src_size); in ucma_query_addr()
640 memcpy(&id->route.addr.dst_addr, &resp.dst_addr, resp.dst_size); in ucma_query_addr()
646 id->port_num = resp.port_num; in ucma_query_addr()
647 id->route.addr.addr.ibaddr.pkey = resp.pkey; in ucma_query_addr()
653 static int ucma_query_gid(struct rdma_cm_id *id) in ucma_query_gid() argument
662 id_priv = container_of(id, struct cma_id_private, id); in ucma_query_gid()
663 cmd.id = id_priv->handle; in ucma_query_gid()
666 ret = write(id->channel->fd, &cmd, sizeof cmd); in ucma_query_gid()
673 memcpy(id->route.addr.addr.ibaddr.sgid.raw, sib->sib_addr.sib_raw, in ucma_query_gid()
674 sizeof id->route.addr.addr.ibaddr.sgid); in ucma_query_gid()
677 memcpy(id->route.addr.addr.ibaddr.dgid.raw, sib->sib_addr.sib_raw, in ucma_query_gid()
678 sizeof id->route.addr.addr.ibaddr.dgid); in ucma_query_gid()
713 static int ucma_query_path(struct rdma_cm_id *id) in ucma_query_path() argument
723 id_priv = container_of(id, struct cma_id_private, id); in ucma_query_path()
724 cmd.id = id_priv->handle; in ucma_query_path()
727 ret = write(id->channel->fd, &cmd, sizeof cmd); in ucma_query_path()
734 id->route.path_rec = malloc(sizeof(*id->route.path_rec) * in ucma_query_path()
736 if (!id->route.path_rec) in ucma_query_path()
739 id->route.num_paths = resp->num_paths; in ucma_query_path()
741 ucma_convert_path(&resp->path_data[i], &id->route.path_rec[i]); in ucma_query_path()
747 static int ucma_query_route(struct rdma_cm_id *id) in ucma_query_route() argument
755 id_priv = container_of(id, struct cma_id_private, id); in ucma_query_route()
756 cmd.id = id_priv->handle; in ucma_query_route()
758 ret = write(id->channel->fd, &cmd, sizeof cmd); in ucma_query_route()
765 id->route.path_rec = malloc(sizeof(*id->route.path_rec) * in ucma_query_route()
767 if (!id->route.path_rec) in ucma_query_route()
770 id->route.num_paths = resp.num_paths; in ucma_query_route()
772 ibv_copy_path_rec_from_kern(&id->route.path_rec[i], in ucma_query_route()
776 memcpy(id->route.addr.addr.ibaddr.sgid.raw, resp.ib_route[0].sgid, in ucma_query_route()
777 sizeof id->route.addr.addr.ibaddr.sgid); in ucma_query_route()
778 memcpy(id->route.addr.addr.ibaddr.dgid.raw, resp.ib_route[0].dgid, in ucma_query_route()
779 sizeof id->route.addr.addr.ibaddr.dgid); in ucma_query_route()
780 id->route.addr.addr.ibaddr.pkey = resp.ib_route[0].pkey; in ucma_query_route()
781 memcpy(&id->route.addr.src_addr, &resp.src_addr, in ucma_query_route()
783 memcpy(&id->route.addr.dst_addr, &resp.dst_addr, in ucma_query_route()
790 id_priv->id.port_num = resp.port_num; in ucma_query_route()
796 static int rdma_bind_addr2(struct rdma_cm_id *id, struct sockaddr *addr, in rdma_bind_addr2() argument
804 id_priv = container_of(id, struct cma_id_private, id); in rdma_bind_addr2()
805 cmd.id = id_priv->handle; in rdma_bind_addr2()
809 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_bind_addr2()
813 ret = ucma_query_addr(id); in rdma_bind_addr2()
815 ret = ucma_query_gid(id); in rdma_bind_addr2()
819 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) in rdma_bind_addr() argument
830 return rdma_bind_addr2(id, addr, addrlen); in rdma_bind_addr()
833 id_priv = container_of(id, struct cma_id_private, id); in rdma_bind_addr()
834 cmd.id = id_priv->handle; in rdma_bind_addr()
837 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_bind_addr()
841 return ucma_query_route(id); in rdma_bind_addr()
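
The rdma_bind_addr() hits above end by querying the bound address and route. A minimal sketch of a typical call, assuming a wildcard IPv4 bind; the port number 7471 is arbitrary:

    #include <rdma/rdma_cma.h>
    #include <netinet/in.h>
    #include <string.h>

    /* Sketch: bind a cm_id to 0.0.0.0:7471, which also associates it
     * with a local RDMA device. */
    static int bind_any(struct rdma_cm_id *id)
    {
        struct sockaddr_in sin;

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = htons(7471);
        sin.sin_addr.s_addr = htonl(INADDR_ANY);

        return rdma_bind_addr(id, (struct sockaddr *) &sin);
    }
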
844 int ucma_complete(struct rdma_cm_id *id) in ucma_complete() argument
849 id_priv = container_of(id, struct cma_id_private, id); in ucma_complete()
853 if (id_priv->id.event) { in ucma_complete()
854 rdma_ack_cm_event(id_priv->id.event); in ucma_complete()
855 id_priv->id.event = NULL; in ucma_complete()
858 ret = rdma_get_cm_event(id_priv->id.channel, &id_priv->id.event); in ucma_complete()
862 if (id_priv->id.event->status) { in ucma_complete()
863 if (id_priv->id.event->event == RDMA_CM_EVENT_REJECTED) in ucma_complete()
865 else if (id_priv->id.event->status < 0) in ucma_complete()
866 ret = ERR(-id_priv->id.event->status); in ucma_complete()
868 ret = ERR(-id_priv->id.event->status); in ucma_complete()
873 static int rdma_resolve_addr2(struct rdma_cm_id *id, struct sockaddr *src_addr, in rdma_resolve_addr2() argument
882 id_priv = container_of(id, struct cma_id_private, id); in rdma_resolve_addr2()
883 cmd.id = id_priv->handle; in rdma_resolve_addr2()
890 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_resolve_addr2()
894 memcpy(&id->route.addr.dst_addr, dst_addr, dst_len); in rdma_resolve_addr2()
895 return ucma_complete(id); in rdma_resolve_addr2()
898 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, in rdma_resolve_addr() argument
914 return rdma_resolve_addr2(id, src_addr, src_len, dst_addr, in rdma_resolve_addr()
918 id_priv = container_of(id, struct cma_id_private, id); in rdma_resolve_addr()
919 cmd.id = id_priv->handle; in rdma_resolve_addr()
925 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_resolve_addr()
929 memcpy(&id->route.addr.dst_addr, dst_addr, dst_len); in rdma_resolve_addr()
930 return ucma_complete(id); in rdma_resolve_addr()
933 static int ucma_set_ib_route(struct rdma_cm_id *id) in ucma_set_ib_route() argument
940 hint.ai_family = id->route.addr.src_addr.sa_family; in ucma_set_ib_route()
941 hint.ai_src_len = ucma_addrlen((struct sockaddr *) &id->route.addr.src_addr); in ucma_set_ib_route()
942 hint.ai_src_addr = &id->route.addr.src_addr; in ucma_set_ib_route()
943 hint.ai_dst_len = ucma_addrlen((struct sockaddr *) &id->route.addr.dst_addr); in ucma_set_ib_route()
944 hint.ai_dst_addr = &id->route.addr.dst_addr; in ucma_set_ib_route()
951 ret = rdma_set_option(id, RDMA_OPTION_IB, RDMA_OPTION_IB_PATH, in ucma_set_ib_route()
960 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) in rdma_resolve_route() argument
966 id_priv = container_of(id, struct cma_id_private, id); in rdma_resolve_route()
967 if (id->verbs->device->transport_type == IBV_TRANSPORT_IB) { in rdma_resolve_route()
968 ret = ucma_set_ib_route(id); in rdma_resolve_route()
974 cmd.id = id_priv->handle; in rdma_resolve_route()
977 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_resolve_route()
982 return ucma_complete(id); in rdma_resolve_route()
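
rdma_resolve_addr() and rdma_resolve_route() above finish through ucma_complete(), which for a synchronous id waits for the matching CM event itself. A hedged sketch of the asynchronous variant, assuming the id was created on a caller-owned channel ch and dst is a routable sockaddr; the 2000 ms timeouts are placeholders:

    #include <rdma/rdma_cma.h>

    /* Sketch: resolve the destination address, then the route, waiting
     * for the corresponding CM events on the id's channel. */
    static int resolve(struct rdma_event_channel *ch, struct rdma_cm_id *id,
                       struct sockaddr *dst)
    {
        struct rdma_cm_event *event;
        int ret;

        ret = rdma_resolve_addr(id, NULL, dst, 2000);
        if (ret)
            return ret;
        ret = rdma_get_cm_event(ch, &event);
        if (ret)
            return ret;
        ret = (event->event == RDMA_CM_EVENT_ADDR_RESOLVED) ? 0 : -1;
        rdma_ack_cm_event(event);
        if (ret)
            return ret;

        ret = rdma_resolve_route(id, 2000);
        if (ret)
            return ret;
        ret = rdma_get_cm_event(ch, &event);
        if (ret)
            return ret;
        ret = (event->event == RDMA_CM_EVENT_ROUTE_RESOLVED) ? 0 : -1;
        rdma_ack_cm_event(event);
        return ret;
    }
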
990 static int rdma_init_qp_attr(struct rdma_cm_id *id, struct ibv_qp_attr *qp_attr, in rdma_init_qp_attr() argument
999 id_priv = container_of(id, struct cma_id_private, id); in rdma_init_qp_attr()
1000 cmd.id = id_priv->handle; in rdma_init_qp_attr()
1003 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_init_qp_attr()
1014 static int ucma_modify_qp_rtr(struct rdma_cm_id *id, uint8_t resp_res) in ucma_modify_qp_rtr() argument
1021 if (!id->qp) in ucma_modify_qp_rtr()
1026 ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask); in ucma_modify_qp_rtr()
1030 ret = ibv_modify_qp(id->qp, &qp_attr, qp_attr_mask); in ucma_modify_qp_rtr()
1035 ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask); in ucma_modify_qp_rtr()
1043 id_priv = container_of(id, struct cma_id_private, id); in ucma_modify_qp_rtr()
1044 link_layer = id_priv->cma_dev->port[id->port_num - 1].link_layer; in ucma_modify_qp_rtr()
1051 return rdma_seterrno(ibv_modify_qp(id->qp, &qp_attr, qp_attr_mask)); in ucma_modify_qp_rtr()
1054 static int ucma_modify_qp_rts(struct rdma_cm_id *id, uint8_t init_depth) in ucma_modify_qp_rts() argument
1060 ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask); in ucma_modify_qp_rts()
1066 return rdma_seterrno(ibv_modify_qp(id->qp, &qp_attr, qp_attr_mask)); in ucma_modify_qp_rts()
1069 static int ucma_modify_qp_sqd(struct rdma_cm_id *id) in ucma_modify_qp_sqd() argument
1073 if (!id->qp) in ucma_modify_qp_sqd()
1077 return rdma_seterrno(ibv_modify_qp(id->qp, &qp_attr, IBV_QP_STATE)); in ucma_modify_qp_sqd()
1080 static int ucma_modify_qp_err(struct rdma_cm_id *id) in ucma_modify_qp_err() argument
1084 if (!id->qp) in ucma_modify_qp_err()
1088 return rdma_seterrno(ibv_modify_qp(id->qp, &qp_attr, IBV_QP_STATE)); in ucma_modify_qp_err()
1112 ret = ucma_find_pkey(id_priv->cma_dev, id_priv->id.port_num, in ucma_init_conn_qp3()
1113 id_priv->id.route.addr.addr.ibaddr.pkey, in ucma_init_conn_qp3()
1118 qp_attr.port_num = id_priv->id.port_num; in ucma_init_conn_qp3()
1136 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in ucma_init_conn_qp()
1148 ret = ucma_find_pkey(id_priv->cma_dev, id_priv->id.port_num, in ucma_init_ud_qp3()
1149 id_priv->id.route.addr.addr.ibaddr.pkey, in ucma_init_ud_qp3()
1154 qp_attr.port_num = id_priv->id.port_num; in ucma_init_ud_qp3()
1183 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in ucma_init_ud_qp()
1202 static void ucma_destroy_cqs(struct rdma_cm_id *id) in ucma_destroy_cqs() argument
1204 if (id->qp_type == IBV_QPT_XRC_RECV && id->srq) in ucma_destroy_cqs()
1207 if (id->recv_cq) { in ucma_destroy_cqs()
1208 ibv_destroy_cq(id->recv_cq); in ucma_destroy_cqs()
1209 if (id->send_cq && (id->send_cq != id->recv_cq)) { in ucma_destroy_cqs()
1210 ibv_destroy_cq(id->send_cq); in ucma_destroy_cqs()
1211 id->send_cq = NULL; in ucma_destroy_cqs()
1213 id->recv_cq = NULL; in ucma_destroy_cqs()
1216 if (id->recv_cq_channel) { in ucma_destroy_cqs()
1217 ibv_destroy_comp_channel(id->recv_cq_channel); in ucma_destroy_cqs()
1218 if (id->send_cq_channel && (id->send_cq_channel != id->recv_cq_channel)) { in ucma_destroy_cqs()
1219 ibv_destroy_comp_channel(id->send_cq_channel); in ucma_destroy_cqs()
1220 id->send_cq_channel = NULL; in ucma_destroy_cqs()
1222 id->recv_cq_channel = NULL; in ucma_destroy_cqs()
1226 static int ucma_create_cqs(struct rdma_cm_id *id, uint32_t send_size, uint32_t recv_size) in ucma_create_cqs() argument
1229 id->recv_cq_channel = ibv_create_comp_channel(id->verbs); in ucma_create_cqs()
1230 if (!id->recv_cq_channel) in ucma_create_cqs()
1233 id->recv_cq = ibv_create_cq(id->verbs, recv_size, in ucma_create_cqs()
1234 id, id->recv_cq_channel, 0); in ucma_create_cqs()
1235 if (!id->recv_cq) in ucma_create_cqs()
1240 id->send_cq_channel = ibv_create_comp_channel(id->verbs); in ucma_create_cqs()
1241 if (!id->send_cq_channel) in ucma_create_cqs()
1244 id->send_cq = ibv_create_cq(id->verbs, send_size, in ucma_create_cqs()
1245 id, id->send_cq_channel, 0); in ucma_create_cqs()
1246 if (!id->send_cq) in ucma_create_cqs()
1252 ucma_destroy_cqs(id); in ucma_create_cqs()
1256 int rdma_create_srq_ex(struct rdma_cm_id *id, struct ibv_srq_init_attr_ex *attr) in rdma_create_srq_ex() argument
1262 id_priv = container_of(id, struct cma_id_private, id); in rdma_create_srq_ex()
1267 attr->pd = id->pd; in rdma_create_srq_ex()
1278 ret = ucma_create_cqs(id, 0, attr->attr.max_wr); in rdma_create_srq_ex()
1281 attr->cq = id->recv_cq; in rdma_create_srq_ex()
1286 srq = ibv_create_srq_ex(id->verbs, attr); in rdma_create_srq_ex()
1292 if (!id->pd) in rdma_create_srq_ex()
1293 id->pd = attr->pd; in rdma_create_srq_ex()
1294 id->srq = srq; in rdma_create_srq_ex()
1297 ucma_destroy_cqs(id); in rdma_create_srq_ex()
1301 int rdma_create_srq(struct rdma_cm_id *id, struct ibv_pd *pd, in rdma_create_srq() argument
1309 if (id->qp_type == IBV_QPT_XRC_RECV) { in rdma_create_srq()
1315 ret = rdma_create_srq_ex(id, &attr_ex); in rdma_create_srq()
1320 void rdma_destroy_srq(struct rdma_cm_id *id) in rdma_destroy_srq() argument
1322 ibv_destroy_srq(id->srq); in rdma_destroy_srq()
1323 id->srq = NULL; in rdma_destroy_srq()
1324 ucma_destroy_cqs(id); in rdma_destroy_srq()
1327 int rdma_create_qp_ex(struct rdma_cm_id *id, in rdma_create_qp_ex() argument
1334 if (id->qp) in rdma_create_qp_ex()
1337 id_priv = container_of(id, struct cma_id_private, id); in rdma_create_qp_ex()
1340 attr->pd = id->pd; in rdma_create_qp_ex()
1341 } else if (id->verbs != attr->pd->context) in rdma_create_qp_ex()
1344 if ((id->recv_cq && attr->recv_cq && id->recv_cq != attr->recv_cq) || in rdma_create_qp_ex()
1345 (id->send_cq && attr->send_cq && id->send_cq != attr->send_cq)) in rdma_create_qp_ex()
1348 if (id->qp_type == IBV_QPT_XRC_RECV) { in rdma_create_qp_ex()
1357 ret = ucma_create_cqs(id, attr->send_cq || id->send_cq ? 0 : attr->cap.max_send_wr, in rdma_create_qp_ex()
1358 attr->recv_cq || id->recv_cq ? 0 : attr->cap.max_recv_wr); in rdma_create_qp_ex()
1363 attr->send_cq = id->send_cq; in rdma_create_qp_ex()
1365 attr->recv_cq = id->recv_cq; in rdma_create_qp_ex()
1366 if (id->srq && !attr->srq) in rdma_create_qp_ex()
1367 attr->srq = id->srq; in rdma_create_qp_ex()
1368 qp = ibv_create_qp_ex(id->verbs, attr); in rdma_create_qp_ex()
1374 if (ucma_is_ud_qp(id->qp_type)) in rdma_create_qp_ex()
1381 id->pd = qp->pd; in rdma_create_qp_ex()
1382 id->qp = qp; in rdma_create_qp_ex()
1387 ucma_destroy_cqs(id); in rdma_create_qp_ex()
1391 int rdma_create_qp(struct rdma_cm_id *id, struct ibv_pd *pd, in rdma_create_qp() argument
1399 attr_ex.pd = pd ? pd : id->pd; in rdma_create_qp()
1400 ret = rdma_create_qp_ex(id, &attr_ex); in rdma_create_qp()
1405 void rdma_destroy_qp(struct rdma_cm_id *id) in rdma_destroy_qp() argument
1407 ibv_destroy_qp(id->qp); in rdma_destroy_qp()
1408 id->qp = NULL; in rdma_destroy_qp()
1409 ucma_destroy_cqs(id); in rdma_destroy_qp()
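
rdma_create_qp_ex() above allocates CQs through ucma_create_cqs() when the caller does not provide them. A minimal sketch of the simpler rdma_create_qp() path, letting the library pick the PD and CQs; the capability numbers are placeholders:

    #include <rdma/rdma_cma.h>
    #include <string.h>

    /* Sketch: create an RC QP bound to the cm_id, using id->pd. */
    static int create_qp(struct rdma_cm_id *id)
    {
        struct ibv_qp_init_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.qp_type = IBV_QPT_RC;
        attr.cap.max_send_wr = 16;
        attr.cap.max_recv_wr = 16;
        attr.cap.max_send_sge = 1;
        attr.cap.max_recv_sge = 1;

        /* A NULL pd makes rdma_create_qp() fall back to id->pd. */
        return rdma_create_qp(id, NULL, &attr);
    }
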
1415 if (id_priv->id.ps != RDMA_PS_TCP) in ucma_valid_param()
1418 if (!id_priv->id.qp && !param) in ucma_valid_param()
1469 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) in rdma_connect() argument
1475 id_priv = container_of(id, struct cma_id_private, id); in rdma_connect()
1490 cmd.id = id_priv->handle; in rdma_connect()
1491 if (id->qp) { in rdma_connect()
1493 conn_param, id->qp->qp_num, in rdma_connect()
1494 (id->qp->srq != NULL)); in rdma_connect()
1504 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_connect()
1513 return ucma_complete(id); in rdma_connect()
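
With the route resolved and a QP attached to the id, rdma_connect() issues the connection request; as the hits above show, the QP number and SRQ flag are taken from id->qp. A hedged sketch with placeholder connection parameters:

    #include <rdma/rdma_cma.h>
    #include <string.h>

    /* Sketch: initiate the connection; all values are placeholders. */
    static int do_connect(struct rdma_cm_id *id)
    {
        struct rdma_conn_param param;

        memset(&param, 0, sizeof(param));
        param.responder_resources = 1;
        param.initiator_depth = 1;
        param.retry_count = 7;
        param.rnr_retry_count = 7;

        /* Completion is reported as RDMA_CM_EVENT_ESTABLISHED (or the
         * call returns directly for a synchronous id). */
        return rdma_connect(id, &param);
    }
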
1516 int rdma_listen(struct rdma_cm_id *id, int backlog) in rdma_listen() argument
1523 id_priv = container_of(id, struct cma_id_private, id); in rdma_listen()
1524 cmd.id = id_priv->handle; in rdma_listen()
1527 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_listen()
1532 return ucma_query_addr(id); in rdma_listen()
1534 return ucma_query_route(id); in rdma_listen()
1537 int rdma_get_request(struct rdma_cm_id *listen, struct rdma_cm_id **id) in rdma_get_request() argument
1543 id_priv = container_of(listen, struct cma_id_private, id); in rdma_get_request()
1570 ret = rdma_create_qp(event->id, listen->pd, &attr); in rdma_get_request()
1575 *id = event->id; in rdma_get_request()
1576 (*id)->event = event; in rdma_get_request()
1584 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) in rdma_accept() argument
1590 id_priv = container_of(id, struct cma_id_private, id); in rdma_accept()
1608 if (!ucma_is_ud_qp(id->qp_type)) { in rdma_accept()
1609 ret = ucma_modify_qp_rtr(id, id_priv->responder_resources); in rdma_accept()
1613 ret = ucma_modify_qp_rts(id, id_priv->initiator_depth); in rdma_accept()
1619 cmd.id = id_priv->handle; in rdma_accept()
1621 if (id->qp) in rdma_accept()
1623 conn_param, id->qp->qp_num, in rdma_accept()
1624 (id->qp->srq != NULL)); in rdma_accept()
1630 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_accept()
1632 ucma_modify_qp_err(id); in rdma_accept()
1636 if (ucma_is_ud_qp(id->qp_type)) in rdma_accept()
1639 return ucma_complete(id); in rdma_accept()
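
Combining the passive-side entry points above, a hedged sketch of a synchronous accept path; it assumes listen_id is a synchronous id (no caller-owned event channel, e.g. created with rdma_create_ep()) that is already bound to a local address:

    #include <rdma/rdma_cma.h>
    #include <string.h>

    /* Sketch: accept a single incoming connection. */
    static int serve_one(struct rdma_cm_id *listen_id)
    {
        struct rdma_cm_id *conn_id;
        struct ibv_qp_init_attr attr;
        int ret;

        ret = rdma_listen(listen_id, 1);
        if (ret)
            return ret;

        ret = rdma_get_request(listen_id, &conn_id);
        if (ret)
            return ret;

        memset(&attr, 0, sizeof(attr));
        attr.qp_type = IBV_QPT_RC;
        attr.cap.max_send_wr = attr.cap.max_recv_wr = 16;
        attr.cap.max_send_sge = attr.cap.max_recv_sge = 1;
        ret = rdma_create_qp(conn_id, NULL, &attr);
        if (ret)
            goto err;

        /* NULL conn_param: defaults taken from the incoming request. */
        ret = rdma_accept(conn_id, NULL);
        if (ret)
            goto err;
        return 0;

    err:
        rdma_destroy_id(conn_id);
        return ret;
    }
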
1642 int rdma_reject(struct rdma_cm_id *id, const void *private_data, in rdma_reject() argument
1651 id_priv = container_of(id, struct cma_id_private, id); in rdma_reject()
1652 cmd.id = id_priv->handle; in rdma_reject()
1658 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_reject()
1665 int rdma_notify(struct rdma_cm_id *id, enum ibv_event_type event) in rdma_notify() argument
1673 id_priv = container_of(id, struct cma_id_private, id); in rdma_notify()
1674 cmd.id = id_priv->handle; in rdma_notify()
1676 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_notify()
1683 int ucma_shutdown(struct rdma_cm_id *id) in ucma_shutdown() argument
1685 switch (id->verbs->device->transport_type) { in ucma_shutdown()
1687 return ucma_modify_qp_err(id); in ucma_shutdown()
1689 return ucma_modify_qp_sqd(id); in ucma_shutdown()
1695 int rdma_disconnect(struct rdma_cm_id *id) in rdma_disconnect() argument
1701 ret = ucma_shutdown(id); in rdma_disconnect()
1706 id_priv = container_of(id, struct cma_id_private, id); in rdma_disconnect()
1707 cmd.id = id_priv->handle; in rdma_disconnect()
1709 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_disconnect()
1713 return ucma_complete(id); in rdma_disconnect()
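
rdma_disconnect() first transitions the QP via ucma_shutdown() (error state on IB, SQD on iWARP) and then writes the disconnect command. A short sketch of a typical teardown order, assuming a connected id:

    #include <rdma/rdma_cma.h>

    /* Sketch: orderly teardown of a connected cm_id. */
    static void teardown(struct rdma_cm_id *id)
    {
        rdma_disconnect(id);    /* also flushes the QP via ucma_shutdown() */
        if (id->qp)
            rdma_destroy_qp(id);
        rdma_destroy_id(id);
    }
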
1716 static int rdma_join_multicast2(struct rdma_cm_id *id, struct sockaddr *addr, in rdma_join_multicast2() argument
1724 id_priv = container_of(id, struct cma_id_private, id); in rdma_join_multicast2()
1746 cmd.id = id_priv->handle; in rdma_join_multicast2()
1752 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_join_multicast2()
1761 cmd.id = id_priv->handle; in rdma_join_multicast2()
1765 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_join_multicast2()
1774 mc->handle = resp.id; in rdma_join_multicast2()
1775 return ucma_complete(id); in rdma_join_multicast2()
1788 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, in rdma_join_multicast() argument
1797 return rdma_join_multicast2(id, addr, addrlen, context); in rdma_join_multicast()
1800 int rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) in rdma_leave_multicast() argument
1812 id_priv = container_of(id, struct cma_id_private, id); in rdma_leave_multicast()
1825 if (id->qp) in rdma_leave_multicast()
1826 ibv_detach_mcast(id->qp, &mc->mgid, mc->mlid); in rdma_leave_multicast()
1829 cmd.id = mc->handle; in rdma_leave_multicast()
1831 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_leave_multicast()
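
A brief sketch of the multicast entry points above; it assumes the group address mcast_addr was already resolved with rdma_resolve_addr() and that the id uses a UD QP, which the join attaches to the group:

    #include <rdma/rdma_cma.h>

    /* Sketch: join and later leave a multicast group; completion of the
     * join is reported as RDMA_CM_EVENT_MULTICAST_JOIN. */
    static int join_group(struct rdma_cm_id *id, struct sockaddr *mcast_addr)
    {
        return rdma_join_multicast(id, mcast_addr, NULL);
    }

    static int leave_group(struct rdma_cm_id *id, struct sockaddr *mcast_addr)
    {
        return rdma_leave_multicast(id, mcast_addr);
    }
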
1888 evt->event.status = ucma_query_addr(&evt->id_priv->id); in ucma_process_addr_resolved()
1890 evt->id_priv->id.verbs->device->transport_type == IBV_TRANSPORT_IB) in ucma_process_addr_resolved()
1891 evt->event.status = ucma_query_gid(&evt->id_priv->id); in ucma_process_addr_resolved()
1893 evt->event.status = ucma_query_route(&evt->id_priv->id); in ucma_process_addr_resolved()
1902 if (evt->id_priv->id.verbs->device->transport_type != IBV_TRANSPORT_IB) in ucma_process_route_resolved()
1906 evt->event.status = ucma_query_path(&evt->id_priv->id); in ucma_process_route_resolved()
1908 evt->event.status = ucma_query_route(&evt->id_priv->id); in ucma_process_route_resolved()
1914 static int ucma_query_req_info(struct rdma_cm_id *id) in ucma_query_req_info() argument
1919 return ucma_query_route(id); in ucma_query_req_info()
1921 ret = ucma_query_addr(id); in ucma_query_req_info()
1925 ret = ucma_query_gid(id); in ucma_query_req_info()
1929 ret = ucma_query_path(id); in ucma_query_req_info()
1942 id_priv = ucma_alloc_id(evt->id_priv->id.channel, in ucma_process_conn_req()
1943 evt->id_priv->id.context, evt->id_priv->id.ps, in ucma_process_conn_req()
1944 evt->id_priv->id.qp_type); in ucma_process_conn_req()
1946 ucma_destroy_kern_id(evt->id_priv->id.channel->fd, handle); in ucma_process_conn_req()
1951 evt->event.listen_id = &evt->id_priv->id; in ucma_process_conn_req()
1952 evt->event.id = &id_priv->id; in ucma_process_conn_req()
1959 ret = rdma_migrate_id(&id_priv->id, NULL); in ucma_process_conn_req()
1964 ret = ucma_query_req_info(&id_priv->id); in ucma_process_conn_req()
1971 rdma_destroy_id(&id_priv->id); in ucma_process_conn_req()
1982 ret = ucma_modify_qp_rtr(&id_priv->id, RDMA_MAX_RESP_RES); in ucma_process_conn_resp()
1986 ret = ucma_modify_qp_rts(&id_priv->id, RDMA_MAX_INIT_DEPTH); in ucma_process_conn_resp()
1991 cmd.id = id_priv->handle; in ucma_process_conn_resp()
1993 ret = write(id_priv->id.channel->fd, &cmd, sizeof cmd); in ucma_process_conn_resp()
2001 ucma_modify_qp_err(&id_priv->id); in ucma_process_conn_resp()
2010 if (!evt->id_priv->id.qp) in ucma_process_join()
2013 return rdma_seterrno(ibv_attach_mcast(evt->id_priv->id.qp, in ucma_process_join()
2089 * issue by looking up the cma_id based on the kernel's id when the in rdma_get_cm_event()
2097 evt->id_priv = ucma_lookup_id(resp.id); in rdma_get_cm_event()
2108 evt->event.id = &evt->id_priv->id; in rdma_get_cm_event()
2120 if (ucma_is_ud_qp(evt->id_priv->id.qp_type)) in rdma_get_cm_event()
2125 ret = ucma_process_conn_req(evt, resp.id); in rdma_get_cm_event()
2140 if (ucma_is_ud_qp(evt->id_priv->id.qp_type)) { in rdma_get_cm_event()
2153 ucma_modify_qp_err(evt->event.id); in rdma_get_cm_event()
2165 evt->event.id = &evt->id_priv->id; in rdma_get_cm_event()
2175 evt->event.id = &evt->id_priv->id; in rdma_get_cm_event()
2180 evt->event.id = &evt->id_priv->id; in rdma_get_cm_event()
2182 if (ucma_is_ud_qp(evt->id_priv->id.qp_type)) in rdma_get_cm_event()
2233 int rdma_set_option(struct rdma_cm_id *id, int level, int optname, in rdma_set_option() argument
2241 id_priv = container_of(id, struct cma_id_private, id); in rdma_set_option()
2242 cmd.id = id_priv->handle; in rdma_set_option()
2248 ret = write(id->channel->fd, &cmd, sizeof cmd); in rdma_set_option()
2255 int rdma_migrate_id(struct rdma_cm_id *id, struct rdma_event_channel *channel) in rdma_migrate_id() argument
2262 id_priv = container_of(id, struct cma_id_private, id); in rdma_migrate_id()
2273 cmd.id = id_priv->handle; in rdma_migrate_id()
2274 cmd.fd = id->channel->fd; in rdma_migrate_id()
2286 if (id->event) { in rdma_migrate_id()
2287 rdma_ack_cm_event(id->event); in rdma_migrate_id()
2288 id->event = NULL; in rdma_migrate_id()
2290 rdma_destroy_event_channel(id->channel); in rdma_migrate_id()
2296 * there are any outstanding events on the current channel for this id in rdma_migrate_id()
2297 * to prevent the user from processing events for this id on the old in rdma_migrate_id()
2302 id->channel = channel; in rdma_migrate_id()
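
rdma_migrate_id() above re-parents an id onto another event channel (and, judging from its use in ucma_process_conn_req(), a NULL channel switches the id to synchronous operation). A sketch that gives a child id its own channel so its events are separated from the listener's:

    #include <rdma/rdma_cma.h>

    /* Sketch: move a cm_id onto a freshly created event channel. */
    static struct rdma_event_channel *isolate_id(struct rdma_cm_id *id)
    {
        struct rdma_event_channel *ch;

        ch = rdma_create_event_channel();
        if (!ch)
            return NULL;
        if (rdma_migrate_id(id, ch)) {
            rdma_destroy_event_channel(ch);
            return NULL;
        }
        return ch;
    }
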
2310 static int ucma_passive_ep(struct rdma_cm_id *id, struct rdma_addrinfo *res, in ucma_passive_ep() argument
2317 ret = rdma_bind_addr2(id, res->ai_src_addr, res->ai_src_len); in ucma_passive_ep()
2319 ret = rdma_bind_addr(id, res->ai_src_addr); in ucma_passive_ep()
2323 id_priv = container_of(id, struct cma_id_private, id); in ucma_passive_ep()
2325 id->pd = pd; in ucma_passive_ep()
2339 int rdma_create_ep(struct rdma_cm_id **id, struct rdma_addrinfo *res, in rdma_create_ep() argument
2384 id_priv = container_of(cm_id, struct cma_id_private, id); in rdma_create_ep()
2395 *id = cm_id; in rdma_create_ep()
2403 void rdma_destroy_ep(struct rdma_cm_id *id) in rdma_destroy_ep() argument
2407 if (id->qp) in rdma_destroy_ep()
2408 rdma_destroy_qp(id); in rdma_destroy_ep()
2410 if (id->srq) in rdma_destroy_ep()
2411 rdma_destroy_srq(id); in rdma_destroy_ep()
2413 id_priv = container_of(id, struct cma_id_private, id); in rdma_destroy_ep()
2417 rdma_destroy_id(id); in rdma_destroy_ep()
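
rdma_create_ep() wraps id creation, binding (for passive endpoints via ucma_passive_ep()) and optional QP setup. A hedged sketch of the usual pairing with rdma_getaddrinfo(); the service string "7471" and the QP sizes are placeholders:

    #include <rdma/rdma_cma.h>
    #include <string.h>

    /* Sketch: resolve a local listening address and create an endpoint. */
    static int make_listen_ep(struct rdma_cm_id **listen_id)
    {
        struct rdma_addrinfo hints, *res;
        struct ibv_qp_init_attr attr;
        int ret;

        memset(&hints, 0, sizeof(hints));
        hints.ai_flags = RAI_PASSIVE;
        hints.ai_port_space = RDMA_PS_TCP;
        ret = rdma_getaddrinfo(NULL, "7471", &hints, &res);
        if (ret)
            return ret;

        memset(&attr, 0, sizeof(attr));
        attr.qp_type = IBV_QPT_RC;
        attr.cap.max_send_wr = attr.cap.max_recv_wr = 16;
        attr.cap.max_send_sge = attr.cap.max_recv_sge = 1;

        /* NULL pd: fall back to the id's default PD. */
        ret = rdma_create_ep(listen_id, res, NULL, &attr);
        rdma_freeaddrinfo(res);
        return ret;
    }
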
2420 int ucma_max_qpsize(struct rdma_cm_id *id) in ucma_max_qpsize() argument
2425 id_priv = container_of(id, struct cma_id_private, id); in ucma_max_qpsize()
2426 if (id && id_priv->cma_dev) { in ucma_max_qpsize()
2452 __be16 rdma_get_src_port(struct rdma_cm_id *id) in rdma_get_src_port() argument
2454 return ucma_get_port(&id->route.addr.src_addr); in rdma_get_src_port()
2457 __be16 rdma_get_dst_port(struct rdma_cm_id *id) in rdma_get_dst_port() argument
2459 return ucma_get_port(&id->route.addr.dst_addr); in rdma_get_dst_port()
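
The port getters return the value in network byte order, so a short usage sketch:

    #include <rdma/rdma_cma.h>
    #include <arpa/inet.h>
    #include <stdio.h>

    /* Sketch: the getters return __be16; convert before printing. */
    static void print_ports(struct rdma_cm_id *id)
    {
        printf("src port %u, dst port %u\n",
               ntohs(rdma_get_src_port(id)),
               ntohs(rdma_get_dst_port(id)));
    }
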