// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018 Mellanox Technologies, Ltd */

#include <unistd.h>

#include <rte_errno.h>
#include <rte_malloc.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_utils.h"


/**
 * Allocate flow counters via devx interface.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param bulk_n_128
 *   Bulk counter number in units of 128 counters.
 *
 * @return
 *   Pointer to counter object on success, NULL otherwise and
 *   rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx, uint32_t bulk_n_128)
{
	struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
	uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};

	if (!dcs) {
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
	dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
					      sizeof(in), out, sizeof(out));
	if (!dcs->obj) {
		DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
		rte_errno = errno;
		rte_free(dcs);
		return NULL;
	}
	dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return dcs;
}

/**
 * Query flow counters values.
 *
 * @param[in] dcs
 *   devx object that was obtained from mlx5_devx_cmd_fc_alloc.
 * @param[in] clear
 *   Whether hardware should clear the counters after the query or not.
 * @param[in] n_counters
 *   0 to read a single counter, otherwise the number of counters to read
 *   in a batch query.
 * @param pkts
 *   The number of packets that matched the flow.
 * @param bytes
 *   The number of bytes that matched the flow.
 * @param mkey
 *   The mkey key for batch query.
 * @param addr
 *   The address in the mkey range for batch query.
 * @param cmd_comp
 *   The completion object for asynchronous batch query.
 * @param async_id
 *   The ID to be returned in the asynchronous batch query response.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				 int clear, uint32_t n_counters,
				 uint64_t *pkts, uint64_t *bytes,
				 uint32_t mkey, void *addr,
				 struct mlx5dv_devx_cmd_comp *cmd_comp,
				 uint64_t async_id)
{
	int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			MLX5_ST_SZ_BYTES(traffic_counter);
	uint32_t out[out_len];
	uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int rc;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
	MLX5_SET(query_flow_counter_in, in, clear, !!clear);

	if (n_counters) {
		MLX5_SET(query_flow_counter_in, in, num_of_counters,
			 n_counters);
		MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
		MLX5_SET(query_flow_counter_in, in, mkey, mkey);
		MLX5_SET64(query_flow_counter_in, in, address,
			   (uint64_t)(uintptr_t)addr);
	}
	if (!cmd_comp)
		rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
					       out_len);
	else
		rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
						     out_len, async_id,
						     cmd_comp);
	if (rc) {
		DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
		rte_errno = rc;
		return -rc;
	}
	if (!n_counters) {
		stats = MLX5_ADDR_OF(query_flow_counter_out,
				     out, flow_statistics);
		*pkts = MLX5_GET64(traffic_counter, stats, packets);
		*bytes = MLX5_GET64(traffic_counter, stats, octets);
	}
	return 0;
}

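/*
 * Usage sketch (illustrative only, compiled out by default): allocate one
 * bulk of 128 flow counters and read the first counter synchronously.
 * "ctx" is assumed to be a valid ibv_context from mlx5dv_open_device();
 * the guard macro is hypothetical and not defined by the driver.
 */
#ifdef MLX5_DEVX_CMDS_USAGE_SKETCHES
static int
devx_sketch_counter_read(struct ibv_context *ctx)
{
	struct mlx5_devx_obj *dcs;
	uint64_t pkts = 0, bytes = 0;
	int ret;

	dcs = mlx5_devx_cmd_flow_counter_alloc(ctx, 1); /* 1 x 128 counters. */
	if (!dcs)
		return -rte_errno;
	/* Single-counter (n_counters == 0), synchronous, non-clearing read. */
	ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, 0, &pkts, &bytes,
					       0, NULL, NULL, 0);
	if (!ret)
		DRV_LOG(DEBUG, "counter %u: pkts=%lu bytes=%lu", dcs->id,
			(unsigned long)pkts, (unsigned long)bytes);
	mlx5_devx_cmd_destroy(dcs);
	return ret;
}
#endif /* MLX5_DEVX_CMDS_USAGE_SKETCHES */
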
/**
 * Create a new mkey.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param[in] attr
 *   Attributes of the requested mkey.
 *
 * @return
 *   Pointer to Devx mkey on success, NULL otherwise and rte_errno
 *   is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,
			  struct mlx5_devx_mkey_attr *attr)
{
	struct mlx5_klm *klm_array = attr->klm_array;
	int klm_num = attr->klm_num;
	int in_size_dw = MLX5_ST_SZ_DW(create_mkey_in) +
		     (klm_num ? RTE_ALIGN(klm_num, 4) : 0) * MLX5_ST_SZ_DW(klm);
	uint32_t in[in_size_dw];
	uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
	void *mkc;
	struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
	size_t pgsize;
	uint32_t translation_size;

	if (!mkey) {
		rte_errno = ENOMEM;
		return NULL;
	}
	memset(in, 0, in_size_dw * 4);
	pgsize = sysconf(_SC_PAGESIZE);
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	if (klm_num > 0) {
		int i;
		uint8_t *klm = (uint8_t *)MLX5_ADDR_OF(create_mkey_in, in,
						       klm_pas_mtt);

		translation_size = RTE_ALIGN(klm_num, 4);
		for (i = 0; i < klm_num; i++) {
			MLX5_SET(klm, klm, byte_count, klm_array[i].byte_count);
			MLX5_SET(klm, klm, mkey, klm_array[i].mkey);
			MLX5_SET64(klm, klm, address, klm_array[i].address);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		for (; i < (int)translation_size; i++) {
			MLX5_SET(klm, klm, mkey, 0x0);
			MLX5_SET64(klm, klm, address, 0x0);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		MLX5_SET(mkc, mkc, access_mode_1_0, attr->log_entity_size ?
			 MLX5_MKC_ACCESS_MODE_KLM_FBS :
			 MLX5_MKC_ACCESS_MODE_KLM);
		MLX5_SET(mkc, mkc, log_page_size, attr->log_entity_size);
	} else {
		translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
		MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
		MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
	}
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 translation_size);
	MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
	MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
	MLX5_SET(mkc, mkc, lw, 0x1);
	MLX5_SET(mkc, mkc, lr, 0x1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, attr->pd);
	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
	if (attr->relaxed_ordering == 1) {
		MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
		MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
	}
	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
	MLX5_SET64(mkc, mkc, len, attr->size);
	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
					       sizeof(out));
	if (!mkey->obj) {
		DRV_LOG(ERR, "Can't create %sdirect mkey - error %d",
			klm_num ? "an in" : "a ", errno);
		rte_errno = errno;
		rte_free(mkey);
		return NULL;
	}
	mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
	mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
	return mkey;
}

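/*
 * Usage sketch (illustrative only, compiled out by default): create a
 * direct (MTT) mkey over a registered umem. The umem id and PD number
 * are assumed to come from earlier mlx5dv_devx_umem_reg()/PD setup; the
 * guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_CMDS_USAGE_SKETCHES
static struct mlx5_devx_obj *
devx_sketch_mkey_create(struct ibv_context *ctx, void *buf, size_t size,
			uint32_t umem_id, uint32_t pdn)
{
	struct mlx5_devx_mkey_attr attr = {
		.addr = (uintptr_t)buf,
		.size = size,
		.umem_id = umem_id,
		.pd = pdn,
		.klm_num = 0, /* No KLM entries selects the MTT path above. */
	};

	return mlx5_devx_cmd_mkey_create(ctx, &attr);
}
#endif /* MLX5_DEVX_CMDS_USAGE_SKETCHES */
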
/**
 * Get status of devx command response.
 * Mainly used for asynchronous commands.
 *
 * @param[in] out
 *   The out response buffer.
 *
 * @return
 *   0 on success, non-zero value otherwise.
 */
int
mlx5_devx_get_out_command_status(void *out)
{
	int status;

	if (!out)
		return -EINVAL;
	status = MLX5_GET(query_flow_counter_out, out, status);
	if (status) {
		int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);

		DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
			syndrome);
	}
	return status;
}

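/*
 * Usage sketch (illustrative only, compiled out by default): translate a
 * DevX response buffer into an errno-style return. "out" is assumed to
 * be a completed command response, e.g. one retrieved from the
 * asynchronous completion channel; the guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_CMDS_USAGE_SKETCHES
static int
devx_sketch_check_response(void *out)
{
	/* Non-zero status means the firmware rejected the command. */
	if (mlx5_devx_get_out_command_status(out))
		return -EIO; /* Illustrative errno mapping. */
	return 0;
}
#endif /* MLX5_DEVX_CMDS_USAGE_SKETCHES */
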
/**
 * Destroy any object allocated by a Devx API.
 *
 * @param[in] obj
 *   Pointer to a general object.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
{
	int ret;

	if (!obj)
		return 0;
	ret = mlx5_glue->devx_obj_destroy(obj->obj);
	rte_free(obj);
	return ret;
}

/**
 * Query NIC vport context.
 * Fills minimal inline attribute.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param[in] vport
 *   vport index.
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
static int
mlx5_devx_cmd_query_nic_vport_context(struct ibv_context *ctx,
				      unsigned int vport,
				      struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	void *vctx;
	int status, syndrome, rc;

	/* Query NIC vport context to determine inline mode. */
	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(query_nic_vport_context_out, out, status);
	syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query NIC vport context, "
			"status %x, syndrome = %x",
			status, syndrome);
		return -1;
	}
	vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
			    nic_vport_context);
	attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
					   min_wqe_inline_mode);
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}

/**
 * Query NIC vDPA attributes.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param[out] vdpa_attr
 *   vDPA Attributes structure to fill.
 */
static void
mlx5_devx_cmd_query_hca_vdpa_attr(struct ibv_context *ctx,
				  struct mlx5_hca_vdpa_attr *vdpa_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	void *hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	int status, syndrome, rc;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);
	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (rc || status) {
		RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities,"
			" status %x, syndrome = %x", status, syndrome);
		vdpa_attr->valid = 0;
	} else {
		vdpa_attr->valid = 1;
		vdpa_attr->desc_tunnel_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 desc_tunnel_offload_type);
		vdpa_attr->eth_frame_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 eth_frame_offload_type);
		vdpa_attr->virtio_version_1_0 =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_version_1_0);
		vdpa_attr->tso_ipv4 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv4);
		vdpa_attr->tso_ipv6 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv6);
		vdpa_attr->tx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      tx_csum);
		vdpa_attr->rx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      rx_csum);
		vdpa_attr->event_mode = MLX5_GET(virtio_emulation_cap, hcattr,
						 event_mode);
		vdpa_attr->virtio_queue_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_queue_type);
		vdpa_attr->log_doorbell_stride =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_stride);
		vdpa_attr->log_doorbell_bar_size =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_bar_size);
		vdpa_attr->doorbell_bar_offset =
			MLX5_GET64(virtio_emulation_cap, hcattr,
				   doorbell_bar_offset);
		vdpa_attr->max_num_virtio_queues =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 max_num_virtio_queues);
		vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_a);
		vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_b);
		vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_a);
		vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_b);
		vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_a);
		vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_b);
	}
}

/**
 * Query HCA attributes.
 * These attributes are used to check at run time whether the device
 * has the required capabilities.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
			     struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	void *hcattr;
	int status, syndrome, rc;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in), out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
			"status %x, syndrome = %x",
			status, syndrome);
		return -1;
	}
	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	attr->flow_counter_bulk_alloc_bitmap =
			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
	attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
					    flow_counters_dump);
	attr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr,
					  log_max_rqt_size);
	attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
	attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
	attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
						log_max_hairpin_queues);
	attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
						    log_max_hairpin_wq_data_sz);
	attr->log_max_hairpin_num_packets = MLX5_GET
		(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
	attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
	attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
					  eth_net_offloads);
	attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
	attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
					       flex_parser_protocols);
	attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
	attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
					 general_obj_types) &
			      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
	if (attr->qos.sup) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);
		rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
						 out, sizeof(out));
		if (rc)
			goto error;
		/* Refresh status/syndrome: they belong to the QOS query now. */
		status = MLX5_GET(query_hca_cap_out, out, status);
		syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
		if (status) {
			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
				" status %x, syndrome = %x",
				status, syndrome);
			return -1;
		}
		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
		attr->qos.srtcm_sup =
				MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
		attr->qos.log_max_flow_meter =
				MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
		attr->qos.flow_meter_reg_c_ids =
				MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
		attr->qos.flow_meter_reg_share =
				MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
	}
	if (attr->vdpa.valid)
		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
	if (!attr->eth_net_offloads)
		return 0;

	/* Query HCA offloads for Ethernet protocol. */
	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc) {
		attr->eth_net_offloads = 0;
		goto error;
	}
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
			"status %x, syndrome = %x",
			status, syndrome);
		attr->eth_net_offloads = 0;
		return -1;
	}
	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_vlan_insert);
	attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_cap);
	attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
					hcattr, tunnel_lro_gre);
	attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
					  hcattr, tunnel_lro_vxlan);
	attr->lro_max_msg_sz_mode = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, lro_max_msg_sz_mode);
	for (int i = 0; i < MLX5_LRO_NUM_SUPP_PERIODS; i++) {
		attr->lro_timer_supported_periods[i] =
			MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_timer_supported_periods[i]);
	}
	attr->tunnel_stateless_geneve_rx =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, tunnel_stateless_geneve_rx);
	attr->geneve_max_opt_len =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, max_geneve_opt_len);
	attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_inline_mode);
	attr->tunnel_stateless_gtp = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, tunnel_stateless_gtp);
	if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return 0;
	if (attr->eth_virt) {
		rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
		if (rc) {
			attr->eth_virt = 0;
			goto error;
		}
	}
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}

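/*
 * Usage sketch (illustrative only, compiled out by default): probe the
 * device capabilities once at startup and gate an optional feature on
 * them. The guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_CMDS_USAGE_SKETCHES
static int
devx_sketch_check_lro_support(struct ibv_context *ctx)
{
	struct mlx5_hca_attr attr = { 0 };

	if (mlx5_devx_cmd_query_hca_attr(ctx, &attr))
		return -1;
	if (!attr.eth_net_offloads || !attr.lro_cap)
		return -ENOTSUP; /* LRO cannot be enabled on this device. */
	return 0;
}
#endif /* MLX5_DEVX_CMDS_USAGE_SKETCHES */
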
/**
 * Query TIS transport domain from QP verbs object using DevX API.
 *
 * @param[in] qp
 *   Pointer to verbs QP returned by ibv_create_qp.
 * @param[in] tis_num
 *   TIS number of TIS to query.
 * @param[out] tis_td
 *   Pointer to TIS transport domain variable, to be set by the routine.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,
			      uint32_t *tis_td)
{
	uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
	int rc;
	void *tis_ctx;

	MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
	MLX5_SET(query_tis_in, in, tisn, tis_num);
	rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
	if (rc) {
		DRV_LOG(ERR, "Failed to query QP using DevX");
		return -rc;
	}
	tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
	*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
	return 0;
}

/**
 * Fill WQ data for DevX API command.
 * Utility function for use when creating DevX objects containing a WQ.
 *
 * @param[in] wq_ctx
 *   Pointer to WQ context to fill with data.
 * @param [in] wq_attr
 *   Pointer to WQ attributes structure to fill in WQ context.
 */
static void
devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
{
	MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
	MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
	MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
	MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
	MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
	MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
	MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
	MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
	MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
	MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
	MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
	MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
	MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
	MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
	MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
	MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
	MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
	MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
	MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
		 wq_attr->log_hairpin_num_packets);
	MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
	MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
		 wq_attr->single_wqe_log_num_of_strides);
	MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
	MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
		 wq_attr->single_stride_log_num_of_bytes);
	MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
	MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
	MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
}

/**
 * Create RQ using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] rq_attr
 *   Pointer to create RQ attributes structure.
 * @param [in] socket
 *   CPU socket ID for allocations.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rq(struct ibv_context *ctx,
			struct mlx5_devx_create_rq_attr *rq_attr,
			int socket)
{
	uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *rq = NULL;

	rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
	if (!rq) {
		DRV_LOG(ERR, "Failed to allocate RQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
	rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
	MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
	MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
	MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
	MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
	MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
	MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
	wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
	wq_attr = &rq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!rq->obj) {
		DRV_LOG(ERR, "Failed to create RQ using DevX");
		rte_errno = errno;
		rte_free(rq);
		return NULL;
	}
	rq->id = MLX5_GET(create_rq_out, out, rqn);
	return rq;
}

/**
 * Modify RQ using DevX API.
 *
 * @param[in] rq
 *   Pointer to RQ object structure.
 * @param [in] rq_attr
 *   Pointer to modify RQ attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
			struct mlx5_devx_modify_rq_attr *rq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	int ret;

	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
	MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);
	MLX5_SET(modify_rq_in, in, rqn, rq->id);
	MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);
	rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)
		MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)
		MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)
		MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {
		wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
		MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);
	}
	ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQ using DevX");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}

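/*
 * Usage sketch (illustrative only, compiled out by default): move a
 * freshly created RQ from RST to RDY. The state values are the RQC
 * states from mlx5_prm.h; the guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_CMDS_USAGE_SKETCHES
static int
devx_sketch_rq_start(struct mlx5_devx_obj *rq)
{
	struct mlx5_devx_modify_rq_attr rq_attr = {
		.rq_state = MLX5_RQC_STATE_RST, /* Current state. */
		.state = MLX5_RQC_STATE_RDY,    /* Requested state. */
	};

	return mlx5_devx_cmd_modify_rq(rq, &rq_attr);
}
#endif /* MLX5_DEVX_CMDS_USAGE_SKETCHES */
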
/**
 * Create TIR using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] tir_attr
 *   Pointer to TIR attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tir(struct ibv_context *ctx,
			 struct mlx5_devx_tir_attr *tir_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
	void *tir_ctx, *outer, *inner, *rss_key;
	struct mlx5_devx_obj *tir = NULL;

	tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
	if (!tir) {
		DRV_LOG(ERR, "Failed to allocate TIR data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
	MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
		 tir_attr->lro_timeout_period_usecs);
	MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
	MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
	MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
	MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
	MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
		 tir_attr->tunneled_offload_en);
	MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
	MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
	MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
	MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
	rss_key = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_toeplitz_key);
	memcpy(rss_key, tir_attr->rx_hash_toeplitz_key, MLX5_RSS_HASH_KEY_LEN);
	outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
	MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
	MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
	MLX5_SET(rx_hash_field_select, outer, selected_fields,
		 tir_attr->rx_hash_field_selector_outer.selected_fields);
	inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
	MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l3_prot_type);
	MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l4_prot_type);
	MLX5_SET(rx_hash_field_select, inner, selected_fields,
		 tir_attr->rx_hash_field_selector_inner.selected_fields);
	tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tir->obj) {
		DRV_LOG(ERR, "Failed to create TIR using DevX");
		rte_errno = errno;
		rte_free(tir);
		return NULL;
	}
	tir->id = MLX5_GET(create_tir_out, out, tirn);
	return tir;
}

/**
 * Create RQT using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t *in = NULL;
	uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
	void *rqt_ctx;
	struct mlx5_devx_obj *rqt = NULL;
	int i;

	in = rte_calloc(__func__, 1, inlen, 0);
	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT IN data");
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
	if (!rqt) {
		DRV_LOG(ERR, "Failed to allocate RQT data");
		rte_errno = ENOMEM;
		rte_free(in);
		return NULL;
	}
	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
	rte_free(in);
	if (!rqt->obj) {
		DRV_LOG(ERR, "Failed to create RQT using DevX");
		rte_errno = errno;
		rte_free(rqt);
		return NULL;
	}
	rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
	return rqt;
}

/**
 * Modify RQT using DevX API.
 *
 * @param[in] rqt
 *   Pointer to RQT DevX object structure.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
	uint32_t *in = rte_calloc(__func__, 1, inlen, 0);
	void *rqt_ctx;
	int i;
	int ret;

	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT modify IN data.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	MLX5_SET(modify_rqt_in, in, rqtn, rqt->id);
	MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
	rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
	rte_free(in);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQT using DevX.");
		rte_errno = errno;
		return -rte_errno;
	}
	return ret;
}

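/*
 * Usage sketch (illustrative only, compiled out by default): build an
 * indirection table over "n" RQs. mlx5_devx_rqt_attr is assumed to end
 * with a flexible rq_list[] array as declared in mlx5_devx_cmds.h; the
 * guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_CMDS_USAGE_SKETCHES
static struct mlx5_devx_obj *
devx_sketch_rqt_create(struct ibv_context *ctx, const uint32_t *rqns,
		       uint32_t n)
{
	struct mlx5_devx_rqt_attr *rqt_attr;
	struct mlx5_devx_obj *rqt;
	uint32_t i;

	rqt_attr = rte_zmalloc(__func__, sizeof(*rqt_attr) +
			       n * sizeof(rqns[0]), 0);
	if (!rqt_attr) {
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt_attr->rqt_max_size = n;
	rqt_attr->rqt_actual_size = n;
	for (i = 0; i < n; i++)
		rqt_attr->rq_list[i] = rqns[i];
	rqt = mlx5_devx_cmd_create_rqt(ctx, rqt_attr);
	rte_free(rqt_attr);
	return rqt;
}
#endif /* MLX5_DEVX_CMDS_USAGE_SKETCHES */
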
/**
 * Create SQ using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] sq_attr
 *   Pointer to SQ attributes structure.
 * @param [in] socket
 *   CPU socket ID for allocations.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_sq(struct ibv_context *ctx,
			struct mlx5_devx_create_sq_attr *sq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
	void *sq_ctx;
	void *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *sq = NULL;

	sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
	if (!sq) {
		DRV_LOG(ERR, "Failed to allocate SQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
	sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);
	MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);
	MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);
	MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);
	MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);
	MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,
		 sq_attr->allow_multi_pkt_send_wqe);
	MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,
		 sq_attr->min_wqe_inline_mode);
	MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
	MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
	MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
	MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
	MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
	MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
	MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
		 sq_attr->packet_pacing_rate_limit_index);
	MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
	MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
	wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
	wq_attr = &sq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!sq->obj) {
		DRV_LOG(ERR, "Failed to create SQ using DevX");
		rte_errno = errno;
		rte_free(sq);
		return NULL;
	}
	sq->id = MLX5_GET(create_sq_out, out, sqn);
	return sq;
}

/**
 * Modify SQ using DevX API.
 *
 * @param[in] sq
 *   Pointer to SQ object structure.
 * @param [in] sq_attr
 *   Pointer to SQ attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
			struct mlx5_devx_modify_sq_attr *sq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
	void *sq_ctx;
	int ret;

	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
	MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);
	MLX5_SET(modify_sq_in, in, sqn, sq->id);
	sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
	MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);
	MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);
	ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify SQ using DevX");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}

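/*
 * Usage sketch (illustrative only, compiled out by default): move an SQ
 * from RST to RDY after creation. The state values are the SQC states
 * from mlx5_prm.h; the guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_CMDS_USAGE_SKETCHES
static int
devx_sketch_sq_start(struct mlx5_devx_obj *sq)
{
	struct mlx5_devx_modify_sq_attr sq_attr = {
		.sq_state = MLX5_SQC_STATE_RST, /* Current state. */
		.state = MLX5_SQC_STATE_RDY,    /* Requested state. */
	};

	return mlx5_devx_cmd_modify_sq(sq, &sq_attr);
}
#endif /* MLX5_DEVX_CMDS_USAGE_SKETCHES */
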
/**
 * Create TIS using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] tis_attr
 *   Pointer to TIS attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tis(struct ibv_context *ctx,
			 struct mlx5_devx_tis_attr *tis_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
	struct mlx5_devx_obj *tis = NULL;
	void *tis_ctx;

	tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
	if (!tis) {
		DRV_LOG(ERR, "Failed to allocate TIS object");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
		 tis_attr->strict_lag_tx_port_affinity);
	MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
	MLX5_SET(tisc, tis_ctx, transport_domain,
		 tis_attr->transport_domain);
	tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tis->obj) {
		DRV_LOG(ERR, "Failed to create TIS using DevX");
		rte_errno = errno;
		rte_free(tis);
		return NULL;
	}
	tis->id = MLX5_GET(create_tis_out, out, tisn);
	return tis;
}

/**
 * Create transport domain using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_td(struct ibv_context *ctx)
{
	uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
	struct mlx5_devx_obj *td = NULL;

	td = rte_calloc(__func__, 1, sizeof(*td), 0);
	if (!td) {
		DRV_LOG(ERR, "Failed to allocate TD object");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!td->obj) {
		DRV_LOG(ERR, "Failed to create TD using DevX");
		rte_errno = errno;
		rte_free(td);
		return NULL;
	}
	td->id = MLX5_GET(alloc_transport_domain_out, out,
			  transport_domain);
	return td;
}

/**
 * Dump all flows to file.
 *
 * @param[in] fdb_domain
 *   FDB domain.
 * @param[in] rx_domain
 *   RX domain.
 * @param[in] tx_domain
 *   TX domain.
 * @param[out] file
 *   Pointer to file stream.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
			void *rx_domain __rte_unused,
			void *tx_domain __rte_unused, FILE *file __rte_unused)
{
	int ret = 0;

#ifdef HAVE_MLX5_DR_FLOW_DUMP
	if (fdb_domain) {
		ret = mlx5_glue->dr_dump_domain(file, fdb_domain);
		if (ret)
			return ret;
	}
	MLX5_ASSERT(rx_domain);
	ret = mlx5_glue->dr_dump_domain(file, rx_domain);
	if (ret)
		return ret;
	MLX5_ASSERT(tx_domain);
	ret = mlx5_glue->dr_dump_domain(file, tx_domain);
#else
	ret = ENOTSUP;
#endif
	return -ret;
}

/**
 * Create CQ using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] attr
 *   Pointer to CQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_cq(struct ibv_context *ctx, struct mlx5_devx_cq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
	struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj),
						   0);
	void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	if (!cq_obj) {
		DRV_LOG(ERR, "Failed to allocate CQ object memory.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	if (attr->db_umem_valid) {
		MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid);
		MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id);
		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset);
	} else {
		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
	}
	MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
	MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
	MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
	MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size -
		 MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
	MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
	if (attr->q_umem_valid) {
		MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
		MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
		MLX5_SET64(create_cq_in, in, cq_umem_offset,
			   attr->q_umem_offset);
	}
	cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!cq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
		rte_free(cq_obj);
		return NULL;
	}
	cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
	return cq_obj;
}

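/*
 * Usage sketch (illustrative only, compiled out by default): create a
 * small CQ on a pre-queried EQ and UAR page. The eqn, UAR page id and
 * doorbell record address are assumed to come from glue queries done
 * elsewhere; the guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_CMDS_USAGE_SKETCHES
static struct mlx5_devx_obj *
devx_sketch_cq_create(struct ibv_context *ctx, uint32_t eqn,
		      uint32_t uar_page_id, uint64_t db_addr)
{
	struct mlx5_devx_cq_attr attr = {
		.log_cq_size = 6,	/* 64 CQEs. */
		.log_page_size = 12,	/* 4KB pages. */
		.eqn = eqn,
		.uar_page_id = uar_page_id,
		.db_addr = db_addr,	/* Doorbell record address. */
	};

	return mlx5_devx_cmd_create_cq(ctx, &attr);
}
#endif /* MLX5_DEVX_CMDS_USAGE_SKETCHES */
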
/**
 * Create VIRTQ using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] attr
 *   Pointer to VIRTQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_virtq(struct ibv_context *ctx,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
						      sizeof(*virtq_obj), 0);
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);

	if (!virtq_obj) {
		DRV_LOG(ERR, "Failed to allocate virtq data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET16(virtio_net_q, virtq, hw_available_index,
		   attr->hw_available_index);
	MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6);
	MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum);
	MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
	MLX5_SET16(virtio_q, virtctx, virtio_version_1_0,
		   attr->virtio_version_1_0);
	MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
	MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
	MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr);
	MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr);
	MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size);
	MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey);
	MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id);
	MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size);
	MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset);
	MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id);
	MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size);
	MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset);
	MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
	MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
	MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
	MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
	virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						    sizeof(out));
	if (!virtq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
		rte_free(virtq_obj);
		return NULL;
	}
	virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return virtq_obj;
}

/**
 * Modify VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param [in] attr
 *   Pointer to modify virtq attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	MLX5_SET64(virtio_net_q, virtq, modify_field_select, attr->type);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	switch (attr->type) {
	case MLX5_VIRTQ_MODIFY_TYPE_STATE:
		MLX5_SET16(virtio_net_q, virtq, state, attr->state);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey,
			 attr->dirty_bitmap_mkey);
		MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr,
			   attr->dirty_bitmap_addr);
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size,
			 attr->dirty_bitmap_size);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable,
			 attr->dirty_bitmap_dump_enable);
		break;
	default:
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}

/**
 * Query VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param [in/out] attr
 *   Pointer to virtq attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
			  struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0};
	void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr);
	void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in),
					out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to query VIRTQ using DevX.");
		rte_errno = errno;
		return -errno;
	}
	attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq,
					      hw_available_index);
	attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index);
	return ret;
}

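/*
 * Usage sketch (illustrative only, compiled out by default): change a
 * virtq state. The numeric state value is taken by the caller from the
 * virtio emulation definitions in mlx5_prm.h; the guard macro is
 * hypothetical.
 */
#ifdef MLX5_DEVX_CMDS_USAGE_SKETCHES
static int
devx_sketch_virtq_set_state(struct mlx5_devx_obj *virtq_obj,
			    uint16_t queue_index, uint16_t state)
{
	struct mlx5_devx_virtq_attr attr = {
		.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
		.queue_index = queue_index,
		.state = state,
	};

	return mlx5_devx_cmd_modify_virtq(virtq_obj, &attr);
}
#endif /* MLX5_DEVX_CMDS_USAGE_SKETCHES */
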
/**
 * Create QP using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] attr
 *   Pointer to QP attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_qp(struct ibv_context *ctx,
			struct mlx5_devx_qp_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	struct mlx5_devx_obj *qp_obj = rte_zmalloc(__func__, sizeof(*qp_obj),
						   0);
	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	if (!qp_obj) {
		DRV_LOG(ERR, "Failed to allocate QP data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pd, attr->pd);
	if (attr->uar_index) {
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
		MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
			 MLX5_ADAPTER_PAGE_SHIFT);
		if (attr->sq_size) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
			MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
			MLX5_SET(qpc, qpc, log_sq_size,
				 rte_log2_u32(attr->sq_size));
		} else {
			MLX5_SET(qpc, qpc, no_sq, 1);
		}
		if (attr->rq_size) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
			MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
			MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
				 MLX5_LOG_RQ_STRIDE_SHIFT);
			MLX5_SET(qpc, qpc, log_rq_size,
				 rte_log2_u32(attr->rq_size));
			MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
		} else {
			MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		}
		if (attr->dbr_umem_valid) {
			MLX5_SET(qpc, qpc, dbr_umem_valid,
				 attr->dbr_umem_valid);
			MLX5_SET(qpc, qpc, dbr_umem_id, attr->dbr_umem_id);
		}
		MLX5_SET64(qpc, qpc, dbr_addr, attr->dbr_address);
		MLX5_SET64(create_qp_in, in, wq_umem_offset,
			   attr->wq_umem_offset);
		MLX5_SET(create_qp_in, in, wq_umem_id, attr->wq_umem_id);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
	} else {
		/* Special QP to be managed by FW - no SQ/RQ/CQ/UAR/DB rec. */
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		MLX5_SET(qpc, qpc, no_sq, 1);
	}
	qp_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!qp_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
		rte_free(qp_obj);
		return NULL;
	}
	qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
	return qp_obj;
}

/**
 * Modify QP using DevX API.
 * Currently supports only force loop-back QP.
 *
 * @param[in] qp
 *   Pointer to QP object structure.
 * @param [in] qp_st_mod_op
 *   The QP state modification operation.
 * @param [in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
			      uint32_t remote_qp_id)
{
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_in)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_in)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_in)];
	} in;
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_out)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_out)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_out)];
	} out;
	void *qpc;
	int ret;
	unsigned int inlen;
	unsigned int outlen;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	MLX5_SET(rst2init_qp_in, &in, opcode, qp_st_mod_op);
	switch (qp_st_mod_op) {
	case MLX5_CMD_OP_RST2INIT_QP:
		MLX5_SET(rst2init_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(rst2init_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, rre, 1);
		MLX5_SET(qpc, qpc, rwe, 1);
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		inlen = sizeof(in.rst2init);
		outlen = sizeof(out.rst2init);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		MLX5_SET(init2rtr_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(init2rtr_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, mtu, 1);
		MLX5_SET(qpc, qpc, log_msg_max, 30);
		MLX5_SET(qpc, qpc, remote_qpn, remote_qp_id);
		MLX5_SET(qpc, qpc, min_rnr_nak, 0);
		inlen = sizeof(in.init2rtr);
		outlen = sizeof(out.init2rtr);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc);
		MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id);
		MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 14);
		MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
		MLX5_SET(qpc, qpc, retry_count, 7);
		MLX5_SET(qpc, qpc, rnr_retry, 7);
		inlen = sizeof(in.rtr2rts);
		outlen = sizeof(out.rtr2rts);
		break;
	default:
		DRV_LOG(ERR, "Invalid or unsupported QP modify op %u.",
			qp_st_mod_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(qp->obj, &in, inlen, &out, outlen);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify QP using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}

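/*
 * Usage sketch (illustrative only, compiled out by default): drive a
 * loop-back QP through RST->INIT->RTR->RTS against a peer QP number.
 * The remote QP id is only consumed by the INIT2RTR transition; the
 * guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_CMDS_USAGE_SKETCHES
static int
devx_sketch_qp_connect(struct mlx5_devx_obj *qp, uint32_t remote_qpn)
{
	int ret;

	ret = mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RST2INIT_QP, 0);
	if (ret)
		return ret;
	ret = mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_INIT2RTR_QP,
					    remote_qpn);
	if (ret)
		return ret;
	return mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RTR2RTS_QP, 0);
}
#endif /* MLX5_DEVX_CMDS_USAGE_SKETCHES */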