// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018 Mellanox Technologies, Ltd */

#include <unistd.h>

#include <rte_errno.h>
#include <rte_malloc.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_utils.h"


/**
 * Allocate flow counters via devx interface.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param bulk_n_128
 *   Bulk counter numbers in 128 counters units.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx, uint32_t bulk_n_128)
{
	struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
	uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};

	if (!dcs) {
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
	dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
					      sizeof(in), out, sizeof(out));
	if (!dcs->obj) {
		DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
		rte_errno = errno;
		rte_free(dcs);
		return NULL;
	}
	dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return dcs;
}

/**
 * Query flow counters values.
 *
 * @param[in] dcs
 *   devx object that was obtained from mlx5_devx_cmd_flow_counter_alloc.
 * @param[in] clear
 *   Whether hardware should clear the counters after the query or not.
 * @param[in] n_counters
 *   0 in case of 1 counter to read, otherwise the counter number to read.
 * @param pkts
 *   The number of packets that matched the flow.
 * @param bytes
 *   The number of bytes that matched the flow.
 * @param mkey
 *   The mkey key for batch query.
 * @param addr
 *   The address in the mkey range for batch query.
 * @param cmd_comp
 *   The completion object for asynchronous batch query.
 * @param async_id
 *   The ID to be returned in the asynchronous batch query response.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				 int clear, uint32_t n_counters,
				 uint64_t *pkts, uint64_t *bytes,
				 uint32_t mkey, void *addr,
				 struct mlx5dv_devx_cmd_comp *cmd_comp,
				 uint64_t async_id)
{
	int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			MLX5_ST_SZ_BYTES(traffic_counter);
	uint32_t out[out_len];
	uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int rc;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
	MLX5_SET(query_flow_counter_in, in, clear, !!clear);

	if (n_counters) {
		MLX5_SET(query_flow_counter_in, in, num_of_counters,
			 n_counters);
		MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
		MLX5_SET(query_flow_counter_in, in, mkey, mkey);
		MLX5_SET64(query_flow_counter_in, in, address,
			   (uint64_t)(uintptr_t)addr);
	}
	if (!cmd_comp)
		rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
					       out_len);
	else
		rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
						     out_len, async_id,
						     cmd_comp);
	if (rc) {
		DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
		rte_errno = rc;
		return -rc;
	}
	if (!n_counters) {
		stats = MLX5_ADDR_OF(query_flow_counter_out,
				     out, flow_statistics);
		*pkts = MLX5_GET64(traffic_counter, stats, packets);
		*bytes = MLX5_GET64(traffic_counter, stats, octets);
	}
	return 0;
}
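
/*
 * Usage sketch (illustrative only, not part of this file): allocate one
 * bulk of 128 counters and synchronously read the first one. "ctx" is
 * assumed to be an ibv_context opened with mlx5dv_open_device() and the
 * error paths are trimmed.
 *
 *	struct mlx5_devx_obj *dcs;
 *	uint64_t pkts = 0, bytes = 0;
 *
 *	dcs = mlx5_devx_cmd_flow_counter_alloc(ctx, 1);
 *	if (!dcs)
 *		return -rte_errno;
 *	// n_counters == 0 requests a single synchronous read.
 *	if (mlx5_devx_cmd_flow_counter_query(dcs, 0, 0, &pkts, &bytes,
 *					     0, NULL, NULL, 0))
 *		return -rte_errno;
 *	mlx5_devx_cmd_destroy(dcs);
 */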

/**
 * Create a new mkey.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param[in] attr
 *   Attributes of the requested mkey.
 *
 * @return
 *   Pointer to the DevX mkey object created, NULL otherwise and rte_errno
 *   is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,
			  struct mlx5_devx_mkey_attr *attr)
{
	struct mlx5_klm *klm_array = attr->klm_array;
	int klm_num = attr->klm_num;
	int in_size_dw = MLX5_ST_SZ_DW(create_mkey_in) +
		     (klm_num ? RTE_ALIGN(klm_num, 4) : 0) * MLX5_ST_SZ_DW(klm);
	uint32_t in[in_size_dw];
	uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
	void *mkc;
	struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
	size_t pgsize;
	uint32_t translation_size;

	if (!mkey) {
		rte_errno = ENOMEM;
		return NULL;
	}
	memset(in, 0, in_size_dw * 4);
	pgsize = sysconf(_SC_PAGESIZE);
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	if (klm_num > 0) {
		int i;
		uint8_t *klm = (uint8_t *)MLX5_ADDR_OF(create_mkey_in, in,
						       klm_pas_mtt);

		translation_size = RTE_ALIGN(klm_num, 4);
		for (i = 0; i < klm_num; i++) {
			MLX5_SET(klm, klm, byte_count, klm_array[i].byte_count);
			MLX5_SET(klm, klm, mkey, klm_array[i].mkey);
			MLX5_SET64(klm, klm, address, klm_array[i].address);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		for (; i < (int)translation_size; i++) {
			MLX5_SET(klm, klm, mkey, 0x0);
			MLX5_SET64(klm, klm, address, 0x0);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		MLX5_SET(mkc, mkc, access_mode_1_0, attr->log_entity_size ?
			 MLX5_MKC_ACCESS_MODE_KLM_FBS :
			 MLX5_MKC_ACCESS_MODE_KLM);
		MLX5_SET(mkc, mkc, log_page_size, attr->log_entity_size);
	} else {
		translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
		MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
		MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
	}
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 translation_size);
	MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
	MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
	MLX5_SET(mkc, mkc, lw, 0x1);
	MLX5_SET(mkc, mkc, lr, 0x1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, attr->pd);
	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
	MLX5_SET64(mkc, mkc, len, attr->size);
	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
					       sizeof(out));
	if (!mkey->obj) {
		DRV_LOG(ERR, "Can't create %sdirect mkey - error %d",
			klm_num ? "an in" : "a ", errno);
		rte_errno = errno;
		rte_free(mkey);
		return NULL;
	}
	mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
	mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
	return mkey;
}
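
/*
 * Usage sketch (illustrative only): create a direct (MTT) mkey over a
 * buffer registered as a DevX umem. "buf", "size" and "pdn" are
 * hypothetical; klm_num is left 0 so the MLX5_MKC_ACCESS_MODE_MTT branch
 * above is taken.
 *
 *	struct mlx5dv_devx_umem *umem;
 *	struct mlx5_devx_mkey_attr mkey_attr = {0};
 *	struct mlx5_devx_obj *mkey;
 *
 *	umem = mlx5_glue->devx_umem_reg(ctx, buf, size,
 *					IBV_ACCESS_LOCAL_WRITE);
 *	if (!umem)
 *		return -errno;
 *	mkey_attr.addr = (uintptr_t)buf;
 *	mkey_attr.size = size;
 *	mkey_attr.umem_id = umem->umem_id;
 *	mkey_attr.pd = pdn;
 *	mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
 */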

/**
 * Get status of devx command response.
 * Mainly used for asynchronous commands.
 *
 * @param[in] out
 *   The out response buffer.
 *
 * @return
 *   0 on success, non-zero value otherwise.
 */
int
mlx5_devx_get_out_command_status(void *out)
{
	int status;

	if (!out)
		return -EINVAL;
	status = MLX5_GET(query_flow_counter_out, out, status);
	if (status) {
		int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);

		DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
			syndrome);
	}
	return status;
}

/**
 * Destroy any object allocated by a Devx API.
 *
 * @param[in] obj
 *   Pointer to a general object.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
{
	int ret;

	if (!obj)
		return 0;
	ret = mlx5_glue->devx_obj_destroy(obj->obj);
	rte_free(obj);
	return ret;
}
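
/*
 * Usage sketch (illustrative only): reap the completion of an
 * asynchronous batch counter query issued with a non-NULL cmd_comp and
 * validate the response with mlx5_devx_get_out_command_status(). The
 * response layout follows rdma-core's struct mlx5dv_devx_async_cmd_hdr;
 * the buffer size is an assumption.
 *
 *	uint8_t buf[sizeof(struct mlx5dv_devx_async_cmd_hdr) +
 *		    MLX5_ST_SZ_BYTES(query_flow_counter_out)];
 *	struct mlx5dv_devx_async_cmd_hdr *resp = (void *)buf;
 *
 *	if (!mlx5_glue->devx_get_async_cmd_comp(cmd_comp, resp,
 *						sizeof(buf)) &&
 *	    !mlx5_devx_get_out_command_status(resp->out_data))
 *		; // resp->wr_id holds the async_id passed at query time
 */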

/**
 * Query NIC vport context.
 * Fills minimal inline attribute.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param[in] vport
 *   vport index
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
static int
mlx5_devx_cmd_query_nic_vport_context(struct ibv_context *ctx,
				      unsigned int vport,
				      struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	void *vctx;
	int status, syndrome, rc;

	/* Query NIC vport context to determine inline mode. */
	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(query_nic_vport_context_out, out, status);
	syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query NIC vport context, "
			"status %x, syndrome = %x",
			status, syndrome);
		return -1;
	}
	vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
			    nic_vport_context);
	attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
					   min_wqe_inline_mode);
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}

/**
 * Query NIC vDPA attributes.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param[out] vdpa_attr
 *   vDPA Attributes structure to fill.
 */
static void
mlx5_devx_cmd_query_hca_vdpa_attr(struct ibv_context *ctx,
				  struct mlx5_hca_vdpa_attr *vdpa_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	void *hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	int status, syndrome, rc;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);
	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (rc || status) {
		RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities,"
			" status %x, syndrome = %x", status, syndrome);
		vdpa_attr->valid = 0;
	} else {
		vdpa_attr->valid = 1;
		vdpa_attr->desc_tunnel_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 desc_tunnel_offload_type);
		vdpa_attr->eth_frame_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 eth_frame_offload_type);
		vdpa_attr->virtio_version_1_0 =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_version_1_0);
		vdpa_attr->tso_ipv4 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv4);
		vdpa_attr->tso_ipv6 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv6);
		vdpa_attr->tx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      tx_csum);
		vdpa_attr->rx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      rx_csum);
		vdpa_attr->event_mode = MLX5_GET(virtio_emulation_cap, hcattr,
						 event_mode);
		vdpa_attr->virtio_queue_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_queue_type);
		vdpa_attr->log_doorbell_stride =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_stride);
		vdpa_attr->log_doorbell_bar_size =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_bar_size);
		vdpa_attr->doorbell_bar_offset =
			MLX5_GET64(virtio_emulation_cap, hcattr,
				   doorbell_bar_offset);
		vdpa_attr->max_num_virtio_queues =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 max_num_virtio_queues);
		vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_a);
		vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_b);
		vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_a);
		vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_b);
		vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_a);
		vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_b);
	}
}

/**
 * Query HCA attributes.
 * Using those attributes we can check at runtime if the device
 * has the required capabilities.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
			     struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	void *hcattr;
	int status, syndrome, rc;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in), out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
			"status %x, syndrome = %x",
			status, syndrome);
		return -1;
	}
	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	attr->flow_counter_bulk_alloc_bitmap =
			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
	attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
					    flow_counters_dump);
	attr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr,
					  log_max_rqt_size);
	attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
	attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
	attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
						log_max_hairpin_queues);
	attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
						    log_max_hairpin_wq_data_sz);
	attr->log_max_hairpin_num_packets = MLX5_GET
		(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
	attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
	attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
					  eth_net_offloads);
	attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
	attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
					       flex_parser_protocols);
	attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
	attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
					 general_obj_types) &
			      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
	if (attr->qos.sup) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);
		rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
						 out, sizeof(out));
		if (rc)
			goto error;
		/* Refresh status/syndrome: they refer to the last command. */
		status = MLX5_GET(query_hca_cap_out, out, status);
		syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
		if (status) {
			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
				" status %x, syndrome = %x",
				status, syndrome);
			return -1;
		}
		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
		attr->qos.srtcm_sup =
				MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
		attr->qos.log_max_flow_meter =
				MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
		attr->qos.flow_meter_reg_c_ids =
			MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
		attr->qos.flow_meter_reg_share =
			MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
	}
	if (attr->vdpa.valid)
		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
	if (!attr->eth_net_offloads)
		return 0;

	/* Query HCA offloads for Ethernet protocol. */
	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc) {
		attr->eth_net_offloads = 0;
		goto error;
	}
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
			"status %x, syndrome = %x",
			status, syndrome);
		attr->eth_net_offloads = 0;
		return -1;
	}
	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_vlan_insert);
	attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_cap);
	attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
					hcattr, tunnel_lro_gre);
	attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
					  hcattr, tunnel_lro_vxlan);
	attr->lro_max_msg_sz_mode = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, lro_max_msg_sz_mode);
	for (int i = 0; i < MLX5_LRO_NUM_SUPP_PERIODS; i++) {
		attr->lro_timer_supported_periods[i] =
			MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_timer_supported_periods[i]);
	}
	attr->tunnel_stateless_geneve_rx =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, tunnel_stateless_geneve_rx);
	attr->geneve_max_opt_len =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, max_geneve_opt_len);
	attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_inline_mode);
	attr->tunnel_stateless_gtp = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, tunnel_stateless_gtp);
	if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return 0;
	if (attr->eth_virt) {
		rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
		if (rc) {
			attr->eth_virt = 0;
			goto error;
		}
	}
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}
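
/*
 * Usage sketch (illustrative only): query the capabilities once at probe
 * time and gate optional features on the result.
 *
 *	struct mlx5_hca_attr hca_attr = {0};
 *
 *	if (mlx5_devx_cmd_query_hca_attr(ctx, &hca_attr))
 *		return -1; // rte_errno is not set by this query
 *	if (hca_attr.flow_counters_dump)
 *		; // batch counter dump to memory may be used
 *	if (hca_attr.vdpa.valid)
 *		; // virtio emulation capabilities were filled in
 */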

/**
 * Query TIS transport domain from QP verbs object using DevX API.
 *
 * @param[in] qp
 *   Pointer to verbs QP returned by ibv_create_qp.
 * @param[in] tis_num
 *   TIS number of TIS to query.
 * @param[out] tis_td
 *   Pointer to TIS transport domain variable, to be set by the routine.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,
			      uint32_t *tis_td)
{
	uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
	int rc;
	void *tis_ctx;

	MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
	MLX5_SET(query_tis_in, in, tisn, tis_num);
	rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
	if (rc) {
		DRV_LOG(ERR, "Failed to query QP using DevX");
		return -rc;
	}
	tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
	*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
	return 0;
}

/**
 * Fill WQ data for DevX API command.
 * Utility function for use when creating DevX objects containing a WQ.
 *
 * @param[in] wq_ctx
 *   Pointer to WQ context to fill with data.
 * @param [in] wq_attr
 *   Pointer to WQ attributes structure to fill in WQ context.
 */
static void
devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
{
	MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
	MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
	MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
	MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
	MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
	MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
	MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
	MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
	MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
	MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
	MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
	MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
	MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
	MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
	MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
	MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
	MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
	MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
	MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
		 wq_attr->log_hairpin_num_packets);
	MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
	MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
		 wq_attr->single_wqe_log_num_of_strides);
	MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
	MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
		 wq_attr->single_stride_log_num_of_bytes);
	MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
	MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
	MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
}

/**
 * Create RQ using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] rq_attr
 *   Pointer to create RQ attributes structure.
 * @param [in] socket
 *   CPU socket ID for allocations.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rq(struct ibv_context *ctx,
			struct mlx5_devx_create_rq_attr *rq_attr,
			int socket)
{
	uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *rq = NULL;

	rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
	if (!rq) {
		DRV_LOG(ERR, "Failed to allocate RQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
	rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
	MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
	MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
	MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
	MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
	MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
	MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
	wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
	wq_attr = &rq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!rq->obj) {
		DRV_LOG(ERR, "Failed to create RQ using DevX");
		rte_errno = errno;
		rte_free(rq);
		return NULL;
	}
	rq->id = MLX5_GET(create_rq_out, out, rqn);
	return rq;
}
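
/*
 * Usage sketch (illustrative only): create a hairpin RQ. Only the
 * hairpin-related fields are set here; the log sizes are assumptions.
 *
 *	struct mlx5_devx_create_rq_attr rq_attr = {0};
 *	struct mlx5_devx_obj *rq;
 *
 *	rq_attr.hairpin = 1;
 *	rq_attr.wq_attr.log_hairpin_num_packets = 5;
 *	rq_attr.wq_attr.log_hairpin_data_sz = 15;
 *	rq = mlx5_devx_cmd_create_rq(ctx, &rq_attr, SOCKET_ID_ANY);
 *	if (!rq)
 *		return -rte_errno;
 */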

/**
 * Modify RQ using DevX API.
 *
 * @param[in] rq
 *   Pointer to RQ object structure.
 * @param [in] rq_attr
 *   Pointer to modify RQ attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
			struct mlx5_devx_modify_rq_attr *rq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	int ret;

	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
	MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);
	MLX5_SET(modify_rq_in, in, rqn, rq->id);
	MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);
	rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)
		MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)
		MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)
		MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {
		wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
		MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);
	}
	ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQ using DevX");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
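
/*
 * Usage sketch (illustrative only): move an RQ from RST to RDY. The
 * MLX5_RQC_STATE_* values are assumed to come from mlx5_prm.h.
 *
 *	struct mlx5_devx_modify_rq_attr mod = {0};
 *
 *	mod.rq_state = MLX5_RQC_STATE_RST;	// current state
 *	mod.state = MLX5_RQC_STATE_RDY;		// target state
 *	if (mlx5_devx_cmd_modify_rq(rq, &mod))
 *		return -rte_errno;
 */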

/**
 * Create TIR using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] tir_attr
 *   Pointer to TIR attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tir(struct ibv_context *ctx,
			 struct mlx5_devx_tir_attr *tir_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
	void *tir_ctx, *outer, *inner;
	struct mlx5_devx_obj *tir = NULL;
	int i;

	tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
	if (!tir) {
		DRV_LOG(ERR, "Failed to allocate TIR data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
	MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
		 tir_attr->lro_timeout_period_usecs);
	MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
	MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
	MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
	MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
	MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
		 tir_attr->tunneled_offload_en);
	MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
	MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
	MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
	MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
	for (i = 0; i < 10; i++) {
		MLX5_SET(tirc, tir_ctx, rx_hash_toeplitz_key[i],
			 tir_attr->rx_hash_toeplitz_key[i]);
	}
	outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
	MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
	MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
	MLX5_SET(rx_hash_field_select, outer, selected_fields,
		 tir_attr->rx_hash_field_selector_outer.selected_fields);
	inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
	MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l3_prot_type);
	MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l4_prot_type);
	MLX5_SET(rx_hash_field_select, inner, selected_fields,
		 tir_attr->rx_hash_field_selector_inner.selected_fields);
	tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tir->obj) {
		DRV_LOG(ERR, "Failed to create TIR using DevX");
		rte_errno = errno;
		rte_free(tir);
		return NULL;
	}
	tir->id = MLX5_GET(create_tir_out, out, tirn);
	return tir;
}
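
/*
 * Usage sketch (illustrative only): an indirect (RSS) TIR dispatching to
 * an existing RQT inside a transport domain; the MLX5_TIRC_DISP_TYPE_*
 * and MLX5_RX_HASH_FN_* values are assumed to come from mlx5_prm.h.
 *
 *	struct mlx5_devx_tir_attr tir_attr = {0};
 *	struct mlx5_devx_obj *tir;
 *
 *	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
 *	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
 *	tir_attr.indirect_table = rqt->id;
 *	tir_attr.transport_domain = td->id;
 *	tir = mlx5_devx_cmd_create_tir(ctx, &tir_attr);
 */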

/**
 * Create RQT using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t *in = NULL;
	uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
	void *rqt_ctx;
	struct mlx5_devx_obj *rqt = NULL;
	int i;

	in = rte_calloc(__func__, 1, inlen, 0);
	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT IN data");
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
	if (!rqt) {
		DRV_LOG(ERR, "Failed to allocate RQT data");
		rte_errno = ENOMEM;
		rte_free(in);
		return NULL;
	}
	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
	rte_free(in);
	if (!rqt->obj) {
		DRV_LOG(ERR, "Failed to create RQT using DevX");
		rte_errno = errno;
		rte_free(rqt);
		return NULL;
	}
	rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
	return rqt;
}

/**
 * Modify RQT using DevX API.
 *
 * @param[in] rqt
 *   Pointer to RQT DevX object structure.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
	uint32_t *in = rte_calloc(__func__, 1, inlen, 0);
	void *rqt_ctx;
	int i;
	int ret;

	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT modify IN data.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	MLX5_SET(modify_rqt_in, in, rqtn, rqt->id);
	MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
	rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
	rte_free(in);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQT using DevX.");
		rte_errno = errno;
		return -rte_errno;
	}
	return ret;
}
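
/*
 * Usage sketch (illustrative only): a single-entry RQT pointing at one
 * RQ; rq_list is a flexible array member of the attribute structure.
 *
 *	struct mlx5_devx_rqt_attr *rqt_attr;
 *	struct mlx5_devx_obj *rqt;
 *
 *	rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
 *			      sizeof(uint32_t), 0);
 *	if (!rqt_attr)
 *		return -ENOMEM;
 *	rqt_attr->rqt_max_size = 1;
 *	rqt_attr->rqt_actual_size = 1;
 *	rqt_attr->rq_list[0] = rq->id;
 *	rqt = mlx5_devx_cmd_create_rqt(ctx, rqt_attr);
 *	rte_free(rqt_attr);
 */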

/**
 * Create SQ using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] sq_attr
 *   Pointer to SQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_sq(struct ibv_context *ctx,
			struct mlx5_devx_create_sq_attr *sq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
	void *sq_ctx;
	void *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *sq = NULL;

	sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
	if (!sq) {
		DRV_LOG(ERR, "Failed to allocate SQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
	sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);
	MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);
	MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);
	MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);
	MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);
	MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,
		 sq_attr->allow_multi_pkt_send_wqe);
	MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,
		 sq_attr->min_wqe_inline_mode);
	MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
	MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
	MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
	MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
	MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
	MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
	MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
		 sq_attr->packet_pacing_rate_limit_index);
	MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
	MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
	wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
	wq_attr = &sq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!sq->obj) {
		DRV_LOG(ERR, "Failed to create SQ using DevX");
		rte_errno = errno;
		rte_free(sq);
		return NULL;
	}
	sq->id = MLX5_GET(create_sq_out, out, sqn);
	return sq;
}

/**
 * Modify SQ using DevX API.
 *
 * @param[in] sq
 *   Pointer to SQ object structure.
 * @param [in] sq_attr
 *   Pointer to SQ attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
			struct mlx5_devx_modify_sq_attr *sq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
	void *sq_ctx;
	int ret;

	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
	MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);
	MLX5_SET(modify_sq_in, in, sqn, sq->id);
	sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
	MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);
	MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);
	ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify SQ using DevX");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
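
/*
 * Usage sketch (illustrative only): create a hairpin SQ bound to a TIS
 * and drive it to ready; the MLX5_SQC_STATE_* values are assumed to come
 * from mlx5_prm.h.
 *
 *	struct mlx5_devx_create_sq_attr sq_attr = {0};
 *	struct mlx5_devx_modify_sq_attr sq_mod = {0};
 *	struct mlx5_devx_obj *sq;
 *
 *	sq_attr.hairpin = 1;
 *	sq_attr.tis_lst_sz = 1;
 *	sq_attr.tis_num = tis->id;
 *	sq = mlx5_devx_cmd_create_sq(ctx, &sq_attr);
 *	if (!sq)
 *		return -rte_errno;
 *	sq_mod.sq_state = MLX5_SQC_STATE_RST;
 *	sq_mod.state = MLX5_SQC_STATE_RDY;
 *	if (mlx5_devx_cmd_modify_sq(sq, &sq_mod))
 *		return -rte_errno;
 */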

/**
 * Create TIS using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] tis_attr
 *   Pointer to TIS attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tis(struct ibv_context *ctx,
			 struct mlx5_devx_tis_attr *tis_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
	struct mlx5_devx_obj *tis = NULL;
	void *tis_ctx;

	tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
	if (!tis) {
		DRV_LOG(ERR, "Failed to allocate TIS object");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
		 tis_attr->strict_lag_tx_port_affinity);
	MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
	MLX5_SET(tisc, tis_ctx, transport_domain,
		 tis_attr->transport_domain);
	tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tis->obj) {
		DRV_LOG(ERR, "Failed to create TIS using DevX");
		rte_errno = errno;
		rte_free(tis);
		return NULL;
	}
	tis->id = MLX5_GET(create_tis_out, out, tisn);
	return tis;
}

/**
 * Create transport domain using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_td(struct ibv_context *ctx)
{
	uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
	struct mlx5_devx_obj *td = NULL;

	td = rte_calloc(__func__, 1, sizeof(*td), 0);
	if (!td) {
		DRV_LOG(ERR, "Failed to allocate TD object");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!td->obj) {
		DRV_LOG(ERR, "Failed to create TD using DevX");
		rte_errno = errno;
		rte_free(td);
		return NULL;
	}
	td->id = MLX5_GET(alloc_transport_domain_out, out,
			  transport_domain);
	return td;
}

/**
 * Dump all flows to file.
 *
 * @param[in] fdb_domain
 *   FDB domain.
 * @param[in] rx_domain
 *   RX domain.
 * @param[in] tx_domain
 *   TX domain.
 * @param[out] file
 *   Pointer to file stream.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
			void *rx_domain __rte_unused,
			void *tx_domain __rte_unused, FILE *file __rte_unused)
{
	int ret = 0;

#ifdef HAVE_MLX5_DR_FLOW_DUMP
	if (fdb_domain) {
		ret = mlx5_glue->dr_dump_domain(file, fdb_domain);
		if (ret)
			return ret;
	}
	MLX5_ASSERT(rx_domain);
	ret = mlx5_glue->dr_dump_domain(file, rx_domain);
	if (ret)
		return ret;
	MLX5_ASSERT(tx_domain);
	ret = mlx5_glue->dr_dump_domain(file, tx_domain);
#else
	ret = ENOTSUP;
#endif
	return -ret;
}
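
/*
 * Usage sketch (illustrative only): the transport domain is allocated
 * first and its ID then feeds the TIS attributes.
 *
 *	struct mlx5_devx_tis_attr tis_attr = {0};
 *	struct mlx5_devx_obj *td, *tis;
 *
 *	td = mlx5_devx_cmd_create_td(ctx);
 *	if (!td)
 *		return -rte_errno;
 *	tis_attr.transport_domain = td->id;
 *	tis = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
 *	if (!tis)
 *		return -rte_errno;
 */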

/**
 * Create CQ using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] attr
 *   Pointer to CQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_cq(struct ibv_context *ctx, struct mlx5_devx_cq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
	struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj),
						   0);
	void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	if (!cq_obj) {
		DRV_LOG(ERR, "Failed to allocate CQ object memory.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	if (attr->db_umem_valid) {
		MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid);
		MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id);
		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset);
	} else {
		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
	}
	MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
	MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
	MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
	MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size -
		 MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
	MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
	if (attr->q_umem_valid) {
		MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
		MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
		MLX5_SET64(create_cq_in, in, cq_umem_offset,
			   attr->q_umem_offset);
	}
	cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!cq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
		rte_free(cq_obj);
		return NULL;
	}
	cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
	return cq_obj;
}
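
/*
 * Usage sketch (illustrative only): create a CQ whose ring and doorbell
 * live in pre-registered umems. "uar_page_id", "eqn" (e.g. obtained via
 * mlx5_glue->devx_query_eqn()) and the umem IDs are assumptions obtained
 * elsewhere.
 *
 *	struct mlx5_devx_cq_attr cq_attr = {0};
 *	struct mlx5_devx_obj *cq;
 *
 *	cq_attr.log_cq_size = 6;	// 64 CQEs
 *	cq_attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
 *	cq_attr.uar_page_id = uar_page_id;
 *	cq_attr.eqn = eqn;
 *	cq_attr.q_umem_valid = 1;
 *	cq_attr.q_umem_id = q_umem_id;
 *	cq_attr.db_umem_valid = 1;
 *	cq_attr.db_umem_id = db_umem_id;
 *	cq = mlx5_devx_cmd_create_cq(ctx, &cq_attr);
 */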

/**
 * Create VIRTQ using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] attr
 *   Pointer to VIRTQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_virtq(struct ibv_context *ctx,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
						      sizeof(*virtq_obj), 0);
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);

	if (!virtq_obj) {
		DRV_LOG(ERR, "Failed to allocate virtq data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET16(virtio_net_q, virtq, hw_available_index,
		   attr->hw_available_index);
	MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6);
	MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum);
	MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
	MLX5_SET16(virtio_q, virtctx, virtio_version_1_0,
		   attr->virtio_version_1_0);
	MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
	MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
	MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr);
	MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr);
	MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size);
	MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey);
	MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id);
	MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size);
	MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset);
	MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id);
	MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size);
	MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset);
	MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
	MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
	MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
	MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
	virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						    sizeof(out));
	if (!virtq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
		rte_free(virtq_obj);
		return NULL;
	}
	virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return virtq_obj;
}

/**
 * Modify VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param [in] attr
 *   Pointer to modify virtq attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	MLX5_SET64(virtio_net_q, virtq, modify_field_select, attr->type);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	switch (attr->type) {
	case MLX5_VIRTQ_MODIFY_TYPE_STATE:
		MLX5_SET16(virtio_net_q, virtq, state, attr->state);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey,
			 attr->dirty_bitmap_mkey);
		MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr,
			   attr->dirty_bitmap_addr);
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size,
			 attr->dirty_bitmap_size);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable,
			 attr->dirty_bitmap_dump_enable);
		break;
	default:
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}

/**
 * Query VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param [in/out] attr
 *   Pointer to virtq attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
			  struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0};
	void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr);
	void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in),
					out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to query VIRTQ using DevX.");
		rte_errno = errno;
		return -errno;
	}
	attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq,
					      hw_available_index);
	attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index);
	return ret;
}
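
/*
 * Usage sketch (illustrative only): suspend a virtq and read back its
 * hardware indices, e.g. for vDPA live migration. The
 * MLX5_VIRTQ_STATE_* value is assumed to come from mlx5_prm.h.
 *
 *	struct mlx5_devx_virtq_attr vq_attr = {0};
 *
 *	vq_attr.type = MLX5_VIRTQ_MODIFY_TYPE_STATE;
 *	vq_attr.queue_index = 0;
 *	vq_attr.state = MLX5_VIRTQ_STATE_SUSPEND;
 *	if (mlx5_devx_cmd_modify_virtq(virtq, &vq_attr))
 *		return -rte_errno;
 *	if (mlx5_devx_cmd_query_virtq(virtq, &vq_attr))
 *		return -rte_errno;
 *	// vq_attr.hw_available_index / vq_attr.hw_used_index are now valid.
 */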

/**
 * Create QP using DevX API.
 *
 * @param[in] ctx
 *   ibv_context returned from mlx5dv_open_device.
 * @param [in] attr
 *   Pointer to QP attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_qp(struct ibv_context *ctx,
			struct mlx5_devx_qp_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	struct mlx5_devx_obj *qp_obj = rte_zmalloc(__func__, sizeof(*qp_obj),
						   0);
	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	if (!qp_obj) {
		DRV_LOG(ERR, "Failed to allocate QP data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pd, attr->pd);
	if (attr->uar_index) {
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
		MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
			 MLX5_ADAPTER_PAGE_SHIFT);
		if (attr->sq_size) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
			MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
			MLX5_SET(qpc, qpc, log_sq_size,
				 rte_log2_u32(attr->sq_size));
		} else {
			MLX5_SET(qpc, qpc, no_sq, 1);
		}
		if (attr->rq_size) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
			MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
			MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
				 MLX5_LOG_RQ_STRIDE_SHIFT);
			MLX5_SET(qpc, qpc, log_rq_size,
				 rte_log2_u32(attr->rq_size));
			MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
		} else {
			MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		}
		if (attr->dbr_umem_valid) {
			MLX5_SET(qpc, qpc, dbr_umem_valid,
				 attr->dbr_umem_valid);
			MLX5_SET(qpc, qpc, dbr_umem_id, attr->dbr_umem_id);
		}
		MLX5_SET64(qpc, qpc, dbr_addr, attr->dbr_address);
		MLX5_SET64(create_qp_in, in, wq_umem_offset,
			   attr->wq_umem_offset);
		MLX5_SET(create_qp_in, in, wq_umem_id, attr->wq_umem_id);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
	} else {
		/* Special QP to be managed by FW - no SQ/RQ/CQ/UAR/DB rec. */
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		MLX5_SET(qpc, qpc, no_sq, 1);
	}
	qp_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!qp_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
		rte_free(qp_obj);
		return NULL;
	}
	qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
	return qp_obj;
}
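
/*
 * Usage sketch (illustrative only): a firmware-managed special QP, the
 * simplest form this routine supports. "pdn" is a hypothetical protection
 * domain number.
 *
 *	struct mlx5_devx_qp_attr qp_attr = {0};
 *	struct mlx5_devx_obj *fw_qp;
 *
 *	qp_attr.pd = pdn;
 *	// uar_index == 0 selects the FW-managed branch (no WQ/CQ/DB).
 *	fw_qp = mlx5_devx_cmd_create_qp(ctx, &qp_attr);
 *	if (!fw_qp)
 *		return -rte_errno;
 */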

/**
 * Modify QP using DevX API.
 * Currently supports only force loop-back QP.
 *
 * @param[in] qp
 *   Pointer to QP object structure.
 * @param [in] qp_st_mod_op
 *   The QP state modification operation.
 * @param [in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
			      uint32_t remote_qp_id)
{
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_in)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_in)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_in)];
	} in;
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_out)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_out)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_out)];
	} out;
	void *qpc;
	int ret;
	unsigned int inlen;
	unsigned int outlen;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	MLX5_SET(rst2init_qp_in, &in, opcode, qp_st_mod_op);
	switch (qp_st_mod_op) {
	case MLX5_CMD_OP_RST2INIT_QP:
		MLX5_SET(rst2init_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(rst2init_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, rre, 1);
		MLX5_SET(qpc, qpc, rwe, 1);
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		inlen = sizeof(in.rst2init);
		outlen = sizeof(out.rst2init);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		MLX5_SET(init2rtr_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(init2rtr_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, mtu, 1);
		MLX5_SET(qpc, qpc, log_msg_max, 30);
		MLX5_SET(qpc, qpc, remote_qpn, remote_qp_id);
		MLX5_SET(qpc, qpc, min_rnr_nak, 0);
		inlen = sizeof(in.init2rtr);
		outlen = sizeof(out.init2rtr);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc);
		MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id);
		MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 14);
		MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
		MLX5_SET(qpc, qpc, retry_count, 7);
		MLX5_SET(qpc, qpc, rnr_retry, 7);
		inlen = sizeof(in.rtr2rts);
		outlen = sizeof(out.rtr2rts);
		break;
	default:
		DRV_LOG(ERR, "Invalid or unsupported QP modify op %u.",
			qp_st_mod_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(qp->obj, &in, inlen, &out, outlen);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify QP using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
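
/*
 * Usage sketch (illustrative only): drive a force-loopback QP through
 * RST->INIT->RTR->RTS against a peer QP; error handling trimmed.
 *
 *	if (mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RST2INIT_QP,
 *					  peer_qp->id) ||
 *	    mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_INIT2RTR_QP,
 *					  peer_qp->id) ||
 *	    mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RTR2RTS_QP,
 *					  peer_qp->id))
 *		return -rte_errno;
 */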