// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018 Mellanox Technologies, Ltd */

#include <unistd.h>

#include <rte_errno.h>
#include <rte_malloc.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_utils.h"


/**
 * Allocate flow counters via devx interface.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param bulk_n_128
 *   Bulk counter numbers in 128 counters units.
 *
 * @return
 *   Pointer to counter object on success, NULL otherwise and
 *   rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
{
	struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
	uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};

	if (!dcs) {
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
	dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
					      sizeof(in), out, sizeof(out));
	if (!dcs->obj) {
		DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
		rte_errno = errno;
		rte_free(dcs);
		return NULL;
	}
	/* Base id of the allocated counter bulk. */
	dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return dcs;
}

/**
 * Query flow counters values.
 *
 * @param[in] dcs
 *   devx object that was obtained from mlx5_devx_cmd_fc_alloc.
 * @param[in] clear
 *   Whether hardware should clear the counters after the query or not.
 * @param[in] n_counters
 *   0 in case of 1 counter to read, otherwise the counter number to read.
 * @param pkts
 *   The number of packets that matched the flow.
 * @param bytes
 *   The number of bytes that matched the flow.
 * @param mkey
 *   The mkey key for batch query.
 * @param addr
 *   The address in the mkey range for batch query.
 * @param cmd_comp
 *   The completion object for asynchronous batch query.
 * @param async_id
 *   The ID to be returned in the asynchronous batch query response.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				 int clear, uint32_t n_counters,
				 uint64_t *pkts, uint64_t *bytes,
				 uint32_t mkey, void *addr,
				 void *cmd_comp,
				 uint64_t async_id)
{
	/*
	 * NOTE(review): out_len is a byte count, but it is used below as a
	 * uint32_t element count, making 'out' four times larger than the
	 * response needs - confirm whether the oversizing is intentional.
	 */
	int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			MLX5_ST_SZ_BYTES(traffic_counter);
	uint32_t out[out_len];
	uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int rc;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
	MLX5_SET(query_flow_counter_in, in, clear, !!clear);

	if (n_counters) {
		/* Batch mode: device dumps results into the mkey memory. */
		MLX5_SET(query_flow_counter_in, in, num_of_counters,
			 n_counters);
		MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
		MLX5_SET(query_flow_counter_in, in, mkey, mkey);
		MLX5_SET64(query_flow_counter_in, in, address,
			   (uint64_t)(uintptr_t)addr);
	}
	/* cmd_comp selects asynchronous completion-based query. */
	if (!cmd_comp)
		rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
					       out_len);
	else
		rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
						     out_len, async_id,
						     cmd_comp);
	if (rc) {
		DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
		rte_errno = rc;
		return -rc;
	}
	if (!n_counters) {
		/* Single-counter synchronous query: parse inline response. */
		stats = MLX5_ADDR_OF(query_flow_counter_out,
				     out, flow_statistics);
		*pkts = MLX5_GET64(traffic_counter, stats, packets);
		*bytes = MLX5_GET64(traffic_counter, stats, octets);
	}
	return 0;
}

/**
 * Create a new mkey.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] attr
 *   Attributes of the requested mkey.
 *
 * @return
 *   Pointer to Devx mkey on success, NULL otherwise and rte_errno
 *   is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_mkey_create(void *ctx,
			  struct mlx5_devx_mkey_attr *attr)
{
	struct mlx5_klm *klm_array = attr->klm_array;
	int klm_num = attr->klm_num;
	/* Extra room for the KLM list, padded to a multiple of 4 entries. */
	int in_size_dw = MLX5_ST_SZ_DW(create_mkey_in) +
		     (klm_num ? RTE_ALIGN(klm_num, 4) : 0) * MLX5_ST_SZ_DW(klm);
	uint32_t in[in_size_dw];
	uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
	void *mkc;
	struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
	size_t pgsize;
	uint32_t translation_size;

	if (!mkey) {
		rte_errno = ENOMEM;
		return NULL;
	}
	/* 'in' is a VLA, so it cannot take a {0} initializer. */
	memset(in, 0, in_size_dw * 4);
	pgsize = sysconf(_SC_PAGESIZE);
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	if (klm_num > 0) {
		int i;
		uint8_t *klm = (uint8_t *)MLX5_ADDR_OF(create_mkey_in, in,
						       klm_pas_mtt);
		translation_size = RTE_ALIGN(klm_num, 4);
		/* Copy the caller-provided KLM entries. */
		for (i = 0; i < klm_num; i++) {
			MLX5_SET(klm, klm, byte_count, klm_array[i].byte_count);
			MLX5_SET(klm, klm, mkey, klm_array[i].mkey);
			MLX5_SET64(klm, klm, address, klm_array[i].address);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		/* Explicitly zero the pad entries up to the aligned size. */
		for (; i < (int)translation_size; i++) {
			MLX5_SET(klm, klm, mkey, 0x0);
			MLX5_SET64(klm, klm, address, 0x0);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		MLX5_SET(mkc, mkc, access_mode_1_0, attr->log_entity_size ?
			 MLX5_MKC_ACCESS_MODE_KLM_FBS :
			 MLX5_MKC_ACCESS_MODE_KLM);
		/*
		 * NOTE(review): log_page_size carries log_entity_size here -
		 * presumably per PRM for fixed-buffer-size KLM mkeys;
		 * confirm against the PRM.
		 */
		MLX5_SET(mkc, mkc, log_page_size, attr->log_entity_size);
	} else {
		/* Direct (MTT) mkey: one 8B entry per page, 16B octwords. */
		translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
		MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
		MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
	}
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 translation_size);
	MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
	MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
	MLX5_SET(mkc, mkc, lw, 0x1);
	MLX5_SET(mkc, mkc, lr, 0x1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, attr->pd);
	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
	if (attr->relaxed_ordering == 1) {
		MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
		MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
	}
	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
	MLX5_SET64(mkc, mkc, len, attr->size);
	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
					       sizeof(out));
	if (!mkey->obj) {
		DRV_LOG(ERR, "Can't create %sdirect mkey - error %d\n",
			klm_num ? "an in" : "a ", errno);
		rte_errno = errno;
		rte_free(mkey);
		return NULL;
	}
	mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
	/* Low 8 bits of the mkey tag are taken from the umem id. */
	mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
	return mkey;
}

/**
 * Get status of devx command response.
 * Mainly used for asynchronous commands.
 *
 * @param[in] out
 *   The out response buffer.
 *
 * @return
 *   0 on success, non-zero value otherwise.
 */
int
mlx5_devx_get_out_command_status(void *out)
{
	int status;

	if (!out)
		return -EINVAL;
	/*
	 * NOTE(review): the query_flow_counter_out layout is used here
	 * generically; assumes status/syndrome sit at the same offsets in
	 * every DevX response layout - confirm against the PRM.
	 */
	status = MLX5_GET(query_flow_counter_out, out, status);
	if (status) {
		int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);

		DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
			syndrome);
	}
	return status;
}

/**
 * Destroy any object allocated by a Devx API.
 *
 * @param[in] obj
 *   Pointer to a general object.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
{
	int ret;

	/* NULL is tolerated so callers may destroy unconditionally. */
	if (!obj)
		return 0;
	ret = mlx5_glue->devx_obj_destroy(obj->obj);
	rte_free(obj);
	return ret;
}

/**
 * Query NIC vport context.
 * Fills minimal inline attribute.
 *
 * @param[in] ctx
 *   ibv contexts returned from mlx5dv_open_device.
 * @param[in] vport
 *   vport index
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
static int
mlx5_devx_cmd_query_nic_vport_context(void *ctx,
				      unsigned int vport,
				      struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	void *vctx;
	int status, syndrome, rc;

	/* Query NIC vport context to determine inline mode. */
	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	/* Vport 0 means "own vport"; others need the explicit flag. */
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(query_nic_vport_context_out, out, status);
	syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query NIC vport context, "
			"status %x, syndrome = %x",
			status, syndrome);
		return -1;
	}
	vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
			    nic_vport_context);
	attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
					   min_wqe_inline_mode);
	return 0;
error:
	/* Normalize positive glue-layer error codes to negative. */
	rc = (rc > 0) ? -rc : rc;
	return rc;
}

/**
 * Query NIC vDPA attributes.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[out] vdpa_attr
 *   vDPA Attributes structure to fill.
327 */ 328 static void 329 mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx, 330 struct mlx5_hca_vdpa_attr *vdpa_attr) 331 { 332 uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0}; 333 uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0}; 334 void *hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability); 335 int status, syndrome, rc; 336 337 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 338 MLX5_SET(query_hca_cap_in, in, op_mod, 339 MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION | 340 MLX5_HCA_CAP_OPMOD_GET_CUR); 341 rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); 342 status = MLX5_GET(query_hca_cap_out, out, status); 343 syndrome = MLX5_GET(query_hca_cap_out, out, syndrome); 344 if (rc || status) { 345 RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities," 346 " status %x, syndrome = %x", status, syndrome); 347 vdpa_attr->valid = 0; 348 } else { 349 vdpa_attr->valid = 1; 350 vdpa_attr->desc_tunnel_offload_type = 351 MLX5_GET(virtio_emulation_cap, hcattr, 352 desc_tunnel_offload_type); 353 vdpa_attr->eth_frame_offload_type = 354 MLX5_GET(virtio_emulation_cap, hcattr, 355 eth_frame_offload_type); 356 vdpa_attr->virtio_version_1_0 = 357 MLX5_GET(virtio_emulation_cap, hcattr, 358 virtio_version_1_0); 359 vdpa_attr->tso_ipv4 = MLX5_GET(virtio_emulation_cap, hcattr, 360 tso_ipv4); 361 vdpa_attr->tso_ipv6 = MLX5_GET(virtio_emulation_cap, hcattr, 362 tso_ipv6); 363 vdpa_attr->tx_csum = MLX5_GET(virtio_emulation_cap, hcattr, 364 tx_csum); 365 vdpa_attr->rx_csum = MLX5_GET(virtio_emulation_cap, hcattr, 366 rx_csum); 367 vdpa_attr->event_mode = MLX5_GET(virtio_emulation_cap, hcattr, 368 event_mode); 369 vdpa_attr->virtio_queue_type = 370 MLX5_GET(virtio_emulation_cap, hcattr, 371 virtio_queue_type); 372 vdpa_attr->log_doorbell_stride = 373 MLX5_GET(virtio_emulation_cap, hcattr, 374 log_doorbell_stride); 375 vdpa_attr->log_doorbell_bar_size = 376 MLX5_GET(virtio_emulation_cap, hcattr, 377 log_doorbell_bar_size); 378 
vdpa_attr->doorbell_bar_offset = 379 MLX5_GET64(virtio_emulation_cap, hcattr, 380 doorbell_bar_offset); 381 vdpa_attr->max_num_virtio_queues = 382 MLX5_GET(virtio_emulation_cap, hcattr, 383 max_num_virtio_queues); 384 vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr, 385 umem_1_buffer_param_a); 386 vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr, 387 umem_1_buffer_param_b); 388 vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr, 389 umem_2_buffer_param_a); 390 vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr, 391 umem_2_buffer_param_b); 392 vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr, 393 umem_3_buffer_param_a); 394 vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr, 395 umem_3_buffer_param_b); 396 } 397 } 398 399 /** 400 * Query HCA attributes. 401 * Using those attributes we can check on run time if the device 402 * is having the required capabilities. 403 * 404 * @param[in] ctx 405 * Context returned from mlx5 open_device() glue function. 406 * @param[out] attr 407 * Attributes device values. 408 * 409 * @return 410 * 0 on success, a negative value otherwise. 
411 */ 412 int 413 mlx5_devx_cmd_query_hca_attr(void *ctx, 414 struct mlx5_hca_attr *attr) 415 { 416 uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0}; 417 uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0}; 418 void *hcattr; 419 int status, syndrome, rc; 420 421 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 422 MLX5_SET(query_hca_cap_in, in, op_mod, 423 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE | 424 MLX5_HCA_CAP_OPMOD_GET_CUR); 425 426 rc = mlx5_glue->devx_general_cmd(ctx, 427 in, sizeof(in), out, sizeof(out)); 428 if (rc) 429 goto error; 430 status = MLX5_GET(query_hca_cap_out, out, status); 431 syndrome = MLX5_GET(query_hca_cap_out, out, syndrome); 432 if (status) { 433 DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, " 434 "status %x, syndrome = %x", 435 status, syndrome); 436 return -1; 437 } 438 hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability); 439 attr->flow_counter_bulk_alloc_bitmap = 440 MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc); 441 attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr, 442 flow_counters_dump); 443 attr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr, 444 log_max_rqt_size); 445 attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager); 446 attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin); 447 attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr, 448 log_max_hairpin_queues); 449 attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr, 450 log_max_hairpin_wq_data_sz); 451 attr->log_max_hairpin_num_packets = MLX5_GET 452 (cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz); 453 attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id); 454 attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr, 455 relaxed_ordering_write); 456 attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr, 457 relaxed_ordering_read); 458 attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr, 459 eth_net_offloads); 460 attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt); 
461 attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr, 462 flex_parser_protocols); 463 attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos); 464 attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr, 465 general_obj_types) & 466 MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q); 467 if (attr->qos.sup) { 468 MLX5_SET(query_hca_cap_in, in, op_mod, 469 MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP | 470 MLX5_HCA_CAP_OPMOD_GET_CUR); 471 rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), 472 out, sizeof(out)); 473 if (rc) 474 goto error; 475 if (status) { 476 DRV_LOG(DEBUG, "Failed to query devx QOS capabilities," 477 " status %x, syndrome = %x", 478 status, syndrome); 479 return -1; 480 } 481 hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability); 482 attr->qos.srtcm_sup = 483 MLX5_GET(qos_cap, hcattr, flow_meter_srtcm); 484 attr->qos.log_max_flow_meter = 485 MLX5_GET(qos_cap, hcattr, log_max_flow_meter); 486 attr->qos.flow_meter_reg_c_ids = 487 MLX5_GET(qos_cap, hcattr, flow_meter_reg_id); 488 attr->qos.flow_meter_reg_share = 489 MLX5_GET(qos_cap, hcattr, flow_meter_reg_share); 490 } 491 if (attr->vdpa.valid) 492 mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa); 493 if (!attr->eth_net_offloads) 494 return 0; 495 496 /* Query HCA offloads for Ethernet protocol. 
*/ 497 memset(in, 0, sizeof(in)); 498 memset(out, 0, sizeof(out)); 499 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 500 MLX5_SET(query_hca_cap_in, in, op_mod, 501 MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS | 502 MLX5_HCA_CAP_OPMOD_GET_CUR); 503 504 rc = mlx5_glue->devx_general_cmd(ctx, 505 in, sizeof(in), 506 out, sizeof(out)); 507 if (rc) { 508 attr->eth_net_offloads = 0; 509 goto error; 510 } 511 status = MLX5_GET(query_hca_cap_out, out, status); 512 syndrome = MLX5_GET(query_hca_cap_out, out, syndrome); 513 if (status) { 514 DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, " 515 "status %x, syndrome = %x", 516 status, syndrome); 517 attr->eth_net_offloads = 0; 518 return -1; 519 } 520 hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability); 521 attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps, 522 hcattr, wqe_vlan_insert); 523 attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr, 524 lro_cap); 525 attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps, 526 hcattr, tunnel_lro_gre); 527 attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps, 528 hcattr, tunnel_lro_vxlan); 529 attr->lro_max_msg_sz_mode = MLX5_GET 530 (per_protocol_networking_offload_caps, 531 hcattr, lro_max_msg_sz_mode); 532 for (int i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) { 533 attr->lro_timer_supported_periods[i] = 534 MLX5_GET(per_protocol_networking_offload_caps, hcattr, 535 lro_timer_supported_periods[i]); 536 } 537 attr->tunnel_stateless_geneve_rx = 538 MLX5_GET(per_protocol_networking_offload_caps, 539 hcattr, tunnel_stateless_geneve_rx); 540 attr->geneve_max_opt_len = 541 MLX5_GET(per_protocol_networking_offload_caps, 542 hcattr, max_geneve_opt_len); 543 attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps, 544 hcattr, wqe_inline_mode); 545 attr->tunnel_stateless_gtp = MLX5_GET 546 (per_protocol_networking_offload_caps, 547 hcattr, tunnel_stateless_gtp); 548 if 
(attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) 549 return 0; 550 if (attr->eth_virt) { 551 rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr); 552 if (rc) { 553 attr->eth_virt = 0; 554 goto error; 555 } 556 } 557 return 0; 558 error: 559 rc = (rc > 0) ? -rc : rc; 560 return rc; 561 } 562 563 /** 564 * Query TIS transport domain from QP verbs object using DevX API. 565 * 566 * @param[in] qp 567 * Pointer to verbs QP returned by ibv_create_qp . 568 * @param[in] tis_num 569 * TIS number of TIS to query. 570 * @param[out] tis_td 571 * Pointer to TIS transport domain variable, to be set by the routine. 572 * 573 * @return 574 * 0 on success, a negative value otherwise. 575 */ 576 int 577 mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num, 578 uint32_t *tis_td) 579 { 580 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 581 uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0}; 582 uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0}; 583 int rc; 584 void *tis_ctx; 585 586 MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS); 587 MLX5_SET(query_tis_in, in, tisn, tis_num); 588 rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out)); 589 if (rc) { 590 DRV_LOG(ERR, "Failed to query QP using DevX"); 591 return -rc; 592 }; 593 tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context); 594 *tis_td = MLX5_GET(tisc, tis_ctx, transport_domain); 595 return 0; 596 #else 597 (void)qp; 598 (void)tis_num; 599 (void)tis_td; 600 return -ENOTSUP; 601 #endif 602 } 603 604 /** 605 * Fill WQ data for DevX API command. 606 * Utility function for use when creating DevX objects containing a WQ. 607 * 608 * @param[in] wq_ctx 609 * Pointer to WQ context to fill with data. 610 * @param [in] wq_attr 611 * Pointer to WQ attributes structure to fill in WQ context. 
612 */ 613 static void 614 devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr) 615 { 616 MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type); 617 MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature); 618 MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode); 619 MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave); 620 MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge); 621 MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size); 622 MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset); 623 MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm); 624 MLX5_SET(wq, wq_ctx, pd, wq_attr->pd); 625 MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page); 626 MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr); 627 MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter); 628 MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter); 629 MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride); 630 MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz); 631 MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz); 632 MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid); 633 MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid); 634 MLX5_SET(wq, wq_ctx, log_hairpin_num_packets, 635 wq_attr->log_hairpin_num_packets); 636 MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz); 637 MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides, 638 wq_attr->single_wqe_log_num_of_strides); 639 MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en); 640 MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes, 641 wq_attr->single_stride_log_num_of_bytes); 642 MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id); 643 MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id); 644 MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset); 645 } 646 647 /** 648 * Create RQ using DevX API. 649 * 650 * @param[in] ctx 651 * Context returned from mlx5 open_device() glue function. 
652 * @param [in] rq_attr 653 * Pointer to create RQ attributes structure. 654 * @param [in] socket 655 * CPU socket ID for allocations. 656 * 657 * @return 658 * The DevX object created, NULL otherwise and rte_errno is set. 659 */ 660 struct mlx5_devx_obj * 661 mlx5_devx_cmd_create_rq(void *ctx, 662 struct mlx5_devx_create_rq_attr *rq_attr, 663 int socket) 664 { 665 uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0}; 666 uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0}; 667 void *rq_ctx, *wq_ctx; 668 struct mlx5_devx_wq_attr *wq_attr; 669 struct mlx5_devx_obj *rq = NULL; 670 671 rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket); 672 if (!rq) { 673 DRV_LOG(ERR, "Failed to allocate RQ data"); 674 rte_errno = ENOMEM; 675 return NULL; 676 } 677 MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ); 678 rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx); 679 MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky); 680 MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en); 681 MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs); 682 MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd); 683 MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type); 684 MLX5_SET(rqc, rq_ctx, state, rq_attr->state); 685 MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en); 686 MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin); 687 MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index); 688 MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn); 689 MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id); 690 MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn); 691 wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq); 692 wq_attr = &rq_attr->wq_attr; 693 devx_cmd_fill_wq_data(wq_ctx, wq_attr); 694 rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), 695 out, sizeof(out)); 696 if (!rq->obj) { 697 DRV_LOG(ERR, "Failed to create RQ using DevX"); 698 rte_errno = errno; 699 rte_free(rq); 700 return NULL; 701 } 702 rq->id = MLX5_GET(create_rq_out, out, rqn); 703 return rq; 704 } 705 706 /** 707 * Modify RQ using 
DevX API. 708 * 709 * @param[in] rq 710 * Pointer to RQ object structure. 711 * @param [in] rq_attr 712 * Pointer to modify RQ attributes structure. 713 * 714 * @return 715 * 0 on success, a negative errno value otherwise and rte_errno is set. 716 */ 717 int 718 mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq, 719 struct mlx5_devx_modify_rq_attr *rq_attr) 720 { 721 uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0}; 722 uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0}; 723 void *rq_ctx, *wq_ctx; 724 int ret; 725 726 MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ); 727 MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state); 728 MLX5_SET(modify_rq_in, in, rqn, rq->id); 729 MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask); 730 rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx); 731 MLX5_SET(rqc, rq_ctx, state, rq_attr->state); 732 if (rq_attr->modify_bitmask & 733 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS) 734 MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs); 735 if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD) 736 MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd); 737 if (rq_attr->modify_bitmask & 738 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID) 739 MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id); 740 MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq); 741 MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca); 742 if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) { 743 wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq); 744 MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm); 745 } 746 ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in), 747 out, sizeof(out)); 748 if (ret) { 749 DRV_LOG(ERR, "Failed to modify RQ using DevX"); 750 rte_errno = errno; 751 return -errno; 752 } 753 return ret; 754 } 755 756 /** 757 * Create TIR using DevX API. 758 * 759 * @param[in] ctx 760 * Context returned from mlx5 open_device() glue function. 
 * @param [in] tir_attr
 *   Pointer to TIR attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tir(void *ctx,
			 struct mlx5_devx_tir_attr *tir_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
	void *tir_ctx, *outer, *inner, *rss_key;
	struct mlx5_devx_obj *tir = NULL;

	tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
	if (!tir) {
		DRV_LOG(ERR, "Failed to allocate TIR data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
	/* General TIR configuration: dispatch type, LRO, RSS function. */
	MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
	MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
		 tir_attr->lro_timeout_period_usecs);
	MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
	MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
	MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
	MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
	MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
		 tir_attr->tunneled_offload_en);
	MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
	MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
	MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
	MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
	/* RSS Toeplitz hash key. */
	rss_key = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_toeplitz_key);
	memcpy(rss_key, tir_attr->rx_hash_toeplitz_key, MLX5_RSS_HASH_KEY_LEN);
	/* Hash field selection for the outer (non-tunneled) headers. */
	outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
	MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
	MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
	MLX5_SET(rx_hash_field_select, outer, selected_fields,
		 tir_attr->rx_hash_field_selector_outer.selected_fields);
	/* Hash field selection for the inner (tunneled) headers. */
	inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
	MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l3_prot_type);
	MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l4_prot_type);
	MLX5_SET(rx_hash_field_select, inner, selected_fields,
		 tir_attr->rx_hash_field_selector_inner.selected_fields);
	tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tir->obj) {
		DRV_LOG(ERR, "Failed to create TIR using DevX");
		rte_errno = errno;
		rte_free(tir);
		return NULL;
	}
	tir->id = MLX5_GET(create_tir_out, out, tirn);
	return tir;
}

/**
 * Create RQT using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
835 */ 836 struct mlx5_devx_obj * 837 mlx5_devx_cmd_create_rqt(void *ctx, 838 struct mlx5_devx_rqt_attr *rqt_attr) 839 { 840 uint32_t *in = NULL; 841 uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + 842 rqt_attr->rqt_actual_size * sizeof(uint32_t); 843 uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; 844 void *rqt_ctx; 845 struct mlx5_devx_obj *rqt = NULL; 846 int i; 847 848 in = rte_calloc(__func__, 1, inlen, 0); 849 if (!in) { 850 DRV_LOG(ERR, "Failed to allocate RQT IN data"); 851 rte_errno = ENOMEM; 852 return NULL; 853 } 854 rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0); 855 if (!rqt) { 856 DRV_LOG(ERR, "Failed to allocate RQT data"); 857 rte_errno = ENOMEM; 858 rte_free(in); 859 return NULL; 860 } 861 MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); 862 rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 863 MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type); 864 MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size); 865 MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size); 866 for (i = 0; i < rqt_attr->rqt_actual_size; i++) 867 MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]); 868 rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out)); 869 rte_free(in); 870 if (!rqt->obj) { 871 DRV_LOG(ERR, "Failed to create RQT using DevX"); 872 rte_errno = errno; 873 rte_free(rqt); 874 return NULL; 875 } 876 rqt->id = MLX5_GET(create_rqt_out, out, rqtn); 877 return rqt; 878 } 879 880 /** 881 * Modify RQT using DevX API. 882 * 883 * @param[in] rqt 884 * Pointer to RQT DevX object structure. 885 * @param [in] rqt_attr 886 * Pointer to RQT attributes structure. 887 * 888 * @return 889 * 0 on success, a negative errno value otherwise and rte_errno is set. 
890 */ 891 int 892 mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, 893 struct mlx5_devx_rqt_attr *rqt_attr) 894 { 895 uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + 896 rqt_attr->rqt_actual_size * sizeof(uint32_t); 897 uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0}; 898 uint32_t *in = rte_calloc(__func__, 1, inlen, 0); 899 void *rqt_ctx; 900 int i; 901 int ret; 902 903 if (!in) { 904 DRV_LOG(ERR, "Failed to allocate RQT modify IN data."); 905 rte_errno = ENOMEM; 906 return -ENOMEM; 907 } 908 MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT); 909 MLX5_SET(modify_rqt_in, in, rqtn, rqt->id); 910 MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1); 911 rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context); 912 MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type); 913 MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size); 914 MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size); 915 for (i = 0; i < rqt_attr->rqt_actual_size; i++) 916 MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]); 917 ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out)); 918 rte_free(in); 919 if (ret) { 920 DRV_LOG(ERR, "Failed to modify RQT using DevX."); 921 rte_errno = errno; 922 return -rte_errno; 923 } 924 return ret; 925 } 926 927 /** 928 * Create SQ using DevX API. 929 * 930 * @param[in] ctx 931 * Context returned from mlx5 open_device() glue function. 932 * @param [in] sq_attr 933 * Pointer to SQ attributes structure. 934 * @param [in] socket 935 * CPU socket ID for allocations. 936 * 937 * @return 938 * The DevX object created, NULL otherwise and rte_errno is set. 
939 **/ 940 struct mlx5_devx_obj * 941 mlx5_devx_cmd_create_sq(void *ctx, 942 struct mlx5_devx_create_sq_attr *sq_attr) 943 { 944 uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0}; 945 uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0}; 946 void *sq_ctx; 947 void *wq_ctx; 948 struct mlx5_devx_wq_attr *wq_attr; 949 struct mlx5_devx_obj *sq = NULL; 950 951 sq = rte_calloc(__func__, 1, sizeof(*sq), 0); 952 if (!sq) { 953 DRV_LOG(ERR, "Failed to allocate SQ data"); 954 rte_errno = ENOMEM; 955 return NULL; 956 } 957 MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); 958 sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx); 959 MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky); 960 MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master); 961 MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre); 962 MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en); 963 MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe, 964 sq_attr->flush_in_error_en); 965 MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode, 966 sq_attr->min_wqe_inline_mode); 967 MLX5_SET(sqc, sq_ctx, state, sq_attr->state); 968 MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr); 969 MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp); 970 MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin); 971 MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index); 972 MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn); 973 MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index, 974 sq_attr->packet_pacing_rate_limit_index); 975 MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz); 976 MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num); 977 wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq); 978 wq_attr = &sq_attr->wq_attr; 979 devx_cmd_fill_wq_data(wq_ctx, wq_attr); 980 sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), 981 out, sizeof(out)); 982 if (!sq->obj) { 983 DRV_LOG(ERR, "Failed to create SQ using DevX"); 984 rte_errno = errno; 985 rte_free(sq); 986 return NULL; 987 } 988 sq->id = MLX5_GET(create_sq_out, out, sqn); 989 return sq; 990 } 991 992 /** 993 * Modify SQ 
using DevX API. 994 * 995 * @param[in] sq 996 * Pointer to SQ object structure. 997 * @param [in] sq_attr 998 * Pointer to SQ attributes structure. 999 * 1000 * @return 1001 * 0 on success, a negative errno value otherwise and rte_errno is set. 1002 */ 1003 int 1004 mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq, 1005 struct mlx5_devx_modify_sq_attr *sq_attr) 1006 { 1007 uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0}; 1008 uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0}; 1009 void *sq_ctx; 1010 int ret; 1011 1012 MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); 1013 MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state); 1014 MLX5_SET(modify_sq_in, in, sqn, sq->id); 1015 sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx); 1016 MLX5_SET(sqc, sq_ctx, state, sq_attr->state); 1017 MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq); 1018 MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca); 1019 ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in), 1020 out, sizeof(out)); 1021 if (ret) { 1022 DRV_LOG(ERR, "Failed to modify SQ using DevX"); 1023 rte_errno = errno; 1024 return -errno; 1025 } 1026 return ret; 1027 } 1028 1029 /** 1030 * Create TIS using DevX API. 1031 * 1032 * @param[in] ctx 1033 * Context returned from mlx5 open_device() glue function. 1034 * @param [in] tis_attr 1035 * Pointer to TIS attributes structure. 1036 * 1037 * @return 1038 * The DevX object created, NULL otherwise and rte_errno is set. 
1039 */ 1040 struct mlx5_devx_obj * 1041 mlx5_devx_cmd_create_tis(void *ctx, 1042 struct mlx5_devx_tis_attr *tis_attr) 1043 { 1044 uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0}; 1045 uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0}; 1046 struct mlx5_devx_obj *tis = NULL; 1047 void *tis_ctx; 1048 1049 tis = rte_calloc(__func__, 1, sizeof(*tis), 0); 1050 if (!tis) { 1051 DRV_LOG(ERR, "Failed to allocate TIS object"); 1052 rte_errno = ENOMEM; 1053 return NULL; 1054 } 1055 MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS); 1056 tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx); 1057 MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity, 1058 tis_attr->strict_lag_tx_port_affinity); 1059 MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity, 1060 tis_attr->strict_lag_tx_port_affinity); 1061 MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio); 1062 MLX5_SET(tisc, tis_ctx, transport_domain, 1063 tis_attr->transport_domain); 1064 tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), 1065 out, sizeof(out)); 1066 if (!tis->obj) { 1067 DRV_LOG(ERR, "Failed to create TIS using DevX"); 1068 rte_errno = errno; 1069 rte_free(tis); 1070 return NULL; 1071 } 1072 tis->id = MLX5_GET(create_tis_out, out, tisn); 1073 return tis; 1074 } 1075 1076 /** 1077 * Create transport domain using DevX API. 1078 * 1079 * @param[in] ctx 1080 * Context returned from mlx5 open_device() glue function. 1081 * @return 1082 * The DevX object created, NULL otherwise and rte_errno is set. 
1083 */ 1084 struct mlx5_devx_obj * 1085 mlx5_devx_cmd_create_td(void *ctx) 1086 { 1087 uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0}; 1088 uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0}; 1089 struct mlx5_devx_obj *td = NULL; 1090 1091 td = rte_calloc(__func__, 1, sizeof(*td), 0); 1092 if (!td) { 1093 DRV_LOG(ERR, "Failed to allocate TD object"); 1094 rte_errno = ENOMEM; 1095 return NULL; 1096 } 1097 MLX5_SET(alloc_transport_domain_in, in, opcode, 1098 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN); 1099 td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), 1100 out, sizeof(out)); 1101 if (!td->obj) { 1102 DRV_LOG(ERR, "Failed to create TIS using DevX"); 1103 rte_errno = errno; 1104 rte_free(td); 1105 return NULL; 1106 } 1107 td->id = MLX5_GET(alloc_transport_domain_out, out, 1108 transport_domain); 1109 return td; 1110 } 1111 1112 /** 1113 * Dump all flows to file. 1114 * 1115 * @param[in] fdb_domain 1116 * FDB domain. 1117 * @param[in] rx_domain 1118 * RX domain. 1119 * @param[in] tx_domain 1120 * TX domain. 1121 * @param[out] file 1122 * Pointer to file stream. 1123 * 1124 * @return 1125 * 0 on success, a nagative value otherwise. 1126 */ 1127 int 1128 mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused, 1129 void *rx_domain __rte_unused, 1130 void *tx_domain __rte_unused, FILE *file __rte_unused) 1131 { 1132 int ret = 0; 1133 1134 #ifdef HAVE_MLX5_DR_FLOW_DUMP 1135 if (fdb_domain) { 1136 ret = mlx5_glue->dr_dump_domain(file, fdb_domain); 1137 if (ret) 1138 return ret; 1139 } 1140 MLX5_ASSERT(rx_domain); 1141 ret = mlx5_glue->dr_dump_domain(file, rx_domain); 1142 if (ret) 1143 return ret; 1144 MLX5_ASSERT(tx_domain); 1145 ret = mlx5_glue->dr_dump_domain(file, tx_domain); 1146 #else 1147 ret = ENOTSUP; 1148 #endif 1149 return -ret; 1150 } 1151 1152 /* 1153 * Create CQ using DevX API. 1154 * 1155 * @param[in] ctx 1156 * Context returned from mlx5 open_device() glue function. 1157 * @param [in] attr 1158 * Pointer to CQ attributes structure. 
1159 * 1160 * @return 1161 * The DevX object created, NULL otherwise and rte_errno is set. 1162 */ 1163 struct mlx5_devx_obj * 1164 mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr) 1165 { 1166 uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0}; 1167 uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0}; 1168 struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj), 1169 0); 1170 void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context); 1171 1172 if (!cq_obj) { 1173 DRV_LOG(ERR, "Failed to allocate CQ object memory."); 1174 rte_errno = ENOMEM; 1175 return NULL; 1176 } 1177 MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); 1178 if (attr->db_umem_valid) { 1179 MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid); 1180 MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id); 1181 MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset); 1182 } else { 1183 MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr); 1184 } 1185 MLX5_SET(cqc, cqctx, cc, attr->use_first_only); 1186 MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore); 1187 MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size); 1188 MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size - 1189 MLX5_ADAPTER_PAGE_SHIFT); 1190 MLX5_SET(cqc, cqctx, c_eqn, attr->eqn); 1191 MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id); 1192 if (attr->q_umem_valid) { 1193 MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid); 1194 MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id); 1195 MLX5_SET64(create_cq_in, in, cq_umem_offset, 1196 attr->q_umem_offset); 1197 } 1198 cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, 1199 sizeof(out)); 1200 if (!cq_obj->obj) { 1201 rte_errno = errno; 1202 DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno); 1203 rte_free(cq_obj); 1204 return NULL; 1205 } 1206 cq_obj->id = MLX5_GET(create_cq_out, out, cqn); 1207 return cq_obj; 1208 } 1209 1210 /** 1211 * Create VIRTQ using DevX API. 
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] attr
 *   Pointer to VIRTQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_virtq(void *ctx,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
						      sizeof(*virtq_obj), 0);
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);

	if (!virtq_obj) {
		DRV_LOG(ERR, "Failed to allocate virtq data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	/* VIRTQ is a "general object": opcode/type go in the common header. */
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	/* Initial ring indices and stateless offload flags. */
	MLX5_SET16(virtio_net_q, virtq, hw_available_index,
		   attr->hw_available_index);
	MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6);
	MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum);
	MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
	MLX5_SET16(virtio_q, virtctx, virtio_version_1_0,
		   attr->virtio_version_1_0);
	/* Event channel: interpreted per event_mode (QPN or MSI-X vector). */
	MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
	MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
	/* Virtio ring addresses: descriptor, used and available areas. */
	MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr);
	MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr);
	MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size);
	MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey);
	/* The three umems backing the queue object resources. */
	MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id);
	MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size);
	MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset);
	MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id);
	MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size);
	MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset);
	MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
	MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
	MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
	MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
	virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						    sizeof(out));
	if (!virtq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
		rte_free(virtq_obj);
		return NULL;
	}
	virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return virtq_obj;
}

/**
 * Modify VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param [in] attr
 *   Pointer to modify virtq attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
1291 */ 1292 int 1293 mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj, 1294 struct mlx5_devx_virtq_attr *attr) 1295 { 1296 uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0}; 1297 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 1298 void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq); 1299 void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr); 1300 void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context); 1301 int ret; 1302 1303 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, 1304 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); 1305 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, 1306 MLX5_GENERAL_OBJ_TYPE_VIRTQ); 1307 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id); 1308 MLX5_SET64(virtio_net_q, virtq, modify_field_select, attr->type); 1309 MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index); 1310 switch (attr->type) { 1311 case MLX5_VIRTQ_MODIFY_TYPE_STATE: 1312 MLX5_SET16(virtio_net_q, virtq, state, attr->state); 1313 break; 1314 case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS: 1315 MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey, 1316 attr->dirty_bitmap_mkey); 1317 MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr, 1318 attr->dirty_bitmap_addr); 1319 MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size, 1320 attr->dirty_bitmap_size); 1321 break; 1322 case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE: 1323 MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable, 1324 attr->dirty_bitmap_dump_enable); 1325 break; 1326 default: 1327 rte_errno = EINVAL; 1328 return -rte_errno; 1329 } 1330 ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in), 1331 out, sizeof(out)); 1332 if (ret) { 1333 DRV_LOG(ERR, "Failed to modify VIRTQ using DevX."); 1334 rte_errno = errno; 1335 return -errno; 1336 } 1337 return ret; 1338 } 1339 1340 /** 1341 * Query VIRTQ using DevX API. 1342 * 1343 * @param[in] virtq_obj 1344 * Pointer to virtq object structure. 1345 * @param [in/out] attr 1346 * Pointer to virtq attributes structure. 
1347 * 1348 * @return 1349 * 0 on success, a negative errno value otherwise and rte_errno is set. 1350 */ 1351 int 1352 mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj, 1353 struct mlx5_devx_virtq_attr *attr) 1354 { 1355 uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0}; 1356 uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0}; 1357 void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr); 1358 void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq); 1359 int ret; 1360 1361 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, 1362 MLX5_CMD_OP_QUERY_GENERAL_OBJECT); 1363 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, 1364 MLX5_GENERAL_OBJ_TYPE_VIRTQ); 1365 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id); 1366 ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in), 1367 out, sizeof(out)); 1368 if (ret) { 1369 DRV_LOG(ERR, "Failed to modify VIRTQ using DevX."); 1370 rte_errno = errno; 1371 return -errno; 1372 } 1373 attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq, 1374 hw_available_index); 1375 attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index); 1376 return ret; 1377 } 1378 1379 /** 1380 * Create QP using DevX API. 1381 * 1382 * @param[in] ctx 1383 * Context returned from mlx5 open_device() glue function. 1384 * @param [in] attr 1385 * Pointer to QP attributes structure. 1386 * 1387 * @return 1388 * The DevX object created, NULL otherwise and rte_errno is set. 
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_qp(void *ctx,
			struct mlx5_devx_qp_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	struct mlx5_devx_obj *qp_obj = rte_zmalloc(__func__, sizeof(*qp_obj),
						   0);
	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	if (!qp_obj) {
		DRV_LOG(ERR, "Failed to allocate QP data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	/* Always created as a reliable-connected QP. */
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pd, attr->pd);
	/* A zero UAR index selects the FW-managed "special QP" flavor. */
	if (attr->uar_index) {
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
		/* PRM expects page size relative to the 4K adapter page. */
		MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
			 MLX5_ADAPTER_PAGE_SHIFT);
		if (attr->sq_size) {
			/* PRM takes log sizes - the size must be a power of 2. */
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
			MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
			MLX5_SET(qpc, qpc, log_sq_size,
				 rte_log2_u32(attr->sq_size));
		} else {
			MLX5_SET(qpc, qpc, no_sq, 1);
		}
		if (attr->rq_size) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
			MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
			MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
				 MLX5_LOG_RQ_STRIDE_SHIFT);
			MLX5_SET(qpc, qpc, log_rq_size,
				 rte_log2_u32(attr->rq_size));
			MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
		} else {
			MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		}
		if (attr->dbr_umem_valid) {
			MLX5_SET(qpc, qpc, dbr_umem_valid,
				 attr->dbr_umem_valid);
			MLX5_SET(qpc, qpc, dbr_umem_id, attr->dbr_umem_id);
		}
		MLX5_SET64(qpc, qpc, dbr_addr, attr->dbr_address);
		MLX5_SET64(create_qp_in, in, wq_umem_offset,
			   attr->wq_umem_offset);
		MLX5_SET(create_qp_in, in, wq_umem_id, attr->wq_umem_id);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
	} else {
		/* Special QP to be managed by FW - no SQ\RQ\CQ\UAR\DB rec. */
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		MLX5_SET(qpc, qpc, no_sq, 1);
	}
	qp_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!qp_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
		rte_free(qp_obj);
		return NULL;
	}
	qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
	return qp_obj;
}

/**
 * Modify QP using DevX API.
 * Currently supports only force loop-back QP.
 *
 * @param[in] qp
 *   Pointer to QP object structure.
 * @param [in] qp_st_mod_op
 *   The QP state modification operation.
 * @param [in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
			      uint32_t remote_qp_id)
{
	/* One buffer per supported transition; the op selects which layout
	 * and length are actually used.
	 */
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_in)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_in)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_in)];
	} in;
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_out)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_out)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_out)];
	} out;
	void *qpc;
	int ret;
	unsigned int inlen;
	unsigned int outlen;

	/* memset (not {0}) so every union member is fully zeroed. */
	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	/* NOTE(review): opcode is written via the rst2init layout for all
	 * ops - presumably the opcode field offset is common to the three
	 * layouts; verify against mlx5_prm.h.
	 */
	MLX5_SET(rst2init_qp_in, &in, opcode, qp_st_mod_op);
	switch (qp_st_mod_op) {
	case MLX5_CMD_OP_RST2INIT_QP:
		MLX5_SET(rst2init_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(rst2init_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, rre, 1);
		MLX5_SET(qpc, qpc, rwe, 1);
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		inlen = sizeof(in.rst2init);
		outlen = sizeof(out.rst2init);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		MLX5_SET(init2rtr_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(init2rtr_qp_in, &in, qpc);
		/* Force loop-back: fl=1, local vhca port. */
		MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, mtu, 1);
		MLX5_SET(qpc, qpc, log_msg_max, 30);
		MLX5_SET(qpc, qpc, remote_qpn, remote_qp_id);
		MLX5_SET(qpc, qpc, min_rnr_nak, 0);
		inlen = sizeof(in.init2rtr);
		outlen = sizeof(out.init2rtr);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc);
		MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id);
		MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 14);
		MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
		MLX5_SET(qpc, qpc, retry_count, 7);
		MLX5_SET(qpc, qpc, rnr_retry, 7);
		inlen = sizeof(in.rtr2rts);
		outlen = sizeof(out.rtr2rts);
		break;
	default:
		DRV_LOG(ERR, "Invalid or unsupported QP modify op %u.",
			qp_st_mod_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(qp->obj, &in, inlen, &out, outlen);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify QP using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}