/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_eal_paging.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"

/* FW writes status value to the OUT buffer at offset 00H */
#define MLX5_FW_STATUS(o) MLX5_GET(general_obj_out_cmd_hdr, (o), status)
/* FW writes syndrome value to the OUT buffer at offset 04H */
#define MLX5_FW_SYNDROME(o) MLX5_GET(general_obj_out_cmd_hdr, (o), syndrome)

#define MLX5_DEVX_ERR_RC(x) ((x) > 0 ? -(x) : ((x) < 0 ? (x) : -1))

#define DEVX_DRV_LOG(level, out, reason, param, value) \
do { \
	/* \
	 * Some (old) GCC compilers, e.g. 7.5.0 and aarch64 GCC 7.1-2017.08, \
	 * do not expand the macro correctly when it is invoked with a \
	 * literal `NULL` as `param`. \
	 * Use `local_param` to avoid direct `NULL` expansion. \
	 */ \
	const char *local_param = (const char *)param; \
 \
	rte_errno = errno; \
	if (!local_param) { \
		DRV_LOG(level, \
			"DevX %s failed errno=%d status=%#x syndrome=%#x", \
			(reason), errno, MLX5_FW_STATUS((out)), \
			MLX5_FW_SYNDROME((out))); \
	} else { \
		DRV_LOG(level, \
			"DevX %s %s=%#X failed errno=%d status=%#x syndrome=%#x", \
			(reason), local_param, (value), errno, \
			MLX5_FW_STATUS((out)), MLX5_FW_SYNDROME((out))); \
	} \
} while (0)

static void *
mlx5_devx_get_hca_cap(void *ctx, uint32_t *in, uint32_t *out,
		      int *err, uint32_t flags)
{
	const size_t size_in = MLX5_ST_SZ_DW(query_hca_cap_in) * sizeof(int);
	const size_t size_out = MLX5_ST_SZ_DW(query_hca_cap_out) * sizeof(int);
	int rc;

	memset(in, 0, size_in);
	memset(out, 0, size_out);
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, flags);
	rc = mlx5_glue->devx_general_cmd(ctx, in, size_in, out, size_out);
	if (rc || MLX5_FW_STATUS(out)) {
		DEVX_DRV_LOG(ERR, out, "HCA capabilities", "func", flags >> 1);
		if (err)
			*err = MLX5_DEVX_ERR_RC(rc);
		return NULL;
	}
	if (err)
		*err = 0;
	return MLX5_ADDR_OF(query_hca_cap_out, out, capability);
}
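
/*
 * Usage sketch (illustrative only): callers pass scratch `in`/`out` buffers
 * sized for QUERY_HCA_CAP and receive a pointer into `out` at the capability
 * payload, as done repeatedly in mlx5_devx_cmd_query_hca_attr() below:
 *
 *	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)];
 *	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)];
 *	int rc;
 *	void *hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
 *			MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
 *			MLX5_HCA_CAP_OPMOD_GET_CUR);
 *	if (!hcattr)
 *		return rc; // rc already normalized to a negative value
 */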

/**
 * Perform read access to the registers. Reads data from the register
 * and stores it in the specified buffer.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] reg_id
 *   Register identifier according to the PRM.
 * @param[in] arg
 *   Register access auxiliary parameter according to the PRM.
 * @param[out] data
 *   Pointer to the buffer to store read data.
 * @param[in] dw_cnt
 *   Buffer size in double words.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id, uint32_t arg,
			    uint32_t *data, uint32_t dw_cnt)
{
	uint32_t in[MLX5_ST_SZ_DW(access_register_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(access_register_out) +
		     MLX5_ACCESS_REGISTER_DATA_DWORD_MAX] = {0};
	int rc;

	MLX5_ASSERT(data && dw_cnt);
	MLX5_ASSERT(dw_cnt <= MLX5_ACCESS_REGISTER_DATA_DWORD_MAX);
	if (dw_cnt > MLX5_ACCESS_REGISTER_DATA_DWORD_MAX) {
		DRV_LOG(ERR, "Not enough buffer for register read data");
		return -1;
	}
	MLX5_SET(access_register_in, in, opcode,
		 MLX5_CMD_OP_ACCESS_REGISTER_USER);
	MLX5_SET(access_register_in, in, op_mod,
		 MLX5_ACCESS_REGISTER_IN_OP_MOD_READ);
	MLX5_SET(access_register_in, in, register_id, reg_id);
	MLX5_SET(access_register_in, in, argument, arg);
	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out,
					 MLX5_ST_SZ_BYTES(access_register_out) +
					 sizeof(uint32_t) * dw_cnt);
	if (rc || MLX5_FW_STATUS(out)) {
		DEVX_DRV_LOG(DEBUG, out, "read access", "NIC register", reg_id);
		return MLX5_DEVX_ERR_RC(rc);
	}
	memcpy(data, &out[MLX5_ST_SZ_DW(access_register_out)],
	       dw_cnt * sizeof(uint32_t));
	return 0;
}

/**
 * Perform write access to the registers.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] reg_id
 *   Register identifier according to the PRM.
 * @param[in] arg
 *   Register access auxiliary parameter according to the PRM.
 * @param[in] data
 *   Pointer to the buffer containing data to write.
 * @param[in] dw_cnt
 *   Buffer size in double words (32-bit units).
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_register_write(void *ctx, uint16_t reg_id, uint32_t arg,
			     uint32_t *data, uint32_t dw_cnt)
{
	uint32_t in[MLX5_ST_SZ_DW(access_register_in) +
		    MLX5_ACCESS_REGISTER_DATA_DWORD_MAX] = {0};
	uint32_t out[MLX5_ST_SZ_DW(access_register_out)] = {0};
	int rc;
	void *ptr;

	MLX5_ASSERT(data && dw_cnt);
	MLX5_ASSERT(dw_cnt <= MLX5_ACCESS_REGISTER_DATA_DWORD_MAX);
	if (dw_cnt > MLX5_ACCESS_REGISTER_DATA_DWORD_MAX) {
		DRV_LOG(ERR, "Data to write exceeds max size");
		return -1;
	}
	MLX5_SET(access_register_in, in, opcode,
		 MLX5_CMD_OP_ACCESS_REGISTER_USER);
	MLX5_SET(access_register_in, in, op_mod,
		 MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE);
	MLX5_SET(access_register_in, in, register_id, reg_id);
	MLX5_SET(access_register_in, in, argument, arg);
	ptr = MLX5_ADDR_OF(access_register_in, in, register_data);
	memcpy(ptr, data, dw_cnt * sizeof(uint32_t));
	/* The input length covers the command header plus the written dwords. */
	rc = mlx5_glue->devx_general_cmd(ctx, in,
					 MLX5_ST_SZ_BYTES(access_register_in) +
					 dw_cnt * sizeof(uint32_t),
					 out, sizeof(out));
	if (rc || MLX5_FW_STATUS(out)) {
		DEVX_DRV_LOG(ERR, out, "write access", "NIC register", reg_id);
		return MLX5_DEVX_ERR_RC(rc);
	}
	return 0;
}
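
/*
 * Usage sketch (illustrative; `MY_REG_ID` stands in for any PRM register
 * identifier and is hypothetical, not a real definition):
 *
 *	uint32_t reg[4];
 *
 *	if (mlx5_devx_cmd_register_read(ctx, MY_REG_ID, 0,
 *					reg, RTE_DIM(reg)) == 0)
 *		... // reg[] now holds RTE_DIM(reg) dwords of register content
 */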

struct mlx5_devx_obj *
mlx5_devx_cmd_flow_counter_alloc_general(void *ctx,
					 struct mlx5_devx_counter_attr *attr)
{
	struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs),
						0, SOCKET_ID_ANY);
	uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};

	if (!dcs) {
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	if (attr->bulk_log_max_alloc)
		MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk_log_size,
			 attr->flow_counter_bulk_log_size);
	else
		MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk,
			 attr->bulk_n_128);
	if (attr->pd_valid)
		MLX5_SET(alloc_flow_counter_in, in, pd, attr->pd);
	dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
					      sizeof(in), out, sizeof(out));
	if (!dcs->obj) {
		DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
		rte_errno = errno;
		mlx5_free(dcs);
		return NULL;
	}
	dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return dcs;
}

/**
 * Allocate flow counters via DevX interface.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param bulk_n_128
 *   Bulk counter numbers in 128-counter units.
 *
 * @return
 *   Pointer to counter object on success, NULL otherwise and
 *   rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
{
	struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs),
						0, SOCKET_ID_ANY);
	uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};

	if (!dcs) {
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
	dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
					      sizeof(in), out, sizeof(out));
	if (!dcs->obj) {
		DEVX_DRV_LOG(ERR, out, "allocate counters", NULL, 0);
		mlx5_free(dcs);
		return NULL;
	}
	dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return dcs;
}
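
/*
 * Usage sketch (illustrative): allocating a bulk of 4 * 128 counters and
 * releasing it through the generic destroy helper defined further below:
 *
 *	struct mlx5_devx_obj *dcs = mlx5_devx_cmd_flow_counter_alloc(ctx, 4);
 *
 *	if (!dcs)
 *		return -rte_errno;
 *	... // dcs->id is the first counter ID of the allocated bulk
 *	mlx5_devx_cmd_destroy(dcs);
 */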

/**
 * Query flow counters values.
 *
 * @param[in] dcs
 *   DevX object obtained from mlx5_devx_cmd_flow_counter_alloc().
 * @param[in] clear
 *   Whether hardware should clear the counters after the query or not.
 * @param[in] n_counters
 *   0 to read a single counter, otherwise the number of counters to read
 *   (batch query).
 * @param[out] pkts
 *   The number of packets that matched the flow.
 * @param[out] bytes
 *   The number of bytes that matched the flow.
 * @param mkey
 *   The mkey key for batch query.
 * @param addr
 *   The address in the mkey range for batch query.
 * @param cmd_comp
 *   The completion object for asynchronous batch query.
 * @param async_id
 *   The ID to be returned in the asynchronous batch query response.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				 int clear, uint32_t n_counters,
				 uint64_t *pkts, uint64_t *bytes,
				 uint32_t mkey, void *addr,
				 void *cmd_comp,
				 uint64_t async_id)
{
	int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			MLX5_ST_SZ_BYTES(traffic_counter);
	uint32_t out[out_len];
	uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int rc;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
	MLX5_SET(query_flow_counter_in, in, clear, !!clear);

	if (n_counters) {
		MLX5_SET(query_flow_counter_in, in, num_of_counters,
			 n_counters);
		MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
		MLX5_SET(query_flow_counter_in, in, mkey, mkey);
		MLX5_SET64(query_flow_counter_in, in, address,
			   (uint64_t)(uintptr_t)addr);
	}
	if (!cmd_comp)
		rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
					       out_len);
	else
		rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
						     out_len, async_id,
						     cmd_comp);
	if (rc) {
		DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
		rte_errno = rc;
		return -rc;
	}
	if (!n_counters) {
		stats = MLX5_ADDR_OF(query_flow_counter_out,
				     out, flow_statistics);
		*pkts = MLX5_GET64(traffic_counter, stats, packets);
		*bytes = MLX5_GET64(traffic_counter, stats, octets);
	}
	return 0;
}
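
/*
 * Usage sketch (illustrative): synchronous single-counter read, clearing
 * the counter in hardware after the query:
 *
 *	uint64_t pkts, bytes;
 *
 *	if (mlx5_devx_cmd_flow_counter_query(dcs, 1, 0, &pkts, &bytes,
 *					     0, NULL, NULL, 0) == 0)
 *		... // pkts/bytes hold the accumulated statistics
 */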

/**
 * Create a new mkey.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] attr
 *   Attributes of the requested mkey.
 *
 * @return
 *   Pointer to DevX mkey on success, NULL otherwise and rte_errno
 *   is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_mkey_create(void *ctx,
			  struct mlx5_devx_mkey_attr *attr)
{
	struct mlx5_klm *klm_array = attr->klm_array;
	int klm_num = attr->klm_num;
	int in_size_dw = MLX5_ST_SZ_DW(create_mkey_in) +
		     (klm_num ? RTE_ALIGN(klm_num, 4) : 0) * MLX5_ST_SZ_DW(klm);
	uint32_t in[in_size_dw];
	uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
	void *mkc;
	struct mlx5_devx_obj *mkey = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mkey),
						 0, SOCKET_ID_ANY);
	size_t pgsize;
	uint32_t translation_size;

	if (!mkey) {
		rte_errno = ENOMEM;
		return NULL;
	}
	memset(in, 0, in_size_dw * 4);
	pgsize = rte_mem_page_size();
	if (pgsize == (size_t)-1) {
		mlx5_free(mkey);
		DRV_LOG(ERR, "Failed to get page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	if (klm_num > 0) {
		int i;
		uint8_t *klm = (uint8_t *)MLX5_ADDR_OF(create_mkey_in, in,
						       klm_pas_mtt);
		translation_size = RTE_ALIGN(klm_num, 4);
		for (i = 0; i < klm_num; i++) {
			MLX5_SET(klm, klm, byte_count, klm_array[i].byte_count);
			MLX5_SET(klm, klm, mkey, klm_array[i].mkey);
			MLX5_SET64(klm, klm, address, klm_array[i].address);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		for (; i < (int)translation_size; i++) {
			MLX5_SET(klm, klm, mkey, 0x0);
			MLX5_SET64(klm, klm, address, 0x0);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		MLX5_SET(mkc, mkc, access_mode_1_0, attr->log_entity_size ?
			 MLX5_MKC_ACCESS_MODE_KLM_FBS :
			 MLX5_MKC_ACCESS_MODE_KLM);
		MLX5_SET(mkc, mkc, log_page_size, attr->log_entity_size);
	} else {
		translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
		MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
		MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
	}
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 translation_size);
	MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
	MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
	MLX5_SET(mkc, mkc, lw, 0x1);
	MLX5_SET(mkc, mkc, lr, 0x1);
	if (attr->set_remote_rw) {
		MLX5_SET(mkc, mkc, rw, 0x1);
		MLX5_SET(mkc, mkc, rr, 0x1);
	}
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, attr->pd);
	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
	MLX5_SET(mkc, mkc, umr_en, attr->umr_en);
	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
	MLX5_SET(mkc, mkc, relaxed_ordering_write,
		 attr->relaxed_ordering_write);
	MLX5_SET(mkc, mkc, relaxed_ordering_read, attr->relaxed_ordering_read);
	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
	MLX5_SET64(mkc, mkc, len, attr->size);
	MLX5_SET(mkc, mkc, crypto_en, attr->crypto_en);
	if (attr->crypto_en) {
		MLX5_SET(mkc, mkc, bsf_en, attr->crypto_en);
		MLX5_SET(mkc, mkc, bsf_octword_size, 4);
	}
	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
					       sizeof(out));
	if (!mkey->obj) {
		DEVX_DRV_LOG(ERR, out, klm_num ? "create indirect mkey"
					       : "create direct key", NULL, 0);
		mlx5_free(mkey);
		return NULL;
	}
	mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
	mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
	return mkey;
}
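
/*
 * Usage sketch (illustrative): a direct (MTT) mkey covering a registered
 * umem; `buf`, `buf_size`, `umem` and `pdn` are assumptions of this sketch:
 *
 *	struct mlx5_devx_mkey_attr mkey_attr = {
 *		.addr = (uintptr_t)buf,
 *		.size = buf_size,
 *		.umem_id = umem_id,
 *		.pd = pdn,
 *	};
 *	struct mlx5_devx_obj *mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
 *
 *	if (!mkey)
 *		return -rte_errno;
 *	... // mkey->id is the key to reference in WQEs
 */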

/**
 * Get status of devx command response.
 * Mainly used for asynchronous commands.
 *
 * @param[in] out
 *   The out response buffer.
 *
 * @return
 *   0 on success, non-zero value otherwise.
 */
int
mlx5_devx_get_out_command_status(void *out)
{
	int status;

	if (!out)
		return -EINVAL;
	status = MLX5_GET(query_flow_counter_out, out, status);
	if (status) {
		int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);

		DRV_LOG(ERR, "Bad DevX status %x, syndrome = %x", status,
			syndrome);
	}
	return status;
}

/**
 * Destroy any object allocated by a DevX API.
 *
 * @param[in] obj
 *   Pointer to a general object.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
{
	int ret;

	if (!obj)
		return 0;
	ret = mlx5_glue->devx_obj_destroy(obj->obj);
	mlx5_free(obj);
	return ret;
}

/**
 * Query NIC vport context.
 * Fills the minimal inline mode attribute.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param[in] vport
 *   vport index.
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
static int
mlx5_devx_cmd_query_nic_vport_context(void *ctx,
				      unsigned int vport,
				      struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	void *vctx;
	int rc;

	/* Query NIC vport context to determine inline mode. */
	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc || MLX5_FW_STATUS(out)) {
		DEVX_DRV_LOG(ERR, out, "query NIC vport context", NULL, 0);
		return MLX5_DEVX_ERR_RC(rc);
	}
	vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
			    nic_vport_context);
	attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
					   min_wqe_inline_mode);
	return 0;
}
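
/*
 * Usage sketch (illustrative): pairing an asynchronous counter batch query
 * with the status helper above; `out_buf` is the caller-owned response
 * buffer retrieved with the completion and is an assumption of this sketch:
 *
 *	mlx5_devx_cmd_flow_counter_query(dcs, 0, n, NULL, NULL,
 *					 mkey, addr, cmd_comp, async_id);
 *	... // later, after fetching the async completion for async_id
 *	if (mlx5_devx_get_out_command_status(out_buf))
 *		... // firmware reported a bad status; data is invalid
 */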

/**
 * Query NIC vDPA attributes.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[out] vdpa_attr
 *   vDPA attributes structure to fill.
 */
static void
mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
				  struct mlx5_hca_vdpa_attr *vdpa_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)];
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)];
	void *hcattr;

	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, NULL,
			MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr) {
		DRV_LOG(DEBUG, "Failed to query devx VDPA capabilities");
		vdpa_attr->valid = 0;
	} else {
		vdpa_attr->valid = 1;
		vdpa_attr->desc_tunnel_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 desc_tunnel_offload_type);
		vdpa_attr->eth_frame_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 eth_frame_offload_type);
		vdpa_attr->virtio_version_1_0 =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_version_1_0);
		vdpa_attr->tso_ipv4 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv4);
		vdpa_attr->tso_ipv6 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv6);
		vdpa_attr->tx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      tx_csum);
		vdpa_attr->rx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      rx_csum);
		vdpa_attr->event_mode = MLX5_GET(virtio_emulation_cap, hcattr,
						 event_mode);
		vdpa_attr->virtio_queue_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_queue_type);
		vdpa_attr->log_doorbell_stride =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_stride);
		vdpa_attr->vnet_modify_ext =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 vnet_modify_ext);
		vdpa_attr->virtio_net_q_addr_modify =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_net_q_addr_modify);
		vdpa_attr->virtio_q_index_modify =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_q_index_modify);
		vdpa_attr->log_doorbell_bar_size =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_bar_size);
		vdpa_attr->doorbell_bar_offset =
			MLX5_GET64(virtio_emulation_cap, hcattr,
				   doorbell_bar_offset);
		vdpa_attr->max_num_virtio_queues =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 max_num_virtio_queues);
		vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_a);
		vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_b);
		vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_a);
		vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_b);
		vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_a);
		vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_b);
	}
}

/**
 * Query match sample handle parameters.
 *
 * This command allows translating a field sample handle, returned either by
 * PARSE_GRAPH_FLOW_MATCH_SAMPLE or by a GENEVE TLV OPTION object, into the
 * values used for header modification or header matching/hashing.
 *
 * @param[in] ctx
 *   Context used to create either the GENEVE TLV option or the FLEX PARSE
 *   GRAPH object.
 * @param[in] sample_field_id
 *   Field sample handle returned by either PARSE_GRAPH_FLOW_MATCH_SAMPLE
 *   or by the GENEVE TLV OPTION object.
 * @param[out] attr
 *   Pointer to match sample info attributes structure.
 *
 * @return
 *   0 on success, a negative errno otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_match_sample_info_query(void *ctx, uint32_t sample_field_id,
				      struct mlx5_devx_match_sample_info_query_attr *attr)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	uint32_t out[MLX5_ST_SZ_DW(query_match_sample_info_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(query_match_sample_info_in)] = {0};
	int rc;

	MLX5_SET(query_match_sample_info_in, in, opcode,
		 MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO);
	MLX5_SET(query_match_sample_info_in, in, op_mod, 0);
	MLX5_SET(query_match_sample_info_in, in, sample_field_id,
		 sample_field_id);
	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (rc) {
		DRV_LOG(ERR, "Failed to query match sample info using DevX: %s",
			strerror(rc));
		rte_errno = rc;
		return -rc;
	}
	attr->modify_field_id = MLX5_GET(query_match_sample_info_out, out,
					 modify_field_id);
	attr->sample_dw_data = MLX5_GET(query_match_sample_info_out, out,
					field_format_select_dw);
	attr->sample_dw_ok_bit = MLX5_GET(query_match_sample_info_out, out,
					  ok_bit_format_select_dw);
	attr->sample_dw_ok_bit_offset = MLX5_GET(query_match_sample_info_out,
						 out, ok_bit_offset);
	return 0;
#else
	(void)ctx;
	(void)sample_field_id;
	(void)attr;
	return -ENOTSUP;
#endif
}
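
/*
 * Usage sketch (illustrative): resolving a sample handle, e.g. one returned
 * by mlx5_devx_cmd_query_parse_samples() below, into its modify-header
 * field ID:
 *
 *	struct mlx5_devx_match_sample_info_query_attr info = { 0 };
 *
 *	if (!mlx5_devx_cmd_match_sample_info_query(ctx, ids[0], &info))
 *		... // info.modify_field_id feeds MODIFY_HEADER actions
 */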

int
mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
				  uint32_t *ids,
				  uint32_t num, uint8_t *anchor)
{
	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_flex_parser_out)] = {0};
	void *hdr = MLX5_ADDR_OF(create_flex_parser_out, in, hdr);
	void *flex = MLX5_ADDR_OF(create_flex_parser_out, out, flex);
	void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
	int ret;
	uint32_t idx = 0;
	uint32_t i;

	if (num > MLX5_GRAPH_NODE_SAMPLE_NUM) {
		rte_errno = EINVAL;
		DRV_LOG(ERR, "Too many sample IDs to be fetched.");
		return -rte_errno;
	}
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, flex_obj->id);
	ret = mlx5_glue->devx_obj_query(flex_obj->obj, in, sizeof(in),
					out, sizeof(out));
	if (ret) {
		rte_errno = ret;
		DRV_LOG(ERR, "Failed to query sample IDs with object %p.",
			(void *)flex_obj);
		return -rte_errno;
	}
	if (anchor)
		*anchor = MLX5_GET(parse_graph_flex, flex, head_anchor_id);
	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM && idx < num; i++) {
		void *s_off = (void *)((char *)sample + i *
			      MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
		uint32_t en;

		en = MLX5_GET(parse_graph_flow_match_sample, s_off,
			      flow_match_sample_en);
		if (!en)
			continue;
		ids[idx++] = MLX5_GET(parse_graph_flow_match_sample, s_off,
				      flow_match_sample_field_id);
	}
	if (num != idx) {
		rte_errno = EINVAL;
		DRV_LOG(ERR, "Number of sample IDs is not as expected.");
		return -rte_errno;
	}
	return ret;
}
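
/*
 * Usage sketch (illustrative): fetching the sample IDs of a previously
 * created flex parser object; `n_samples` is the number of samples the
 * caller enabled at creation time and is an assumption of this sketch:
 *
 *	uint32_t ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
 *	uint8_t anchor;
 *
 *	if (mlx5_devx_cmd_query_parse_samples(flex_obj, ids, n_samples,
 *					      &anchor))
 *		return -rte_errno;
 */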

struct mlx5_devx_obj *
mlx5_devx_cmd_create_flex_parser(void *ctx,
				 struct mlx5_devx_graph_node_attr *data)
{
	uint32_t in[MLX5_ST_SZ_DW(create_flex_parser_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *hdr = MLX5_ADDR_OF(create_flex_parser_in, in, hdr);
	void *flex = MLX5_ADDR_OF(create_flex_parser_in, in, flex);
	void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
	void *in_arc = MLX5_ADDR_OF(parse_graph_flex, flex, input_arc);
	void *out_arc = MLX5_ADDR_OF(parse_graph_flex, flex, output_arc);
	struct mlx5_devx_obj *parse_flex_obj = mlx5_malloc
		(MLX5_MEM_ZERO, sizeof(*parse_flex_obj), 0, SOCKET_ID_ANY);
	uint32_t i;

	if (!parse_flex_obj) {
		DRV_LOG(ERR, "Failed to allocate flex parser data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
	MLX5_SET(parse_graph_flex, flex, header_length_mode,
		 data->header_length_mode);
	MLX5_SET64(parse_graph_flex, flex, modify_field_select,
		   data->modify_field_select);
	MLX5_SET(parse_graph_flex, flex, header_length_base_value,
		 data->header_length_base_value);
	MLX5_SET(parse_graph_flex, flex, header_length_field_offset,
		 data->header_length_field_offset);
	MLX5_SET(parse_graph_flex, flex, header_length_field_shift,
		 data->header_length_field_shift);
	MLX5_SET(parse_graph_flex, flex, next_header_field_offset,
		 data->next_header_field_offset);
	MLX5_SET(parse_graph_flex, flex, next_header_field_size,
		 data->next_header_field_size);
	MLX5_SET(parse_graph_flex, flex, header_length_field_mask,
		 data->header_length_field_mask);
	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
		struct mlx5_devx_match_sample_attr *s = &data->sample[i];
		void *s_off = (void *)((char *)sample + i *
			      MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));

		if (!s->flow_match_sample_en)
			continue;
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_en, !!s->flow_match_sample_en);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_field_offset,
			 s->flow_match_sample_field_offset);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_offset_mode,
			 s->flow_match_sample_offset_mode);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_field_offset_mask,
			 s->flow_match_sample_field_offset_mask);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_field_offset_shift,
			 s->flow_match_sample_field_offset_shift);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_field_base_offset,
			 s->flow_match_sample_field_base_offset);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_tunnel_mode,
			 s->flow_match_sample_tunnel_mode);
	}
	for (i = 0; i < MLX5_GRAPH_NODE_ARC_NUM; i++) {
		struct mlx5_devx_graph_arc_attr *ia = &data->in[i];
		struct mlx5_devx_graph_arc_attr *oa = &data->out[i];
		void *in_off = (void *)((char *)in_arc + i *
			      MLX5_ST_SZ_BYTES(parse_graph_arc));
		void *out_off = (void *)((char *)out_arc + i *
			      MLX5_ST_SZ_BYTES(parse_graph_arc));

		if (ia->arc_parse_graph_node != 0) {
			MLX5_SET(parse_graph_arc, in_off,
				 compare_condition_value,
				 ia->compare_condition_value);
			MLX5_SET(parse_graph_arc, in_off, start_inner_tunnel,
				 ia->start_inner_tunnel);
			MLX5_SET(parse_graph_arc, in_off, arc_parse_graph_node,
				 ia->arc_parse_graph_node);
			MLX5_SET(parse_graph_arc, in_off,
				 parse_graph_node_handle,
				 ia->parse_graph_node_handle);
		}
		if (oa->arc_parse_graph_node != 0) {
			MLX5_SET(parse_graph_arc, out_off,
				 compare_condition_value,
				 oa->compare_condition_value);
			MLX5_SET(parse_graph_arc, out_off, start_inner_tunnel,
				 oa->start_inner_tunnel);
			MLX5_SET(parse_graph_arc, out_off, arc_parse_graph_node,
				 oa->arc_parse_graph_node);
			MLX5_SET(parse_graph_arc, out_off,
				 parse_graph_node_handle,
				 oa->parse_graph_node_handle);
		}
	}
	parse_flex_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
							 out, sizeof(out));
	if (!parse_flex_obj->obj) {
		DEVX_DRV_LOG(ERR, out, "create FLEX PARSE GRAPH", NULL, 0);
		mlx5_free(parse_flex_obj);
		return NULL;
	}
	parse_flex_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return parse_flex_obj;
}
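
/*
 * Usage sketch (illustrative): a minimal parse graph node with one enabled
 * sample; the field values and the fixed-length mode constant are
 * placeholders, not a validated protocol layout:
 *
 *	struct mlx5_devx_graph_node_attr node = {
 *		.header_length_mode = 0, // fixed header length (PRM value assumed)
 *		.header_length_base_value = 8, // fixed 8-byte header
 *	};
 *	struct mlx5_devx_obj *fp;
 *
 *	node.sample[0].flow_match_sample_en = 1;
 *	node.sample[0].flow_match_sample_field_base_offset = 4;
 *	fp = mlx5_devx_cmd_create_flex_parser(ctx, &node);
 */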

static int
mlx5_devx_cmd_query_hca_parse_graph_node_cap
	(void *ctx, struct mlx5_hca_flex_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)];
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)];
	void *hcattr;
	int rc;

	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_PARSE_GRAPH_NODE_CAP |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr)
		return rc;
	attr->node_in = MLX5_GET(parse_graph_node_cap, hcattr, node_in);
	attr->node_out = MLX5_GET(parse_graph_node_cap, hcattr, node_out);
	attr->header_length_mode = MLX5_GET(parse_graph_node_cap, hcattr,
					    header_length_mode);
	attr->sample_offset_mode = MLX5_GET(parse_graph_node_cap, hcattr,
					    sample_offset_mode);
	attr->max_num_arc_in = MLX5_GET(parse_graph_node_cap, hcattr,
					max_num_arc_in);
	attr->max_num_arc_out = MLX5_GET(parse_graph_node_cap, hcattr,
					 max_num_arc_out);
	attr->max_num_sample = MLX5_GET(parse_graph_node_cap, hcattr,
					max_num_sample);
	attr->parse_graph_anchor = MLX5_GET(parse_graph_node_cap, hcattr,
					    parse_graph_anchor);
	attr->sample_tunnel_inner2 = MLX5_GET(parse_graph_node_cap, hcattr,
					      sample_tunnel_inner2);
	attr->zero_size_supported = MLX5_GET(parse_graph_node_cap, hcattr,
					     zero_size_supported);
	attr->sample_id_in_out = MLX5_GET(parse_graph_node_cap, hcattr,
					  sample_id_in_out);
	attr->max_base_header_length = MLX5_GET(parse_graph_node_cap, hcattr,
						max_base_header_length);
	attr->max_sample_base_offset = MLX5_GET(parse_graph_node_cap, hcattr,
						max_sample_base_offset);
	attr->max_next_header_offset = MLX5_GET(parse_graph_node_cap, hcattr,
						max_next_header_offset);
	attr->header_length_mask_width = MLX5_GET(parse_graph_node_cap, hcattr,
						  header_length_mask_width);
	/* Get the max supported samples from HCA CAP 2. */
	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr)
		return rc;
	attr->max_num_prog_sample =
		MLX5_GET(cmd_hca_cap_2, hcattr, max_num_prog_sample_field);
	return 0;
}

static int
mlx5_devx_query_pkt_integrity_match(void *hcattr)
{
	return MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.inner_l3_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.inner_l4_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.outer_l3_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.outer_l4_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive
				.inner_ipv4_checksum_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.inner_l4_checksum_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive
				.outer_ipv4_checksum_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.outer_l4_checksum_ok);
}

/**
 * Query HCA attributes.
 * Using these attributes, we can check at runtime whether the device has
 * the required capabilities.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_query_hca_attr(void *ctx,
			     struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	bool hca_cap_2_sup;
	uint64_t general_obj_types_supported = 0;
	void *hcattr;
	int rc, i;

	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr)
		return rc;
	hca_cap_2_sup = MLX5_GET(cmd_hca_cap, hcattr, hca_cap_2);
	attr->max_wqe_sz_sq = MLX5_GET(cmd_hca_cap, hcattr, max_wqe_sz_sq);
	attr->flow_counter_bulk_alloc_bitmap =
			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
	attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
					    flow_counters_dump);
	attr->log_max_rmp = MLX5_GET(cmd_hca_cap, hcattr, log_max_rmp);
	attr->mem_rq_rmp = MLX5_GET(cmd_hca_cap, hcattr, mem_rq_rmp);
	attr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr,
					  log_max_rqt_size);
	attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
	attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
	attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
						log_max_hairpin_queues);
	attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
						    log_max_hairpin_wq_data_sz);
	attr->log_max_hairpin_num_packets = MLX5_GET
		(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
	attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
	attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr,
						relaxed_ordering_write);
	attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
					       relaxed_ordering_read);
	attr->access_register_user = MLX5_GET(cmd_hca_cap, hcattr,
					      access_register_user);
	attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
					  eth_net_offloads);
	attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
	attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
					       flex_parser_protocols);
	attr->max_geneve_tlv_options = MLX5_GET(cmd_hca_cap, hcattr,
						max_geneve_tlv_options);
	attr->max_geneve_tlv_option_data_len = MLX5_GET(cmd_hca_cap, hcattr,
						max_geneve_tlv_option_data_len);
	attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
	attr->qos.flow_meter_aso_sup = !!(MLX5_GET64(cmd_hca_cap, hcattr,
						general_obj_types) &
				MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO);
	attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
					 general_obj_types) &
			      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
	attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
							general_obj_types) &
				  MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
	attr->parse_graph_flex_node = !!(MLX5_GET64(cmd_hca_cap, hcattr,
					 general_obj_types) &
			      MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE);
	attr->wqe_index_ignore = MLX5_GET(cmd_hca_cap, hcattr,
					  wqe_index_ignore_cap);
	attr->cross_channel = MLX5_GET(cmd_hca_cap, hcattr, cd);
	attr->non_wire_sq = MLX5_GET(cmd_hca_cap, hcattr, non_wire_sq);
	attr->log_max_static_sq_wq = MLX5_GET(cmd_hca_cap, hcattr,
					      log_max_static_sq_wq);
	attr->num_lag_ports = MLX5_GET(cmd_hca_cap, hcattr, num_lag_ports);
	attr->dev_freq_khz = MLX5_GET(cmd_hca_cap, hcattr,
				      device_frequency_khz);
	attr->scatter_fcs_w_decap_disable =
		MLX5_GET(cmd_hca_cap, hcattr, scatter_fcs_w_decap_disable);
	attr->roce = MLX5_GET(cmd_hca_cap, hcattr, roce);
	attr->rq_ts_format = MLX5_GET(cmd_hca_cap, hcattr, rq_ts_format);
	attr->sq_ts_format = MLX5_GET(cmd_hca_cap, hcattr, sq_ts_format);
	attr->steering_format_version =
		MLX5_GET(cmd_hca_cap, hcattr, steering_format_version);
	attr->regexp_params = MLX5_GET(cmd_hca_cap, hcattr, regexp_params);
	attr->regexp_version = MLX5_GET(cmd_hca_cap, hcattr, regexp_version);
	attr->regexp_num_of_engines = MLX5_GET(cmd_hca_cap, hcattr,
					       regexp_num_of_engines);
	/* Read the general_obj_types bitmap and extract the relevant bits. */
	general_obj_types_supported = MLX5_GET64(cmd_hca_cap, hcattr,
						 general_obj_types);
	attr->vdpa.valid = !!(general_obj_types_supported &
			      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
	attr->vdpa.queue_counters_valid =
			!!(general_obj_types_supported &
			   MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
	attr->parse_graph_flex_node =
			!!(general_obj_types_supported &
			   MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE);
	attr->flow_hit_aso = !!(general_obj_types_supported &
				MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_HIT_ASO);
	attr->geneve_tlv_opt = !!(general_obj_types_supported &
				  MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT);
	attr->dek = !!(general_obj_types_supported &
		       MLX5_GENERAL_OBJ_TYPES_CAP_DEK);
	attr->import_kek = !!(general_obj_types_supported &
			      MLX5_GENERAL_OBJ_TYPES_CAP_IMPORT_KEK);
	attr->credential = !!(general_obj_types_supported &
			      MLX5_GENERAL_OBJ_TYPES_CAP_CREDENTIAL);
	attr->crypto_login = !!(general_obj_types_supported &
				MLX5_GENERAL_OBJ_TYPES_CAP_CRYPTO_LOGIN);
	/* Add reading of other GENERAL_OBJ_TYPES_CAP bits above this line. */
	attr->log_max_cq = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq);
	attr->log_max_qp = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp);
	attr->log_max_cq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq_sz);
	attr->log_max_qp_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp_sz);
	attr->log_max_mrw_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_mrw_sz);
	attr->log_max_pd = MLX5_GET(cmd_hca_cap, hcattr, log_max_pd);
	attr->log_max_srq = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq);
	attr->log_max_srq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq_sz);
	attr->reg_c_preserve =
		MLX5_GET(cmd_hca_cap, hcattr, reg_c_preserve);
	attr->mmo_regex_qp_en = MLX5_GET(cmd_hca_cap, hcattr, regexp_mmo_qp);
	attr->mmo_regex_sq_en = MLX5_GET(cmd_hca_cap, hcattr, regexp_mmo_sq);
	attr->mmo_dma_sq_en = MLX5_GET(cmd_hca_cap, hcattr, dma_mmo_sq);
	attr->mmo_compress_sq_en = MLX5_GET(cmd_hca_cap, hcattr,
					    compress_mmo_sq);
	attr->mmo_decompress_sq_en = MLX5_GET(cmd_hca_cap, hcattr,
					      decompress_mmo_sq);
	attr->mmo_dma_qp_en = MLX5_GET(cmd_hca_cap, hcattr, dma_mmo_qp);
	attr->mmo_compress_qp_en = MLX5_GET(cmd_hca_cap, hcattr,
					    compress_mmo_qp);
	attr->decomp_deflate_v1_en = MLX5_GET(cmd_hca_cap, hcattr,
					      decompress_deflate_v1);
	attr->decomp_deflate_v2_en = MLX5_GET(cmd_hca_cap, hcattr,
					      decompress_deflate_v2);
	attr->compress_min_block_size = MLX5_GET(cmd_hca_cap, hcattr,
						 compress_min_block_size);
	attr->log_max_mmo_dma = MLX5_GET(cmd_hca_cap, hcattr, log_dma_mmo_size);
	attr->log_max_mmo_compress = MLX5_GET(cmd_hca_cap, hcattr,
					      log_compress_mmo_size);
	attr->log_max_mmo_decompress = MLX5_GET(cmd_hca_cap, hcattr,
						log_decompress_mmo_size);
	attr->decomp_lz4_data_only_en = MLX5_GET(cmd_hca_cap, hcattr,
						 decompress_lz4_data_only_v2);
	attr->decomp_lz4_no_checksum_en = MLX5_GET(cmd_hca_cap, hcattr,
						   decompress_lz4_no_checksum_v2);
	attr->decomp_lz4_checksum_en = MLX5_GET(cmd_hca_cap, hcattr,
						decompress_lz4_checksum_v2);
	attr->cqe_compression = MLX5_GET(cmd_hca_cap, hcattr, cqe_compression);
	attr->mini_cqe_resp_flow_tag = MLX5_GET(cmd_hca_cap, hcattr,
						mini_cqe_resp_flow_tag);
	attr->cqe_compression_128 = MLX5_GET(cmd_hca_cap, hcattr,
					     cqe_compression_128);
	attr->mini_cqe_resp_l3_l4_tag = MLX5_GET(cmd_hca_cap, hcattr,
						 mini_cqe_resp_l3_l4_tag);
	attr->enhanced_cqe_compression = MLX5_GET(cmd_hca_cap, hcattr,
						  enhanced_cqe_compression);
	attr->umr_indirect_mkey_disabled =
		MLX5_GET(cmd_hca_cap, hcattr, umr_indirect_mkey_disabled);
	attr->umr_modify_entity_size_disabled =
		MLX5_GET(cmd_hca_cap, hcattr, umr_modify_entity_size_disabled);
	attr->wait_on_time = MLX5_GET(cmd_hca_cap, hcattr, wait_on_time);
	attr->crypto = MLX5_GET(cmd_hca_cap, hcattr, crypto);
	attr->ct_offload = !!(MLX5_GET64(cmd_hca_cap, hcattr,
					 general_obj_types) &
			      MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
	attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop);
	attr->nic_flow_table = MLX5_GET(cmd_hca_cap, hcattr, nic_flow_table);
	attr->striding_rq = MLX5_GET(cmd_hca_cap, hcattr, striding_rq);
	attr->ext_stride_num_range =
		MLX5_GET(cmd_hca_cap, hcattr, ext_stride_num_range);
	attr->max_flow_counter_15_0 = MLX5_GET(cmd_hca_cap, hcattr,
					       max_flow_counter_15_0);
	attr->max_flow_counter_31_16 = MLX5_GET(cmd_hca_cap, hcattr,
						max_flow_counter_31_16);
	attr->alloc_flow_counter_pd = MLX5_GET(cmd_hca_cap, hcattr,
					       alloc_flow_counter_pd);
	attr->flow_counter_access_aso = MLX5_GET(cmd_hca_cap, hcattr,
						 flow_counter_access_aso);
	attr->flow_access_aso_opc_mod = MLX5_GET(cmd_hca_cap, hcattr,
						 flow_access_aso_opc_mod);
	attr->wqe_based_flow_table_sup = MLX5_GET(cmd_hca_cap, hcattr,
						  wqe_based_flow_table_update_cap);
	/*
	 * Flex item support needs max_num_prog_sample_field
	 * from the Capabilities 2 table for PARSE_GRAPH_NODE.
	 */
	if (attr->parse_graph_flex_node) {
		rc = mlx5_devx_cmd_query_hca_parse_graph_node_cap
			(ctx, &attr->flex);
		if (rc)
			return -1;
		attr->flex.query_match_sample_info =
			MLX5_GET(cmd_hca_cap, hcattr, query_match_sample_info);
	}
	if (attr->crypto) {
		attr->aes_xts = MLX5_GET(cmd_hca_cap, hcattr, aes_xts) ||
			MLX5_GET(cmd_hca_cap, hcattr,
				 aes_xts_multi_block_be_tweak) ||
			MLX5_GET(cmd_hca_cap, hcattr,
				 aes_xts_single_block_le_tweak);
		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_GET_HCA_CAP_OP_MOD_CRYPTO |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr)
			return -1;
		attr->crypto_wrapped_import_method = !!(MLX5_GET(crypto_caps,
						hcattr, wrapped_import_method)
						& 1 << 2);
		attr->crypto_mmo.crypto_mmo_qp = MLX5_GET(crypto_caps, hcattr,
							  crypto_mmo_qp);
		attr->crypto_mmo.gcm_256_encrypt =
			MLX5_GET(crypto_caps, hcattr,
				 crypto_aes_gcm_256_encrypt);
		attr->crypto_mmo.gcm_128_encrypt =
			MLX5_GET(crypto_caps, hcattr,
				 crypto_aes_gcm_128_encrypt);
		attr->crypto_mmo.gcm_256_decrypt =
			MLX5_GET(crypto_caps, hcattr,
				 crypto_aes_gcm_256_decrypt);
		attr->crypto_mmo.gcm_128_decrypt =
			MLX5_GET(crypto_caps, hcattr,
				 crypto_aes_gcm_128_decrypt);
		attr->crypto_mmo.gcm_auth_tag_128 =
			MLX5_GET(crypto_caps, hcattr, gcm_auth_tag_128);
		attr->crypto_mmo.gcm_auth_tag_96 =
			MLX5_GET(crypto_caps, hcattr, gcm_auth_tag_96);
		attr->crypto_mmo.log_crypto_mmo_max_size =
			MLX5_GET(crypto_caps, hcattr, log_crypto_mmo_max_size);
	}
	if (hca_cap_2_sup) {
		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr) {
			DRV_LOG(DEBUG,
				"Failed to query DevX HCA capabilities 2.");
			return rc;
		}
		attr->log_min_stride_wqe_sz = MLX5_GET(cmd_hca_cap_2, hcattr,
						       log_min_stride_wqe_sz);
		attr->hairpin_sq_wqe_bb_size = MLX5_GET(cmd_hca_cap_2, hcattr,
							hairpin_sq_wqe_bb_size);
		attr->hairpin_sq_wq_in_host_mem = MLX5_GET(cmd_hca_cap_2,
						hcattr,
						hairpin_sq_wq_in_host_mem);
		attr->hairpin_data_buffer_locked = MLX5_GET(cmd_hca_cap_2,
						hcattr,
						hairpin_data_buffer_locked);
		attr->flow_counter_bulk_log_max_alloc = MLX5_GET(cmd_hca_cap_2,
					hcattr, flow_counter_bulk_log_max_alloc);
		attr->flow_counter_bulk_log_granularity =
			MLX5_GET(cmd_hca_cap_2, hcattr,
				 flow_counter_bulk_log_granularity);
		rc = MLX5_GET(cmd_hca_cap_2, hcattr,
			      cross_vhca_object_to_object_supported);
		attr->cross_vhca =
			(rc & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) &&
			(rc & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) &&
			(rc & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_FT) &&
			(rc & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC);
		rc = MLX5_GET(cmd_hca_cap_2, hcattr,
			      allowed_object_for_other_vhca_access);
		attr->cross_vhca = attr->cross_vhca &&
			(rc & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) &&
			(rc & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) &&
			(rc & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC);
	}
	if (attr->log_min_stride_wqe_sz == 0)
		attr->log_min_stride_wqe_sz = MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
	if (attr->qos.sup) {
		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr) {
			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities");
			return rc;
		}
		attr->qos.flow_meter_old =
				MLX5_GET(qos_cap, hcattr, flow_meter_old);
		attr->qos.log_max_flow_meter =
				MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
		attr->qos.flow_meter_reg_c_ids =
				MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
		attr->qos.flow_meter =
				MLX5_GET(qos_cap, hcattr, flow_meter);
		attr->qos.packet_pacing =
				MLX5_GET(qos_cap, hcattr, packet_pacing);
		attr->qos.wqe_rate_pp =
				MLX5_GET(qos_cap, hcattr, wqe_rate_pp);
		if (attr->qos.flow_meter_aso_sup) {
			attr->qos.log_meter_aso_granularity =
				MLX5_GET(qos_cap, hcattr,
					 log_meter_aso_granularity);
			attr->qos.log_meter_aso_max_alloc =
				MLX5_GET(qos_cap, hcattr,
					 log_meter_aso_max_alloc);
			attr->qos.log_max_num_meter_aso =
				MLX5_GET(qos_cap, hcattr,
					 log_max_num_meter_aso);
		}
	}
	if (attr->vdpa.valid)
		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
	if (!attr->eth_net_offloads)
		return 0;
	/* Query Flow Sampler Capability From Flow Table Properties Layout. */
	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr) {
		attr->log_max_ft_sampler_num = 0;
		return rc;
	}
	attr->log_max_ft_sampler_num = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 flow_table_properties_nic_receive.log_max_ft_sampler_num);
	attr->flow.tunnel_header_0_1 = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 ft_field_support_2_nic_receive.tunnel_header_0_1);
	attr->flow.tunnel_header_2_3 = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 ft_field_support_2_nic_receive.tunnel_header_2_3);
	attr->modify_outer_ip_ecn = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 ft_header_modify_nic_receive.outer_ip_ecn);
	attr->set_reg_c = 0xffff;
	if (attr->nic_flow_table) {
#define GET_RX_REG_X_BITS \
	MLX5_GET(flow_table_nic_cap, hcattr, \
		 ft_header_modify_nic_receive.metadata_reg_c_x)
#define GET_TX_REG_X_BITS \
	MLX5_GET(flow_table_nic_cap, hcattr, \
		 ft_header_modify_nic_transmit.metadata_reg_c_x)

		uint32_t tx_reg, rx_reg, reg_c_8_15;

		tx_reg = GET_TX_REG_X_BITS;
		reg_c_8_15 = MLX5_GET(flow_table_nic_cap, hcattr,
				ft_field_support_2_nic_transmit.metadata_reg_c_8_15);
		tx_reg |= ((0xff & reg_c_8_15) << 8);
		rx_reg = GET_RX_REG_X_BITS;
		reg_c_8_15 = MLX5_GET(flow_table_nic_cap, hcattr,
				ft_field_support_2_nic_receive.metadata_reg_c_8_15);
		rx_reg |= ((0xff & reg_c_8_15) << 8);
		attr->set_reg_c &= (rx_reg & tx_reg);

#undef GET_RX_REG_X_BITS
#undef GET_TX_REG_X_BITS
	}
	attr->pkt_integrity_match = mlx5_devx_query_pkt_integrity_match(hcattr);
	attr->inner_ipv4_ihl = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 ft_field_support_2_nic_receive.inner_ipv4_ihl);
	attr->outer_ipv4_ihl = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 ft_field_support_2_nic_receive.outer_ipv4_ihl);
	attr->lag_rx_port_affinity = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 ft_field_support_2_nic_receive.lag_rx_port_affinity);
	/* Query HCA offloads for Ethernet protocol. */
	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr) {
		attr->eth_net_offloads = 0;
		return rc;
	}
	attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_vlan_insert);
	attr->csum_cap = MLX5_GET(per_protocol_networking_offload_caps,
				  hcattr, csum_cap);
	attr->vlan_cap = MLX5_GET(per_protocol_networking_offload_caps,
				  hcattr, vlan_cap);
	attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_cap);
	attr->max_lso_cap = MLX5_GET(per_protocol_networking_offload_caps,
				     hcattr, max_lso_cap);
	attr->scatter_fcs = MLX5_GET(per_protocol_networking_offload_caps,
				     hcattr, scatter_fcs);
	attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
					hcattr, tunnel_lro_gre);
	attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
					  hcattr, tunnel_lro_vxlan);
	attr->swp = MLX5_GET(per_protocol_networking_offload_caps,
			     hcattr, swp);
	attr->tunnel_stateless_gre =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, tunnel_stateless_gre);
	attr->tunnel_stateless_vxlan =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, tunnel_stateless_vxlan);
	attr->swp_csum = MLX5_GET(per_protocol_networking_offload_caps,
				  hcattr, swp_csum);
	attr->swp_lso = MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, swp_lso);
	attr->lro_max_msg_sz_mode = MLX5_GET
		(per_protocol_networking_offload_caps,
		 hcattr, lro_max_msg_sz_mode);
	for (i = 0; i < MLX5_LRO_NUM_SUPP_PERIODS; i++) {
		attr->lro_timer_supported_periods[i] =
			MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_timer_supported_periods[i]);
	}
	attr->lro_min_mss_size = MLX5_GET(per_protocol_networking_offload_caps,
					  hcattr, lro_min_mss_size);
	attr->tunnel_stateless_geneve_rx =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, tunnel_stateless_geneve_rx);
	attr->geneve_max_opt_len =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, max_geneve_opt_len);
	attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_inline_mode);
	attr->tunnel_stateless_gtp = MLX5_GET
		(per_protocol_networking_offload_caps,
		 hcattr, tunnel_stateless_gtp);
	attr->tunnel_stateless_vxlan_gpe_nsh = MLX5_GET
		(per_protocol_networking_offload_caps,
		 hcattr, tunnel_stateless_vxlan_gpe_nsh);
	attr->rss_ind_tbl_cap = MLX5_GET
		(per_protocol_networking_offload_caps,
		 hcattr, rss_ind_tbl_cap);
	attr->multi_pkt_send_wqe = MLX5_GET
		(per_protocol_networking_offload_caps,
		 hcattr, multi_pkt_send_wqe);
	attr->enhanced_multi_pkt_send_wqe = MLX5_GET
		(per_protocol_networking_offload_caps,
		 hcattr, enhanced_multi_pkt_send_wqe);
	if (attr->wqe_based_flow_table_sup) {
		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr) {
			DRV_LOG(DEBUG,
				"Failed to query WQE Based Flow table capabilities");
			return rc;
		}
		attr->max_header_modify_pattern_length =
			MLX5_GET(wqe_based_flow_table_cap, hcattr,
				 max_header_modify_pattern_length);
	}
	/* Query HCA attribute for ROCE. */
	if (attr->roce) {
		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_GET_HCA_CAP_OP_MOD_ROCE |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr) {
			DRV_LOG(DEBUG,
				"Failed to query devx HCA ROCE capabilities");
			return rc;
		}
		attr->qp_ts_format = MLX5_GET(roce_caps, hcattr, qp_ts_format);
	}
	if (attr->eth_virt &&
	    attr->wqe_inline_mode == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) {
		rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
		if (rc) {
			attr->eth_virt = 0;
			goto error;
		}
	}
	if (attr->eswitch_manager) {
		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_SET_HCA_CAP_OP_MOD_ESW |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr)
			return rc;
		attr->esw_mgr_vport_id_valid =
			MLX5_GET(esw_cap, hcattr,
				 esw_manager_vport_number_valid);
		attr->esw_mgr_vport_id =
			MLX5_GET(esw_cap, hcattr, esw_manager_vport_number);
	}
	if (attr->eswitch_manager) {
		uint32_t esw_reg, reg_c_8_15;

		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr)
			return rc;
		esw_reg = MLX5_GET(flow_table_esw_cap, hcattr,
				   ft_header_modify_esw_fdb.metadata_reg_c_x);
		reg_c_8_15 = MLX5_GET(flow_table_esw_cap, hcattr,
				ft_field_support_2_esw_fdb.metadata_reg_c_8_15);
		attr->set_reg_c &= ((0xff & reg_c_8_15) << 8) | esw_reg;
	}
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}
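
/*
 * Usage sketch (illustrative): the typical probe-time flow is to query all
 * HCA attributes once and gate optional features on the result:
 *
 *	struct mlx5_hca_attr hca_attr = { 0 };
 *
 *	if (mlx5_devx_cmd_query_hca_attr(ctx, &hca_attr))
 *		... // device cannot be used through DevX
 *	if (hca_attr.qos.sup && hca_attr.qos.flow_meter_aso_sup)
 *		... // ASO flow meters may be offloaded
 */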

/**
 * Query TIS transport domain from QP verbs object using DevX API.
 *
 * @param[in] qp
 *   Pointer to verbs QP returned by ibv_create_qp().
 * @param[in] tis_num
 *   TIS number of TIS to query.
 * @param[out] tis_td
 *   Pointer to TIS transport domain variable, to be set by the routine.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
			      uint32_t *tis_td)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
	int rc;
	void *tis_ctx;

	MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
	MLX5_SET(query_tis_in, in, tisn, tis_num);
	rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
	if (rc) {
		DRV_LOG(ERR, "Failed to query QP using DevX");
		return -rc;
	}
	tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
	*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
	return 0;
#else
	(void)qp;
	(void)tis_num;
	(void)tis_td;
	return -ENOTSUP;
#endif
}

/**
 * Fill WQ data for DevX API command.
 * Utility function for use when creating DevX objects containing a WQ.
 *
 * @param[in] wq_ctx
 *   Pointer to WQ context to fill with data.
 * @param [in] wq_attr
 *   Pointer to WQ attributes structure to fill in WQ context.
 */
static void
devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
{
	MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
	MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
	MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
	MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
	MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
	MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
	MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
	MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
	MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
	MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
	MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
	MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
	MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
	MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
	if (wq_attr->log_wq_pg_sz > MLX5_ADAPTER_PAGE_SHIFT)
		MLX5_SET(wq, wq_ctx, log_wq_pg_sz,
			 wq_attr->log_wq_pg_sz - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
	MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
	MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
	MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
		 wq_attr->log_hairpin_num_packets);
	MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
	MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
		 wq_attr->single_wqe_log_num_of_strides);
	MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
	MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
		 wq_attr->single_stride_log_num_of_bytes);
	MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
	MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
	MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
}

/**
 * Create RQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] rq_attr
 *   Pointer to create RQ attributes structure.
 * @param [in] socket
 *   CPU socket ID for allocations.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rq(void *ctx,
			struct mlx5_devx_create_rq_attr *rq_attr,
			int socket)
{
	uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *rq = NULL;

	rq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rq), 0, socket);
	if (!rq) {
		DRV_LOG(ERR, "Failed to allocate RQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
	rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
	MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
	MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
	MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
	MLX5_SET(rqc, rq_ctx, hairpin_data_buffer_type,
		 rq_attr->hairpin_data_buffer_type);
	MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
	MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
	MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
	MLX5_SET(sqc, rq_ctx, ts_format, rq_attr->ts_format);
	wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
	wq_attr = &rq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!rq->obj) {
		DEVX_DRV_LOG(ERR, out, "create RQ", NULL, 0);
		mlx5_free(rq);
		return NULL;
	}
	rq->id = MLX5_GET(create_rq_out, out, rqn);
	return rq;
}
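
/*
 * Usage sketch (illustrative): creating an RQ in RST state; only a subset of
 * attributes is shown, the WQ type value and sizes are placeholders rather
 * than a validated configuration:
 *
 *	struct mlx5_devx_create_rq_attr rq_attr = {
 *		.rlky = 1,
 *		.cqn = cq->id,
 *		.wq_attr = {
 *			.wq_type = 0, // cyclic WQ (PRM value assumed)
 *			.pd = pdn,
 *			.log_wq_stride = 4,
 *			.log_wq_sz = 10,
 *		},
 *	};
 *	struct mlx5_devx_obj *rq =
 *		mlx5_devx_cmd_create_rq(ctx, &rq_attr, SOCKET_ID_ANY);
 */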
1555 */ 1556 int 1557 mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq, 1558 struct mlx5_devx_modify_rq_attr *rq_attr) 1559 { 1560 uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0}; 1561 uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0}; 1562 void *rq_ctx, *wq_ctx; 1563 int ret; 1564 1565 MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ); 1566 MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state); 1567 MLX5_SET(modify_rq_in, in, rqn, rq->id); 1568 MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask); 1569 rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx); 1570 MLX5_SET(rqc, rq_ctx, state, rq_attr->state); 1571 if (rq_attr->modify_bitmask & 1572 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS) 1573 MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs); 1574 if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD) 1575 MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd); 1576 if (rq_attr->modify_bitmask & 1577 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID) 1578 MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id); 1579 MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq); 1580 MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca); 1581 if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) { 1582 wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq); 1583 MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm); 1584 } 1585 ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in), 1586 out, sizeof(out)); 1587 if (ret) { 1588 DRV_LOG(ERR, "Failed to modify RQ using DevX"); 1589 rte_errno = errno; 1590 return -errno; 1591 } 1592 return ret; 1593 } 1594 1595 /** 1596 * Create RMP using DevX API. 1597 * 1598 * @param[in] ctx 1599 * Context returned from mlx5 open_device() glue function. 1600 * @param [in] rmp_attr 1601 * Pointer to create RMP attributes structure. 1602 * @param [in] socket 1603 * CPU socket ID for allocations. 1604 * 1605 * @return 1606 * The DevX object created, NULL otherwise and rte_errno is set. 1607 */ 1608 struct mlx5_devx_obj * 1609 mlx5_devx_cmd_create_rmp(void *ctx, 1610 struct mlx5_devx_create_rmp_attr *rmp_attr, 1611 int socket) 1612 { 1613 uint32_t in[MLX5_ST_SZ_DW(create_rmp_in)] = {0}; 1614 uint32_t out[MLX5_ST_SZ_DW(create_rmp_out)] = {0}; 1615 void *rmp_ctx, *wq_ctx; 1616 struct mlx5_devx_wq_attr *wq_attr; 1617 struct mlx5_devx_obj *rmp = NULL; 1618 1619 rmp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rmp), 0, socket); 1620 if (!rmp) { 1621 DRV_LOG(ERR, "Failed to allocate RMP data"); 1622 rte_errno = ENOMEM; 1623 return NULL; 1624 } 1625 MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP); 1626 rmp_ctx = MLX5_ADDR_OF(create_rmp_in, in, ctx); 1627 MLX5_SET(rmpc, rmp_ctx, state, rmp_attr->state); 1628 MLX5_SET(rmpc, rmp_ctx, basic_cyclic_rcv_wqe, 1629 rmp_attr->basic_cyclic_rcv_wqe); 1630 wq_ctx = MLX5_ADDR_OF(rmpc, rmp_ctx, wq); 1631 wq_attr = &rmp_attr->wq_attr; 1632 devx_cmd_fill_wq_data(wq_ctx, wq_attr); 1633 rmp->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, 1634 sizeof(out)); 1635 if (!rmp->obj) { 1636 DEVX_DRV_LOG(ERR, out, "create RMP", NULL, 0); 1637 mlx5_free(rmp); 1638 return NULL; 1639 } 1640 rmp->id = MLX5_GET(create_rmp_out, out, rmpn); 1641 return rmp; 1642 } 1643 1644 /* 1645 * Create TIR using DevX API. 1646 * 1647 * @param[in] ctx 1648 * Context returned from mlx5 open_device() glue function. 1649 * @param [in] tir_attr 1650 * Pointer to TIR attributes structure. 1651 * 1652 * @return 1653 * The DevX object created, NULL otherwise and rte_errno is set. 
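 *
 * A minimal usage sketch (illustrative; the indirect dispatch constant and
 * the RQT/TD identifiers are assumptions that must match previously
 * created objects):
 * @code
 * struct mlx5_devx_tir_attr tir_attr = {0};
 * struct mlx5_devx_obj *tir;
 *
 * tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
 * tir_attr.indirect_table = rqt->id;
 * tir_attr.transport_domain = td->id;
 * tir = mlx5_devx_cmd_create_tir(ctx, &tir_attr);
 * @endcode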
1654 */ 1655 struct mlx5_devx_obj * 1656 mlx5_devx_cmd_create_tir(void *ctx, 1657 struct mlx5_devx_tir_attr *tir_attr) 1658 { 1659 uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0}; 1660 uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0}; 1661 void *tir_ctx, *outer, *inner, *rss_key; 1662 struct mlx5_devx_obj *tir = NULL; 1663 1664 tir = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tir), 0, SOCKET_ID_ANY); 1665 if (!tir) { 1666 DRV_LOG(ERR, "Failed to allocate TIR data"); 1667 rte_errno = ENOMEM; 1668 return NULL; 1669 } 1670 MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); 1671 tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx); 1672 MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type); 1673 MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs, 1674 tir_attr->lro_timeout_period_usecs); 1675 MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask); 1676 MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz); 1677 MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn); 1678 MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric); 1679 MLX5_SET(tirc, tir_ctx, tunneled_offload_en, 1680 tir_attr->tunneled_offload_en); 1681 MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table); 1682 MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn); 1683 MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block); 1684 MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain); 1685 rss_key = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_toeplitz_key); 1686 memcpy(rss_key, tir_attr->rx_hash_toeplitz_key, MLX5_RSS_HASH_KEY_LEN); 1687 outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer); 1688 MLX5_SET(rx_hash_field_select, outer, l3_prot_type, 1689 tir_attr->rx_hash_field_selector_outer.l3_prot_type); 1690 MLX5_SET(rx_hash_field_select, outer, l4_prot_type, 1691 tir_attr->rx_hash_field_selector_outer.l4_prot_type); 1692 MLX5_SET(rx_hash_field_select, outer, selected_fields, 1693 tir_attr->rx_hash_field_selector_outer.selected_fields); 1694 inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner); 1695 MLX5_SET(rx_hash_field_select, inner, l3_prot_type, 1696 tir_attr->rx_hash_field_selector_inner.l3_prot_type); 1697 MLX5_SET(rx_hash_field_select, inner, l4_prot_type, 1698 tir_attr->rx_hash_field_selector_inner.l4_prot_type); 1699 MLX5_SET(rx_hash_field_select, inner, selected_fields, 1700 tir_attr->rx_hash_field_selector_inner.selected_fields); 1701 tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), 1702 out, sizeof(out)); 1703 if (!tir->obj) { 1704 DEVX_DRV_LOG(ERR, out, "create TIR", NULL, 0); 1705 mlx5_free(tir); 1706 return NULL; 1707 } 1708 tir->id = MLX5_GET(create_tir_out, out, tirn); 1709 return tir; 1710 } 1711 1712 /** 1713 * Modify TIR using DevX API. 1714 * 1715 * @param[in] tir 1716 * Pointer to TIR DevX object structure. 1717 * @param [in] modify_tir_attr 1718 * Pointer to TIR modification attributes structure. 1719 * 1720 * @return 1721 * 0 on success, a negative errno value otherwise and rte_errno is set. 
 */
int
mlx5_devx_cmd_modify_tir(struct mlx5_devx_obj *tir,
			 struct mlx5_devx_modify_tir_attr *modify_tir_attr)
{
	struct mlx5_devx_tir_attr *tir_attr = &modify_tir_attr->tir;
	uint32_t in[MLX5_ST_SZ_DW(modify_tir_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
	void *tir_ctx;
	int ret;

	MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
	MLX5_SET(modify_tir_in, in, tirn, modify_tir_attr->tirn);
	MLX5_SET64(modify_tir_in, in, modify_bitmask,
		   modify_tir_attr->modify_bitmask);
	tir_ctx = MLX5_ADDR_OF(modify_tir_in, in, ctx);
	if (modify_tir_attr->modify_bitmask &
	    MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_LRO) {
		MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
			 tir_attr->lro_timeout_period_usecs);
		MLX5_SET(tirc, tir_ctx, lro_enable_mask,
			 tir_attr->lro_enable_mask);
		MLX5_SET(tirc, tir_ctx, lro_max_msg_sz,
			 tir_attr->lro_max_msg_sz);
	}
	if (modify_tir_attr->modify_bitmask &
	    MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE)
		MLX5_SET(tirc, tir_ctx, indirect_table,
			 tir_attr->indirect_table);
	if (modify_tir_attr->modify_bitmask &
	    MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH) {
		int i;
		void *outer, *inner;

		MLX5_SET(tirc, tir_ctx, rx_hash_symmetric,
			 tir_attr->rx_hash_symmetric);
		MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
		for (i = 0; i < 10; i++) {
			MLX5_SET(tirc, tir_ctx, rx_hash_toeplitz_key[i],
				 tir_attr->rx_hash_toeplitz_key[i]);
		}
		outer = MLX5_ADDR_OF(tirc, tir_ctx,
				     rx_hash_field_selector_outer);
		MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
			 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
		MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
			 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
		MLX5_SET(rx_hash_field_select, outer, selected_fields,
			 tir_attr->rx_hash_field_selector_outer.selected_fields);
		inner = MLX5_ADDR_OF(tirc, tir_ctx,
				     rx_hash_field_selector_inner);
		MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
			 tir_attr->rx_hash_field_selector_inner.l3_prot_type);
		MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
			 tir_attr->rx_hash_field_selector_inner.l4_prot_type);
		MLX5_SET(rx_hash_field_select, inner, selected_fields,
			 tir_attr->rx_hash_field_selector_inner.selected_fields);
	}
	if (modify_tir_attr->modify_bitmask &
	    MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_SELF_LB_EN) {
		MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
	}
	ret = mlx5_glue->devx_obj_modify(tir->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify TIR using DevX");
		rte_errno = errno;
		return -rte_errno;
	}
	return ret;
}

/**
 * Create RQT using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
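 *
 * A minimal usage sketch for a single-entry table (illustrative; it assumes
 * rq_list is a flexible array member, matching the allocation pattern used
 * by callers of this API):
 * @code
 * struct mlx5_devx_rqt_attr *rqt_attr;
 * struct mlx5_devx_obj *rqt;
 *
 * rqt_attr = mlx5_malloc(MLX5_MEM_ZERO,
 *                        sizeof(*rqt_attr) + sizeof(uint32_t),
 *                        0, SOCKET_ID_ANY);
 * rqt_attr->rqt_max_size = 1;
 * rqt_attr->rqt_actual_size = 1;
 * rqt_attr->rq_list[0] = rq->id;
 * rqt = mlx5_devx_cmd_create_rqt(ctx, rqt_attr);
 * mlx5_free(rqt_attr);
 * @endcode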
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rqt(void *ctx,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t *in = NULL;
	uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
	void *rqt_ctx;
	struct mlx5_devx_obj *rqt = NULL;
	int i;

	in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY);
	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT IN data");
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt), 0, SOCKET_ID_ANY);
	if (!rqt) {
		DRV_LOG(ERR, "Failed to allocate RQT data");
		rte_errno = ENOMEM;
		mlx5_free(in);
		return NULL;
	}
	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
	mlx5_free(in);
	if (!rqt->obj) {
		DEVX_DRV_LOG(ERR, out, "create RQT", NULL, 0);
		mlx5_free(rqt);
		return NULL;
	}
	rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
	return rqt;
}

/**
 * Modify RQT using DevX API.
 *
 * @param[in] rqt
 *   Pointer to RQT DevX object structure.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
	uint32_t *in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY);
	void *rqt_ctx;
	int i;
	int ret;

	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT modify IN data.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	MLX5_SET(modify_rqt_in, in, rqtn, rqt->id);
	/* Bit 0 of the bitmask selects the RQ list for modification. */
	MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
	rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
	mlx5_free(in);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQT using DevX.");
		rte_errno = errno;
		return -rte_errno;
	}
	return ret;
}

/**
 * Create SQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] sq_attr
 *   Pointer to SQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
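 *
 * A minimal usage sketch (illustrative; the CQ/TIS identifiers and WQ
 * fields are assumptions referencing previously created objects):
 * @code
 * struct mlx5_devx_create_sq_attr sq_attr = {0};
 * struct mlx5_devx_obj *sq;
 *
 * sq_attr.cqn = cq->id;
 * sq_attr.tis_lst_sz = 1;
 * sq_attr.tis_num = tis->id;
 * sq_attr.wq_attr.pd = pd_id;
 * sq = mlx5_devx_cmd_create_sq(ctx, &sq_attr);
 * @endcode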
1909 **/ 1910 struct mlx5_devx_obj * 1911 mlx5_devx_cmd_create_sq(void *ctx, 1912 struct mlx5_devx_create_sq_attr *sq_attr) 1913 { 1914 uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0}; 1915 uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0}; 1916 void *sq_ctx; 1917 void *wq_ctx; 1918 struct mlx5_devx_wq_attr *wq_attr; 1919 struct mlx5_devx_obj *sq = NULL; 1920 1921 sq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sq), 0, SOCKET_ID_ANY); 1922 if (!sq) { 1923 DRV_LOG(ERR, "Failed to allocate SQ data"); 1924 rte_errno = ENOMEM; 1925 return NULL; 1926 } 1927 MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); 1928 sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx); 1929 MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky); 1930 MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master); 1931 MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre); 1932 MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en); 1933 MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe, 1934 sq_attr->allow_multi_pkt_send_wqe); 1935 MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode, 1936 sq_attr->min_wqe_inline_mode); 1937 MLX5_SET(sqc, sq_ctx, state, sq_attr->state); 1938 MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr); 1939 MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp); 1940 MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin); 1941 MLX5_SET(sqc, sq_ctx, non_wire, sq_attr->non_wire); 1942 MLX5_SET(sqc, sq_ctx, static_sq_wq, sq_attr->static_sq_wq); 1943 MLX5_SET(sqc, sq_ctx, hairpin_wq_buffer_type, sq_attr->hairpin_wq_buffer_type); 1944 MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index); 1945 MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn); 1946 MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index, 1947 sq_attr->packet_pacing_rate_limit_index); 1948 MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz); 1949 MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num); 1950 MLX5_SET(sqc, sq_ctx, ts_format, sq_attr->ts_format); 1951 wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq); 1952 wq_attr = &sq_attr->wq_attr; 1953 devx_cmd_fill_wq_data(wq_ctx, wq_attr); 1954 sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), 1955 out, sizeof(out)); 1956 if (!sq->obj) { 1957 DEVX_DRV_LOG(ERR, out, "create SQ", NULL, 0); 1958 mlx5_free(sq); 1959 return NULL; 1960 } 1961 sq->id = MLX5_GET(create_sq_out, out, sqn); 1962 return sq; 1963 } 1964 1965 /** 1966 * Modify SQ using DevX API. 1967 * 1968 * @param[in] sq 1969 * Pointer to SQ object structure. 1970 * @param [in] sq_attr 1971 * Pointer to SQ attributes structure. 1972 * 1973 * @return 1974 * 0 on success, a negative errno value otherwise and rte_errno is set. 1975 */ 1976 int 1977 mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq, 1978 struct mlx5_devx_modify_sq_attr *sq_attr) 1979 { 1980 uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0}; 1981 uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0}; 1982 void *sq_ctx; 1983 int ret; 1984 1985 MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); 1986 MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state); 1987 MLX5_SET(modify_sq_in, in, sqn, sq->id); 1988 sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx); 1989 MLX5_SET(sqc, sq_ctx, state, sq_attr->state); 1990 MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq); 1991 MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca); 1992 ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in), 1993 out, sizeof(out)); 1994 if (ret) { 1995 DRV_LOG(ERR, "Failed to modify SQ using DevX"); 1996 rte_errno = errno; 1997 return -rte_errno; 1998 } 1999 return ret; 2000 } 2001 2002 /** 2003 * Create TIS using DevX API. 
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] tis_attr
 *   Pointer to TIS attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tis(void *ctx,
			 struct mlx5_devx_tis_attr *tis_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
	struct mlx5_devx_obj *tis = NULL;
	void *tis_ctx;

	tis = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tis), 0, SOCKET_ID_ANY);
	if (!tis) {
		DRV_LOG(ERR, "Failed to allocate TIS object");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
		 tis_attr->strict_lag_tx_port_affinity);
	MLX5_SET(tisc, tis_ctx, lag_tx_port_affinity,
		 tis_attr->lag_tx_port_affinity);
	MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
	MLX5_SET(tisc, tis_ctx, transport_domain,
		 tis_attr->transport_domain);
	tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tis->obj) {
		DEVX_DRV_LOG(ERR, out, "create TIS", NULL, 0);
		mlx5_free(tis);
		return NULL;
	}
	tis->id = MLX5_GET(create_tis_out, out, tisn);
	return tis;
}

/**
 * Create transport domain using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_td(void *ctx)
{
	uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
	struct mlx5_devx_obj *td = NULL;

	td = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*td), 0, SOCKET_ID_ANY);
	if (!td) {
		DRV_LOG(ERR, "Failed to allocate TD object");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!td->obj) {
		DEVX_DRV_LOG(ERR, out, "create TD", NULL, 0);
		mlx5_free(td);
		return NULL;
	}
	td->id = MLX5_GET(alloc_transport_domain_out, out,
			  transport_domain);
	return td;
}

/**
 * Dump all flows to file.
 *
 * @param[in] fdb_domain
 *   FDB domain.
 * @param[in] rx_domain
 *   RX domain.
 * @param[in] tx_domain
 *   TX domain.
 * @param[out] file
 *   Pointer to file stream.
 *
 * @return
 *   0 on success, a negative value otherwise.
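 *
 * A minimal usage sketch (illustrative only; the output path is an
 * arbitrary example):
 * @code
 * FILE *f = fopen("/tmp/mlx5_flows.txt", "w");
 *
 * if (f != NULL) {
 *         mlx5_devx_cmd_flow_dump(fdb_domain, rx_domain, tx_domain, f);
 *         fclose(f);
 * }
 * @endcode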
2097 */ 2098 int 2099 mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused, 2100 void *rx_domain __rte_unused, 2101 void *tx_domain __rte_unused, FILE *file __rte_unused) 2102 { 2103 int ret = 0; 2104 2105 #ifdef HAVE_MLX5_DR_FLOW_DUMP 2106 if (fdb_domain) { 2107 ret = mlx5_glue->dr_dump_domain(file, fdb_domain); 2108 if (ret) 2109 return ret; 2110 } 2111 MLX5_ASSERT(rx_domain); 2112 ret = mlx5_glue->dr_dump_domain(file, rx_domain); 2113 if (ret) 2114 return ret; 2115 MLX5_ASSERT(tx_domain); 2116 ret = mlx5_glue->dr_dump_domain(file, tx_domain); 2117 #else 2118 ret = ENOTSUP; 2119 #endif 2120 return -ret; 2121 } 2122 2123 int 2124 mlx5_devx_cmd_flow_single_dump(void *rule_info __rte_unused, 2125 FILE *file __rte_unused) 2126 { 2127 int ret = 0; 2128 #ifdef HAVE_MLX5_DR_FLOW_DUMP_RULE 2129 if (rule_info) 2130 ret = mlx5_glue->dr_dump_rule(file, rule_info); 2131 #else 2132 ret = ENOTSUP; 2133 #endif 2134 return -ret; 2135 } 2136 2137 /* 2138 * Create CQ using DevX API. 2139 * 2140 * @param[in] ctx 2141 * Context returned from mlx5 open_device() glue function. 2142 * @param [in] attr 2143 * Pointer to CQ attributes structure. 2144 * 2145 * @return 2146 * The DevX object created, NULL otherwise and rte_errno is set. 2147 */ 2148 struct mlx5_devx_obj * 2149 mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr) 2150 { 2151 uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0}; 2152 uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0}; 2153 struct mlx5_devx_obj *cq_obj = mlx5_malloc(MLX5_MEM_ZERO, 2154 sizeof(*cq_obj), 2155 0, SOCKET_ID_ANY); 2156 void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context); 2157 2158 if (!cq_obj) { 2159 DRV_LOG(ERR, "Failed to allocate CQ object memory."); 2160 rte_errno = ENOMEM; 2161 return NULL; 2162 } 2163 MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); 2164 if (attr->db_umem_valid) { 2165 MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid); 2166 MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id); 2167 MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset); 2168 } else { 2169 MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr); 2170 } 2171 MLX5_SET(cqc, cqctx, cqe_sz, (RTE_CACHE_LINE_SIZE == 128) ? 2172 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B); 2173 MLX5_SET(cqc, cqctx, cc, attr->use_first_only); 2174 MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore); 2175 MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size); 2176 if (attr->log_page_size > MLX5_ADAPTER_PAGE_SHIFT) 2177 MLX5_SET(cqc, cqctx, log_page_size, 2178 attr->log_page_size - MLX5_ADAPTER_PAGE_SHIFT); 2179 MLX5_SET(cqc, cqctx, c_eqn, attr->eqn); 2180 MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id); 2181 MLX5_SET(cqc, cqctx, cqe_comp_en, !!attr->cqe_comp_en); 2182 MLX5_SET(cqc, cqctx, cqe_comp_layout, !!attr->cqe_comp_layout); 2183 MLX5_SET(cqc, cqctx, mini_cqe_res_format, attr->mini_cqe_res_format); 2184 MLX5_SET(cqc, cqctx, mini_cqe_res_format_ext, 2185 attr->mini_cqe_res_format_ext); 2186 if (attr->q_umem_valid) { 2187 MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid); 2188 MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id); 2189 MLX5_SET64(create_cq_in, in, cq_umem_offset, 2190 attr->q_umem_offset); 2191 } 2192 cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, 2193 sizeof(out)); 2194 if (!cq_obj->obj) { 2195 DEVX_DRV_LOG(ERR, out, "create CQ", NULL, 0); 2196 mlx5_free(cq_obj); 2197 return NULL; 2198 } 2199 cq_obj->id = MLX5_GET(create_cq_out, out, cqn); 2200 return cq_obj; 2201 } 2202 2203 /** 2204 * Create VIRTQ using DevX API. 
2205 * 2206 * @param[in] ctx 2207 * Context returned from mlx5 open_device() glue function. 2208 * @param [in] attr 2209 * Pointer to VIRTQ attributes structure. 2210 * 2211 * @return 2212 * The DevX object created, NULL otherwise and rte_errno is set. 2213 */ 2214 struct mlx5_devx_obj * 2215 mlx5_devx_cmd_create_virtq(void *ctx, 2216 struct mlx5_devx_virtq_attr *attr) 2217 { 2218 uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0}; 2219 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 2220 struct mlx5_devx_obj *virtq_obj = mlx5_malloc(MLX5_MEM_ZERO, 2221 sizeof(*virtq_obj), 2222 0, SOCKET_ID_ANY); 2223 void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq); 2224 void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr); 2225 void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context); 2226 2227 if (!virtq_obj) { 2228 DRV_LOG(ERR, "Failed to allocate virtq data."); 2229 rte_errno = ENOMEM; 2230 return NULL; 2231 } 2232 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, 2233 MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 2234 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, 2235 MLX5_GENERAL_OBJ_TYPE_VIRTQ); 2236 MLX5_SET16(virtio_net_q, virtq, hw_available_index, 2237 attr->hw_available_index); 2238 MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index); 2239 MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4); 2240 MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6); 2241 MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum); 2242 MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum); 2243 MLX5_SET16(virtio_q, virtctx, virtio_version_1_0, 2244 attr->virtio_version_1_0); 2245 MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode); 2246 MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id); 2247 MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr); 2248 MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr); 2249 MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr); 2250 MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index); 2251 MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size); 2252 MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey); 2253 MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id); 2254 MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size); 2255 MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset); 2256 MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id); 2257 MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size); 2258 MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset); 2259 MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id); 2260 MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size); 2261 MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset); 2262 MLX5_SET(virtio_q, virtctx, counter_set_id, attr->counters_obj_id); 2263 MLX5_SET(virtio_q, virtctx, pd, attr->pd); 2264 MLX5_SET(virtio_q, virtctx, queue_period_mode, attr->hw_latency_mode); 2265 MLX5_SET(virtio_q, virtctx, queue_period_us, attr->hw_max_latency_us); 2266 MLX5_SET(virtio_q, virtctx, queue_max_count, attr->hw_max_pending_comp); 2267 MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id); 2268 virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, 2269 sizeof(out)); 2270 if (!virtq_obj->obj) { 2271 DEVX_DRV_LOG(ERR, out, "create VIRTQ", NULL, 0); 2272 mlx5_free(virtq_obj); 2273 return NULL; 2274 } 2275 virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 2276 return virtq_obj; 2277 } 2278 2279 /** 2280 * Modify VIRTQ using DevX API. 
2281 * 2282 * @param[in] virtq_obj 2283 * Pointer to virtq object structure. 2284 * @param [in] attr 2285 * Pointer to modify virtq attributes structure. 2286 * 2287 * @return 2288 * 0 on success, a negative errno value otherwise and rte_errno is set. 2289 */ 2290 int 2291 mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj, 2292 struct mlx5_devx_virtq_attr *attr) 2293 { 2294 uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0}; 2295 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 2296 void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq); 2297 void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr); 2298 void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context); 2299 int ret; 2300 2301 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, 2302 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); 2303 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, 2304 MLX5_GENERAL_OBJ_TYPE_VIRTQ); 2305 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id); 2306 MLX5_SET64(virtio_net_q, virtq, modify_field_select, 2307 attr->mod_fields_bitmap); 2308 MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index); 2309 if (!attr->mod_fields_bitmap) { 2310 DRV_LOG(ERR, "Failed to modify VIRTQ for no type set."); 2311 rte_errno = EINVAL; 2312 return -rte_errno; 2313 } 2314 if (attr->mod_fields_bitmap & MLX5_VIRTQ_MODIFY_TYPE_STATE) 2315 MLX5_SET16(virtio_net_q, virtq, state, attr->state); 2316 if (attr->mod_fields_bitmap & 2317 MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS) { 2318 MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey, 2319 attr->dirty_bitmap_mkey); 2320 MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr, 2321 attr->dirty_bitmap_addr); 2322 MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size, 2323 attr->dirty_bitmap_size); 2324 } 2325 if (attr->mod_fields_bitmap & 2326 MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE) 2327 MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable, 2328 attr->dirty_bitmap_dump_enable); 2329 if (attr->mod_fields_bitmap & MLX5_VIRTQ_MODIFY_TYPE_QUEUE_PERIOD) { 2330 MLX5_SET(virtio_q, virtctx, queue_period_mode, 2331 attr->hw_latency_mode); 2332 MLX5_SET(virtio_q, virtctx, queue_period_us, 2333 attr->hw_max_latency_us); 2334 MLX5_SET(virtio_q, virtctx, queue_max_count, 2335 attr->hw_max_pending_comp); 2336 } 2337 if (attr->mod_fields_bitmap & MLX5_VIRTQ_MODIFY_TYPE_ADDR) { 2338 MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr); 2339 MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr); 2340 MLX5_SET64(virtio_q, virtctx, available_addr, 2341 attr->available_addr); 2342 } 2343 if (attr->mod_fields_bitmap & MLX5_VIRTQ_MODIFY_TYPE_HW_AVAILABLE_INDEX) 2344 MLX5_SET16(virtio_net_q, virtq, hw_available_index, 2345 attr->hw_available_index); 2346 if (attr->mod_fields_bitmap & MLX5_VIRTQ_MODIFY_TYPE_HW_USED_INDEX) 2347 MLX5_SET16(virtio_net_q, virtq, hw_used_index, 2348 attr->hw_used_index); 2349 if (attr->mod_fields_bitmap & MLX5_VIRTQ_MODIFY_TYPE_Q_TYPE) 2350 MLX5_SET16(virtio_q, virtctx, virtio_q_type, attr->q_type); 2351 if (attr->mod_fields_bitmap & MLX5_VIRTQ_MODIFY_TYPE_VERSION_1_0) 2352 MLX5_SET16(virtio_q, virtctx, virtio_version_1_0, 2353 attr->virtio_version_1_0); 2354 if (attr->mod_fields_bitmap & MLX5_VIRTQ_MODIFY_TYPE_Q_MKEY) 2355 MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey); 2356 if (attr->mod_fields_bitmap & 2357 MLX5_VIRTQ_MODIFY_TYPE_QUEUE_FEATURE_BIT_MASK) { 2358 MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4); 2359 MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6); 2360 MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum); 
		MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
	}
	if (attr->mod_fields_bitmap & MLX5_VIRTQ_MODIFY_TYPE_EVENT_MODE) {
		MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
		MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
	}
	ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
		rte_errno = errno;
		return -rte_errno;
	}
	return ret;
}

/**
 * Query VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param [in,out] attr
 *   Pointer to virtq attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
			  struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0};
	void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr);
	void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in),
					out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to query VIRTQ using DevX.");
		rte_errno = errno;
		return -rte_errno;
	}
	attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq,
					      hw_available_index);
	attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index);
	attr->state = MLX5_GET16(virtio_net_q, virtq, state);
	attr->error_type = MLX5_GET16(virtio_net_q, virtq,
				      virtio_q_context.error_type);
	return ret;
}

/**
 * Create QP using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] attr
 *   Pointer to QP attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
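 *
 * A minimal usage sketch (illustrative; the UAR, CQ and umem identifiers
 * are assumptions referencing previously created resources):
 * @code
 * struct mlx5_devx_qp_attr qp_attr = {0};
 * struct mlx5_devx_obj *qp;
 *
 * qp_attr.pd = pd_id;
 * qp_attr.uar_index = uar_page_id;
 * qp_attr.cqn = cq->id;
 * qp_attr.num_of_send_wqbbs = 64;  // must be a power of two
 * qp_attr.wq_umem_id = umem_id;
 * qp = mlx5_devx_cmd_create_qp(ctx, &qp_attr);
 * @endcode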
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_qp(void *ctx,
			struct mlx5_devx_qp_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	struct mlx5_devx_obj *qp_obj = mlx5_malloc(MLX5_MEM_ZERO,
						   sizeof(*qp_obj),
						   0, SOCKET_ID_ANY);
	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	if (!qp_obj) {
		DRV_LOG(ERR, "Failed to allocate QP data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pd, attr->pd);
	MLX5_SET(qpc, qpc, ts_format, attr->ts_format);
	MLX5_SET(qpc, qpc, user_index, attr->user_index);
	if (attr->uar_index) {
		if (attr->mmo) {
			void *qpc_ext_and_pas_list = MLX5_ADDR_OF(create_qp_in,
				in, qpc_extension_and_pas_list);
			void *qpc_ext = MLX5_ADDR_OF(qpc_extension_and_pas_list,
				qpc_ext_and_pas_list, qpc_data_extension);

			MLX5_SET(create_qp_in, in, qpc_ext, 1);
			MLX5_SET(qpc_extension, qpc_ext, mmo, 1);
		}
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
		if (attr->log_page_size > MLX5_ADAPTER_PAGE_SHIFT)
			MLX5_SET(qpc, qpc, log_page_size,
				 attr->log_page_size - MLX5_ADAPTER_PAGE_SHIFT);
		if (attr->num_of_send_wqbbs) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->num_of_send_wqbbs));
			MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
			MLX5_SET(qpc, qpc, log_sq_size,
				 rte_log2_u32(attr->num_of_send_wqbbs));
		} else {
			MLX5_SET(qpc, qpc, no_sq, 1);
		}
		if (attr->num_of_receive_wqes) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(
					attr->num_of_receive_wqes));
			MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
			MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
				 MLX5_LOG_RQ_STRIDE_SHIFT);
			MLX5_SET(qpc, qpc, log_rq_size,
				 rte_log2_u32(attr->num_of_receive_wqes));
			MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
		} else {
			MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		}
		if (attr->dbr_umem_valid) {
			MLX5_SET(qpc, qpc, dbr_umem_valid,
				 attr->dbr_umem_valid);
			MLX5_SET(qpc, qpc, dbr_umem_id, attr->dbr_umem_id);
		}
		if (attr->cd_master)
			MLX5_SET(qpc, qpc, cd_master, attr->cd_master);
		if (attr->cd_slave_send)
			MLX5_SET(qpc, qpc, cd_slave_send, attr->cd_slave_send);
		if (attr->cd_slave_recv)
			MLX5_SET(qpc, qpc, cd_slave_receive, attr->cd_slave_recv);
		MLX5_SET64(qpc, qpc, dbr_addr, attr->dbr_address);
		MLX5_SET64(create_qp_in, in, wq_umem_offset,
			   attr->wq_umem_offset);
		MLX5_SET(create_qp_in, in, wq_umem_id, attr->wq_umem_id);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
	} else {
		/* Special QP to be managed by FW - no SQ/RQ/CQ/UAR/DB rec. */
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		MLX5_SET(qpc, qpc, no_sq, 1);
	}
	qp_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!qp_obj->obj) {
		DEVX_DRV_LOG(ERR, out, "create QP", NULL, 0);
		mlx5_free(qp_obj);
		return NULL;
	}
	qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
	return qp_obj;
}

/**
 * Modify QP using DevX API.
 * Currently supports only force loop-back QP.
 *
 * @param[in] qp
 *   Pointer to QP object structure.
 * @param [in] qp_st_mod_op
 *   The QP state modification operation.
2526 * @param [in] remote_qp_id 2527 * The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation. 2528 * 2529 * @return 2530 * 0 on success, a negative errno value otherwise and rte_errno is set. 2531 */ 2532 int 2533 mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op, 2534 uint32_t remote_qp_id) 2535 { 2536 union { 2537 uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_in)]; 2538 uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_in)]; 2539 uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_in)]; 2540 uint32_t qp2rst[MLX5_ST_SZ_DW(2rst_qp_in)]; 2541 } in; 2542 union { 2543 uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_out)]; 2544 uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_out)]; 2545 uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_out)]; 2546 uint32_t qp2rst[MLX5_ST_SZ_DW(2rst_qp_out)]; 2547 } out; 2548 void *qpc; 2549 int ret; 2550 unsigned int inlen; 2551 unsigned int outlen; 2552 2553 memset(&in, 0, sizeof(in)); 2554 memset(&out, 0, sizeof(out)); 2555 MLX5_SET(rst2init_qp_in, &in, opcode, qp_st_mod_op); 2556 switch (qp_st_mod_op) { 2557 case MLX5_CMD_OP_RST2INIT_QP: 2558 MLX5_SET(rst2init_qp_in, &in, qpn, qp->id); 2559 qpc = MLX5_ADDR_OF(rst2init_qp_in, &in, qpc); 2560 MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1); 2561 MLX5_SET(qpc, qpc, rre, 1); 2562 MLX5_SET(qpc, qpc, rwe, 1); 2563 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 2564 inlen = sizeof(in.rst2init); 2565 outlen = sizeof(out.rst2init); 2566 break; 2567 case MLX5_CMD_OP_INIT2RTR_QP: 2568 MLX5_SET(init2rtr_qp_in, &in, qpn, qp->id); 2569 qpc = MLX5_ADDR_OF(init2rtr_qp_in, &in, qpc); 2570 MLX5_SET(qpc, qpc, primary_address_path.fl, 1); 2571 MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1); 2572 MLX5_SET(qpc, qpc, mtu, 1); 2573 MLX5_SET(qpc, qpc, log_msg_max, 30); 2574 MLX5_SET(qpc, qpc, remote_qpn, remote_qp_id); 2575 MLX5_SET(qpc, qpc, min_rnr_nak, 0); 2576 inlen = sizeof(in.init2rtr); 2577 outlen = sizeof(out.init2rtr); 2578 break; 2579 case MLX5_CMD_OP_RTR2RTS_QP: 2580 qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc); 2581 MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id); 2582 MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 16); 2583 MLX5_SET(qpc, qpc, log_ack_req_freq, 0); 2584 MLX5_SET(qpc, qpc, retry_count, 7); 2585 MLX5_SET(qpc, qpc, rnr_retry, 7); 2586 inlen = sizeof(in.rtr2rts); 2587 outlen = sizeof(out.rtr2rts); 2588 break; 2589 case MLX5_CMD_OP_QP_2RST: 2590 MLX5_SET(2rst_qp_in, &in, qpn, qp->id); 2591 inlen = sizeof(in.qp2rst); 2592 outlen = sizeof(out.qp2rst); 2593 break; 2594 default: 2595 DRV_LOG(ERR, "Invalid or unsupported QP modify op %u.", 2596 qp_st_mod_op); 2597 rte_errno = EINVAL; 2598 return -rte_errno; 2599 } 2600 ret = mlx5_glue->devx_obj_modify(qp->obj, &in, inlen, &out, outlen); 2601 if (ret) { 2602 DRV_LOG(ERR, "Failed to modify QP using DevX."); 2603 rte_errno = errno; 2604 return -rte_errno; 2605 } 2606 return ret; 2607 } 2608 2609 struct mlx5_devx_obj * 2610 mlx5_devx_cmd_create_virtio_q_counters(void *ctx) 2611 { 2612 uint32_t in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {0}; 2613 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 2614 struct mlx5_devx_obj *couners_obj = mlx5_malloc(MLX5_MEM_ZERO, 2615 sizeof(*couners_obj), 0, 2616 SOCKET_ID_ANY); 2617 void *hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr); 2618 2619 if (!couners_obj) { 2620 DRV_LOG(ERR, "Failed to allocate virtio queue counters data."); 2621 rte_errno = ENOMEM; 2622 return NULL; 2623 } 2624 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, 2625 MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 2626 
MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, 2627 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS); 2628 couners_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, 2629 sizeof(out)); 2630 if (!couners_obj->obj) { 2631 DEVX_DRV_LOG(ERR, out, "create virtio queue counters Obj", NULL, 2632 0); 2633 mlx5_free(couners_obj); 2634 return NULL; 2635 } 2636 couners_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 2637 return couners_obj; 2638 } 2639 2640 int 2641 mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *couners_obj, 2642 struct mlx5_devx_virtio_q_couners_attr *attr) 2643 { 2644 uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0}; 2645 uint32_t out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {0}; 2646 void *hdr = MLX5_ADDR_OF(query_virtio_q_counters_out, in, hdr); 2647 void *virtio_q_counters = MLX5_ADDR_OF(query_virtio_q_counters_out, out, 2648 virtio_q_counters); 2649 int ret; 2650 2651 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, 2652 MLX5_CMD_OP_QUERY_GENERAL_OBJECT); 2653 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, 2654 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS); 2655 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, couners_obj->id); 2656 ret = mlx5_glue->devx_obj_query(couners_obj->obj, in, sizeof(in), out, 2657 sizeof(out)); 2658 if (ret) { 2659 DRV_LOG(ERR, "Failed to query virtio q counters using DevX."); 2660 rte_errno = errno; 2661 return -errno; 2662 } 2663 attr->received_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters, 2664 received_desc); 2665 attr->completed_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters, 2666 completed_desc); 2667 attr->error_cqes = MLX5_GET(virtio_q_counters, virtio_q_counters, 2668 error_cqes); 2669 attr->bad_desc_errors = MLX5_GET(virtio_q_counters, virtio_q_counters, 2670 bad_desc_errors); 2671 attr->exceed_max_chain = MLX5_GET(virtio_q_counters, virtio_q_counters, 2672 exceed_max_chain); 2673 attr->invalid_buffer = MLX5_GET(virtio_q_counters, virtio_q_counters, 2674 invalid_buffer); 2675 return ret; 2676 } 2677 2678 /** 2679 * Create general object of type FLOW_HIT_ASO using DevX API. 2680 * 2681 * @param[in] ctx 2682 * Context returned from mlx5 open_device() glue function. 2683 * @param [in] pd 2684 * PD value to associate the FLOW_HIT_ASO object with. 2685 * 2686 * @return 2687 * The DevX object created, NULL otherwise and rte_errno is set. 
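 *
 * A minimal usage sketch (illustrative; pd_id is assumed to come from a
 * previously allocated PD):
 * @code
 * struct mlx5_devx_obj *aso;
 *
 * aso = mlx5_devx_cmd_create_flow_hit_aso_obj(ctx, pd_id);
 * if (aso == NULL)
 *         DRV_LOG(ERR, "FLOW_HIT_ASO create failed: %d", rte_errno);
 * @endcode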
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx, uint32_t pd)
{
	uint32_t in[MLX5_ST_SZ_DW(create_flow_hit_aso_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *flow_hit_aso_obj = NULL;
	void *ptr = NULL;

	flow_hit_aso_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*flow_hit_aso_obj),
				       0, SOCKET_ID_ANY);
	if (!flow_hit_aso_obj) {
		DRV_LOG(ERR, "Failed to allocate FLOW_HIT_ASO object data");
		rte_errno = ENOMEM;
		return NULL;
	}
	ptr = MLX5_ADDR_OF(create_flow_hit_aso_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_FLOW_HIT_ASO);
	ptr = MLX5_ADDR_OF(create_flow_hit_aso_in, in, flow_hit_aso);
	MLX5_SET(flow_hit_aso, ptr, access_pd, pd);
	flow_hit_aso_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
							   out, sizeof(out));
	if (!flow_hit_aso_obj->obj) {
		DEVX_DRV_LOG(ERR, out, "create FLOW_HIT_ASO", NULL, 0);
		mlx5_free(flow_hit_aso_obj);
		return NULL;
	}
	flow_hit_aso_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return flow_hit_aso_obj;
}

/**
 * Create PD using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_alloc_pd(void *ctx)
{
	struct mlx5_devx_obj *ppd =
		mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ppd), 0, SOCKET_ID_ANY);
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};

	if (!ppd) {
		DRV_LOG(ERR, "Failed to allocate PD data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	ppd->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!ppd->obj) {
		mlx5_free(ppd);
		DRV_LOG(ERR, "Failed to allocate PD Obj using DevX.");
		rte_errno = errno;
		return NULL;
	}
	ppd->id = MLX5_GET(alloc_pd_out, out, pd);
	return ppd;
}

/**
 * Create general object of type FLOW_METER_ASO using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] pd
 *   PD value to associate the FLOW_METER_ASO object with.
 * @param [in] log_obj_size
 *   Log (base 2) of the number of FLOW_METER_ASO objects to allocate in
 *   the range; each object holds two flow meters.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
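 *
 * A minimal usage sketch (illustrative; a log_obj_size of 5 requests a
 * bulk of 2^5 objects, and pd_id is assumed to come from a previously
 * allocated PD):
 * @code
 * struct mlx5_devx_obj *aso;
 *
 * aso = mlx5_devx_cmd_create_flow_meter_aso_obj(ctx, pd_id, 5);
 * @endcode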
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_flow_meter_aso_obj(void *ctx, uint32_t pd,
					uint32_t log_obj_size)
{
	uint32_t in[MLX5_ST_SZ_DW(create_flow_meter_aso_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *flow_meter_aso_obj;
	void *ptr;

	flow_meter_aso_obj = mlx5_malloc(MLX5_MEM_ZERO,
					 sizeof(*flow_meter_aso_obj),
					 0, SOCKET_ID_ANY);
	if (!flow_meter_aso_obj) {
		DRV_LOG(ERR, "Failed to allocate FLOW_METER_ASO object data");
		rte_errno = ENOMEM;
		return NULL;
	}
	ptr = MLX5_ADDR_OF(create_flow_meter_aso_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_FLOW_METER_ASO);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, log_obj_range,
		 log_obj_size);
	ptr = MLX5_ADDR_OF(create_flow_meter_aso_in, in, flow_meter_aso);
	MLX5_SET(flow_meter_aso, ptr, access_pd, pd);
	flow_meter_aso_obj->obj = mlx5_glue->devx_obj_create(
						ctx, in, sizeof(in),
						out, sizeof(out));
	if (!flow_meter_aso_obj->obj) {
		DEVX_DRV_LOG(ERR, out, "create FLOW_METER_ASO", NULL, 0);
		mlx5_free(flow_meter_aso_obj);
		return NULL;
	}
	flow_meter_aso_obj->id = MLX5_GET(general_obj_out_cmd_hdr,
					  out, obj_id);
	return flow_meter_aso_obj;
}

/**
 * Create general object of type CONN_TRACK_OFFLOAD using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] pd
 *   PD value to associate the CONN_TRACK_OFFLOAD ASO object with.
 * @param [in] log_obj_size
 *   Log (base 2) of the number of objects to allocate in one
 *   CONN_TRACK_OFFLOAD bulk allocation.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_conn_track_offload_obj(void *ctx, uint32_t pd,
					    uint32_t log_obj_size)
{
	uint32_t in[MLX5_ST_SZ_DW(create_conn_track_aso_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *ct_aso_obj;
	void *ptr;

	ct_aso_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ct_aso_obj),
				 0, SOCKET_ID_ANY);
	if (!ct_aso_obj) {
		DRV_LOG(ERR, "Failed to allocate CONN_TRACK_OFFLOAD object.");
		rte_errno = ENOMEM;
		return NULL;
	}
	ptr = MLX5_ADDR_OF(create_conn_track_aso_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_CONN_TRACK_OFFLOAD);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, log_obj_range, log_obj_size);
	ptr = MLX5_ADDR_OF(create_conn_track_aso_in, in, conn_track_offload);
	MLX5_SET(conn_track_offload, ptr, conn_track_aso_access_pd, pd);
	ct_aso_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
						     out, sizeof(out));
	if (!ct_aso_obj->obj) {
		DEVX_DRV_LOG(ERR, out, "create CONN_TRACK_OFFLOAD", NULL, 0);
		mlx5_free(ct_aso_obj);
		return NULL;
	}
	ct_aso_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return ct_aso_obj;
}

/**
 * Create general object of type GENEVE TLV option using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
2864 * @param [in] class 2865 * TLV option variable value of class 2866 * @param [in] type 2867 * TLV option variable value of type 2868 * @param [in] len 2869 * TLV option variable value of len 2870 * 2871 * @return 2872 * The DevX object created, NULL otherwise and rte_errno is set. 2873 */ 2874 struct mlx5_devx_obj * 2875 mlx5_devx_cmd_create_geneve_tlv_option(void *ctx, 2876 uint16_t class, uint8_t type, uint8_t len) 2877 { 2878 uint32_t in[MLX5_ST_SZ_DW(create_geneve_tlv_option_in)] = {0}; 2879 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 2880 struct mlx5_devx_obj *geneve_tlv_opt_obj = mlx5_malloc(MLX5_MEM_ZERO, 2881 sizeof(*geneve_tlv_opt_obj), 2882 0, SOCKET_ID_ANY); 2883 2884 if (!geneve_tlv_opt_obj) { 2885 DRV_LOG(ERR, "Failed to allocate geneve tlv option object."); 2886 rte_errno = ENOMEM; 2887 return NULL; 2888 } 2889 void *hdr = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, hdr); 2890 void *opt = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, 2891 geneve_tlv_opt); 2892 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, 2893 MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 2894 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, 2895 MLX5_GENERAL_OBJ_TYPE_GENEVE_TLV_OPT); 2896 MLX5_SET(geneve_tlv_option, opt, option_class, 2897 rte_be_to_cpu_16(class)); 2898 MLX5_SET(geneve_tlv_option, opt, option_type, type); 2899 MLX5_SET(geneve_tlv_option, opt, option_data_length, len); 2900 geneve_tlv_opt_obj->obj = mlx5_glue->devx_obj_create(ctx, in, 2901 sizeof(in), out, sizeof(out)); 2902 if (!geneve_tlv_opt_obj->obj) { 2903 DEVX_DRV_LOG(ERR, out, "create GENEVE TLV", NULL, 0); 2904 mlx5_free(geneve_tlv_opt_obj); 2905 return NULL; 2906 } 2907 geneve_tlv_opt_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 2908 return geneve_tlv_opt_obj; 2909 } 2910 2911 int 2912 mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id) 2913 { 2914 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 2915 uint32_t in[MLX5_ST_SZ_DW(query_rq_in)] = {0}; 2916 uint32_t out[MLX5_ST_SZ_DW(query_rq_out)] = {0}; 2917 int rc; 2918 void *rq_ctx; 2919 2920 MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ); 2921 MLX5_SET(query_rq_in, in, rqn, ((struct ibv_wq *)wq)->wq_num); 2922 rc = mlx5_glue->devx_wq_query(wq, in, sizeof(in), out, sizeof(out)); 2923 if (rc) { 2924 rte_errno = errno; 2925 DRV_LOG(ERR, "Failed to query WQ counter set ID using DevX - " 2926 "rc = %d, errno = %d.", rc, errno); 2927 return -rc; 2928 }; 2929 rq_ctx = MLX5_ADDR_OF(query_rq_out, out, rq_context); 2930 *counter_set_id = MLX5_GET(rqc, rq_ctx, counter_set_id); 2931 return 0; 2932 #else 2933 (void)wq; 2934 (void)counter_set_id; 2935 return -ENOTSUP; 2936 #endif 2937 } 2938 2939 /* 2940 * Allocate queue counters via devx interface. 2941 * 2942 * @param[in] ctx 2943 * Context returned from mlx5 open_device() glue function. 2944 * 2945 * @return 2946 * Pointer to counter object on success, a NULL value otherwise and 2947 * rte_errno is set. 
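 *
 * A minimal usage sketch pairing allocation with a query (illustrative
 * only):
 * @code
 * struct mlx5_devx_obj *dcs = mlx5_devx_cmd_queue_counter_alloc(ctx);
 * uint32_t oob = 0;
 *
 * if (dcs != NULL &&
 *     mlx5_devx_cmd_queue_counter_query(dcs, 1, &oob) == 0)
 *         DRV_LOG(INFO, "out-of-buffer drops: %u", oob);
 * @endcode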
2948 */ 2949 struct mlx5_devx_obj * 2950 mlx5_devx_cmd_queue_counter_alloc(void *ctx) 2951 { 2952 struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs), 0, 2953 SOCKET_ID_ANY); 2954 uint32_t in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0}; 2955 uint32_t out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0}; 2956 2957 if (!dcs) { 2958 rte_errno = ENOMEM; 2959 return NULL; 2960 } 2961 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); 2962 dcs->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, 2963 sizeof(out)); 2964 if (!dcs->obj) { 2965 DEVX_DRV_LOG(DEBUG, out, "create q counter set", NULL, 0); 2966 mlx5_free(dcs); 2967 return NULL; 2968 } 2969 dcs->id = MLX5_GET(alloc_q_counter_out, out, counter_set_id); 2970 return dcs; 2971 } 2972 2973 /** 2974 * Query queue counters values. 2975 * 2976 * @param[in] dcs 2977 * devx object of the queue counter set. 2978 * @param[in] clear 2979 * Whether hardware should clear the counters after the query or not. 2980 * @param[out] out_of_buffers 2981 * Number of dropped occurred due to lack of WQE for the associated QPs/RQs. 2982 * 2983 * @return 2984 * 0 on success, a negative value otherwise. 2985 */ 2986 int 2987 mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear, 2988 uint32_t *out_of_buffers) 2989 { 2990 uint32_t out[MLX5_ST_SZ_BYTES(query_q_counter_out)] = {0}; 2991 uint32_t in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0}; 2992 int rc; 2993 2994 MLX5_SET(query_q_counter_in, in, opcode, 2995 MLX5_CMD_OP_QUERY_Q_COUNTER); 2996 MLX5_SET(query_q_counter_in, in, op_mod, 0); 2997 MLX5_SET(query_q_counter_in, in, counter_set_id, dcs->id); 2998 MLX5_SET(query_q_counter_in, in, clear, !!clear); 2999 rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out, 3000 sizeof(out)); 3001 if (rc) { 3002 DRV_LOG(ERR, "Failed to query devx q counter set - rc %d", rc); 3003 rte_errno = rc; 3004 return -rc; 3005 } 3006 *out_of_buffers = MLX5_GET(query_q_counter_out, out, out_of_buffer); 3007 return 0; 3008 } 3009 3010 /** 3011 * Create general object of type DEK using DevX API. 3012 * 3013 * @param[in] ctx 3014 * Context returned from mlx5 open_device() glue function. 3015 * @param [in] attr 3016 * Pointer to DEK attributes structure. 3017 * 3018 * @return 3019 * The DevX object created, NULL otherwise and rte_errno is set. 
3020 */ 3021 struct mlx5_devx_obj * 3022 mlx5_devx_cmd_create_dek_obj(void *ctx, struct mlx5_devx_dek_attr *attr) 3023 { 3024 uint32_t in[MLX5_ST_SZ_DW(create_dek_in)] = {0}; 3025 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 3026 struct mlx5_devx_obj *dek_obj = NULL; 3027 void *ptr = NULL, *key_addr = NULL; 3028 3029 dek_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dek_obj), 3030 0, SOCKET_ID_ANY); 3031 if (dek_obj == NULL) { 3032 DRV_LOG(ERR, "Failed to allocate DEK object data"); 3033 rte_errno = ENOMEM; 3034 return NULL; 3035 } 3036 ptr = MLX5_ADDR_OF(create_dek_in, in, hdr); 3037 MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode, 3038 MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 3039 MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type, 3040 MLX5_GENERAL_OBJ_TYPE_DEK); 3041 ptr = MLX5_ADDR_OF(create_dek_in, in, dek); 3042 MLX5_SET(dek, ptr, key_size, attr->key_size); 3043 MLX5_SET(dek, ptr, has_keytag, attr->has_keytag); 3044 MLX5_SET(dek, ptr, key_purpose, attr->key_purpose); 3045 MLX5_SET(dek, ptr, pd, attr->pd); 3046 MLX5_SET64(dek, ptr, opaque, attr->opaque); 3047 key_addr = MLX5_ADDR_OF(dek, ptr, key); 3048 memcpy(key_addr, (void *)(attr->key), MLX5_CRYPTO_KEY_MAX_SIZE); 3049 dek_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), 3050 out, sizeof(out)); 3051 if (dek_obj->obj == NULL) { 3052 DEVX_DRV_LOG(ERR, out, "create DEK", NULL, 0); 3053 mlx5_free(dek_obj); 3054 return NULL; 3055 } 3056 dek_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 3057 return dek_obj; 3058 } 3059 3060 /** 3061 * Create general object of type IMPORT_KEK using DevX API. 3062 * 3063 * @param[in] ctx 3064 * Context returned from mlx5 open_device() glue function. 3065 * @param [in] attr 3066 * Pointer to IMPORT_KEK attributes structure. 3067 * 3068 * @return 3069 * The DevX object created, NULL otherwise and rte_errno is set. 3070 */ 3071 struct mlx5_devx_obj * 3072 mlx5_devx_cmd_create_import_kek_obj(void *ctx, 3073 struct mlx5_devx_import_kek_attr *attr) 3074 { 3075 uint32_t in[MLX5_ST_SZ_DW(create_import_kek_in)] = {0}; 3076 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 3077 struct mlx5_devx_obj *import_kek_obj = NULL; 3078 void *ptr = NULL, *key_addr = NULL; 3079 3080 import_kek_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*import_kek_obj), 3081 0, SOCKET_ID_ANY); 3082 if (import_kek_obj == NULL) { 3083 DRV_LOG(ERR, "Failed to allocate IMPORT_KEK object data"); 3084 rte_errno = ENOMEM; 3085 return NULL; 3086 } 3087 ptr = MLX5_ADDR_OF(create_import_kek_in, in, hdr); 3088 MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode, 3089 MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 3090 MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type, 3091 MLX5_GENERAL_OBJ_TYPE_IMPORT_KEK); 3092 ptr = MLX5_ADDR_OF(create_import_kek_in, in, import_kek); 3093 MLX5_SET(import_kek, ptr, key_size, attr->key_size); 3094 key_addr = MLX5_ADDR_OF(import_kek, ptr, key); 3095 memcpy(key_addr, (void *)(attr->key), MLX5_CRYPTO_KEY_MAX_SIZE); 3096 import_kek_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), 3097 out, sizeof(out)); 3098 if (import_kek_obj->obj == NULL) { 3099 DEVX_DRV_LOG(ERR, out, "create IMPORT_KEK", NULL, 0); 3100 mlx5_free(import_kek_obj); 3101 return NULL; 3102 } 3103 import_kek_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 3104 return import_kek_obj; 3105 } 3106 3107 /** 3108 * Create general object of type CREDENTIAL using DevX API. 3109 * 3110 * @param[in] ctx 3111 * Context returned from mlx5 open_device() glue function. 3112 * @param [in] attr 3113 * Pointer to CREDENTIAL attributes structure. 

/**
 * Create general object of type CREDENTIAL using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] attr
 *   Pointer to CREDENTIAL attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_credential_obj(void *ctx,
				    struct mlx5_devx_credential_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_credential_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *credential_obj = NULL;
	void *ptr = NULL, *credential_addr = NULL;

	credential_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*credential_obj),
				     0, SOCKET_ID_ANY);
	if (credential_obj == NULL) {
		DRV_LOG(ERR, "Failed to allocate CREDENTIAL object data");
		rte_errno = ENOMEM;
		return NULL;
	}
	ptr = MLX5_ADDR_OF(create_credential_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_CREDENTIAL);
	ptr = MLX5_ADDR_OF(create_credential_in, in, credential);
	MLX5_SET(credential, ptr, credential_role, attr->credential_role);
	credential_addr = MLX5_ADDR_OF(credential, ptr, credential);
	memcpy(credential_addr, (void *)(attr->credential),
	       MLX5_CRYPTO_CREDENTIAL_SIZE);
	credential_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
							 out, sizeof(out));
	if (credential_obj->obj == NULL) {
		DEVX_DRV_LOG(ERR, out, "create CREDENTIAL", NULL, 0);
		mlx5_free(credential_obj);
		return NULL;
	}
	credential_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return credential_obj;
}
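
/*
 * Usage sketch (illustrative only, not part of the driver): in the
 * wrapped-key flow the crypto login (the routine below) presents a
 * credential together with references to the session IMPORT_KEK and
 * credential slots. All variables are assumptions of the example; the
 * exact semantics of the two pointer fields are defined by the PRM.
 *
 *	struct mlx5_devx_crypto_login_attr login_attr = {
 *		.credential_pointer = credential_ptr,      // PRM-defined
 *		.session_import_kek_ptr = import_kek_ptr,  // PRM-defined
 *	};
 *	struct mlx5_devx_obj *login;
 *
 *	memcpy(login_attr.credential, cred_bytes, sizeof(cred_bytes));
 *	login = mlx5_devx_cmd_create_crypto_login_obj(ctx, &login_attr);
 *	if (login == NULL)
 *		DRV_LOG(ERR, "crypto login failed: %d", rte_errno);
 */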

/**
 * Create general object of type CRYPTO_LOGIN using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] attr
 *   Pointer to CRYPTO_LOGIN attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_crypto_login_obj(void *ctx,
				      struct mlx5_devx_crypto_login_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_crypto_login_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *crypto_login_obj = NULL;
	void *ptr = NULL, *credential_addr = NULL;

	crypto_login_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*crypto_login_obj),
				       0, SOCKET_ID_ANY);
	if (crypto_login_obj == NULL) {
		DRV_LOG(ERR, "Failed to allocate CRYPTO_LOGIN object data");
		rte_errno = ENOMEM;
		return NULL;
	}
	ptr = MLX5_ADDR_OF(create_crypto_login_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_CRYPTO_LOGIN);
	ptr = MLX5_ADDR_OF(create_crypto_login_in, in, crypto_login);
	MLX5_SET(crypto_login, ptr, credential_pointer,
		 attr->credential_pointer);
	MLX5_SET(crypto_login, ptr, session_import_kek_ptr,
		 attr->session_import_kek_ptr);
	credential_addr = MLX5_ADDR_OF(crypto_login, ptr, credential);
	memcpy(credential_addr, (void *)(attr->credential),
	       MLX5_CRYPTO_CREDENTIAL_SIZE);
	crypto_login_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
							   out, sizeof(out));
	if (crypto_login_obj->obj == NULL) {
		DEVX_DRV_LOG(ERR, out, "create CRYPTO_LOGIN", NULL, 0);
		mlx5_free(crypto_login_obj);
		return NULL;
	}
	crypto_login_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return crypto_login_obj;
}

/**
 * Query LAG context.
 *
 * @param[in] ctx
 *   Pointer to ibv_context, returned from mlx5dv_open_device.
 * @param[out] lag_ctx
 *   Pointer to struct mlx5_devx_lag_context, to be set by the routine.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_query_lag(void *ctx,
			struct mlx5_devx_lag_context *lag_ctx)
{
	uint32_t in[MLX5_ST_SZ_DW(query_lag_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_lag_out)] = {0};
	void *lctx;
	int rc;

	MLX5_SET(query_lag_in, in, opcode, MLX5_CMD_OP_QUERY_LAG);
	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (rc)
		return MLX5_DEVX_ERR_RC(rc);
	lctx = MLX5_ADDR_OF(query_lag_out, out, context);
	lag_ctx->fdb_selection_mode = MLX5_GET(lag_context, lctx,
					       fdb_selection_mode);
	lag_ctx->port_select_mode = MLX5_GET(lag_context, lctx,
					     port_select_mode);
	lag_ctx->lag_state = MLX5_GET(lag_context, lctx, lag_state);
	lag_ctx->tx_remap_affinity_2 = MLX5_GET(lag_context, lctx,
						tx_remap_affinity_2);
	lag_ctx->tx_remap_affinity_1 = MLX5_GET(lag_context, lctx,
						tx_remap_affinity_1);
	return 0;
}
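
/*
 * Usage sketch (illustrative only, not part of the driver): read the
 * bonding configuration of a LAG-capable device, e.g. to inspect the
 * port selection mode. The `ctx` variable is an assumption of the
 * example.
 *
 *	struct mlx5_devx_lag_context lag = {0};
 *
 *	if (mlx5_devx_cmd_query_lag(ctx, &lag) == 0 && lag.lag_state)
 *		DRV_LOG(DEBUG, "LAG active, port_select_mode=%u",
 *			lag.port_select_mode);
 */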