/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_DEVX_CMDS_H_
#define RTE_PMD_MLX5_DEVX_CMDS_H_

#include <rte_compat.h>
#include <rte_bitops.h>

#include "mlx5_glue.h"
#include "mlx5_prm.h"

/* This is a limitation of libibverbs: the input length variable is a u16. */
#define MLX5_DEVX_MAX_KLM_ENTRIES ((UINT16_MAX - \
		MLX5_ST_SZ_DW(create_mkey_in) * 4) / (MLX5_ST_SZ_DW(klm) * 4))
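/*
 * Worked example (illustrative): the input buffer length passed through
 * libibverbs is a u16, so a single CREATE_MKEY command can carry at most
 * UINT16_MAX bytes. Subtracting the fixed create_mkey_in header leaves
 * (UINT16_MAX - MLX5_ST_SZ_DW(create_mkey_in) * 4) bytes for KLM entries
 * of MLX5_ST_SZ_DW(klm) * 4 bytes each (16 bytes with the PRM layout
 * assumed here), which is exactly the bound computed above.
 */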

struct mlx5_devx_counter_attr {
	uint32_t pd_valid:1;
	uint32_t pd:24;
	uint32_t bulk_log_max_alloc:1;
	union {
		uint8_t flow_counter_bulk_log_size;
		uint8_t bulk_n_128;
	};
};

struct mlx5_devx_mkey_attr {
	uint64_t addr;
	uint64_t size;
	uint32_t umem_id;
	uint32_t pd;
	uint32_t log_entity_size;
	uint32_t pg_access:1;
	uint32_t relaxed_ordering_write:1;
	uint32_t relaxed_ordering_read:1;
	uint32_t umr_en:1;
	uint32_t crypto_en:2;
	uint32_t set_remote_rw:1;
	struct mlx5_klm *klm_array;
	int klm_num;
};

/* HCA qos attributes. */
struct mlx5_hca_qos_attr {
	uint32_t sup:1;	/* Whether QOS is supported. */
	uint32_t flow_meter_old:1; /* Flow meter is supported, old version. */
	uint32_t packet_pacing:1; /* Packet pacing is supported. */
	uint32_t wqe_rate_pp:1; /* Packet pacing WQE rate mode. */
	uint32_t flow_meter:1;
	/*
	 * Flow meter is supported, updated version.
	 * When flow_meter is 1, it indicates that REG_C sharing is supported.
	 * If flow_meter is 1, flow_meter_old is also 1.
	 * With older driver versions, flow_meter_old can be 1
	 * while flow_meter is 0.
	 */
	uint32_t flow_meter_aso_sup:1;
	/* Whether the FLOW_METER_ASO object is supported. */
	uint8_t log_max_flow_meter;
	/* Power of the maximum number of supported meters. */
	uint8_t flow_meter_reg_c_ids;
	/* Bitmap of the reg_Cs available for flow meter to use. */
	uint32_t log_meter_aso_granularity:5;
	/* Power of the minimum ASO object allocation granularity. */
	uint32_t log_meter_aso_max_alloc:5;
	/* Power of the maximum ASO object allocation granularity. */
	uint32_t log_max_num_meter_aso:5;
	/* Power of the maximum number of supported ASO objects. */
};

struct mlx5_hca_vdpa_attr {
	uint8_t virtio_queue_type;
	uint32_t valid:1;
	uint32_t desc_tunnel_offload_type:1;
	uint32_t eth_frame_offload_type:1;
	uint32_t virtio_version_1_0:1;
	uint32_t tso_ipv4:1;
	uint32_t tso_ipv6:1;
	uint32_t tx_csum:1;
	uint32_t rx_csum:1;
	uint32_t event_mode:3;
	uint32_t log_doorbell_stride:5;
	uint32_t log_doorbell_bar_size:5;
	uint32_t queue_counters_valid:1;
	uint32_t vnet_modify_ext:1;
	uint32_t virtio_net_q_addr_modify:1;
	uint32_t virtio_q_index_modify:1;
	uint32_t max_num_virtio_queues;
	struct {
		uint32_t a;
		uint32_t b;
	} umems[3];
	uint64_t doorbell_bar_offset;
};

struct mlx5_hca_flow_attr {
	uint32_t tunnel_header_0_1;
	uint32_t tunnel_header_2_3;
};

/**
 * Accumulate port PARSE_GRAPH_NODE capabilities from
 * PARSE_GRAPH_NODE Capabilities and HCA Capabilities 2 tables.
 */
__extension__
struct mlx5_hca_flex_attr {
	uint32_t node_in;
	uint32_t node_out;
	uint16_t header_length_mode;
	uint16_t sample_offset_mode;
	uint8_t  max_num_arc_in;
	uint8_t  max_num_arc_out;
	uint8_t  max_num_sample;
	uint8_t  max_num_prog_sample:5;	/* From HCA CAP 2 */
	uint8_t  parse_graph_anchor:1;
	uint8_t  query_match_sample_info:1; /* Support DevX query sample info. */
	uint8_t  sample_tunnel_inner2:1;
	uint8_t  zero_size_supported:1;
	uint8_t  sample_id_in_out:1;
	uint16_t max_base_header_length;
	uint8_t  max_sample_base_offset;
	uint16_t max_next_header_offset;
	uint8_t  header_length_mask_width;
};

/* ISO C restricts enumerator values to range of 'int'. */
__extension__
enum {
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_HEAD          = RTE_BIT32(1),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_MAC           = RTE_BIT32(2),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IP            = RTE_BIT32(3),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_GRE           = RTE_BIT32(4),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_UDP           = RTE_BIT32(5),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_MPLS          = RTE_BIT32(6),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_TCP           = RTE_BIT32(7),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_VXLAN_GRE     = RTE_BIT32(8),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_GENEVE        = RTE_BIT32(9),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPSEC_ESP     = RTE_BIT32(10),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPV4          = RTE_BIT32(11),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPV6          = RTE_BIT32(12),
	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_PROGRAMMABLE  = RTE_BIT32(31)
};

enum {
	PARSE_GRAPH_NODE_CAP_LENGTH_MODE_FIXED          = RTE_BIT32(0),
	PARSE_GRAPH_NODE_CAP_LENGTH_MODE_EXPLISIT_FIELD = RTE_BIT32(1),
	PARSE_GRAPH_NODE_CAP_LENGTH_MODE_BITMASK_FIELD  = RTE_BIT32(2)
};

/*
 * DWORD shift is the base for calculating header_length_field_mask
 * value in the MLX5_GRAPH_NODE_LEN_FIELD mode.
 */
#define MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD 0x02

static inline uint32_t
mlx5_hca_parse_graph_node_base_hdr_len_mask
	(const struct mlx5_hca_flex_attr *attr)
{
	return (1 << attr->header_length_mask_width) - 1;
}
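/*
 * Example (illustrative): a reported header_length_mask_width of 5 gives
 * a base mask of (1 << 5) - 1 = 0x1F.
 */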

/* HCA supports this number of time periods for LRO. */
#define MLX5_LRO_NUM_SUPP_PERIODS 4

/* HCA attributes. */
struct mlx5_hca_attr {
	uint32_t eswitch_manager:1;
	uint32_t flow_counters_dump:1;
	uint32_t mem_rq_rmp:1;
	uint32_t log_max_rmp:5;
	uint32_t log_max_rqt_size:5;
	uint32_t parse_graph_flex_node:1;
	uint8_t flow_counter_bulk_alloc_bitmap;
	uint32_t eth_net_offloads:1;
	uint32_t eth_virt:1;
	uint32_t wqe_vlan_insert:1;
	uint32_t csum_cap:1;
	uint32_t vlan_cap:1;
	uint32_t wqe_inline_mode:2;
	uint32_t vport_inline_mode:3;
	uint32_t tunnel_stateless_geneve_rx:1;
	uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW. */
	uint32_t tunnel_stateless_gtp:1;
	uint32_t max_lso_cap;
	uint32_t scatter_fcs:1;
	uint32_t lro_cap:1;
	uint32_t tunnel_lro_gre:1;
	uint32_t tunnel_lro_vxlan:1;
	uint32_t tunnel_stateless_gre:1;
	uint32_t tunnel_stateless_vxlan:1;
	uint32_t swp:1;
	uint32_t swp_csum:1;
	uint32_t swp_lso:1;
	uint32_t lro_max_msg_sz_mode:2;
	uint32_t rq_delay_drop:1;
	uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
	uint16_t lro_min_mss_size;
	uint32_t flex_parser_protocols;
	uint32_t max_geneve_tlv_options;
	uint32_t max_geneve_tlv_option_data_len;
	uint32_t hairpin:1;
	uint32_t log_max_hairpin_queues:5;
	uint32_t log_max_hairpin_wq_data_sz:5;
	uint32_t log_max_hairpin_num_packets:5;
	uint32_t hairpin_sq_wqe_bb_size:4;
	uint32_t hairpin_sq_wq_in_host_mem:1;
	uint32_t hairpin_data_buffer_locked:1;
	uint32_t vhca_id:16;
	uint32_t relaxed_ordering_write:1;
	uint32_t relaxed_ordering_read:1;
	uint32_t access_register_user:1;
	uint32_t wqe_index_ignore:1;
	uint32_t cross_channel:1;
	uint32_t non_wire_sq:1; /* SQ with non-wire ops is supported. */
	uint32_t log_max_static_sq_wq:5; /* Static WQE size SQ. */
	uint32_t num_lag_ports:4; /* Number of ports that can be bonded. */
	uint32_t dev_freq_khz; /* Timestamp counter frequency, kHz. */
	uint32_t scatter_fcs_w_decap_disable:1;
	uint32_t flow_hit_aso:1; /* General obj type FLOW_HIT_ASO supported. */
	uint32_t roce:1;
	uint32_t wait_on_time:1;
	uint32_t rq_ts_format:2;
	uint32_t sq_ts_format:2;
	uint32_t steering_format_version:4;
	uint32_t qp_ts_format:2;
	uint32_t regexp_params:1;
	uint32_t regexp_version:3;
	uint32_t reg_c_preserve:1;
	uint32_t ct_offload:1; /* General obj type ASO CT offload supported. */
	uint32_t crypto:1; /* Crypto engine is supported. */
	uint32_t aes_xts:1; /* AES-XTS crypto is supported. */
	uint32_t dek:1; /* General obj type DEK is supported. */
	uint32_t import_kek:1; /* General obj type IMPORT_KEK supported. */
	uint32_t credential:1; /* General obj type CREDENTIAL supported. */
	uint32_t crypto_login:1; /* General obj type CRYPTO_LOGIN supported. */
	uint32_t regexp_num_of_engines;
	uint32_t log_max_ft_sampler_num:8;
	uint32_t inner_ipv4_ihl:1;
	uint32_t outer_ipv4_ihl:1;
	uint32_t geneve_tlv_opt;
	uint32_t cqe_compression:1;
	uint32_t mini_cqe_resp_flow_tag:1;
	uint32_t mini_cqe_resp_l3_l4_tag:1;
	uint32_t enhanced_cqe_compression:1;
	uint32_t pkt_integrity_match:1; /* 1 if HW supports the integrity item. */
	struct mlx5_hca_qos_attr qos;
	struct mlx5_hca_vdpa_attr vdpa;
	struct mlx5_hca_flow_attr flow;
	struct mlx5_hca_flex_attr flex;
	int log_max_qp_sz;
	int log_max_cq_sz;
	int log_max_qp;
	int log_max_cq;
	uint32_t log_max_pd;
	uint32_t log_max_mrw_sz;
	uint32_t log_max_srq;
	uint32_t log_max_srq_sz;
	uint32_t rss_ind_tbl_cap;
	uint32_t mmo_dma_sq_en:1;
	uint32_t mmo_compress_sq_en:1;
	uint32_t mmo_decompress_sq_en:1;
	uint32_t mmo_dma_qp_en:1;
	uint32_t mmo_compress_qp_en:1;
	uint32_t decomp_deflate_v1_en:1;
	uint32_t decomp_deflate_v2_en:1;
	uint32_t mmo_regex_qp_en:1;
	uint32_t mmo_regex_sq_en:1;
	uint32_t compress_min_block_size:4;
	uint32_t log_max_mmo_dma:5;
	uint32_t log_max_mmo_compress:5;
	uint32_t log_max_mmo_decompress:5;
	uint32_t decomp_lz4_data_only_en:1;
	uint32_t decomp_lz4_no_checksum_en:1;
	uint32_t decomp_lz4_checksum_en:1;
	uint32_t umr_modify_entity_size_disabled:1;
	uint32_t umr_indirect_mkey_disabled:1;
	uint32_t log_min_stride_wqe_sz:5;
	uint32_t esw_mgr_vport_id_valid:1; /* E-Switch Mgr vport ID is valid. */
	uint32_t crypto_wrapped_import_method:1;
	uint16_t esw_mgr_vport_id; /* E-Switch Mgr vport ID. */
	uint16_t max_wqe_sz_sq;
	uint32_t striding_rq:1;
	uint32_t ext_stride_num_range:1;
	uint32_t cqe_compression_128:1;
	uint32_t multi_pkt_send_wqe:1;
	uint32_t enhanced_multi_pkt_send_wqe:1;
	uint32_t set_reg_c:8;
	uint32_t nic_flow_table:1;
	uint32_t modify_outer_ip_ecn:1;
	union {
		uint32_t max_flow_counter;
		struct {
			uint16_t max_flow_counter_15_0;
			uint16_t max_flow_counter_31_16;
		};
	};
	uint32_t flow_counter_bulk_log_max_alloc:5;
	uint32_t flow_counter_bulk_log_granularity:5;
	uint32_t alloc_flow_counter_pd:1;
	uint32_t flow_counter_access_aso:1;
	uint32_t flow_access_aso_opc_mod:8;
	uint32_t cross_vhca:1;
	uint32_t lag_rx_port_affinity:1;
	uint32_t wqe_based_flow_table_sup:1;
	uint8_t max_header_modify_pattern_length;
};

/* LAG Context. */
struct mlx5_devx_lag_context {
	uint32_t fdb_selection_mode:1;
	uint32_t port_select_mode:3;
	uint32_t lag_state:3;
	uint32_t tx_remap_affinity_1:4;
	uint32_t tx_remap_affinity_2:4;
};

struct mlx5_devx_wq_attr {
	uint32_t wq_type:4;
	uint32_t wq_signature:1;
	uint32_t end_padding_mode:2;
	uint32_t cd_slave:1;
	uint32_t hds_skip_first_sge:1;
	uint32_t log2_hds_buf_size:3;
	uint32_t page_offset:5;
	uint32_t lwm:16;
	uint32_t pd:24;
	uint32_t uar_page:24;
	uint64_t dbr_addr;
	uint32_t hw_counter;
	uint32_t sw_counter;
	uint32_t log_wq_stride:4;
	uint32_t log_wq_pg_sz:5;
	uint32_t log_wq_sz:5;
	uint32_t dbr_umem_valid:1;
	uint32_t wq_umem_valid:1;
	uint32_t log_hairpin_num_packets:5;
	uint32_t log_hairpin_data_sz:5;
	uint32_t single_wqe_log_num_of_strides:4;
	uint32_t two_byte_shift_en:1;
	uint32_t single_stride_log_num_of_bytes:3;
	uint32_t dbr_umem_id;
	uint32_t wq_umem_id;
	uint64_t wq_umem_offset;
};

/* Create RQ attributes structure, used by create RQ operation. */
struct mlx5_devx_create_rq_attr {
	uint32_t rlky:1;
	uint32_t delay_drop_en:1;
	uint32_t scatter_fcs:1;
	uint32_t vsd:1;
	uint32_t mem_rq_type:4;
	uint32_t state:4;
	uint32_t flush_in_error_en:1;
	uint32_t hairpin:1;
	uint32_t hairpin_data_buffer_type:3;
	uint32_t ts_format:2;
	uint32_t user_index:24;
	uint32_t cqn:24;
	uint32_t counter_set_id:8;
	uint32_t rmpn:24;
	struct mlx5_devx_wq_attr wq_attr;
};

/* Modify RQ attributes structure, used by modify RQ operation. */
struct mlx5_devx_modify_rq_attr {
	uint32_t rqn:24;
	uint32_t rq_state:4; /* Current RQ state. */
	uint32_t state:4; /* Required RQ state. */
	uint32_t scatter_fcs:1;
	uint32_t vsd:1;
	uint32_t counter_set_id:8;
	uint32_t hairpin_peer_sq:24;
	uint32_t hairpin_peer_vhca:16;
	uint64_t modify_bitmask;
	uint32_t lwm:16; /* Contained WQ lwm. */
};

/* Create RMP attributes structure, used by create RMP operation. */
struct mlx5_devx_create_rmp_attr {
	uint32_t rsvd0:8;
	uint32_t state:4;
	uint32_t rsvd1:20;
	uint32_t basic_cyclic_rcv_wqe:1;
	uint32_t rsvd4:31;
	uint32_t rsvd8[10];
	struct mlx5_devx_wq_attr wq_attr;
};

struct mlx5_rx_hash_field_select {
	uint32_t l3_prot_type:1;
	uint32_t l4_prot_type:1;
	uint32_t selected_fields:30;
};

/* TIR attributes structure, used by TIR operations. */
struct mlx5_devx_tir_attr {
	uint32_t disp_type:4;
	uint32_t lro_timeout_period_usecs:16;
	uint32_t lro_enable_mask:4;
	uint32_t lro_max_msg_sz:8;
	uint32_t inline_rqn:24;
	uint32_t rx_hash_symmetric:1;
	uint32_t tunneled_offload_en:1;
	uint32_t indirect_table:24;
	uint32_t rx_hash_fn:4;
	uint32_t self_lb_block:2;
	uint32_t transport_domain:24;
	uint8_t rx_hash_toeplitz_key[MLX5_RSS_HASH_KEY_LEN];
	struct mlx5_rx_hash_field_select rx_hash_field_selector_outer;
	struct mlx5_rx_hash_field_select rx_hash_field_selector_inner;
};

/* TIR attributes structure, used by TIR modify. */
struct mlx5_devx_modify_tir_attr {
	uint32_t tirn:24;
	uint64_t modify_bitmask;
	struct mlx5_devx_tir_attr tir;
};

/* RQT attributes structure, used by RQT operations. */
struct mlx5_devx_rqt_attr {
	uint8_t rq_type;
	uint32_t rqt_max_size:16;
	uint32_t rqt_actual_size:16;
	uint32_t rq_list[];
};

/* TIS attributes structure. */
struct mlx5_devx_tis_attr {
	uint32_t strict_lag_tx_port_affinity:1;
	uint32_t tls_en:1;
	uint32_t lag_tx_port_affinity:4;
	uint32_t prio:4;
	uint32_t transport_domain:24;
};

/* SQ attributes structure, used by SQ create operation. */
struct mlx5_devx_create_sq_attr {
	uint32_t rlky:1;
	uint32_t cd_master:1;
	uint32_t fre:1;
	uint32_t flush_in_error_en:1;
	uint32_t allow_multi_pkt_send_wqe:1;
	uint32_t min_wqe_inline_mode:3;
	uint32_t state:4;
	uint32_t reg_umr:1;
	uint32_t allow_swp:1;
	uint32_t hairpin:1;
	uint32_t non_wire:1;
	uint32_t static_sq_wq:1;
	uint32_t ts_format:2;
	uint32_t hairpin_wq_buffer_type:3;
	uint32_t user_index:24;
	uint32_t cqn:24;
	uint32_t packet_pacing_rate_limit_index:16;
	uint32_t tis_lst_sz:16;
	uint32_t tis_num:24;
	struct mlx5_devx_wq_attr wq_attr;
};

/* SQ attributes structure, used by SQ modify operation. */
struct mlx5_devx_modify_sq_attr {
	uint32_t sq_state:4;
	uint32_t state:4;
	uint32_t hairpin_peer_rq:24;
	uint32_t hairpin_peer_vhca:16;
};

/* CQ attributes structure, used by CQ operations. */
struct mlx5_devx_cq_attr {
	uint32_t q_umem_valid:1;
	uint32_t db_umem_valid:1;
	uint32_t use_first_only:1;
	uint32_t overrun_ignore:1;
	uint32_t cqe_comp_en:1;
	uint32_t mini_cqe_res_format:2;
	uint32_t mini_cqe_res_format_ext:2;
	uint32_t cqe_comp_layout:2;
	uint32_t log_cq_size:5;
	uint32_t log_page_size:5;
	uint32_t uar_page_id;
	uint32_t q_umem_id;
	uint64_t q_umem_offset;
	uint32_t db_umem_id;
	uint64_t db_umem_offset;
	uint32_t eqn;
	uint64_t db_addr;
};

/* Virtq attributes structure, used by VIRTQ operations. */
struct mlx5_devx_virtq_attr {
	uint16_t hw_available_index;
	uint16_t hw_used_index;
	uint16_t q_size;
	uint32_t pd:24;
	uint32_t virtio_version_1_0:1;
	uint32_t tso_ipv4:1;
	uint32_t tso_ipv6:1;
	uint32_t tx_csum:1;
	uint32_t rx_csum:1;
	uint32_t event_mode:3;
	uint32_t state:4;
	uint32_t hw_latency_mode:2;
	uint32_t hw_max_latency_us:12;
	uint32_t hw_max_pending_comp:16;
	uint32_t dirty_bitmap_dump_enable:1;
	uint32_t dirty_bitmap_mkey;
	uint32_t dirty_bitmap_size;
	uint32_t mkey;
	uint32_t qp_id;
	uint32_t queue_index;
	uint32_t tis_id;
	uint32_t counters_obj_id;
	uint64_t dirty_bitmap_addr;
	uint64_t mod_fields_bitmap;
	uint64_t desc_addr;
	uint64_t used_addr;
	uint64_t available_addr;
	struct {
		uint32_t id;
		uint32_t size;
		uint64_t offset;
	} umems[3];
	uint8_t error_type;
	uint8_t q_type;
};

struct mlx5_devx_qp_attr {
	uint32_t pd:24;
	uint32_t uar_index:24;
	uint32_t cqn:24;
	uint32_t log_page_size:5;
	uint32_t num_of_receive_wqes:17; /* Must be power of 2. */
	uint32_t log_rq_stride:3;
	uint32_t num_of_send_wqbbs:17; /* Must be power of 2. */
	uint32_t ts_format:2;
	uint32_t dbr_umem_valid:1;
	uint32_t dbr_umem_id;
	uint64_t dbr_address;
	uint32_t wq_umem_id;
	uint64_t wq_umem_offset;
	uint32_t user_index:24;
	uint32_t mmo:1;
};

struct mlx5_devx_virtio_q_couners_attr {
	uint64_t received_desc;
	uint64_t completed_desc;
	uint32_t error_cqes;
	uint32_t bad_desc_errors;
	uint32_t exceed_max_chain;
	uint32_t invalid_buffer;
};

/*
 * Match sample info attributes structure, used by:
 *  - GENEVE TLV option query.
 *  - Graph flow match sample query.
 */
struct mlx5_devx_match_sample_info_query_attr {
	uint32_t modify_field_id:12;
	uint32_t sample_dw_data:8;
	uint32_t sample_dw_ok_bit:8;
	uint32_t sample_dw_ok_bit_offset:5;
};

/*
 * Graph flow match sample attributes structure,
 * used by flex parser operations.
 */
struct mlx5_devx_match_sample_attr {
	uint32_t flow_match_sample_en:1;
	uint32_t flow_match_sample_field_offset:16;
	uint32_t flow_match_sample_offset_mode:4;
	uint32_t flow_match_sample_field_offset_mask;
	uint32_t flow_match_sample_field_offset_shift:4;
	uint32_t flow_match_sample_field_base_offset:8;
	uint32_t flow_match_sample_tunnel_mode:3;
	uint32_t flow_match_sample_field_id;
};

/* Graph node arc attributes structure, used by flex parser operations. */
struct mlx5_devx_graph_arc_attr {
	uint32_t compare_condition_value:16;
	uint32_t start_inner_tunnel:1;
	uint32_t arc_parse_graph_node:8;
	uint32_t parse_graph_node_handle;
};

/* Maximal number of samples per graph node. */
#define MLX5_GRAPH_NODE_SAMPLE_NUM 8

/* Maximal number of input/output arcs per graph node. */
#define MLX5_GRAPH_NODE_ARC_NUM 8

/* Parse graph node attributes structure, used by flex parser operations. */
struct mlx5_devx_graph_node_attr {
	uint32_t modify_field_select;
	uint32_t header_length_mode:4;
	uint32_t header_length_base_value:16;
	uint32_t header_length_field_shift:4;
	uint32_t header_length_field_offset:16;
	uint32_t header_length_field_mask;
	struct mlx5_devx_match_sample_attr sample[MLX5_GRAPH_NODE_SAMPLE_NUM];
	uint32_t next_header_field_offset:16;
	uint32_t next_header_field_size:5;
	struct mlx5_devx_graph_arc_attr in[MLX5_GRAPH_NODE_ARC_NUM];
	struct mlx5_devx_graph_arc_attr out[MLX5_GRAPH_NODE_ARC_NUM];
};

/* Encryption key size is up to 1024 bits (128 bytes). */
#define MLX5_CRYPTO_KEY_MAX_SIZE	128

struct mlx5_devx_dek_attr {
	uint32_t key_size:4;
	uint32_t has_keytag:1;
	uint32_t key_purpose:4;
	uint32_t pd:24;
	uint64_t opaque;
	uint8_t key[MLX5_CRYPTO_KEY_MAX_SIZE];
};

struct mlx5_devx_import_kek_attr {
	uint64_t modify_field_select;
	uint32_t state:8;
	uint32_t key_size:4;
	uint8_t key[MLX5_CRYPTO_KEY_MAX_SIZE];
};

#define MLX5_CRYPTO_CREDENTIAL_SIZE	48

struct mlx5_devx_credential_attr {
	uint64_t modify_field_select;
	uint32_t state:8;
	uint32_t credential_role:8;
	uint8_t credential[MLX5_CRYPTO_CREDENTIAL_SIZE];
};

struct mlx5_devx_crypto_login_attr {
	uint64_t modify_field_select;
	uint32_t credential_pointer:24;
	uint32_t session_import_kek_ptr:24;
	uint8_t credential[MLX5_CRYPTO_CREDENTIAL_SIZE];
};

/* mlx5_devx_cmds.c */

__rte_internal
struct mlx5_devx_obj *
mlx5_devx_cmd_flow_counter_alloc_general(void *ctx,
				struct mlx5_devx_counter_attr *attr);

__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(void *ctx,
						       uint32_t bulk_sz);
__rte_internal
int mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj);
__rte_internal
int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				     int clear, uint32_t n_counters,
				     uint64_t *pkts, uint64_t *bytes,
				     uint32_t mkey, void *addr,
				     void *cmd_comp,
				     uint64_t async_id);
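/*
 * Usage sketch (illustrative, not normative): a single synchronous counter
 * read. It assumes the bulk/async parameters (mkey, addr, cmd_comp,
 * async_id) may be left as 0/NULL when one counter is queried directly
 * into pkts/bytes.
 *
 *	uint64_t pkts = 0, bytes = 0;
 *	struct mlx5_devx_obj *dcs = mlx5_devx_cmd_flow_counter_alloc(ctx, 0);
 *
 *	if (dcs == NULL)
 *		return -rte_errno;
 *	if (mlx5_devx_cmd_flow_counter_query(dcs, 0, 0, &pkts, &bytes,
 *					     0, NULL, NULL, 0))
 *		return -rte_errno;
 */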
__rte_internal
int mlx5_devx_cmd_query_hca_attr(void *ctx,
				 struct mlx5_hca_attr *attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_mkey_create(void *ctx,
					      struct mlx5_devx_mkey_attr *attr);
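/*
 * Usage sketch (illustrative): creating a direct mkey over a DevX umem.
 * The buffer, umem id and PD number are assumptions of this example and
 * come from the caller's own context setup.
 *
 *	struct mlx5_devx_mkey_attr mkey_attr = {
 *		.addr = (uintptr_t)buf,
 *		.size = buf_size,
 *		.umem_id = umem_id,
 *		.pd = pdn,
 *	};
 *	struct mlx5_devx_obj *mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
 *
 *	if (mkey == NULL)
 *		return -rte_errno;
 *
 * rte_errno is set by the command on failure.
 */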
__rte_internal
int mlx5_devx_get_out_command_status(void *out);
__rte_internal
int mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
				  uint32_t *tis_td);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_rq(void *ctx,
				       struct mlx5_devx_create_rq_attr *rq_attr,
				       int socket);
__rte_internal
int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
			    struct mlx5_devx_modify_rq_attr *rq_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_rmp(void *ctx,
			struct mlx5_devx_create_rmp_attr *rq_attr, int socket);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(void *ctx,
					   struct mlx5_devx_tir_attr *tir_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_rqt(void *ctx,
					   struct mlx5_devx_rqt_attr *rqt_attr);
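/*
 * Usage sketch (illustrative): mlx5_devx_rqt_attr ends with a flexible
 * array member, so the attribute block must be allocated with room for
 * the RQ list. The rq_ids[] array and its size n are assumptions of the
 * example; a real caller would use its own allocator.
 *
 *	struct mlx5_devx_rqt_attr *rqt_attr =
 *		calloc(1, sizeof(*rqt_attr) + n * sizeof(uint32_t));
 *	unsigned int i;
 *
 *	rqt_attr->rqt_max_size = n;
 *	rqt_attr->rqt_actual_size = n;
 *	for (i = 0; i < n; i++)
 *		rqt_attr->rq_list[i] = rq_ids[i];
 *	struct mlx5_devx_obj *rqt = mlx5_devx_cmd_create_rqt(ctx, rqt_attr);
 *	free(rqt_attr);
 */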
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_sq(void *ctx,
				      struct mlx5_devx_create_sq_attr *sq_attr);
__rte_internal
int mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
			    struct mlx5_devx_modify_sq_attr *sq_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_tis(void *ctx,
					   struct mlx5_devx_tis_attr *tis_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_td(void *ctx);
__rte_internal
int mlx5_devx_cmd_flow_dump(void *fdb_domain, void *rx_domain, void *tx_domain,
			    FILE *file);
__rte_internal
int mlx5_devx_cmd_flow_single_dump(void *rule, FILE *file);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_cq(void *ctx,
					      struct mlx5_devx_cq_attr *attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_virtq(void *ctx,
					     struct mlx5_devx_virtq_attr *attr);
__rte_internal
int mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
			       struct mlx5_devx_virtq_attr *attr);
__rte_internal
int mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
			      struct mlx5_devx_virtq_attr *attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_qp(void *ctx,
					      struct mlx5_devx_qp_attr *attr);
__rte_internal
int mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp,
				  uint32_t qp_st_mod_op, uint32_t remote_qp_id);
__rte_internal
int mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
			     struct mlx5_devx_rqt_attr *rqt_attr);
__rte_internal
int mlx5_devx_cmd_modify_tir(struct mlx5_devx_obj *tir,
			     struct mlx5_devx_modify_tir_attr *tir_attr);
__rte_internal
int mlx5_devx_cmd_match_sample_info_query(void *ctx, uint32_t sample_field_id,
					  struct mlx5_devx_match_sample_info_query_attr *attr);
__rte_internal
int mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
				      uint32_t *ids,
				      uint32_t num, uint8_t *anchor);

__rte_internal
struct mlx5_devx_obj *
mlx5_devx_cmd_create_flex_parser(void *ctx,
				 struct mlx5_devx_graph_node_attr *data);

__rte_internal
int mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id,
				uint32_t arg, uint32_t *data, uint32_t dw_cnt);

__rte_internal
int mlx5_devx_cmd_register_write(void *ctx, uint16_t reg_id,
				 uint32_t arg, uint32_t *data, uint32_t dw_cnt);

__rte_internal
struct mlx5_devx_obj *
mlx5_devx_cmd_create_geneve_tlv_option(void *ctx,
		uint16_t class, uint8_t type, uint8_t len);

/**
 * Create virtio queue counters object using DevX API.
 *
 * @param[in] ctx
 *   Device context.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_virtio_q_counters(void *ctx);

/**
 * Query virtio queue counters object using DevX API.
 *
 * @param[in] counters_obj
 *   Pointer to the virtio queue counters object.
 * @param[in, out] attr
 *   Pointer to virtio queue counters attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
__rte_internal
int mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *counters_obj,
				  struct mlx5_devx_virtio_q_couners_attr *attr);
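/*
 * Usage sketch (illustrative): create the counters object once, then poll
 * it periodically. Error handling is reduced to returning rte_errno.
 *
 *	struct mlx5_devx_virtio_q_couners_attr cnt = { 0 };
 *	struct mlx5_devx_obj *counters =
 *		mlx5_devx_cmd_create_virtio_q_counters(ctx);
 *
 *	if (counters == NULL ||
 *	    mlx5_devx_cmd_query_virtio_q_counters(counters, &cnt))
 *		return -rte_errno;
 *
 * After the query, cnt.received_desc and cnt.completed_desc hold the
 * hardware values.
 */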
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx,
							    uint32_t pd);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_alloc_pd(void *ctx);

__rte_internal
int mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id);

__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_queue_counter_alloc(void *ctx);
__rte_internal
int mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
				      uint32_t *out_of_buffers);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_conn_track_offload_obj(void *ctx,
					uint32_t pd, uint32_t log_obj_size);

/**
 * Create general object of type FLOW_METER_ASO using DevX API.
 *
 * @param[in] ctx
 *   Device context.
 * @param[in] pd
 *   PD value to associate the FLOW_METER_ASO object with.
 * @param[in] log_obj_size
 *   log_obj_size defines the allocation size: one FLOW_METER_ASO object
 *   holds 2 * 2^log_obj_size meters.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_meter_aso_obj(void *ctx,
					uint32_t pd, uint32_t log_obj_size);
__rte_internal
struct mlx5_devx_obj *
mlx5_devx_cmd_create_dek_obj(void *ctx, struct mlx5_devx_dek_attr *attr);

__rte_internal
struct mlx5_devx_obj *
mlx5_devx_cmd_create_import_kek_obj(void *ctx,
				    struct mlx5_devx_import_kek_attr *attr);

__rte_internal
struct mlx5_devx_obj *
mlx5_devx_cmd_create_credential_obj(void *ctx,
				    struct mlx5_devx_credential_attr *attr);

__rte_internal
struct mlx5_devx_obj *
mlx5_devx_cmd_create_crypto_login_obj(void *ctx,
				      struct mlx5_devx_crypto_login_attr *attr);

__rte_internal
int
mlx5_devx_cmd_query_lag(void *ctx,
			struct mlx5_devx_lag_context *lag_ctx);

#endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */