/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_DEVX_CMDS_H_
#define RTE_PMD_MLX5_DEVX_CMDS_H_

#include "mlx5_glue.h"
#include "mlx5_prm.h"
#include <rte_compat.h>

/*
 * Number of retries when allocating the first UAR in a page.
 * OFED 5.0.x and upstream rdma-core before v29 returned NULL as the UAR
 * base address if the UAR was not the first object in the UAR page.
 * That caused a PMD failure, so another UAR must be requested until one
 * with a non-NULL base address is returned.
 */
#define MLX5_ALLOC_UAR_RETRY 32
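
/*
 * Illustrative sketch (not part of this header) of how the retry count above
 * can be applied. It assumes the UAR is allocated through the rdma-core glue
 * wrappers (mlx5_glue->devx_alloc_uar()/devx_free_uar()) and that the
 * returned struct mlx5dv_devx_uar exposes a base_addr field; adjust to the
 * allocation helper actually in use.
 *
 *	struct mlx5dv_devx_uar *uar = NULL;
 *	int retry;
 *
 *	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
 *		uar = mlx5_glue->devx_alloc_uar(ctx, 0);
 *		if (uar == NULL || uar->base_addr != NULL)
 *			break;
 *		mlx5_glue->devx_free_uar(uar); // NULL base address quirk, retry
 *		uar = NULL;
 *	}
 */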

/*
 * This is a limitation of libibverbs: the 'in' command buffer length
 * variable type is u16.
 */
#define MLX5_DEVX_MAX_KLM_ENTRIES ((UINT16_MAX - \
		MLX5_ST_SZ_DW(create_mkey_in) * 4) / (MLX5_ST_SZ_DW(klm) * 4))

struct mlx5_devx_mkey_attr {
	uint64_t addr;
	uint64_t size;
	uint32_t umem_id;
	uint32_t pd;
	uint32_t log_entity_size;
	uint32_t pg_access:1;
	uint32_t relaxed_ordering_write:1;
	uint32_t relaxed_ordering_read:1;
	uint32_t umr_en:1;
	struct mlx5_klm *klm_array;
	int klm_num;
};
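
/*
 * Illustrative sketch only: building a direct (non-KLM) mkey over a
 * registered umem with mlx5_devx_cmd_mkey_create() (declared below).
 * The ctx, buf, buf_size, umem_id and pdn values are assumed to come from
 * earlier DevX setup and are placeholders here.
 *
 *	struct mlx5_devx_mkey_attr mkey_attr = {
 *		.addr = (uintptr_t)buf,
 *		.size = buf_size,
 *		.umem_id = umem_id,
 *		.pd = pdn,
 *		.pg_access = 1,
 *	};
 *	struct mlx5_devx_obj *mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
 *
 *	if (mkey == NULL)
 *		return -rte_errno;
 */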

/* HCA QoS attributes. */
struct mlx5_hca_qos_attr {
	uint32_t sup:1;	/* Whether QoS is supported. */
	uint32_t flow_meter_old:1; /* Flow meter is supported, old version. */
	uint32_t packet_pacing:1; /* Packet pacing is supported. */
	uint32_t wqe_rate_pp:1; /* Packet pacing WQE rate mode. */
	uint32_t flow_meter:1;
	/*
	 * Flow meter is supported, updated version.
	 * When flow_meter is 1, it indicates that REG_C sharing is supported.
	 * If flow_meter is 1, flow_meter_old is also 1.
	 * With older driver versions, flow_meter_old can be 1
	 * while flow_meter is 0.
	 */
	uint8_t log_max_flow_meter;
	/* Power of two of the maximum number of supported meters. */
	uint8_t flow_meter_reg_c_ids;
	/* Bitmap of the REG_C registers available for flow meter use. */
};

struct mlx5_hca_vdpa_attr {
	uint8_t virtio_queue_type;
	uint32_t valid:1;
	uint32_t desc_tunnel_offload_type:1;
	uint32_t eth_frame_offload_type:1;
	uint32_t virtio_version_1_0:1;
	uint32_t tso_ipv4:1;
	uint32_t tso_ipv6:1;
	uint32_t tx_csum:1;
	uint32_t rx_csum:1;
	uint32_t event_mode:3;
	uint32_t log_doorbell_stride:5;
	uint32_t log_doorbell_bar_size:5;
	uint32_t queue_counters_valid:1;
	uint32_t max_num_virtio_queues;
	struct {
		uint32_t a;
		uint32_t b;
	} umems[3];
	uint64_t doorbell_bar_offset;
};

/* HCA supports this number of time periods for LRO. */
#define MLX5_LRO_NUM_SUPP_PERIODS 4

/* HCA attributes. */
struct mlx5_hca_attr {
	uint32_t eswitch_manager:1;
	uint32_t flow_counters_dump:1;
	uint32_t log_max_rqt_size:5;
	uint32_t parse_graph_flex_node:1;
	uint8_t flow_counter_bulk_alloc_bitmap;
	uint32_t eth_net_offloads:1;
	uint32_t eth_virt:1;
	uint32_t wqe_vlan_insert:1;
	uint32_t wqe_inline_mode:2;
	uint32_t vport_inline_mode:3;
	uint32_t tunnel_stateless_geneve_rx:1;
	uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */
	uint32_t tunnel_stateless_gtp:1;
	uint32_t lro_cap:1;
	uint32_t tunnel_lro_gre:1;
	uint32_t tunnel_lro_vxlan:1;
	uint32_t lro_max_msg_sz_mode:2;
	uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
	uint16_t lro_min_mss_size;
	uint32_t flex_parser_protocols;
	uint32_t max_geneve_tlv_options;
	uint32_t max_geneve_tlv_option_data_len;
	uint32_t hairpin:1;
	uint32_t log_max_hairpin_queues:5;
	uint32_t log_max_hairpin_wq_data_sz:5;
	uint32_t log_max_hairpin_num_packets:5;
	uint32_t vhca_id:16;
	uint32_t relaxed_ordering_write:1;
	uint32_t relaxed_ordering_read:1;
	uint32_t access_register_user:1;
	uint32_t wqe_index_ignore:1;
	uint32_t cross_channel:1;
	uint32_t non_wire_sq:1; /* SQ with non-wire ops is supported. */
	uint32_t log_max_static_sq_wq:5; /* Static WQE size SQ. */
	uint32_t num_lag_ports:4; /* Number of ports that can be bonded. */
	uint32_t dev_freq_khz; /* Timestamp counter frequency, kHz. */
	uint32_t scatter_fcs_w_decap_disable:1;
	uint32_t flow_hit_aso:1; /* General obj type FLOW_HIT_ASO supported. */
	uint32_t roce:1;
	uint32_t rq_ts_format:2;
	uint32_t sq_ts_format:2;
	uint32_t qp_ts_format:2;
	uint32_t regex:1;
	uint32_t reg_c_preserve:1;
	uint32_t regexp_num_of_engines;
	uint32_t log_max_ft_sampler_num:8;
	uint32_t geneve_tlv_opt;
	uint32_t cqe_compression:1;
	uint32_t mini_cqe_resp_flow_tag:1;
	uint32_t mini_cqe_resp_l3_l4_tag:1;
	struct mlx5_hca_qos_attr qos;
	struct mlx5_hca_vdpa_attr vdpa;
	int log_max_qp_sz;
	int log_max_cq_sz;
	int log_max_qp;
	int log_max_cq;
	uint32_t log_max_pd;
	uint32_t log_max_mrw_sz;
	uint32_t log_max_srq;
	uint32_t log_max_srq_sz;
	uint32_t rss_ind_tbl_cap;
	uint32_t mmo_dma_en:1;
	uint32_t mmo_compress_en:1;
	uint32_t mmo_decompress_en:1;
	uint32_t compress_min_block_size:4;
	uint32_t log_max_mmo_dma:5;
	uint32_t log_max_mmo_compress:5;
	uint32_t log_max_mmo_decompress:5;
	uint32_t umr_modify_entity_size_disabled:1;
	uint32_t umr_indirect_mkey_disabled:1;
};
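
/*
 * Typical usage sketch: query the capabilities once at probe time with
 * mlx5_devx_cmd_query_hca_attr() (declared below) and gate optional features
 * on the reported bits. Purely illustrative; "ctx" is assumed to be a
 * DevX-capable device context and the *_supported flags are placeholders.
 *
 *	struct mlx5_hca_attr hca_attr = { 0 };
 *
 *	if (mlx5_devx_cmd_query_hca_attr(ctx, &hca_attr) != 0)
 *		return -rte_errno;
 *	lro_supported = hca_attr.lro_cap;
 *	meter_supported = hca_attr.qos.sup && hca_attr.qos.flow_meter;
 */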

struct mlx5_devx_wq_attr {
	uint32_t wq_type:4;
	uint32_t wq_signature:1;
	uint32_t end_padding_mode:2;
	uint32_t cd_slave:1;
	uint32_t hds_skip_first_sge:1;
	uint32_t log2_hds_buf_size:3;
	uint32_t page_offset:5;
	uint32_t lwm:16;
	uint32_t pd:24;
	uint32_t uar_page:24;
	uint64_t dbr_addr;
	uint32_t hw_counter;
	uint32_t sw_counter;
	uint32_t log_wq_stride:4;
	uint32_t log_wq_pg_sz:5;
	uint32_t log_wq_sz:5;
	uint32_t dbr_umem_valid:1;
	uint32_t wq_umem_valid:1;
	uint32_t log_hairpin_num_packets:5;
	uint32_t log_hairpin_data_sz:5;
	uint32_t single_wqe_log_num_of_strides:4;
	uint32_t two_byte_shift_en:1;
	uint32_t single_stride_log_num_of_bytes:3;
	uint32_t dbr_umem_id;
	uint32_t wq_umem_id;
	uint64_t wq_umem_offset;
};

/* Create RQ attributes structure, used by create RQ operation. */
struct mlx5_devx_create_rq_attr {
	uint32_t rlky:1;
	uint32_t delay_drop_en:1;
	uint32_t scatter_fcs:1;
	uint32_t vsd:1;
	uint32_t mem_rq_type:4;
	uint32_t state:4;
	uint32_t flush_in_error_en:1;
	uint32_t hairpin:1;
	uint32_t ts_format:2;
	uint32_t user_index:24;
	uint32_t cqn:24;
	uint32_t counter_set_id:8;
	uint32_t rmpn:24;
	struct mlx5_devx_wq_attr wq_attr;
};
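
/*
 * Illustrative sketch of creating a regular (non-hairpin) RQ; cqn, pdn and
 * the WQ umem/doorbell identifiers are placeholders for objects created
 * beforehand, and the cyclic WQ type value comes from mlx5_prm.h.
 *
 *	struct mlx5_devx_create_rq_attr rq_attr = {
 *		.vsd = 1,
 *		.cqn = cqn,
 *		.wq_attr = {
 *			.wq_type = MLX5_WQ_TYPE_CYCLIC,
 *			.pd = pdn,
 *			.dbr_umem_valid = 1,
 *			.dbr_umem_id = dbr_umem_id,
 *			.wq_umem_valid = 1,
 *			.wq_umem_id = wq_umem_id,
 *			.log_wq_stride = 4,
 *			.log_wq_sz = log_desc_n,
 *		},
 *	};
 *	struct mlx5_devx_obj *rq = mlx5_devx_cmd_create_rq(ctx, &rq_attr, 0);
 */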

/* Modify RQ attributes structure, used by modify RQ operation. */
struct mlx5_devx_modify_rq_attr {
	uint32_t rqn:24;
	uint32_t rq_state:4; /* Current RQ state. */
	uint32_t state:4; /* Required RQ state. */
	uint32_t scatter_fcs:1;
	uint32_t vsd:1;
	uint32_t counter_set_id:8;
	uint32_t hairpin_peer_sq:24;
	uint32_t hairpin_peer_vhca:16;
	uint64_t modify_bitmask;
	uint32_t lwm:16; /* Contained WQ lwm. */
};
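
/*
 * Illustrative sketch: move an RQ created with the attributes above from
 * reset to ready; the state values are the RQC states defined in mlx5_prm.h.
 *
 *	struct mlx5_devx_modify_rq_attr mod = {
 *		.rq_state = MLX5_RQC_STATE_RST,
 *		.state = MLX5_RQC_STATE_RDY,
 *	};
 *
 *	if (mlx5_devx_cmd_modify_rq(rq, &mod) != 0)
 *		return -rte_errno;
 */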

struct mlx5_rx_hash_field_select {
	uint32_t l3_prot_type:1;
	uint32_t l4_prot_type:1;
	uint32_t selected_fields:30;
};

/* TIR attributes structure, used by TIR operations. */
struct mlx5_devx_tir_attr {
	uint32_t disp_type:4;
	uint32_t lro_timeout_period_usecs:16;
	uint32_t lro_enable_mask:4;
	uint32_t lro_max_msg_sz:8;
	uint32_t inline_rqn:24;
	uint32_t rx_hash_symmetric:1;
	uint32_t tunneled_offload_en:1;
	uint32_t indirect_table:24;
	uint32_t rx_hash_fn:4;
	uint32_t self_lb_block:2;
	uint32_t transport_domain:24;
	uint8_t rx_hash_toeplitz_key[MLX5_RSS_HASH_KEY_LEN];
	struct mlx5_rx_hash_field_select rx_hash_field_selector_outer;
	struct mlx5_rx_hash_field_select rx_hash_field_selector_inner;
};
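
/*
 * Illustrative sketch of an RSS TIR dispatching to an indirection table;
 * rqt_id, tdn and rss_key are placeholders and the indirect dispatch type
 * constant is expected to be defined in mlx5_prm.h.
 *
 *	struct mlx5_devx_tir_attr tir_attr = {
 *		.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT,
 *		.indirect_table = rqt_id,
 *		.transport_domain = tdn,
 *	};
 *
 *	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
 *	tir = mlx5_devx_cmd_create_tir(ctx, &tir_attr);
 */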

/* TIR attributes structure, used by TIR modify. */
struct mlx5_devx_modify_tir_attr {
	uint32_t tirn:24;
	uint64_t modify_bitmask;
	struct mlx5_devx_tir_attr tir;
};

/* RQT attributes structure, used by RQT operations. */
struct mlx5_devx_rqt_attr {
	uint8_t rq_type;
	uint32_t rqt_max_size:16;
	uint32_t rqt_actual_size:16;
	uint32_t rq_list[];
};
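
/*
 * rq_list[] is a flexible array member, so the attribute structure has to be
 * allocated with room for rqt_actual_size entries. Illustrative sketch, with
 * rq_n and rq_ids[] as placeholders:
 *
 *	struct mlx5_devx_rqt_attr *rqt_attr;
 *	struct mlx5_devx_obj *rqt;
 *	unsigned int i;
 *
 *	rqt_attr = calloc(1, sizeof(*rqt_attr) + rq_n * sizeof(uint32_t));
 *	if (rqt_attr == NULL)
 *		return -ENOMEM;
 *	rqt_attr->rqt_max_size = rq_n;
 *	rqt_attr->rqt_actual_size = rq_n;
 *	for (i = 0; i < rq_n; ++i)
 *		rqt_attr->rq_list[i] = rq_ids[i];
 *	rqt = mlx5_devx_cmd_create_rqt(ctx, rqt_attr);
 *	free(rqt_attr);
 */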

/* TIS attributes structure. */
struct mlx5_devx_tis_attr {
	uint32_t strict_lag_tx_port_affinity:1;
	uint32_t tls_en:1;
	uint32_t lag_tx_port_affinity:4;
	uint32_t prio:4;
	uint32_t transport_domain:24;
};

/* SQ attributes structure, used by SQ create operation. */
struct mlx5_devx_create_sq_attr {
	uint32_t rlky:1;
	uint32_t cd_master:1;
	uint32_t fre:1;
	uint32_t flush_in_error_en:1;
	uint32_t allow_multi_pkt_send_wqe:1;
	uint32_t min_wqe_inline_mode:3;
	uint32_t state:4;
	uint32_t reg_umr:1;
	uint32_t allow_swp:1;
	uint32_t hairpin:1;
	uint32_t non_wire:1;
	uint32_t static_sq_wq:1;
	uint32_t ts_format:2;
	uint32_t user_index:24;
	uint32_t cqn:24;
	uint32_t packet_pacing_rate_limit_index:16;
	uint32_t tis_lst_sz:16;
	uint32_t tis_num:24;
	struct mlx5_devx_wq_attr wq_attr;
};

/* SQ attributes structure, used by SQ modify operation. */
struct mlx5_devx_modify_sq_attr {
	uint32_t sq_state:4;
	uint32_t state:4;
	uint32_t hairpin_peer_rq:24;
	uint32_t hairpin_peer_vhca:16;
};
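
/*
 * Illustrative sketch of creating an SQ bound to a TIS and a CQ and moving it
 * to ready. cqn, tis_id and the WQ attributes (filled as in the RQ example
 * above) are placeholders; the SQC state values are defined in mlx5_prm.h.
 *
 *	struct mlx5_devx_create_sq_attr sq_attr = {
 *		.flush_in_error_en = 1,
 *		.cqn = cqn,
 *		.tis_lst_sz = 1,
 *		.tis_num = tis_id,
 *		.wq_attr = { ... },
 *	};
 *	struct mlx5_devx_modify_sq_attr mod = {
 *		.sq_state = MLX5_SQC_STATE_RST,
 *		.state = MLX5_SQC_STATE_RDY,
 *	};
 *	struct mlx5_devx_obj *sq = mlx5_devx_cmd_create_sq(ctx, &sq_attr);
 *
 *	if (sq == NULL || mlx5_devx_cmd_modify_sq(sq, &mod) != 0)
 *		return -rte_errno;
 */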

/* CQ attributes structure, used by CQ operations. */
struct mlx5_devx_cq_attr {
	uint32_t q_umem_valid:1;
	uint32_t db_umem_valid:1;
	uint32_t use_first_only:1;
	uint32_t overrun_ignore:1;
	uint32_t cqe_comp_en:1;
	uint32_t mini_cqe_res_format:2;
	uint32_t mini_cqe_res_format_ext:2;
	uint32_t log_cq_size:5;
	uint32_t log_page_size:5;
	uint32_t uar_page_id;
	uint32_t q_umem_id;
	uint64_t q_umem_offset;
	uint32_t db_umem_id;
	uint64_t db_umem_offset;
	uint32_t eqn;
	uint64_t db_addr;
};
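
/*
 * Illustrative sketch of a CQ created on user memory; eqn, uar_page_id and
 * the umem holding the CQE ring and doorbell record are placeholders for
 * values obtained through the usual DevX queries and registrations.
 *
 *	struct mlx5_devx_cq_attr cq_attr = {
 *		.q_umem_valid = 1,
 *		.db_umem_valid = 1,
 *		.log_cq_size = log_cqe_n,
 *		.log_page_size = log_page_sz,
 *		.uar_page_id = uar_page_id,
 *		.q_umem_id = umem_id,
 *		.db_umem_id = umem_id,
 *		.db_umem_offset = db_offset,
 *		.eqn = eqn,
 *	};
 *	struct mlx5_devx_obj *cq = mlx5_devx_cmd_create_cq(ctx, &cq_attr);
 */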

/* Virtq attributes structure, used by VIRTQ operations. */
struct mlx5_devx_virtq_attr {
	uint16_t hw_available_index;
	uint16_t hw_used_index;
	uint16_t q_size;
	uint32_t pd:24;
	uint32_t virtio_version_1_0:1;
	uint32_t tso_ipv4:1;
	uint32_t tso_ipv6:1;
	uint32_t tx_csum:1;
	uint32_t rx_csum:1;
	uint32_t event_mode:3;
	uint32_t state:4;
	uint32_t hw_latency_mode:2;
	uint32_t hw_max_latency_us:12;
	uint32_t hw_max_pending_comp:16;
	uint32_t dirty_bitmap_dump_enable:1;
	uint32_t dirty_bitmap_mkey;
	uint32_t dirty_bitmap_size;
	uint32_t mkey;
	uint32_t qp_id;
	uint32_t queue_index;
	uint32_t tis_id;
	uint32_t counters_obj_id;
	uint64_t dirty_bitmap_addr;
	uint64_t type;
	uint64_t desc_addr;
	uint64_t used_addr;
	uint64_t available_addr;
	struct {
		uint32_t id;
		uint32_t size;
		uint64_t offset;
	} umems[3];
	uint8_t error_type;
};
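
/*
 * Illustrative sketch: read back the hardware available/used indexes of a
 * live virtq, e.g. before suspending it for live migration; virtq_obj is a
 * placeholder for an object returned by mlx5_devx_cmd_create_virtq().
 *
 *	struct mlx5_devx_virtq_attr attr = { 0 };
 *
 *	if (mlx5_devx_cmd_query_virtq(virtq_obj, &attr) != 0)
 *		return -rte_errno;
 *	hw_avail = attr.hw_available_index;
 *	hw_used = attr.hw_used_index;
 */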

struct mlx5_devx_qp_attr {
	uint32_t pd:24;
	uint32_t uar_index:24;
	uint32_t cqn:24;
	uint32_t log_page_size:5;
	uint32_t rq_size:17; /* Must be power of 2. */
	uint32_t log_rq_stride:3;
	uint32_t sq_size:17; /* Must be power of 2. */
	uint32_t ts_format:2;
	uint32_t dbr_umem_valid:1;
	uint32_t dbr_umem_id;
	uint64_t dbr_address;
	uint32_t wq_umem_id;
	uint64_t wq_umem_offset;
};
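
/*
 * Illustrative sketch of creating a DevX QP and stepping it towards RTS with
 * mlx5_devx_cmd_modify_qp_state(); all identifiers are placeholders, and the
 * RST2INIT/INIT2RTR/RTR2RTS opcodes are assumed to be the QP state-modify
 * command opcodes from mlx5_prm.h. remote_qp_id is the peer QP number (the
 * QP's own number for loopback use).
 *
 *	struct mlx5_devx_qp_attr qp_attr = {
 *		.pd = pdn,
 *		.uar_index = uar_page_id,
 *		.cqn = cqn,
 *		.log_page_size = log_page_sz,
 *		.sq_size = sq_desc_n,
 *		.rq_size = 0,
 *		.dbr_umem_valid = 1,
 *		.dbr_umem_id = umem_id,
 *		.wq_umem_id = umem_id,
 *	};
 *	struct mlx5_devx_obj *qp = mlx5_devx_cmd_create_qp(ctx, &qp_attr);
 *
 *	if (qp == NULL ||
 *	    mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RST2INIT_QP,
 *					  remote_qp_id) ||
 *	    mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_INIT2RTR_QP,
 *					  remote_qp_id) ||
 *	    mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RTR2RTS_QP,
 *					  remote_qp_id))
 *		return -rte_errno;
 */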

struct mlx5_devx_virtio_q_couners_attr {
	uint64_t received_desc;
	uint64_t completed_desc;
	uint32_t error_cqes;
	uint32_t bad_desc_errors;
	uint32_t exceed_max_chain;
	uint32_t invalid_buffer;
};

/*
 * Graph flow match sample attributes structure,
 * used by flex parser operations.
 */
struct mlx5_devx_match_sample_attr {
	uint32_t flow_match_sample_en:1;
	uint32_t flow_match_sample_field_offset:16;
	uint32_t flow_match_sample_offset_mode:4;
	uint32_t flow_match_sample_field_offset_mask;
	uint32_t flow_match_sample_field_offset_shift:4;
	uint32_t flow_match_sample_field_base_offset:8;
	uint32_t flow_match_sample_tunnel_mode:3;
	uint32_t flow_match_sample_field_id;
};

/* Graph node arc attributes structure, used by flex parser operations. */
struct mlx5_devx_graph_arc_attr {
	uint32_t compare_condition_value:16;
	uint32_t start_inner_tunnel:1;
	uint32_t arc_parse_graph_node:8;
	uint32_t parse_graph_node_handle;
};

/* Maximal number of samples per graph node. */
#define MLX5_GRAPH_NODE_SAMPLE_NUM 8

/* Maximal number of input/output arcs per graph node. */
#define MLX5_GRAPH_NODE_ARC_NUM 8

/* Parse graph node attributes structure, used by flex parser operations. */
struct mlx5_devx_graph_node_attr {
	uint32_t modify_field_select;
	uint32_t header_length_mode:4;
	uint32_t header_length_base_value:16;
	uint32_t header_length_field_shift:4;
	uint32_t header_length_field_offset:16;
	uint32_t header_length_field_mask;
	struct mlx5_devx_match_sample_attr sample[MLX5_GRAPH_NODE_SAMPLE_NUM];
	uint32_t next_header_field_offset:16;
	uint32_t next_header_field_size:5;
	struct mlx5_devx_graph_arc_attr in[MLX5_GRAPH_NODE_ARC_NUM];
	struct mlx5_devx_graph_arc_attr out[MLX5_GRAPH_NODE_ARC_NUM];
};
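
/*
 * Illustrative sketch of registering a flex parser graph node that samples
 * one field of a custom fixed-length header, then fetching the sample ids
 * used later for matching. The offsets/lengths are placeholders and the
 * fixed header-length mode value is expected to be defined in mlx5_prm.h.
 *
 *	struct mlx5_devx_graph_node_attr node = {
 *		.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED,
 *		.header_length_base_value = 8,
 *	};
 *	uint32_t ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
 *	struct mlx5_devx_obj *flex;
 *
 *	node.sample[0].flow_match_sample_en = 1;
 *	node.sample[0].flow_match_sample_field_offset = 4;
 *	flex = mlx5_devx_cmd_create_flex_parser(ctx, &node);
 *	if (flex == NULL ||
 *	    mlx5_devx_cmd_query_parse_samples(flex, ids, RTE_DIM(ids)) < 0)
 *		return -rte_errno;
 */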

/* mlx5_devx_cmds.c */

__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(void *ctx,
						       uint32_t bulk_sz);
__rte_internal
int mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj);
__rte_internal
int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				     int clear, uint32_t n_counters,
				     uint64_t *pkts, uint64_t *bytes,
				     uint32_t mkey, void *addr,
				     void *cmd_comp,
				     uint64_t async_id);
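
/*
 * Illustrative sketch of a synchronous read of a single flow counter:
 * bulk size 0 allocates one counter and n_counters 0 returns its values
 * directly through pkts/bytes, so the mkey/addr/cmd_comp/async_id arguments
 * used by the asynchronous batch mode stay 0/NULL. "ctx" is a placeholder
 * for the device context.
 *
 *	struct mlx5_devx_obj *dcs = mlx5_devx_cmd_flow_counter_alloc(ctx, 0);
 *	uint64_t pkts, bytes;
 *
 *	if (dcs == NULL)
 *		return -rte_errno;
 *	if (mlx5_devx_cmd_flow_counter_query(dcs, 0, 0, &pkts, &bytes,
 *					     0, NULL, NULL, 0) != 0)
 *		return -rte_errno;
 */
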
__rte_internal
int mlx5_devx_cmd_query_hca_attr(void *ctx,
				 struct mlx5_hca_attr *attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_mkey_create(void *ctx,
					      struct mlx5_devx_mkey_attr *attr);
__rte_internal
int mlx5_devx_get_out_command_status(void *out);
__rte_internal
int mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
				  uint32_t *tis_td);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_rq(void *ctx,
				       struct mlx5_devx_create_rq_attr *rq_attr,
				       int socket);
__rte_internal
int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
			    struct mlx5_devx_modify_rq_attr *rq_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(void *ctx,
					   struct mlx5_devx_tir_attr *tir_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_rqt(void *ctx,
					   struct mlx5_devx_rqt_attr *rqt_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_sq(void *ctx,
				      struct mlx5_devx_create_sq_attr *sq_attr);
__rte_internal
int mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
			    struct mlx5_devx_modify_sq_attr *sq_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_tis(void *ctx,
					   struct mlx5_devx_tis_attr *tis_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_td(void *ctx);
__rte_internal
int mlx5_devx_cmd_flow_dump(void *fdb_domain, void *rx_domain, void *tx_domain,
			    FILE *file);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_cq(void *ctx,
					      struct mlx5_devx_cq_attr *attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_virtq(void *ctx,
					     struct mlx5_devx_virtq_attr *attr);
__rte_internal
int mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
			       struct mlx5_devx_virtq_attr *attr);
__rte_internal
int mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
			      struct mlx5_devx_virtq_attr *attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_qp(void *ctx,
					      struct mlx5_devx_qp_attr *attr);
__rte_internal
int mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp,
				  uint32_t qp_st_mod_op, uint32_t remote_qp_id);
__rte_internal
int mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
			     struct mlx5_devx_rqt_attr *rqt_attr);
__rte_internal
int mlx5_devx_cmd_modify_tir(struct mlx5_devx_obj *tir,
			     struct mlx5_devx_modify_tir_attr *tir_attr);
__rte_internal
int mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
				      uint32_t ids[], uint32_t num);

__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_flex_parser(void *ctx,
					struct mlx5_devx_graph_node_attr *data);

__rte_internal
int mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id,
				uint32_t arg, uint32_t *data, uint32_t dw_cnt);

__rte_internal
struct mlx5_devx_obj *
mlx5_devx_cmd_create_geneve_tlv_option(void *ctx,
		uint16_t class, uint8_t type, uint8_t len);

/**
 * Create virtio queue counters object using DevX API.
 *
 * @param[in] ctx
 *   Device context.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_virtio_q_counters(void *ctx);

/**
 * Query virtio queue counters object using DevX API.
 *
 * @param[in] couners_obj
 *   Pointer to the virtio queue counters DevX object.
 * @param[in,out] attr
 *   Pointer to virtio queue counters attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
__rte_internal
int mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *couners_obj,
				  struct mlx5_devx_virtio_q_couners_attr *attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx,
							    uint32_t pd);

__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_alloc_pd(void *ctx);

__rte_internal
int mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id);

__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_queue_counter_alloc(void *ctx);
__rte_internal
int mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
				      uint32_t *out_of_buffers);
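
/*
 * Illustrative sketch: a global queue counter object can be referenced by
 * RQs (via counter_set_id) and then polled for out-of-buffer drops; "ctx" is
 * a placeholder for the device context.
 *
 *	struct mlx5_devx_obj *qc = mlx5_devx_cmd_queue_counter_alloc(ctx);
 *	uint32_t out_of_buffer = 0;
 *
 *	if (qc == NULL ||
 *	    mlx5_devx_cmd_queue_counter_query(qc, 0, &out_of_buffer) != 0)
 *		return -rte_errno;
 */
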
#endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */