/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2022 Microsoft Corporation
 */

#ifndef __MANA_H__
#define __MANA_H__

#define	PCI_VENDOR_ID_MICROSOFT		0x1414
#define PCI_DEVICE_ID_MICROSOFT_MANA	0x00ba

/* Shared data between primary/secondary processes */
struct mana_shared_data {
	rte_spinlock_t lock;
	int init_done;
	unsigned int primary_cnt;
	unsigned int secondary_cnt;
};

#define MANA_MAX_MTU	9000
#define MIN_RX_BUF_SIZE	1024
#define MANA_MAX_MAC_ADDR 1

#define MANA_DEV_RX_OFFLOAD_SUPPORT ( \
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
		RTE_ETH_RX_OFFLOAD_CHECKSUM | \
		RTE_ETH_RX_OFFLOAD_RSS_HASH)

#define MANA_DEV_TX_OFFLOAD_SUPPORT ( \
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM)

#define INDIRECTION_TABLE_NUM_ELEMENTS 64
#define TOEPLITZ_HASH_KEY_SIZE_IN_BYTES 40
#define MANA_ETH_RSS_SUPPORT ( \
	RTE_ETH_RSS_IPV4 |	     \
	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
	RTE_ETH_RSS_IPV6 |	     \
	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV6_UDP)

#define MIN_BUFFERS_PER_QUEUE		64
#define MAX_RECEIVE_BUFFERS_PER_QUEUE	256
#define MAX_SEND_BUFFERS_PER_QUEUE	256

#define GDMA_WQE_ALIGNMENT_UNIT_SIZE 32

#define COMP_ENTRY_SIZE 64
#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

/* For 32 bit only */
#ifdef RTE_ARCH_32
#define	GDMA_SHORT_DB_INC_MASK 0xffff
#define	GDMA_SHORT_DB_QID_MASK 0xfff

#define GDMA_SHORT_DB_MAX_WQE	(0x10000 / GDMA_WQE_ALIGNMENT_UNIT_SIZE)

#define TX_WQE_SHORT_DB_THRESHOLD			\
	(GDMA_SHORT_DB_MAX_WQE -			\
	(MAX_TX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE))
#define RX_WQE_SHORT_DB_THRESHOLD			\
	(GDMA_SHORT_DB_MAX_WQE -			\
	(MAX_RX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE))
#endif
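
/* For illustration of the arithmetic above: the short doorbell increment
 * field is 16 bits wide (GDMA_SHORT_DB_INC_MASK), so GDMA_SHORT_DB_MAX_WQE
 * = 0x10000 / 32 = 2048 basic units. The thresholds leave headroom for one
 * maximum-size WQE (512 / 32 = 16 units for TX, 256 / 32 = 8 units for RX),
 * presumably so a short doorbell is rung before the increment can overflow.
 */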

/* Values from the GDMA specification document, WQE format description */
#define INLINE_OOB_SMALL_SIZE_IN_BYTES 8
#define INLINE_OOB_LARGE_SIZE_IN_BYTES 24

#define NOT_USING_CLIENT_DATA_UNIT 0

enum tx_packet_format_v2 {
	SHORT_PACKET_FORMAT = 0,
	LONG_PACKET_FORMAT = 1
};

struct transmit_short_oob_v2 {
	enum tx_packet_format_v2 packet_format : 2;
	uint32_t tx_is_outer_ipv4 : 1;
	uint32_t tx_is_outer_ipv6 : 1;
	uint32_t tx_compute_IP_header_checksum : 1;
	uint32_t tx_compute_TCP_checksum : 1;
	uint32_t tx_compute_UDP_checksum : 1;
	uint32_t suppress_tx_CQE_generation : 1;
	uint32_t VCQ_number : 24;
	uint32_t tx_transport_header_offset : 10;
	uint32_t VSQ_frame_num : 14;
	uint32_t short_vport_offset : 8;
};

struct transmit_long_oob_v2 {
	uint32_t tx_is_encapsulated_packet : 1;
	uint32_t tx_inner_is_ipv6 : 1;
	uint32_t tx_inner_TCP_options_present : 1;
	uint32_t inject_vlan_prior_tag : 1;
	uint32_t reserved1 : 12;
	uint32_t priority_code_point : 3;
	uint32_t drop_eligible_indicator : 1;
	uint32_t vlan_identifier : 12;
	uint32_t tx_inner_frame_offset : 10;
	uint32_t tx_inner_IP_header_relative_offset : 6;
	uint32_t long_vport_offset : 12;
	uint32_t reserved3 : 4;
	uint32_t reserved4 : 32;
	uint32_t reserved5 : 32;
};

struct transmit_oob_v2 {
	struct transmit_short_oob_v2 short_oob;
	struct transmit_long_oob_v2 long_oob;
};

enum gdma_queue_types {
	GDMA_QUEUE_TYPE_INVALID  = 0,
	GDMA_QUEUE_SEND,
	GDMA_QUEUE_RECEIVE,
	GDMA_QUEUE_COMPLETION,
	GDMA_QUEUE_EVENT,
	GDMA_QUEUE_TYPE_MAX = 16,
	/* Room for expansion */

	/* This enum can be expanded to add more queue types but
	 * it's expected to be done in a contiguous manner.
	 * Failing that will result in unexpected behavior.
	 */
};

#define WORK_QUEUE_NUMBER_BASE_BITS 10

struct gdma_header {
	/* Size of the entire gdma structure, including the full length of
	 * any struct formed by extending another gdma struct. E.g. if
	 * GDMA_BASE_SPEC extends gdma_header and GDMA_EVENT_QUEUE_SPEC
	 * extends GDMA_BASE_SPEC, then struct_size for GDMA_EVENT_QUEUE_SPEC
	 * is the size of GDMA_EVENT_QUEUE_SPEC, which includes the sizes of
	 * GDMA_BASE_SPEC and gdma_header.
	 * The above example is for illustration only and is not in the code.
	 */
	size_t struct_size;
};
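
/* For illustration (this example is not in the code): a structure embedding
 * gdma_header, such as struct gdma_posted_wqe_info below, would be expected
 * to set struct_size to sizeof(struct gdma_posted_wqe_info).
 */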

/* The following macros are from GDMA SPEC 3.6, "Table 2: CQE data structure"
 * and "Table 4: Event Queue Entry (EQE) data format"
 */
#define GDMA_COMP_DATA_SIZE 0x3C /* Must be a multiple of 4 */
#define GDMA_COMP_DATA_SIZE_IN_UINT32 (GDMA_COMP_DATA_SIZE / 4)

#define COMPLETION_QUEUE_ENTRY_WORK_QUEUE_INDEX 0
#define COMPLETION_QUEUE_ENTRY_WORK_QUEUE_SIZE 24
#define COMPLETION_QUEUE_ENTRY_SEND_WORK_QUEUE_INDEX 24
#define COMPLETION_QUEUE_ENTRY_SEND_WORK_QUEUE_SIZE 1
#define COMPLETION_QUEUE_ENTRY_OWNER_BITS_INDEX 29
#define COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE 3

#define COMPLETION_QUEUE_OWNER_MASK \
	((1 << (COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE)) - 1)

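/* For illustration: given the bit positions above, the owner bits of a CQE
 * can be extracted from the packed word as, e.g.:
 *
 *	owner = (cqe->work_queue_owner_bits >>
 *		 COMPLETION_QUEUE_ENTRY_OWNER_BITS_INDEX) &
 *		COMPLETION_QUEUE_OWNER_MASK;
 *
 * which corresponds to the owner_bits bit-field in the struct below.
 */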
struct gdma_hardware_completion_entry {
	char dma_client_data[GDMA_COMP_DATA_SIZE];
	union {
		uint32_t work_queue_owner_bits;
		struct {
			uint32_t wq_num		: 24;
			uint32_t is_sq		: 1;
			uint32_t reserved	: 4;
			uint32_t owner_bits	: 3;
		};
	};
}; /* HW DATA */

struct gdma_posted_wqe_info {
	struct gdma_header gdma_header;

	/* Size of the written WQE in basic units (32B), filled by the GDMA
	 * core. Use this value to progress the work queue after the WQE is
	 * processed by hardware.
	 */
	uint32_t wqe_size_in_bu;

	/* The offset in the work queue buffer at which the WQE was written,
	 * recorded at the time the WQE is posted. Each unit represents 32B
	 * of buffer space.
	 */
	uint32_t wqe_index;

	/* Unmasked offset in the queue to which the WQE was written.
	 * In 32 byte units.
	 */
	uint32_t unmasked_queue_offset;
};

struct gdma_sgl_element {
	uint64_t address;
	uint32_t memory_key;
	uint32_t size;
};

#define MAX_SGL_ENTRIES_FOR_TRANSMIT 30

struct one_sgl {
	struct gdma_sgl_element gdma_sgl[MAX_SGL_ENTRIES_FOR_TRANSMIT];
};

struct gdma_work_request {
	struct gdma_header gdma_header;
	struct gdma_sgl_element *sgl;
	uint32_t num_sgl_elements;
	uint32_t inline_oob_size_in_bytes;
	void *inline_oob_data;
	uint32_t flags; /* From _gdma_work_request_FLAGS */
	uint32_t client_data_unit; /* For LSO, this is the MTU of the data */
};
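
/* Illustrative sketch only (not code from the driver): posting a
 * single-segment send with a small inline OOB could look roughly like:
 *
 *	struct gdma_sgl_element sge = {
 *		.address = buf_iova,
 *		.memory_key = lkey,
 *		.size = buf_len,
 *	};
 *	struct gdma_work_request req = {
 *		.sgl = &sge,
 *		.num_sgl_elements = 1,
 *		.inline_oob_data = &oob,
 *		.inline_oob_size_in_bytes = INLINE_OOB_SMALL_SIZE_IN_BYTES,
 *		.client_data_unit = NOT_USING_CLIENT_DATA_UNIT,
 *	};
 *	gdma_post_work_request(&txq->gdma_sq, &req, &wqe_size_in_bu);
 *
 * buf_iova, lkey, buf_len and oob are placeholders here.
 */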

enum mana_cqe_type {
	CQE_INVALID                     = 0,

	CQE_RX_OKAY                     = 1,
	CQE_RX_COALESCED_4              = 2,
	CQE_RX_OBJECT_FENCE             = 3,
	CQE_RX_TRUNCATED                = 4,

	CQE_TX_OKAY                     = 32,
	CQE_TX_SA_DROP                  = 33,
	CQE_TX_MTU_DROP                 = 34,
	CQE_TX_INVALID_OOB              = 35,
	CQE_TX_INVALID_ETH_TYPE         = 36,
	CQE_TX_HDR_PROCESSING_ERROR     = 37,
	CQE_TX_VF_DISABLED              = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE   = 39,
	CQE_TX_VPORT_DISABLED           = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION   = 41,
};

struct mana_cqe_header {
	uint32_t cqe_type    : 6;
	uint32_t client_type : 2;
	uint32_t vendor_err  : 24;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	uint32_t tx_data_offset;

	uint32_t tx_sgl_offset       : 5;
	uint32_t tx_wqe_offset       : 27;

	uint32_t reserved[12];
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4          RTE_BIT32(0)
#define NDIS_HASH_TCP_IPV4      RTE_BIT32(1)
#define NDIS_HASH_UDP_IPV4      RTE_BIT32(2)
#define NDIS_HASH_IPV6          RTE_BIT32(3)
#define NDIS_HASH_TCP_IPV6      RTE_BIT32(4)
#define NDIS_HASH_UDP_IPV6      RTE_BIT32(5)
#define NDIS_HASH_IPV6_EX       RTE_BIT32(6)
#define NDIS_HASH_TCP_IPV6_EX   RTE_BIT32(7)
#define NDIS_HASH_UDP_IPV6_EX   RTE_BIT32(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
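
/* The nine NDIS hash-type bits above line up with the 9-bit rx_hash_type
 * field in struct mana_rx_comp_oob below; MANA_HASH_L3/MANA_HASH_L4 are
 * presumably used to decide which mbuf RSS hash flags to report.
 */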

struct mana_rx_comp_per_packet_info {
	uint32_t packet_length	: 16;
	uint32_t reserved0	: 16;
	uint32_t reserved1;
	uint32_t packet_hash;
}; /* HW DATA */
#define RX_COM_OOB_NUM_PACKETINFO_SEGMENTS 4

struct mana_rx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	uint32_t rx_vlan_id				: 12;
	uint32_t rx_vlan_tag_present			: 1;
	uint32_t rx_outer_ip_header_checksum_succeeded	: 1;
	uint32_t rx_outer_ip_header_checksum_failed	: 1;
	uint32_t reserved				: 1;
	uint32_t rx_hash_type				: 9;
	uint32_t rx_ip_header_checksum_succeeded	: 1;
	uint32_t rx_ip_header_checksum_failed		: 1;
	uint32_t rx_tcp_checksum_succeeded		: 1;
	uint32_t rx_tcp_checksum_failed			: 1;
	uint32_t rx_udp_checksum_succeeded		: 1;
	uint32_t rx_udp_checksum_failed			: 1;
	uint32_t reserved1				: 1;
	struct mana_rx_comp_per_packet_info
		packet_info[RX_COM_OOB_NUM_PACKETINFO_SEGMENTS];
	uint32_t received_wqe_offset;
}; /* HW DATA */

struct gdma_wqe_dma_oob {
	uint32_t reserved:24;
	uint32_t last_v_bytes:8;
	union {
		uint32_t flags;
		struct {
			uint32_t num_sgl_entries:8;
			uint32_t inline_client_oob_size_in_dwords:3;
			uint32_t client_oob_in_sgl:1;
			uint32_t consume_credit:1;
			uint32_t fence:1;
			uint32_t reserved1:2;
			uint32_t client_data_unit:14;
			uint32_t check_sn:1;
			uint32_t sgl_direct:1;
		};
	};
};
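
/* Note (arithmetic only): inline_client_oob_size_in_dwords is presumably in
 * 4-byte dwords, so INLINE_OOB_SMALL_SIZE_IN_BYTES (8) corresponds to
 * 2 dwords and INLINE_OOB_LARGE_SIZE_IN_BYTES (24) to 6 dwords.
 */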

struct mana_mr_cache {
	uint32_t	lkey;
	uintptr_t	addr;
	size_t		len;
	void		*verb_obj;
};

#define MANA_MR_BTREE_CACHE_N	512
struct mana_mr_btree {
	uint16_t	len;	/* Used entries */
	uint16_t	size;	/* Total entries */
	int		overflow;
	int		socket;
	struct mana_mr_cache *table;
};

struct mana_process_priv {
	void *db_page;
};

struct mana_priv {
	struct rte_eth_dev_data *dev_data;
	struct mana_process_priv *process_priv;
	int num_queues;

	/* DPDK port */
	uint16_t port_id;

	/* IB device port */
	uint8_t dev_port;

	uint8_t vlan_strip;

	struct ibv_context *ib_ctx;
	struct ibv_pd *ib_pd;
	struct ibv_pd *ib_parent_pd;
	struct ibv_rwq_ind_table *ind_table;
	struct ibv_qp *rwq_qp;
	void *db_page;
	struct rte_eth_rss_conf rss_conf;
	struct rte_intr_handle *intr_handle;
	int max_rx_queues;
	int max_tx_queues;
	int max_rx_desc;
	int max_tx_desc;
	int max_send_sge;
	int max_recv_sge;
	int max_mr;
	uint64_t max_mr_size;
	struct mana_mr_btree mr_btree;
	rte_spinlock_t	mr_btree_lock;
};

struct mana_txq_desc {
	struct rte_mbuf *pkt;
	uint32_t wqe_size_in_bu;
	bool suppress_tx_cqe;
};

struct mana_rxq_desc {
	struct rte_mbuf *pkt;
	uint32_t wqe_size_in_bu;
};

struct mana_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
	uint64_t nombuf;
};

struct mana_gdma_queue {
	void *buffer;
	uint32_t count;	/* in entries */
	uint32_t size;	/* in bytes */
	uint32_t id;
	uint32_t head;
	uint32_t tail;
};

#define MANA_MR_BTREE_PER_QUEUE_N	64

struct gdma_comp {
	/* Filled by GDMA core */
	char *cqe_data;
};

struct mana_txq {
	struct mana_priv *priv;
	uint32_t num_desc;
	struct ibv_cq *cq;
	struct ibv_qp *qp;

	struct mana_gdma_queue gdma_sq;
	struct mana_gdma_queue gdma_cq;
	struct gdma_comp *gdma_comp_buf;

	uint32_t tx_vp_offset;

	/* For storing pending requests */
	struct mana_txq_desc *desc_ring;

	/* New pending requests are queued at desc_ring_head; completions are
	 * pulled off at desc_ring_tail.
	 */
	uint32_t desc_ring_head, desc_ring_tail, desc_ring_len;

	struct mana_mr_btree mr_btree;
	struct mana_stats stats;
	unsigned int socket;
};

struct mana_rxq {
	struct mana_priv *priv;
	uint32_t num_desc;
	struct rte_mempool *mp;
	struct ibv_cq *cq;
	struct ibv_comp_channel *channel;
	struct ibv_wq *wq;

	/* For storing pending requests */
	struct mana_rxq_desc *desc_ring;

	/* New pending requests are queued at desc_ring_head; completions are
	 * pulled off at desc_ring_tail.
	 */
	uint32_t desc_ring_head, desc_ring_tail;

#ifdef RTE_ARCH_32
	/* WQE increment count accumulated between short doorbell rings */
	uint32_t wqe_cnt_to_short_db;
#endif

	struct mana_gdma_queue gdma_rq;
	struct mana_gdma_queue gdma_cq;
	struct gdma_comp *gdma_comp_buf;

	uint32_t comp_buf_len;
	uint32_t comp_buf_idx;
	uint32_t backlog_idx;

	struct mana_stats stats;
	struct mana_mr_btree mr_btree;

	unsigned int socket;
};

extern int mana_logtype_driver;
#define RTE_LOGTYPE_MANA_DRIVER mana_logtype_driver
extern int mana_logtype_init;
#define RTE_LOGTYPE_MANA_INIT mana_logtype_init

#define DRV_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, MANA_DRIVER, "%s(): ", __func__, __VA_ARGS__)

#define DP_LOG(level, ...) \
	RTE_LOG_DP_LINE(level, MANA_DRIVER, __VA_ARGS__)

#define PMD_INIT_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, MANA_INIT, "%s(): ", __func__, __VA_ARGS__)

#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
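
/* Illustrative usage: DRV_LOG(ERR, "port %u failed", port_id) logs through
 * the MANA_DRIVER logtype with the calling function name prefixed.
 * DP_LOG() is the data-path variant, compiled out when data-path logging
 * is disabled; PMD_INIT_LOG() uses the MANA_INIT logtype.
 */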

#ifdef RTE_ARCH_32
int mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type,
			     uint32_t queue_id, uint32_t tail_incr,
			     uint8_t arm);
#else
int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
		       uint32_t queue_id, uint32_t tail, uint8_t arm);
#endif
int mana_rq_ring_doorbell(struct mana_rxq *rxq);

int gdma_post_work_request(struct mana_gdma_queue *queue,
			   struct gdma_work_request *work_req,
			   uint32_t *wqe_size_in_bu);
uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue);

uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **rx_pkts,
		       uint16_t pkts_n);
uint16_t mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts,
		       uint16_t pkts_n);

uint16_t mana_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);

uint16_t mana_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);

uint32_t gdma_poll_completion_queue(struct mana_gdma_queue *cq,
				    struct gdma_comp *gdma_comp,
				    uint32_t max_comp);
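
/* Illustrative sketch only: a completion loop might harvest up to max_comp
 * CQEs at a time, e.g.:
 *
 *	n = gdma_poll_completion_queue(&rxq->gdma_cq, rxq->gdma_comp_buf,
 *				       rxq->num_desc);
 *	for (i = 0; i < n; i++)
 *		... parse rxq->gdma_comp_buf[i].cqe_data ...
 *
 * assuming the return value is the number of completions retrieved.
 */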

int mana_start_rx_queues(struct rte_eth_dev *dev);
int mana_start_tx_queues(struct rte_eth_dev *dev);

int mana_stop_rx_queues(struct rte_eth_dev *dev);
int mana_stop_tx_queues(struct rte_eth_dev *dev);

struct mana_mr_cache *mana_alloc_pmd_mr(struct mana_mr_btree *local_tree,
					struct mana_priv *priv,
					struct rte_mbuf *mbuf);
int mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
		    struct rte_mempool *pool);
void mana_remove_all_mr(struct mana_priv *priv);
void mana_del_pmd_mr(struct mana_mr_cache *mr);

void mana_mempool_chunk_cb(struct rte_mempool *mp, void *opaque,
			   struct rte_mempool_memhdr *memhdr, unsigned int idx);

int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx,
			 uintptr_t addr, size_t len,
			 struct mana_mr_cache **cache);
int mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry);
int mana_mr_btree_init(struct mana_mr_btree *bt, int n, int socket);
void mana_mr_btree_free(struct mana_mr_btree *bt);
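
/* Illustrative data-path usage (a sketch, assuming mana_alloc_pmd_mr()
 * consults the per-queue btree first and registers a new MR on a miss):
 *
 *	struct mana_mr_cache *mr;
 *
 *	mr = mana_alloc_pmd_mr(&txq->mr_btree, txq->priv, mbuf);
 *	if (mr != NULL)
 *		sge.memory_key = mr->lkey;
 *
 * mbuf and sge are placeholders for the packet being transmitted.
 */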

/** Request timeout for IPC. */
#define MANA_MP_REQ_TIMEOUT_SEC 5

/* Request types for IPC. */
enum mana_mp_req_type {
	MANA_MP_REQ_VERBS_CMD_FD = 1,
	MANA_MP_REQ_CREATE_MR,
	MANA_MP_REQ_START_RXTX,
	MANA_MP_REQ_STOP_RXTX,
};

/* Parameters for IPC. */
struct mana_mp_param {
	enum mana_mp_req_type type;
	int port_id;
	int result;

	/* MANA_MP_REQ_CREATE_MR */
	uintptr_t addr;
	uint32_t len;
};

#define MANA_MP_NAME	"net_mana_mp"
int mana_mp_init_primary(void);
int mana_mp_init_secondary(void);
void mana_mp_uninit_primary(void);
void mana_mp_uninit_secondary(void);
int mana_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev);
int mana_mp_req_mr_create(struct mana_priv *priv, uintptr_t addr, uint32_t len);

void mana_mp_req_on_rxtx(struct rte_eth_dev *dev, enum mana_mp_req_type type);

void *mana_alloc_verbs_buf(size_t size, void *data);
void mana_free_verbs_buf(void *ptr, void *data __rte_unused);

int mana_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mana_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mana_fd_set_non_blocking(int fd);

#endif