/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2022 Microsoft Corporation
 */

#ifndef __MANA_H__
#define __MANA_H__

#define	PCI_VENDOR_ID_MICROSOFT		0x1414
#define PCI_DEVICE_ID_MICROSOFT_MANA	0x00ba

/* Shared data between primary/secondary processes */
struct mana_shared_data {
	rte_spinlock_t lock;
	int init_done;
	unsigned int primary_cnt;
	unsigned int secondary_cnt;
};
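
/* Note (descriptive, not authoritative): this structure is expected to live
 * in memory visible to both primary and secondary processes (e.g. an
 * rte_memzone set up at probe time), with the spinlock guarding init_done
 * and the per-process counters used by the IPC helpers declared at the end
 * of this header.
 */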

#define MANA_MAX_MTU	9000
#define MIN_RX_BUF_SIZE	1024
#define MANA_MAX_MAC_ADDR 1

#define MANA_DEV_RX_OFFLOAD_SUPPORT ( \
		RTE_ETH_RX_OFFLOAD_CHECKSUM | \
		RTE_ETH_RX_OFFLOAD_RSS_HASH)

#define MANA_DEV_TX_OFFLOAD_SUPPORT ( \
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM)

#define INDIRECTION_TABLE_NUM_ELEMENTS 64
#define TOEPLITZ_HASH_KEY_SIZE_IN_BYTES 40
#define MANA_ETH_RSS_SUPPORT ( \
	RTE_ETH_RSS_IPV4 |	     \
	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
	RTE_ETH_RSS_IPV6 |	     \
	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV6_UDP)

#define MIN_BUFFERS_PER_QUEUE		64
#define MAX_RECEIVE_BUFFERS_PER_QUEUE	256
#define MAX_SEND_BUFFERS_PER_QUEUE	256

#define GDMA_WQE_ALIGNMENT_UNIT_SIZE 32

#define COMP_ENTRY_SIZE 64
#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

/* For 32-bit builds only */
#ifdef RTE_ARCH_32
#define	GDMA_SHORT_DB_INC_MASK 0xffff
#define	GDMA_SHORT_DB_QID_MASK 0xfff

#define GDMA_SHORT_DB_MAX_WQE	(0x10000 / GDMA_WQE_ALIGNMENT_UNIT_SIZE)

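/* Derivation note (descriptive): a short doorbell carries a 16-bit tail
 * increment (GDMA_SHORT_DB_INC_MASK), so it can signal at most 0x10000
 * bytes of new work, i.e. GDMA_SHORT_DB_MAX_WQE basic 32-byte units.
 * The thresholds below keep one maximum-sized WQE of headroom so the
 * doorbell is rung before that limit can be reached.
 */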
#define TX_WQE_SHORT_DB_THRESHOLD			\
	(GDMA_SHORT_DB_MAX_WQE -			\
	(MAX_TX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE))
#define RX_WQE_SHORT_DB_THRESHOLD			\
	(GDMA_SHORT_DB_MAX_WQE -			\
	(MAX_RX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE))
#endif

/* Values from the GDMA specification document, WQE format description */
#define INLINE_OOB_SMALL_SIZE_IN_BYTES 8
#define INLINE_OOB_LARGE_SIZE_IN_BYTES 24

#define NOT_USING_CLIENT_DATA_UNIT 0

enum tx_packet_format_v2 {
	SHORT_PACKET_FORMAT = 0,
	LONG_PACKET_FORMAT = 1
};
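
/* Note (descriptive): SHORT_PACKET_FORMAT carries only the 8-byte short OOB
 * (INLINE_OOB_SMALL_SIZE_IN_BYTES); LONG_PACKET_FORMAT carries the full
 * 24-byte struct transmit_oob_v2 below (INLINE_OOB_LARGE_SIZE_IN_BYTES),
 * i.e. the short OOB followed by the long OOB.
 */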

struct transmit_short_oob_v2 {
	enum tx_packet_format_v2 packet_format : 2;
	uint32_t tx_is_outer_ipv4 : 1;
	uint32_t tx_is_outer_ipv6 : 1;
	uint32_t tx_compute_IP_header_checksum : 1;
	uint32_t tx_compute_TCP_checksum : 1;
	uint32_t tx_compute_UDP_checksum : 1;
	uint32_t suppress_tx_CQE_generation : 1;
	uint32_t VCQ_number : 24;
	uint32_t tx_transport_header_offset : 10;
	uint32_t VSQ_frame_num : 14;
	uint32_t short_vport_offset : 8;
};

struct transmit_long_oob_v2 {
	uint32_t tx_is_encapsulated_packet : 1;
	uint32_t tx_inner_is_ipv6 : 1;
	uint32_t tx_inner_TCP_options_present : 1;
	uint32_t inject_vlan_prior_tag : 1;
	uint32_t reserved1 : 12;
	uint32_t priority_code_point : 3;
	uint32_t drop_eligible_indicator : 1;
	uint32_t vlan_identifier : 12;
	uint32_t tx_inner_frame_offset : 10;
	uint32_t tx_inner_IP_header_relative_offset : 6;
	uint32_t long_vport_offset : 12;
	uint32_t reserved3 : 4;
	uint32_t reserved4 : 32;
	uint32_t reserved5 : 32;
};

struct transmit_oob_v2 {
	struct transmit_short_oob_v2 short_oob;
	struct transmit_long_oob_v2 long_oob;
};

enum gdma_queue_types {
	GDMA_QUEUE_TYPE_INVALID  = 0,
	GDMA_QUEUE_SEND,
	GDMA_QUEUE_RECEIVE,
	GDMA_QUEUE_COMPLETION,
	GDMA_QUEUE_EVENT,
	GDMA_QUEUE_TYPE_MAX = 16,
	/* Room for expansion */

	/* This enum can be expanded to add more queue types, but it is
	 * expected to be done in a contiguous manner.
	 * Failing to do so will result in unexpected behavior.
	 */
};

#define WORK_QUEUE_NUMBER_BASE_BITS 10

struct gdma_header {
	/* Size of the entire gdma structure, including the full length of
	 * any struct formed by extending another gdma struct. For example,
	 * if GDMA_BASE_SPEC extends gdma_header and GDMA_EVENT_QUEUE_SPEC
	 * extends GDMA_BASE_SPEC, then struct_size for GDMA_EVENT_QUEUE_SPEC
	 * is the size of GDMA_EVENT_QUEUE_SPEC, which includes the size of
	 * GDMA_BASE_SPEC and the size of gdma_header.
	 * The above example is for illustration purposes only and is not in
	 * the code.
	 */
	size_t struct_size;
};

/* The following macros are from GDMA SPEC 3.6, "Table 2: CQE data structure"
 * and "Table 4: Event Queue Entry (EQE) data format"
 */
#define GDMA_COMP_DATA_SIZE 0x3C /* Must be a multiple of 4 */
#define GDMA_COMP_DATA_SIZE_IN_UINT32 (GDMA_COMP_DATA_SIZE / 4)

#define COMPLETION_QUEUE_ENTRY_WORK_QUEUE_INDEX 0
#define COMPLETION_QUEUE_ENTRY_WORK_QUEUE_SIZE 24
#define COMPLETION_QUEUE_ENTRY_SEND_WORK_QUEUE_INDEX 24
#define COMPLETION_QUEUE_ENTRY_SEND_WORK_QUEUE_SIZE 1
#define COMPLETION_QUEUE_ENTRY_OWNER_BITS_INDEX 29
#define COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE 3

#define COMPLETION_QUEUE_OWNER_MASK \
	((1 << (COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE)) - 1)
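
/* Usage sketch (descriptive, not authoritative): the owner_bits of a CQE
 * advance by one, modulo (COMPLETION_QUEUE_OWNER_MASK + 1), each time the
 * completion queue wraps. A consumer compares an entry's owner_bits with
 * the value it expects for the current pass over the queue to tell freshly
 * written completions from stale ones.
 */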

struct gdma_hardware_completion_entry {
	char dma_client_data[GDMA_COMP_DATA_SIZE];
	union {
		uint32_t work_queue_owner_bits;
		struct {
			uint32_t wq_num		: 24;
			uint32_t is_sq		: 1;
			uint32_t reserved	: 4;
			uint32_t owner_bits	: 3;
		};
	};
}; /* HW DATA */
struct gdma_posted_wqe_info {
	struct gdma_header gdma_header;

	/* Size of the written WQE in basic units (32B), filled by the GDMA
	 * core. Use this value to progress the work queue after the WQE is
	 * processed by hardware.
	 */
	uint32_t wqe_size_in_bu;

	/* At the time of writing the WQE to the work queue, the offset in
	 * the work queue buffer where the WQE will be written. Each unit
	 * represents 32B of buffer space.
	 */
	uint32_t wqe_index;

	/* Unmasked offset in the queue to which the WQE was written,
	 * in 32-byte units.
	 */
	uint32_t unmasked_queue_offset;
};

struct gdma_sgl_element {
	uint64_t address;
	uint32_t memory_key;
	uint32_t size;
};

#define MAX_SGL_ENTRIES_FOR_TRANSMIT 30

struct one_sgl {
	struct gdma_sgl_element gdma_sgl[MAX_SGL_ENTRIES_FOR_TRANSMIT];
};

struct gdma_work_request {
	struct gdma_header gdma_header;
	struct gdma_sgl_element *sgl;
	uint32_t num_sgl_elements;
	uint32_t inline_oob_size_in_bytes;
	void *inline_oob_data;
	uint32_t flags; /* From _gdma_work_request_FLAGS */
	uint32_t client_data_unit; /* For LSO, this is the MTU of the data */
};
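
/* Typical TX usage (a sketch; the authoritative flow lives in the datapath
 * code): fill sgl/num_sgl_elements from the mbuf segments, point
 * inline_oob_data at a struct transmit_oob_v2, set inline_oob_size_in_bytes
 * to INLINE_OOB_SMALL_SIZE_IN_BYTES or INLINE_OOB_LARGE_SIZE_IN_BYTES, then
 * call gdma_post_work_request() and ring the send queue doorbell.
 */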

enum mana_cqe_type {
	CQE_INVALID                     = 0,

	CQE_RX_OKAY                     = 1,
	CQE_RX_COALESCED_4              = 2,
	CQE_RX_OBJECT_FENCE             = 3,
	CQE_RX_TRUNCATED                = 4,

	CQE_TX_OKAY                     = 32,
	CQE_TX_SA_DROP                  = 33,
	CQE_TX_MTU_DROP                 = 34,
	CQE_TX_INVALID_OOB              = 35,
	CQE_TX_INVALID_ETH_TYPE         = 36,
	CQE_TX_HDR_PROCESSING_ERROR     = 37,
	CQE_TX_VF_DISABLED              = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE   = 39,
	CQE_TX_VPORT_DISABLED           = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION   = 41,
};

struct mana_cqe_header {
	uint32_t cqe_type    : 6;
	uint32_t client_type : 2;
	uint32_t vendor_err  : 24;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	uint32_t tx_data_offset;

	uint32_t tx_sgl_offset       : 5;
	uint32_t tx_wqe_offset       : 27;

	uint32_t reserved[12];
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4          RTE_BIT32(0)
#define NDIS_HASH_TCP_IPV4      RTE_BIT32(1)
#define NDIS_HASH_UDP_IPV4      RTE_BIT32(2)
#define NDIS_HASH_IPV6          RTE_BIT32(3)
#define NDIS_HASH_TCP_IPV6      RTE_BIT32(4)
#define NDIS_HASH_UDP_IPV6      RTE_BIT32(5)
#define NDIS_HASH_IPV6_EX       RTE_BIT32(6)
#define NDIS_HASH_TCP_IPV6_EX   RTE_BIT32(7)
#define NDIS_HASH_UDP_IPV6_EX   RTE_BIT32(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
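
/* Note (descriptive): rx_hash_type in struct mana_rx_comp_oob below is
 * expected to be a combination of these NDIS hash type bits; MANA_HASH_L3
 * and MANA_HASH_L4 group them by the layer the reported hash was computed
 * over.
 */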

struct mana_rx_comp_per_packet_info {
	uint32_t packet_length	: 16;
	uint32_t reserved0	: 16;
	uint32_t reserved1;
	uint32_t packet_hash;
}; /* HW DATA */
#define RX_COM_OOB_NUM_PACKETINFO_SEGMENTS 4
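/* Note (descriptive): a CQE_RX_COALESCED_4 completion is understood to
 * report up to four packets, one per packet_info[] entry in the OOB below.
 */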

struct mana_rx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	uint32_t rx_vlan_id				: 12;
	uint32_t rx_vlan_tag_present			: 1;
	uint32_t rx_outer_ip_header_checksum_succeeded	: 1;
	uint32_t rx_outer_ip_header_checksum_failed	: 1;
	uint32_t reserved				: 1;
	uint32_t rx_hash_type				: 9;
	uint32_t rx_ip_header_checksum_succeeded	: 1;
	uint32_t rx_ip_header_checksum_failed		: 1;
	uint32_t rx_tcp_checksum_succeeded		: 1;
	uint32_t rx_tcp_checksum_failed			: 1;
	uint32_t rx_udp_checksum_succeeded		: 1;
	uint32_t rx_udp_checksum_failed			: 1;
	uint32_t reserved1				: 1;
	struct mana_rx_comp_per_packet_info
		packet_info[RX_COM_OOB_NUM_PACKETINFO_SEGMENTS];
	uint32_t received_wqe_offset;
}; /* HW DATA */

struct gdma_wqe_dma_oob {
	uint32_t reserved:24;
	uint32_t last_v_bytes:8;
	union {
		uint32_t flags;
		struct {
			uint32_t num_sgl_entries:8;
			uint32_t inline_client_oob_size_in_dwords:3;
			uint32_t client_oob_in_sgl:1;
			uint32_t consume_credit:1;
			uint32_t fence:1;
			uint32_t reserved1:2;
			uint32_t client_data_unit:14;
			uint32_t check_sn:1;
			uint32_t sgl_direct:1;
		};
	};
};

struct mana_mr_cache {
	uint32_t	lkey;
	uintptr_t	addr;
	size_t		len;
	void		*verb_obj;
};

#define MANA_MR_BTREE_CACHE_N	512
struct mana_mr_btree {
	uint16_t	len;	/* Used entries */
	uint16_t	size;	/* Total entries */
	int		overflow;
	int		socket;
	struct mana_mr_cache *table;
};
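
/* Design note (descriptive): the MR btree is a sorted array of registered
 * memory regions searched by virtual address on the datapath to find the
 * lkey covering a given mbuf. Each queue keeps a small private tree
 * (MANA_MR_BTREE_PER_QUEUE_N entries) as a cache in front of the
 * device-level tree in struct mana_priv, which is protected by
 * mr_btree_lock.
 */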

struct mana_process_priv {
	void *db_page;
};

struct mana_priv {
	struct rte_eth_dev_data *dev_data;
	struct mana_process_priv *process_priv;
	int num_queues;

	/* DPDK port */
	uint16_t port_id;

	/* IB device port */
	uint8_t dev_port;

	struct ibv_context *ib_ctx;
	struct ibv_pd *ib_pd;
	struct ibv_pd *ib_parent_pd;
	struct ibv_rwq_ind_table *ind_table;
	struct ibv_qp *rwq_qp;
	void *db_page;
	struct rte_eth_rss_conf rss_conf;
	struct rte_intr_handle *intr_handle;
	int max_rx_queues;
	int max_tx_queues;
	int max_rx_desc;
	int max_tx_desc;
	int max_send_sge;
	int max_recv_sge;
	int max_mr;
	uint64_t max_mr_size;
	struct mana_mr_btree mr_btree;
	rte_spinlock_t	mr_btree_lock;
};

struct mana_txq_desc {
	struct rte_mbuf *pkt;
	uint32_t wqe_size_in_bu;
	bool suppress_tx_cqe;
};

struct mana_rxq_desc {
	struct rte_mbuf *pkt;
	uint32_t wqe_size_in_bu;
};

struct mana_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
	uint64_t nombuf;
};

struct mana_gdma_queue {
	void *buffer;
	uint32_t count;	/* in entries */
	uint32_t size;	/* in bytes */
	uint32_t id;
	uint32_t head;
	uint32_t tail;
};
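
/* Note (descriptive, hedged): head and tail appear to be free-running
 * counters; the offset into 'buffer' is obtained by masking/modulo with the
 * queue size (compare unmasked_queue_offset in struct gdma_posted_wqe_info
 * above).
 */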

#define MANA_MR_BTREE_PER_QUEUE_N	64

struct gdma_comp {
	/* Filled by GDMA core */
	char *cqe_data;
};

struct mana_txq {
	struct mana_priv *priv;
	uint32_t num_desc;
	struct ibv_cq *cq;
	struct ibv_qp *qp;

	struct mana_gdma_queue gdma_sq;
	struct mana_gdma_queue gdma_cq;
	struct gdma_comp *gdma_comp_buf;

	uint32_t tx_vp_offset;

	/* For storing pending requests */
	struct mana_txq_desc *desc_ring;

	/* Pending requests are added at desc_ring_head; completions are
	 * pulled off at desc_ring_tail.
	 */
	uint32_t desc_ring_head, desc_ring_tail, desc_ring_len;

	struct mana_mr_btree mr_btree;
	struct mana_stats stats;
	unsigned int socket;
};

struct mana_rxq {
	struct mana_priv *priv;
	uint32_t num_desc;
	struct rte_mempool *mp;
	struct ibv_cq *cq;
	struct ibv_comp_channel *channel;
	struct ibv_wq *wq;

	/* For storing pending requests */
	struct mana_rxq_desc *desc_ring;

	/* Pending requests are added at desc_ring_head; completions are
	 * pulled off at desc_ring_tail.
	 */
	uint32_t desc_ring_head, desc_ring_tail;

#ifdef RTE_ARCH_32
	/* For storing the WQE increment count between short doorbell rings */
	uint32_t wqe_cnt_to_short_db;
#endif

	struct mana_gdma_queue gdma_rq;
	struct mana_gdma_queue gdma_cq;
	struct gdma_comp *gdma_comp_buf;

	uint32_t comp_buf_len;
	uint32_t comp_buf_idx;
	uint32_t backlog_idx;

	struct mana_stats stats;
	struct mana_mr_btree mr_btree;

	unsigned int socket;
};

extern int mana_logtype_driver;
#define RTE_LOGTYPE_MANA_DRIVER mana_logtype_driver
extern int mana_logtype_init;

#define DRV_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, mana_logtype_driver, "%s(): " fmt "\n", \
		__func__, ## args)

#define DP_LOG(level, fmt, args...) \
	RTE_LOG_DP(level, MANA_DRIVER, fmt "\n", ## args)

#define PMD_INIT_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, mana_logtype_init, "%s(): " fmt "\n",\
		__func__, ## args)

#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")

#ifdef RTE_ARCH_32
int mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type,
			     uint32_t queue_id, uint32_t tail_incr,
			     uint8_t arm);
#else
int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
		       uint32_t queue_id, uint32_t tail, uint8_t arm);
#endif
int mana_rq_ring_doorbell(struct mana_rxq *rxq);

int gdma_post_work_request(struct mana_gdma_queue *queue,
			   struct gdma_work_request *work_req,
			   uint32_t *wqe_size_in_bu);
uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue);

uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **rx_pkts,
		       uint16_t pkts_n);
uint16_t mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts,
		       uint16_t pkts_n);

uint16_t mana_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);

uint16_t mana_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);

uint32_t gdma_poll_completion_queue(struct mana_gdma_queue *cq,
				    struct gdma_comp *gdma_comp,
				    uint32_t max_comp);

int mana_start_rx_queues(struct rte_eth_dev *dev);
int mana_start_tx_queues(struct rte_eth_dev *dev);

int mana_stop_rx_queues(struct rte_eth_dev *dev);
int mana_stop_tx_queues(struct rte_eth_dev *dev);

struct mana_mr_cache *mana_alloc_pmd_mr(struct mana_mr_btree *local_tree,
					struct mana_priv *priv,
					struct rte_mbuf *mbuf);
int mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
		    struct rte_mempool *pool);
void mana_remove_all_mr(struct mana_priv *priv);
void mana_del_pmd_mr(struct mana_mr_cache *mr);

void mana_mempool_chunk_cb(struct rte_mempool *mp, void *opaque,
			   struct rte_mempool_memhdr *memhdr, unsigned int idx);

int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx,
			 uintptr_t addr, size_t len,
			 struct mana_mr_cache **cache);
int mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry);
int mana_mr_btree_init(struct mana_mr_btree *bt, int n, int socket);
void mana_mr_btree_free(struct mana_mr_btree *bt);

/** Request timeout for IPC. */
#define MANA_MP_REQ_TIMEOUT_SEC 5

/* Request types for IPC. */
enum mana_mp_req_type {
	MANA_MP_REQ_VERBS_CMD_FD = 1,
	MANA_MP_REQ_CREATE_MR,
	MANA_MP_REQ_START_RXTX,
	MANA_MP_REQ_STOP_RXTX,
};
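
/* IPC flow (a sketch, not authoritative): a secondary process sends
 * MANA_MP_REQ_VERBS_CMD_FD to obtain the verbs command FD from the primary,
 * and MANA_MP_REQ_CREATE_MR to have the primary register a memory region on
 * its behalf; the primary broadcasts MANA_MP_REQ_START_RXTX and
 * MANA_MP_REQ_STOP_RXTX to toggle the datapath in secondary processes.
 */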

/* Parameters for IPC. */
struct mana_mp_param {
	enum mana_mp_req_type type;
	int port_id;
	int result;

	/* MANA_MP_REQ_CREATE_MR */
	uintptr_t addr;
	uint32_t len;
};

#define MANA_MP_NAME	"net_mana_mp"
int mana_mp_init_primary(void);
int mana_mp_init_secondary(void);
void mana_mp_uninit_primary(void);
void mana_mp_uninit_secondary(void);
int mana_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev);
int mana_mp_req_mr_create(struct mana_priv *priv, uintptr_t addr, uint32_t len);

void mana_mp_req_on_rxtx(struct rte_eth_dev *dev, enum mana_mp_req_type type);

void *mana_alloc_verbs_buf(size_t size, void *data);
void mana_free_verbs_buf(void *ptr, void *data __rte_unused);

int mana_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mana_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mana_fd_set_non_blocking(int fd);

#endif