/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2022 Microsoft Corporation
 */
#include <ethdev_driver.h>

#include <infiniband/verbs.h>
#include <infiniband/manadv.h>

#include "mana.h"

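/* Default Toeplitz RSS hash key, overridden below by priv->rss_conf.rss_key
 * when the application provides its own key.
 */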
static uint8_t mana_rss_hash_key_default[TOEPLITZ_HASH_KEY_SIZE_IN_BYTES] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

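/*
 * Ring the RX queue doorbell to notify hardware of newly posted WQEs.
 * A secondary process uses its own mapping of the doorbell page.
 */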
int
mana_rq_ring_doorbell(struct mana_rxq *rxq)
{
	struct mana_priv *priv = rxq->priv;
	int ret;
	void *db_page = priv->db_page;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[priv->dev_data->port_id];
		struct mana_process_priv *process_priv = dev->process_private;

		db_page = process_priv->db_page;
	}

	/* The hardware spec requires the software client to set wqe_cnt to 0
	 * for Receive Queues.
	 */
#ifdef RTE_ARCH_32
	ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_RECEIVE,
			 rxq->gdma_rq.id,
			 rxq->wqe_cnt_to_short_db *
				GDMA_WQE_ALIGNMENT_UNIT_SIZE,
			 0);
#else
	ret = mana_ring_doorbell(db_page, GDMA_QUEUE_RECEIVE,
			 rxq->gdma_rq.id,
			 rxq->gdma_rq.head * GDMA_WQE_ALIGNMENT_UNIT_SIZE,
			 0);
#endif

	if (ret)
		DP_LOG(ERR, "failed to ring RX doorbell ret %d", ret);

	return ret;
}

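/*
 * Allocate an mbuf, build a single-SGE receive work request for its data
 * buffer, post it to the receive queue and record the mbuf in the descriptor
 * ring for completion processing.
 */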
static int
mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
{
	struct rte_mbuf *mbuf = NULL;
	struct gdma_sgl_element sgl[1];
	struct gdma_work_request request;
	uint32_t wqe_size_in_bu;
	struct mana_priv *priv = rxq->priv;
	int ret;
	struct mana_mr_cache *mr;

	mbuf = rte_pktmbuf_alloc(rxq->mp);
	if (!mbuf) {
		rxq->stats.nombuf++;
		return -ENOMEM;
	}

	mr = mana_find_pmd_mr(&rxq->mr_btree, priv, mbuf);
	if (!mr) {
		DP_LOG(ERR, "failed to register RX MR");
		rte_pktmbuf_free(mbuf);
		return -ENOMEM;
	}

	request.gdma_header.struct_size = sizeof(request);

	sgl[0].address = rte_cpu_to_le_64(rte_pktmbuf_mtod(mbuf, uint64_t));
	sgl[0].memory_key = mr->lkey;
	sgl[0].size =
		rte_pktmbuf_data_room_size(rxq->mp) -
		RTE_PKTMBUF_HEADROOM;

	request.sgl = sgl;
	request.num_sgl_elements = 1;
	request.inline_oob_data = NULL;
	request.inline_oob_size_in_bytes = 0;
	request.flags = 0;
	request.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;

	ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_size_in_bu);
	if (!ret) {
		struct mana_rxq_desc *desc =
			&rxq->desc_ring[rxq->desc_ring_head];

		/* update queue for tracking pending packets */
		desc->pkt = mbuf;
		desc->wqe_size_in_bu = wqe_size_in_bu;
#ifdef RTE_ARCH_32
		rxq->wqe_cnt_to_short_db += wqe_size_in_bu;
#endif
		rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
	} else {
		DP_LOG(DEBUG, "failed to post recv ret %d", ret);
		/* The mbuf was not queued to hardware; free it to avoid a leak */
		rte_pktmbuf_free(mbuf);
		return ret;
	}

	return 0;
}

/*
 * Post work requests for a Rx queue.
 */
static int
mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq)
{
	int ret;
	uint32_t i;

#ifdef RTE_ARCH_32
	rxq->wqe_cnt_to_short_db = 0;
#endif
	for (i = 0; i < rxq->num_desc; i++) {
		ret = mana_alloc_and_post_rx_wqe(rxq);
		if (ret) {
			DP_LOG(ERR, "failed to post RX ret = %d", ret);
			return ret;
		}

#ifdef RTE_ARCH_32
		if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) {
			mana_rq_ring_doorbell(rxq);
			rxq->wqe_cnt_to_short_db = 0;
		}
#endif
	}

	mana_rq_ring_doorbell(rxq);

	return ret;
}

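/*
 * Release the Rx resources: the RSS QP and indirection table first, then each
 * queue's WQ, CQ and completion channel, and finally any mbufs still posted
 * to the hardware.
 */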
int
mana_stop_rx_queues(struct rte_eth_dev *dev)
{
	struct mana_priv *priv = dev->data->dev_private;
	int ret, i;

	for (i = 0; i < priv->num_queues; i++)
		if (dev->data->rx_queue_state[i] == RTE_ETH_QUEUE_STATE_STOPPED)
			return -EINVAL;

	if (priv->rwq_qp) {
		ret = ibv_destroy_qp(priv->rwq_qp);
		if (ret)
			DRV_LOG(ERR, "rx_queue destroy_qp failed %d", ret);
		priv->rwq_qp = NULL;
	}

	if (priv->ind_table) {
		ret = ibv_destroy_rwq_ind_table(priv->ind_table);
		if (ret)
			DRV_LOG(ERR, "destroy rwq ind table failed %d", ret);
		priv->ind_table = NULL;
	}

	for (i = 0; i < priv->num_queues; i++) {
		struct mana_rxq *rxq = dev->data->rx_queues[i];

		if (rxq->wq) {
			ret = ibv_destroy_wq(rxq->wq);
			if (ret)
				DRV_LOG(ERR,
					"rx_queue destroy_wq failed %d", ret);
			rxq->wq = NULL;
		}

		if (rxq->cq) {
			ret = ibv_destroy_cq(rxq->cq);
			if (ret)
				DRV_LOG(ERR,
					"rx_queue destroy_cq failed %d", ret);
			rxq->cq = NULL;

			if (rxq->channel) {
				ret = ibv_destroy_comp_channel(rxq->channel);
				if (ret)
					DRV_LOG(ERR, "failed destroy comp %d",
						ret);
				rxq->channel = NULL;
			}
		}

		/* Drain and free posted WQEs */
		while (rxq->desc_ring_tail != rxq->desc_ring_head) {
			struct mana_rxq_desc *desc =
				&rxq->desc_ring[rxq->desc_ring_tail];

			rte_pktmbuf_free(desc->pkt);

			rxq->desc_ring_tail =
				(rxq->desc_ring_tail + 1) % rxq->num_desc;
		}
		rxq->desc_ring_head = 0;
		rxq->desc_ring_tail = 0;

		memset(&rxq->gdma_rq, 0, sizeof(rxq->gdma_rq));
		memset(&rxq->gdma_cq, 0, sizeof(rxq->gdma_cq));

		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

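/*
 * Create the Rx data path objects: a completion channel (when Rx interrupts
 * are enabled), a CQ and WQ per queue, the RSS indirection table and hash QP,
 * then post the initial receive WQEs.
 */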
int
mana_start_rx_queues(struct rte_eth_dev *dev)
{
	struct mana_priv *priv = dev->data->dev_private;
	int ret, i;
	struct ibv_wq *ind_tbl[priv->num_queues];

	DRV_LOG(INFO, "start rx queues");

	for (i = 0; i < priv->num_queues; i++)
		if (dev->data->rx_queue_state[i] == RTE_ETH_QUEUE_STATE_STARTED)
			return -EINVAL;

	for (i = 0; i < priv->num_queues; i++) {
		struct mana_rxq *rxq = dev->data->rx_queues[i];
		struct ibv_wq_init_attr wq_attr = {};

		manadv_set_context_attr(priv->ib_ctx,
			MANADV_CTX_ATTR_BUF_ALLOCATORS,
			(void *)((uintptr_t)&(struct manadv_ctx_allocators){
				.alloc = &mana_alloc_verbs_buf,
				.free = &mana_free_verbs_buf,
				.data = (void *)(uintptr_t)rxq->socket,
			}));

		if (dev->data->dev_conf.intr_conf.rxq) {
			rxq->channel = ibv_create_comp_channel(priv->ib_ctx);
			if (!rxq->channel) {
				ret = -errno;
				DRV_LOG(ERR, "Queue %d comp channel failed", i);
				goto fail;
			}

			ret = mana_fd_set_non_blocking(rxq->channel->fd);
			if (ret) {
				DRV_LOG(ERR, "Failed to set comp non-blocking");
				goto fail;
			}
		}

		rxq->cq = ibv_create_cq(priv->ib_ctx, rxq->num_desc,
					NULL, rxq->channel,
					rxq->channel ? i : 0);
		if (!rxq->cq) {
			ret = -errno;
			DRV_LOG(ERR, "failed to create rx cq queue %d", i);
			goto fail;
		}

		wq_attr.wq_type = IBV_WQT_RQ;
		wq_attr.max_wr = rxq->num_desc;
		wq_attr.max_sge = 1;
		wq_attr.pd = priv->ib_parent_pd;
		wq_attr.cq = rxq->cq;

		rxq->wq = ibv_create_wq(priv->ib_ctx, &wq_attr);
		if (!rxq->wq) {
			ret = -errno;
			DRV_LOG(ERR, "failed to create rx wq %d", i);
			goto fail;
		}

		ind_tbl[i] = rxq->wq;
	}

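	/* Create the RSS indirection table spanning all Rx WQs */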
	struct ibv_rwq_ind_table_init_attr ind_table_attr = {
		.log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
		.ind_tbl = ind_tbl,
		.comp_mask = 0,
	};

	priv->ind_table = ibv_create_rwq_ind_table(priv->ib_ctx,
						   &ind_table_attr);
	if (!priv->ind_table) {
		ret = -errno;
		DRV_LOG(ERR, "failed to create ind_table ret %d", ret);
		goto fail;
	}

	DRV_LOG(INFO, "ind_table handle %d num %d",
		priv->ind_table->ind_tbl_handle,
		priv->ind_table->ind_tbl_num);

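	/* Create a RAW_PACKET QP doing Toeplitz RSS over the indirection table */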
	struct ibv_qp_init_attr_ex qp_attr_ex = {
		.comp_mask = IBV_QP_INIT_ATTR_PD |
			     IBV_QP_INIT_ATTR_RX_HASH |
			     IBV_QP_INIT_ATTR_IND_TABLE,
		.qp_type = IBV_QPT_RAW_PACKET,
		.pd = priv->ib_parent_pd,
		.rwq_ind_tbl = priv->ind_table,
		.rx_hash_conf = {
			.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
			.rx_hash_key_len = TOEPLITZ_HASH_KEY_SIZE_IN_BYTES,
			.rx_hash_key = mana_rss_hash_key_default,
			.rx_hash_fields_mask =
				IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
		},
	};

	/* Override the default RSS key if the application has set one */
	if (priv->rss_conf.rss_key_len && priv->rss_conf.rss_key)
		qp_attr_ex.rx_hash_conf.rx_hash_key =
			priv->rss_conf.rss_key;

	/* Override the default RSS hash fields if the application has set them */
	if (priv->rss_conf.rss_hf) {
		qp_attr_ex.rx_hash_conf.rx_hash_fields_mask = 0;

		if (priv->rss_conf.rss_hf & RTE_ETH_RSS_IPV4)
			qp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=
				IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;

		if (priv->rss_conf.rss_hf & RTE_ETH_RSS_IPV6)
			qp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=
				IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6;

		if (priv->rss_conf.rss_hf &
		    (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
			qp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=
				IBV_RX_HASH_SRC_PORT_TCP |
				IBV_RX_HASH_DST_PORT_TCP;

		if (priv->rss_conf.rss_hf &
		    (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
			qp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=
				IBV_RX_HASH_SRC_PORT_UDP |
				IBV_RX_HASH_DST_PORT_UDP;
	}

	priv->rwq_qp = ibv_create_qp_ex(priv->ib_ctx, &qp_attr_ex);
	if (!priv->rwq_qp) {
		ret = -errno;
		DRV_LOG(ERR, "rx ibv_create_qp_ex failed");
		goto fail;
	}

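	/* Retrieve the underlying GDMA CQ/WQ and doorbell information for each
	 * Rx queue through the MANA direct verbs (manadv) interface.
	 */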
	for (i = 0; i < priv->num_queues; i++) {
		struct mana_rxq *rxq = dev->data->rx_queues[i];
		struct manadv_obj obj = {};
		struct manadv_cq dv_cq;
		struct manadv_rwq dv_wq;

		obj.cq.in = rxq->cq;
		obj.cq.out = &dv_cq;
		obj.rwq.in = rxq->wq;
		obj.rwq.out = &dv_wq;
		ret = manadv_init_obj(&obj, MANADV_OBJ_CQ | MANADV_OBJ_RWQ);
		if (ret) {
			DRV_LOG(ERR, "manadv_init_obj failed ret %d", ret);
			goto fail;
		}

		rxq->gdma_cq.buffer = obj.cq.out->buf;
		rxq->gdma_cq.count = obj.cq.out->count;
		rxq->gdma_cq.size = rxq->gdma_cq.count * COMP_ENTRY_SIZE;
		rxq->gdma_cq.id = obj.cq.out->cq_id;

		/* CQ head starts with count */
		rxq->gdma_cq.head = rxq->gdma_cq.count;

		DRV_LOG(INFO, "rxq cq id %u buf %p count %u size %u",
			rxq->gdma_cq.id, rxq->gdma_cq.buffer,
			rxq->gdma_cq.count, rxq->gdma_cq.size);

		priv->db_page = obj.rwq.out->db_page;

		rxq->gdma_rq.buffer = obj.rwq.out->buf;
		rxq->gdma_rq.count = obj.rwq.out->count;
		rxq->gdma_rq.size = obj.rwq.out->size;
		rxq->gdma_rq.id = obj.rwq.out->wq_id;

		DRV_LOG(INFO, "rxq rq id %u buf %p count %u size %u",
			rxq->gdma_rq.id, rxq->gdma_rq.buffer,
			rxq->gdma_rq.count, rxq->gdma_rq.size);

		rxq->comp_buf_len = 0;
		rxq->comp_buf_idx = 0;
		rxq->backlog_idx = 0;
	}

	for (i = 0; i < priv->num_queues; i++) {
		ret = mana_alloc_and_post_rx_wqes(dev->data->rx_queues[i]);
		if (ret)
			goto fail;
	}

	for (i = 0; i < priv->num_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail:
	mana_stop_rx_queues(dev);
	return ret;
}

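/*
 * Burst receive: drain pending completions (a coalesced CQE can describe up
 * to RX_COM_OOB_NUM_PACKETINFO_SEGMENTS packets), hand the mbufs to the
 * application, repost a fresh WQE for each consumed descriptor and ring the
 * RQ doorbell at the end (with intermediate short doorbells on 32-bit).
 */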
uint16_t
mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	uint16_t pkt_received = 0;
	uint16_t wqe_posted = 0;
	struct mana_rxq *rxq = dpdk_rxq;
	struct mana_priv *priv = rxq->priv;
	struct rte_mbuf *mbuf;
	int ret;
	uint32_t pkt_idx = rxq->backlog_idx;
	uint32_t pkt_len;
	uint32_t i;
	int polled = 0;

#ifdef RTE_ARCH_32
	rxq->wqe_cnt_to_short_db = 0;
#endif

repoll:
	/* Poll for new completions if there is no backlog from the previous call */
	if (rxq->comp_buf_idx == rxq->comp_buf_len) {
		RTE_ASSERT(!pkt_idx);
		rxq->comp_buf_len =
			gdma_poll_completion_queue(&rxq->gdma_cq,
						   rxq->gdma_comp_buf, pkts_n);
		rxq->comp_buf_idx = 0;
		polled = 1;
	}

	i = rxq->comp_buf_idx;
	while (i < rxq->comp_buf_len) {
		struct mana_rx_comp_oob *oob = (struct mana_rx_comp_oob *)
			rxq->gdma_comp_buf[i].cqe_data;
		struct mana_rxq_desc *desc =
			&rxq->desc_ring[rxq->desc_ring_tail];

		mbuf = desc->pkt;

		switch (oob->cqe_hdr.cqe_type) {
		case CQE_RX_OKAY:
		case CQE_RX_COALESCED_4:
			/* Proceed to process mbuf */
			break;

		case CQE_RX_TRUNCATED:
		default:
			DP_LOG(ERR, "RX CQE type %d client %d vendor %d",
			       oob->cqe_hdr.cqe_type, oob->cqe_hdr.client_type,
			       oob->cqe_hdr.vendor_err);

			rxq->stats.errors++;
			rte_pktmbuf_free(mbuf);

			i++;
			goto drop;
		}

		DP_LOG(DEBUG, "mana_rx_comp_oob type %d rxq %p",
		       oob->cqe_hdr.cqe_type, rxq);

		pkt_len = oob->packet_info[pkt_idx].packet_length;
		if (!pkt_len) {
			/* Move on to the next completion */
			pkt_idx = 0;
			i++;
			continue;
		}

		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->next = NULL;
		mbuf->data_len = pkt_len;
		mbuf->pkt_len = pkt_len;
		mbuf->port = priv->port_id;

		if (oob->rx_ip_header_checksum_succeeded)
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

		if (oob->rx_ip_header_checksum_failed)
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;

		if (oob->rx_outer_ip_header_checksum_failed)
			mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;

		if (oob->rx_tcp_checksum_succeeded ||
		    oob->rx_udp_checksum_succeeded)
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

		if (oob->rx_tcp_checksum_failed ||
		    oob->rx_udp_checksum_failed)
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;

		if (oob->rx_hash_type == MANA_HASH_L3 ||
		    oob->rx_hash_type == MANA_HASH_L4) {
			mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
			mbuf->hash.rss = oob->packet_info[pkt_idx].packet_hash;
		}

		pkts[pkt_received++] = mbuf;
		rxq->stats.packets++;
		rxq->stats.bytes += mbuf->data_len;

		pkt_idx++;
		/* Move on to the next completion once all its packets are processed */
		if (pkt_idx >= RX_COM_OOB_NUM_PACKETINFO_SEGMENTS) {
			pkt_idx = 0;
			i++;
		}

drop:
		rxq->desc_ring_tail++;
		if (rxq->desc_ring_tail >= rxq->num_desc)
			rxq->desc_ring_tail = 0;

		rxq->gdma_rq.tail += desc->wqe_size_in_bu;

		/* Consume this request and post another request */
		ret = mana_alloc_and_post_rx_wqe(rxq);
		if (ret) {
			DP_LOG(ERR, "failed to post rx wqe ret=%d", ret);
			break;
		}

		wqe_posted++;
		if (pkt_received == pkts_n)
			break;

#ifdef RTE_ARCH_32
		/* Ring a short doorbell if approaching the wqe increment
		 * limit.
		 */
		if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) {
			mana_rq_ring_doorbell(rxq);
			rxq->wqe_cnt_to_short_db = 0;
		}
#endif
	}

	rxq->backlog_idx = pkt_idx;
	rxq->comp_buf_idx = i;

	/* All buffered CQEs have been processed but the burst is not full.
	 * If this call started from a backlog left by the previous rx_burst
	 * and has not polled yet, poll the completion queue for new CQEs.
	 */
	if (pkt_received < pkts_n && !polled) {
		polled = 1;
		goto repoll;
	}

	if (wqe_posted)
		mana_rq_ring_doorbell(rxq);

	return pkt_received;
}

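/*
 * Arm or disarm the Rx completion queue so hardware signals an event on the
 * completion channel. Not supported in 32-bit mode.
 */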
#ifdef RTE_ARCH_32
static int
mana_arm_cq(struct mana_rxq *rxq __rte_unused, uint8_t arm __rte_unused)
{
	DP_LOG(ERR, "Arming CQ is not supported on 32 bit");

	return -ENODEV;
}
#else
static int
mana_arm_cq(struct mana_rxq *rxq, uint8_t arm)
{
	struct mana_priv *priv = rxq->priv;
	uint32_t head = rxq->gdma_cq.head %
		(rxq->gdma_cq.count << COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE);

	DP_LOG(DEBUG, "Ringing completion queue ID %u head %u arm %d",
	       rxq->gdma_cq.id, head, arm);

	return mana_ring_doorbell(priv->db_page, GDMA_QUEUE_COMPLETION,
				  rxq->gdma_cq.id, head, arm);
}
#endif

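/* Enable Rx interrupts for a queue by arming its completion queue */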
int
mana_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct mana_rxq *rxq = dev->data->rx_queues[rx_queue_id];

	return mana_arm_cq(rxq, 1);
}

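/*
 * Disable Rx interrupts for a queue by draining and acknowledging the pending
 * event on its completion channel.
 */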
int
mana_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct mana_rxq *rxq = dev->data->rx_queues[rx_queue_id];
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;

	ret = ibv_get_cq_event(rxq->channel, &ev_cq, &ev_ctx);
	if (ret)
		ret = errno;
	else if (ev_cq != rxq->cq)
		ret = EINVAL;

	if (ret) {
		if (ret != EAGAIN)
			DP_LOG(ERR, "Can't disable RX intr queue %d",
			       rx_queue_id);
	} else {
		ibv_ack_cq_events(rxq->cq, 1);
	}

	return -ret;
}
634