/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022-2023 Google LLC
 * Copyright (c) 2022-2023 Intel Corporation
 */

#include "gve_ethdev.h"
#include "base/gve_adminq.h"
#include "rte_mbuf_ptype.h"

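/* Refill the buffer queue with newly allocated mbufs once at least
 * free_thresh buffers have been consumed, and notify the device by
 * writing the new buffer queue tail to the doorbell.
 */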
static inline void
gve_rx_refill_dqo(struct gve_rx_queue *rxq)
{
	volatile struct gve_rx_desc_dqo *rx_buf_desc;
	struct rte_mbuf *nmb[rxq->nb_rx_hold];
	uint16_t nb_refill = rxq->nb_rx_hold;
	uint16_t next_avail = rxq->bufq_tail;
	struct rte_eth_dev *dev;
	uint64_t dma_addr;
	int i;

	if (rxq->nb_rx_hold < rxq->free_thresh)
		return;

	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, nb_refill))) {
		rxq->stats.no_mbufs_bulk++;
		rxq->stats.no_mbufs += nb_refill;
		dev = &rte_eth_devices[rxq->port_id];
		dev->data->rx_mbuf_alloc_failed += nb_refill;
		PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
			    rxq->port_id, rxq->queue_id);
		return;
	}

	for (i = 0; i < nb_refill; i++) {
		rx_buf_desc = &rxq->rx_ring[next_avail];
		rxq->sw_ring[next_avail] = nmb[i];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
		rx_buf_desc->header_buf_addr = 0;
		rx_buf_desc->buf_addr = dma_addr;
		next_avail = (next_avail + 1) & (rxq->nb_rx_desc - 1);
	}
	rxq->nb_rx_hold -= nb_refill;
	rte_write32(next_avail, rxq->qrx_tail);

	rxq->bufq_tail = next_avail;
}

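/* Translate the checksum status bits of a DQO completion descriptor
 * into RTE_MBUF_F_RX_*_CKSUM_* offload flags, using the packet type
 * already stored in the mbuf, once the device reports the L3/L4
 * headers as processed.
 */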
static inline void
gve_parse_csum_ol_flags(struct rte_mbuf *rx_mbuf,
	volatile struct gve_rx_compl_desc_dqo *rx_desc)
{
	if (!rx_desc->l3_l4_processed)
		return;

	if (rx_mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
		if (rx_desc->csum_ip_err)
			rx_mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		else
			rx_mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	}

	if (rx_desc->csum_l4_err) {
		rx_mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		return;
	}
	if (rx_mbuf->packet_type & RTE_PTYPE_L4_MASK)
		rx_mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}

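/* Map the hardware packet type reported in the completion descriptor
 * to an rte_mbuf packet_type using the ptype lookup table obtained
 * from the device.
 */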
static inline void
gve_rx_set_mbuf_ptype(struct gve_priv *priv, struct rte_mbuf *rx_mbuf,
		      volatile struct gve_rx_compl_desc_dqo *rx_desc)
{
	struct gve_ptype ptype =
		priv->ptype_lut_dqo->ptypes[rx_desc->packet_type];
	rx_mbuf->packet_type = 0;

	switch (ptype.l3_type) {
	case GVE_L3_TYPE_IPV4:
		rx_mbuf->packet_type |= RTE_PTYPE_L3_IPV4;
		break;
	case GVE_L3_TYPE_IPV6:
		rx_mbuf->packet_type |= RTE_PTYPE_L3_IPV6;
		break;
	default:
		break;
	}

	switch (ptype.l4_type) {
	case GVE_L4_TYPE_TCP:
		rx_mbuf->packet_type |= RTE_PTYPE_L4_TCP;
		break;
	case GVE_L4_TYPE_UDP:
		rx_mbuf->packet_type |= RTE_PTYPE_L4_UDP;
		break;
	case GVE_L4_TYPE_ICMP:
		rx_mbuf->packet_type |= RTE_PTYPE_L4_ICMP;
		break;
	case GVE_L4_TYPE_SCTP:
		rx_mbuf->packet_type |= RTE_PTYPE_L4_SCTP;
		break;
	default:
		break;
	}
}

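/* Receive burst for the DQO queue format: walk the completion ring
 * until the generation bit no longer matches, hand the corresponding
 * buffers to the application and trigger a buffer queue refill.
 */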
uint16_t
gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	volatile struct gve_rx_compl_desc_dqo *rx_compl_ring;
	volatile struct gve_rx_compl_desc_dqo *rx_desc;
	struct gve_rx_queue *rxq;
	struct rte_mbuf *rxm;
	uint16_t rx_id_bufq;
	uint16_t pkt_len;
	uint16_t rx_id;
	uint16_t nb_rx;
	uint64_t bytes;

	bytes = 0;
	nb_rx = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_id_bufq = rxq->next_avail;
	rx_compl_ring = rxq->compl_ring;

	while (nb_rx < nb_pkts) {
		rx_desc = &rx_compl_ring[rx_id];

		/* check status */
		if (rx_desc->generation != rxq->cur_gen_bit)
			break;

		rte_io_rmb();

		rx_id++;
		if (rx_id == rxq->nb_rx_desc) {
			rx_id = 0;
			rxq->cur_gen_bit ^= 1;
		}

		rxm = rxq->sw_ring[rx_id_bufq];
		rx_id_bufq++;
		if (rx_id_bufq == rxq->nb_rx_desc)
			rx_id_bufq = 0;
		rxq->nb_rx_hold++;

		if (unlikely(rx_desc->rx_error)) {
			/* Drop the errored packet, but still consume the
			 * completion and its buffer so that the rings keep
			 * advancing; bailing out without advancing would
			 * re-examine the same descriptor forever.
			 */
			rxq->stats.errors++;
			rte_pktmbuf_free_seg(rxm);
			continue;
		}

		pkt_len = rx_desc->packet_len;

		rxm->pkt_len = pkt_len;
		rxm->data_len = pkt_len;
		rxm->port = rxq->port_id;
		gve_rx_set_mbuf_ptype(rxq->hw, rxm, rx_desc);
		rxm->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
		gve_parse_csum_ol_flags(rxm, rx_desc);
		rxm->hash.rss = rte_le_to_cpu_32(rx_desc->hash);

		rx_pkts[nb_rx++] = rxm;
		bytes += pkt_len;
	}

	/* Always commit the ring positions: errored completions advance
	 * them even when no packet is returned to the caller.
	 */
	rxq->rx_tail = rx_id;
	rxq->next_avail = rx_id_bufq;

	if (nb_rx > 0) {
		rxq->stats.packets += nb_rx;
		rxq->stats.bytes += bytes;
	}
	gve_rx_refill_dqo(rxq);

	return nb_rx;
}

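/* Free every mbuf still referenced by the software ring. */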
static inline void
gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq)
{
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i]) {
			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
			rxq->sw_ring[i] = NULL;
		}
	}

	rxq->nb_avail = rxq->nb_rx_desc;
}

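/* Release all resources of an Rx queue: mbufs, software ring,
 * descriptor ring memzones and the queue structure itself.
 */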
void
gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
{
	struct gve_rx_queue *q = dev->data->rx_queues[qid];

	if (q == NULL)
		return;

	gve_release_rxq_mbufs_dqo(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->compl_ring_mz);
	rte_memzone_free(q->mz);
	rte_memzone_free(q->qres_mz);
	q->qres = NULL;
	rte_free(q);
}

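/* Zero both descriptor rings and the software ring and reset the
 * queue indices to their initial state.
 */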
static void
gve_reset_rxq_dqo(struct gve_rx_queue *rxq)
{
	struct rte_mbuf **sw_ring;
	uint32_t size, i;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "pointer to rxq is NULL");
		return;
	}

	size = rxq->nb_rx_desc * sizeof(struct gve_rx_desc_dqo);
	for (i = 0; i < size; i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	size = rxq->nb_rx_desc * sizeof(struct gve_rx_compl_desc_dqo);
	for (i = 0; i < size; i++)
		((volatile char *)rxq->compl_ring)[i] = 0;

	sw_ring = rxq->sw_ring;
	for (i = 0; i < rxq->nb_rx_desc; i++)
		sw_ring[i] = NULL;

	rxq->bufq_tail = 0;
	rxq->next_avail = 0;
	rxq->nb_rx_hold = rxq->nb_rx_desc - 1;

	rxq->rx_tail = 0;
	rxq->cur_gen_bit = 1;
}

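/* Set up an Rx queue in the DQO format: allocate the queue structure,
 * the software ring, the buffer and completion descriptor rings and
 * the queue resources, then reset the queue state.
 */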
int
gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *conf,
		       struct rte_mempool *pool)
{
	struct gve_priv *hw = dev->data->dev_private;
	const struct rte_memzone *mz;
	struct gve_rx_queue *rxq;
	uint16_t free_thresh;
	uint32_t mbuf_len;
	int err = 0;

	/* Free memory if needed */
	if (dev->data->rx_queues[queue_id]) {
		gve_rx_queue_release_dqo(dev, queue_id);
		dev->data->rx_queues[queue_id] = NULL;
	}

	/* Allocate the RX queue data structure. */
	rxq = rte_zmalloc_socket("gve rxq",
				 sizeof(struct gve_rx_queue),
				 RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for rx queue structure");
		return -ENOMEM;
	}

	/* check free_thresh here */
	free_thresh = conf->rx_free_thresh ?
			conf->rx_free_thresh : GVE_DEFAULT_RX_FREE_THRESH;
	if (free_thresh >= nb_desc) {
		PMD_DRV_LOG(ERR, "rx_free_thresh (%u) must be less than nb_desc (%u).",
			    free_thresh, nb_desc);
		err = -EINVAL;
		goto free_rxq;
	}

	rxq->nb_rx_desc = nb_desc;
	rxq->free_thresh = free_thresh;
	rxq->queue_id = queue_id;
	rxq->port_id = dev->data->port_id;
	rxq->ntfy_id = hw->num_ntfy_blks / 2 + queue_id;

	rxq->mpool = pool;
	rxq->hw = hw;
	rxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)];

	mbuf_len =
		rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_buf_len =
		RTE_MIN((uint16_t)GVE_RX_MAX_BUF_SIZE_DQO,
			RTE_ALIGN_FLOOR(mbuf_len, GVE_RX_BUF_ALIGN_DQO));

	/* Allocate software ring */
	rxq->sw_ring = rte_zmalloc_socket("gve rx sw ring",
					  nb_desc * sizeof(struct rte_mbuf *),
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW RX ring");
		err = -ENOMEM;
		goto free_rxq;
	}

	/* Allocate RX buffer queue */
	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
				      nb_desc * sizeof(struct gve_rx_desc_dqo),
				      PAGE_SIZE, socket_id);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue");
		err = -ENOMEM;
		goto free_rxq_sw_ring;
	}
	rxq->rx_ring = (struct gve_rx_desc_dqo *)mz->addr;
	rxq->rx_ring_phys_addr = mz->iova;
	rxq->mz = mz;

	/* Allocate RX completion queue */
	mz = rte_eth_dma_zone_reserve(dev, "compl_ring", queue_id,
				      nb_desc * sizeof(struct gve_rx_compl_desc_dqo),
				      PAGE_SIZE, socket_id);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX completion queue");
		err = -ENOMEM;
		goto free_rxq_mz;
	}
	/* Zero all the descriptors in the ring */
	memset(mz->addr, 0, nb_desc * sizeof(struct gve_rx_compl_desc_dqo));
	rxq->compl_ring = (struct gve_rx_compl_desc_dqo *)mz->addr;
	rxq->compl_ring_phys_addr = mz->iova;
	rxq->compl_ring_mz = mz;

	mz = rte_eth_dma_zone_reserve(dev, "rxq_res", queue_id,
				      sizeof(struct gve_queue_resources),
				      PAGE_SIZE, socket_id);
	if (mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX resource");
		err = -ENOMEM;
		goto free_rxq_cq_mz;
	}
	rxq->qres = (struct gve_queue_resources *)mz->addr;
	rxq->qres_mz = mz;

	gve_reset_rxq_dqo(rxq);

	dev->data->rx_queues[queue_id] = rxq;

	return 0;

free_rxq_cq_mz:
	rte_memzone_free(rxq->compl_ring_mz);
free_rxq_mz:
	rte_memzone_free(rxq->mz);
free_rxq_sw_ring:
	rte_free(rxq->sw_ring);
free_rxq:
	rte_free(rxq);
	return err;
}

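/* Populate the buffer queue with nb_rx_desc - 1 mbufs, fill in their
 * DMA addresses and buffer ids, and post them by writing the initial
 * tail to the doorbell.
 */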
static int
gve_rxq_mbufs_alloc_dqo(struct gve_rx_queue *rxq)
{
	struct rte_mbuf *nmb;
	uint16_t rx_mask;
	uint16_t i;
	int diag;

	rx_mask = rxq->nb_rx_desc - 1;
	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0],
				      rx_mask);
	if (diag < 0) {
		rxq->stats.no_mbufs_bulk++;
		for (i = 0; i < rx_mask; i++) {
			nmb = rte_pktmbuf_alloc(rxq->mpool);
			if (!nmb)
				break;
			rxq->sw_ring[i] = nmb;
		}
		if (i < rxq->nb_rx_desc - 1) {
			rxq->stats.no_mbufs += rx_mask - i;
			return -ENOMEM;
		}
	}

	for (i = 0; i < rx_mask; i++) {
		nmb = rxq->sw_ring[i];
		rxq->rx_ring[i].buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
		rxq->rx_ring[i].buf_id = rte_cpu_to_le_16(i);
	}
	rxq->rx_ring[rx_mask].buf_id = rte_cpu_to_le_16(rx_mask);

	rxq->nb_rx_hold = 0;
	rxq->bufq_tail = rx_mask;

	rte_write32(rxq->bufq_tail, rxq->qrx_tail);

	return 0;
}

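/* Start an Rx queue: resolve the doorbell address, mask the queue
 * interrupt, populate the buffer queue and mark the queue started.
 */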
int
gve_rx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct gve_priv *hw = dev->data->dev_private;
	struct gve_rx_queue *rxq;
	int ret;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	rxq->qrx_tail = &hw->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];

	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);

	ret = gve_rxq_mbufs_alloc_dqo(rxq);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to alloc Rx queue mbuf");
		return ret;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

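/* Stop an Rx queue: free its mbufs, reset its rings and mark the
 * queue stopped.
 */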
int
gve_rx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct gve_rx_queue *rxq;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];
	gve_release_rxq_mbufs_dqo(rxq);
	gve_reset_rxq_dqo(rxq);

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

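/* Destroy all Rx queues through the admin queue, then stop each one. */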
void
gve_stop_rx_queues_dqo(struct rte_eth_dev *dev)
{
	struct gve_priv *hw = dev->data->dev_private;
	uint16_t i;
	int err;

	err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
	if (err != 0)
		PMD_DRV_LOG(WARNING, "Failed to destroy Rx queues");

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (gve_rx_queue_stop_dqo(dev, i) != 0)
			PMD_DRV_LOG(WARNING, "Failed to stop Rx queue %d", i);
}

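/* Install the DQO receive burst function on the ethdev. */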
void
gve_set_rx_function_dqo(struct rte_eth_dev *dev)
{
	dev->rx_pkt_burst = gve_rx_burst_dqo;
}