/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>

#include "ionic.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_ethdev.h"
#include "ionic_rxtx.h"
#include "ionic_logs.h"

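/*
 * Free any mbuf segments stashed in the first 'free_idx' slots of the
 * array, then zero the first 'zero_idx' slots so they can be reused.
 */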
static void
ionic_empty_array(void **array, uint32_t free_idx, uint32_t zero_idx)
{
	uint32_t i;

	for (i = 0; i < free_idx; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * zero_idx);
}

static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;
	uint32_t info_len = q->num_descs * q->num_segs;

	ionic_empty_array(q->info, info_len, info_len);
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	uint32_t info_len = q->num_descs * q->num_segs;

	/*
	 * Walk the full info array so that the cleanup includes any
	 * fragments that were left dangling for later reuse
	 */
	ionic_empty_array(q->info, info_len, info_len);

	ionic_empty_array((void **)rxq->mbs, rxq->mb_idx,
			IONIC_MBUF_BULK_ALLOC);
	rxq->mb_idx = 0;
}

/*********************************************************************
 *
 * TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	ionic_dev_tx_queue_stop_firsthalf(dev, tx_queue_id);
	ionic_dev_tx_queue_stop_secondhalf(dev, tx_queue_id);

	return 0;
}

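/*
 * Queue stop is split into two halves: the first half posts the queue
 * deinit command without waiting, which lets callers stop several queues
 * before waiting on any of them; the second half waits for the command to
 * complete, frees the posted mbufs, and collects queue stats.
 */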
void __rte_cold
ionic_dev_tx_queue_stop_firsthalf(struct rte_eth_dev *dev,
		uint16_t tx_queue_id)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_txq_deinit_nowait(txq);
}

void __rte_cold
ionic_dev_tx_queue_stop_secondhalf(struct rte_eth_dev *dev,
		uint16_t tx_queue_id)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];

	ionic_adminq_wait(lif, &txq->admin_ctx);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	ionic_lif_txq_stats(txq);
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	if (tx_conf->tx_free_thresh > nb_desc) {
		IONIC_PRINT(ERR,
			"tx_free_thresh cannot be greater than nb_desc (%u)",
			nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
	if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txq->flags |= IONIC_QCQ_F_FAST_FREE;

	txq->free_thresh =
		tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
		nb_desc - IONIC_DEF_TXRX_BURST;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	int err;

	err = ionic_dev_tx_queue_start_firsthalf(dev, tx_queue_id);
	if (err)
		return err;

	return ionic_dev_tx_queue_start_secondhalf(dev, tx_queue_id);
}

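/*
 * Queue start mirrors the two-phase stop: the first half posts the queue
 * init command without waiting, and the second half waits for completion
 * before marking the queue as started.
 */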
int __rte_cold
ionic_dev_tx_queue_start_firsthalf(struct rte_eth_dev *dev,
		uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	return ionic_lif_txq_init_nowait(txq);
}

int __rte_cold
ionic_dev_tx_queue_start_secondhalf(struct rte_eth_dev *dev,
		uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = dev->data->tx_queue_state;
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	err = ionic_adminq_wait(lif, &txq->admin_ctx);
	if (err)
		return err;

	ionic_lif_txq_init_done(txq);

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

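/*
 * Preload the TCP checksum field as required by the TSO engine (see the
 * comment in ionic_tx_tso()). The _inner_ variant below does the same for
 * the inner headers of an encapsulated packet.
 */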
static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

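/*
 * Post one TSO descriptor: encode the command word and the length, VLAN,
 * header-length and MSS fields, and on the final descriptor of the chain
 * stash the mbuf segment pointers in the info array so that completion
 * processing can free them.
 */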
static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

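/* Return the next TX descriptor to fill, along with its SG element list */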
static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

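/*
 * Enqueue a TSO packet: compute the header length (including the outer
 * headers when the packet is encapsulated), preload the TCP checksum,
 * then slice the mbuf chain into MSS-sized chunks, filling SG elements
 * when the queue supports scatter-gather.
 */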
int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge = 0;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	bool use_sgl = !!(txq->flags & IONIC_QCQ_F_SG);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	desc = ionic_tx_tso_next(txq, &elem);
	txm_seg = txm;
	start = true;
	seglen = hdrlen + mss;

	/* Walk the chain of mbufs */
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		/* Split the mbuf data up into multiple descriptors */
		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0 && use_sgl) {
				/* Fill previous descriptor's SGE */
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				/* Fill new descriptor's data field */
				len = RTE_MIN(seglen, left);
				frag_left = seglen - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;

			/* Pack the next mbuf's data into this descriptor's SG list */
			if (txm_seg->next != NULL && frag_left > 0 && use_sgl)
				break;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
			seglen = mss;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

/*********************************************************************
 *
 * TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |	\
	RTE_MBUF_F_TX_IPV6 |	\
	RTE_MBUF_F_TX_VLAN |	\
	RTE_MBUF_F_TX_IP_CKSUM |	\
	RTE_MBUF_F_TX_TCP_SEG |	\
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

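/*
 * Validate a burst of mbufs before transmit: reject chains with more
 * segments than the firmware allows and offload flags the hardware does
 * not support. Returns the number of packets that passed validation.
 */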
uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = -EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = -ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 * RX functions
 *
 **********************************************************************/

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u, max: %u)",
			nb_desc, rx_queue_id,
			IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
			&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;
	rxq->wdog_ms = IONIC_Q_WDOG_MS;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC. When the adapter is able to keep the
	 * CRC, also account for RTE_ETHER_CRC_LEN and subtract it from the
	 * length of all received packets:
	 *   if (eth_dev->data->dev_conf.rxmode.offloads &
	 *       RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *           rxq->crc_len = RTE_ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

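/*
 * Translate the completion checksum flags (all bits below the VLAN flag)
 * into mbuf ol_flags. Flag combinations not listed here map to 0, i.e.
 * checksum state unknown.
 */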
#define IONIC_CSUM_FLAG_MASK	(IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
const alignas(RTE_CACHE_LINE_SIZE) uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK] = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};

/* RTE_PTYPE_UNKNOWN is 0x0 */
const alignas(RTE_CACHE_LINE_SIZE) uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK] = {
	[IONIC_PKT_TYPE_NON_IP] = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};

const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
		size_t *no_of_elements)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
	};

	*no_of_elements = RTE_DIM(ptypes);
	return ptypes;
}

/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 *     Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int err;

	err = ionic_dev_rx_queue_start_firsthalf(dev, rx_queue_id);
	if (err)
		return err;

	return ionic_dev_rx_queue_start_secondhalf(dev, rx_queue_id);
}

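/*
 * First half: recompute the frame size and per-descriptor segment count,
 * write the one-time descriptor fields, and post the queue init command
 * without waiting. The second half below waits for the command, fills the
 * ring with mbufs, and marks the queue as started.
 */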
int __rte_cold
ionic_dev_rx_queue_start_firsthalf(struct rte_eth_dev *dev,
		uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	return ionic_lif_rxq_init_nowait(rxq);
}

int __rte_cold
ionic_dev_rx_queue_start_secondhalf(struct rte_eth_dev *dev,
		uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = dev->data->rx_queue_state;
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	err = ionic_adminq_wait(lif, &rxq->admin_ctx);
	if (err)
		return err;

	ionic_lif_rxq_init_done(rxq);

	/* Allocate buffers for descriptor ring */
	if (rxq->flags & IONIC_QCQ_F_SG)
		err = ionic_rx_fill_sg(rxq);
	else
		err = ionic_rx_fill(rxq);
	if (err != 0) {
		IONIC_PRINT(ERR, "Could not fill queue %d", rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	ionic_dev_rx_queue_stop_firsthalf(dev, rx_queue_id);
	ionic_dev_rx_queue_stop_secondhalf(dev, rx_queue_id);

	return 0;
}

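/*
 * As with TX, RX queue stop is split in two: the first half posts the
 * queue deinit command without waiting; the second half waits for the
 * command to complete, frees the ring buffers, and collects queue stats.
 */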
void __rte_cold
ionic_dev_rx_queue_stop_firsthalf(struct rte_eth_dev *dev,
		uint16_t rx_queue_id)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_rxq_deinit_nowait(rxq);
}

void __rte_cold
ionic_dev_rx_queue_stop_secondhalf(struct rte_eth_dev *dev,
		uint16_t rx_queue_id)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];

	ionic_adminq_wait(lif, &rxq->admin_ctx);

	/* Free all buffers from descriptor ring */
	ionic_rx_empty(rxq);

	ionic_lif_rxq_stats(rxq);
}

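/*
 * Report the state of the RX descriptor 'offset' entries past the tail.
 * RX completions land at the same ring position as the posted descriptor,
 * so the completion color at that position tells whether the hardware has
 * finished with it.
 */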
int
ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_qcq *qcq = &rxq->qcq;
	volatile struct ionic_rxq_comp *cq_desc;
	uint16_t mask, head, tail, pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_RX_DESC_UNAVAIL;

	/* interested in this absolute position in the rxq */
	pos = (tail + offset) & mask;

	/* rx cq position == rx q position */
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[pos];

	/* expected done color at this position */
	done_color = qcq->cq.done_color != (pos < tail);

	/* has the hw indicated the done color at this position? */
	if (color_match(cq_desc->pkt_type_color, done_color))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

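/*
 * Report the state of the TX descriptor 'offset' entries past the tail.
 * TX completions can be coalesced, so walk the completion ring and use
 * comp_index to decide whether the position of interest is done.
 */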
int
ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_qcq *qcq = &txq->qcq;
	volatile struct ionic_txq_comp *cq_desc;
	uint16_t mask, head, tail, pos, cq_pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_TX_DESC_DONE;

	/* interested in this absolute position in the txq */
	pos = (tail + offset) & mask;

	/* tx cq position != tx q position, need to walk cq */
	cq_pos = qcq->cq.tail_idx;
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[cq_pos];

	/* how far behind is pos from head? */
	offset = (head - pos) & mask;

	/* walk cq descriptors that match the expected done color */
	done_color = qcq->cq.done_color;
	while (color_match(cq_desc->color, done_color)) {
		/* is comp index no further behind than pos? */
		tail = rte_le_to_cpu_16(cq_desc->comp_index);
		if (((head - tail) & mask) <= offset)
			return RTE_ETH_TX_DESC_DONE;

		cq_pos = (cq_pos + 1) & mask;
		cq_desc = qcq->cq.base;
		cq_desc = &cq_desc[cq_pos];

		done_color = done_color != (cq_pos == 0);
	}

	return RTE_ETH_TX_DESC_FULL;
}