/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_net.h>

#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"

#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"

#define	VMXNET3_TX_OFFLOAD_MASK	(RTE_MBUF_F_TX_VLAN | \
		RTE_MBUF_F_TX_IPV6 |     \
		RTE_MBUF_F_TX_IPV4 |     \
		RTE_MBUF_F_TX_L4_MASK |  \
		RTE_MBUF_F_TX_TCP_SEG)

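/*
 * Any Tx offload flag outside VMXNET3_TX_OFFLOAD_MASK is unsupported;
 * vmxnet3_prep_pkts() rejects packets that request one.
 */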
#define	VMXNET3_TX_OFFLOAD_NOTSUP_MASK	\
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)

static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
#endif

#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void
vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
{
	uint32_t avail = 0;

	if (rxq == NULL)
		return;

	PMD_RX_LOG(DEBUG,
		   "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
		   rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
	PMD_RX_LOG(DEBUG,
		   "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
		   (unsigned long)rxq->cmd_ring[0].basePA,
		   (unsigned long)rxq->cmd_ring[1].basePA,
		   (unsigned long)rxq->comp_ring.basePA);

	avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
	PMD_RX_LOG(DEBUG,
		   "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
		   (uint32_t)rxq->cmd_ring[0].size, avail,
		   rxq->comp_ring.next2proc,
		   rxq->cmd_ring[0].size - avail);

	avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
	PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
		   (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
		   rxq->cmd_ring[1].size - avail);

}

static void
vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
{
	uint32_t avail = 0;

	if (txq == NULL)
		return;

	PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
		   txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
	PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
		   (unsigned long)txq->cmd_ring.basePA,
		   (unsigned long)txq->comp_ring.basePA,
		   (unsigned long)txq->data_ring.basePA);

	avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
	PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
		   (uint32_t)txq->cmd_ring.size, avail,
		   txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
}
#endif

static void
vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
{
	while (ring->next2comp != ring->next2fill) {
		/* No need to worry about desc ownership, device is quiesced by now. */
		vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;

		if (buf_info->m) {
			rte_pktmbuf_free(buf_info->m);
			buf_info->m = NULL;
			buf_info->bufPA = 0;
			buf_info->len = 0;
		}
		vmxnet3_cmd_ring_adv_next2comp(ring);
	}
}

static void
vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
{
	uint32_t i;

	for (i = 0; i < ring->size; i++) {
		/* No need to worry about desc ownership, device is quiesced by now. */
		vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];

		if (buf_info->m) {
			rte_pktmbuf_free_seg(buf_info->m);
			buf_info->m = NULL;
			buf_info->bufPA = 0;
			buf_info->len = 0;
		}
		vmxnet3_cmd_ring_adv_next2comp(ring);
	}
}

static void
vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
{
	rte_free(ring->buf_info);
	ring->buf_info = NULL;
}

void
vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	vmxnet3_tx_queue_t *tq = dev->data->tx_queues[qid];

	if (tq != NULL) {
		/* Release mbufs */
		vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
		/* Release the cmd_ring */
		vmxnet3_cmd_ring_release(&tq->cmd_ring);
		/* Release the memzone */
		rte_memzone_free(tq->mz);
		/* Release the queue */
		rte_free(tq);
	}
}

void
vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	int i;
	vmxnet3_rx_queue_t *rq = dev->data->rx_queues[qid];

	if (rq != NULL) {
		/* Release mbufs */
		for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
			vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);

		/* Release both the cmd_rings */
		for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
			vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);

		/* Release the memzone */
		rte_memzone_free(rq->mz);

		/* Release the queue */
		rte_free(rq);
	}
}

static void
vmxnet3_dev_tx_queue_reset(void *txq)
{
	vmxnet3_tx_queue_t *tq = txq;
	struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	struct vmxnet3_data_ring *data_ring = &tq->data_ring;
	int size;

	if (tq != NULL) {
		/* Release the cmd_ring mbufs */
		vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
	}

	/* Tx vmxnet rings structure initialization */
	ring->next2fill = 0;
	ring->next2comp = 0;
	ring->gen = VMXNET3_INIT_GEN;
	comp_ring->next2proc = 0;
	comp_ring->gen = VMXNET3_INIT_GEN;

	size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
	size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
	size += tq->txdata_desc_size * data_ring->size;

	memset(ring->base, 0, size);
}

static void
vmxnet3_dev_rx_queue_reset(void *rxq)
{
	int i;
	vmxnet3_rx_queue_t *rq = rxq;
	struct vmxnet3_hw *hw = rq->hw;
	struct vmxnet3_cmd_ring *ring0, *ring1;
	struct vmxnet3_comp_ring *comp_ring;
	struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
	int size;

	/* Release both the cmd_rings mbufs */
	for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
		vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);

	ring0 = &rq->cmd_ring[0];
	ring1 = &rq->cmd_ring[1];
	comp_ring = &rq->comp_ring;

	/* Rx vmxnet rings structure initialization */
	ring0->next2fill = 0;
	ring1->next2fill = 0;
	ring0->next2comp = 0;
	ring1->next2comp = 0;
	ring0->gen = VMXNET3_INIT_GEN;
	ring1->gen = VMXNET3_INIT_GEN;
	comp_ring->next2proc = 0;
	comp_ring->gen = VMXNET3_INIT_GEN;

	size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
	size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
	if (VMXNET3_VERSION_GE_3(hw) && rq->data_desc_size)
		size += rq->data_desc_size * data_ring->size;

	memset(ring0->base, 0, size);
}

void
vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
{
	unsigned i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

		if (txq != NULL) {
			txq->stopped = TRUE;
			vmxnet3_dev_tx_queue_reset(txq);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq != NULL) {
			rxq->stopped = TRUE;
			vmxnet3_dev_rx_queue_reset(rxq);
		}
	}
}

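/*
 * Release the cmd_ring descriptors used by the packet whose EOP descriptor
 * sits at eop_idx, free the packet's mbuf chain, and return the number of
 * descriptors reclaimed.
 */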
static int
vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	struct rte_mbuf *mbuf;

	/* Release cmd_ring descriptor and free mbuf */
	RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);

	mbuf = txq->cmd_ring.buf_info[eop_idx].m;
	if (mbuf == NULL)
		rte_panic("EOP desc does not point to a valid mbuf");
	rte_pktmbuf_free(mbuf);

	txq->cmd_ring.buf_info[eop_idx].m = NULL;

	while (txq->cmd_ring.next2comp != eop_idx) {
		/* no out-of-order completion */
		RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
		vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
		completed++;
	}

	/* Mark the txd for which tcd was generated as completed */
	vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);

	return completed + 1;
}

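/*
 * Walk the Tx completion ring and reclaim every descriptor the device has
 * finished with, freeing the associated mbufs along the way.
 */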
static void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
	struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
		(comp_ring->base + comp_ring->next2proc);

	while (tcd->gen == comp_ring->gen) {
		completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);

		vmxnet3_comp_ring_adv_next2proc(comp_ring);
		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
						    comp_ring->next2proc);
	}

	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);

	/* To avoid compiler warnings when not in DEBUG mode. */
	RTE_SET_USED(completed);
}

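/*
 * Tx prepare callback: validate segment counts and offload requests for
 * each packet, fix up checksum fields via rte_net_intel_cksum_prepare(),
 * and return the index of the first packet that fails (with rte_errno
 * set), or nb_pkts if all of them are acceptable.
 */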
34609452c07SBruce Richardson uint16_t
347baf3bbaeSKonstantin Ananyev vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
348baf3bbaeSKonstantin Ananyev 	uint16_t nb_pkts)
349baf3bbaeSKonstantin Ananyev {
350baf3bbaeSKonstantin Ananyev 	int32_t ret;
351baf3bbaeSKonstantin Ananyev 	uint32_t i;
352baf3bbaeSKonstantin Ananyev 	uint64_t ol_flags;
353baf3bbaeSKonstantin Ananyev 	struct rte_mbuf *m;
354baf3bbaeSKonstantin Ananyev 
355baf3bbaeSKonstantin Ananyev 	for (i = 0; i != nb_pkts; i++) {
356baf3bbaeSKonstantin Ananyev 		m = tx_pkts[i];
357baf3bbaeSKonstantin Ananyev 		ol_flags = m->ol_flags;
358baf3bbaeSKonstantin Ananyev 
359baf3bbaeSKonstantin Ananyev 		/* Non-TSO packet cannot occupy more than
360baf3bbaeSKonstantin Ananyev 		 * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
361baf3bbaeSKonstantin Ananyev 		 */
362daa02b5cSOlivier Matz 		if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0 &&
363baf3bbaeSKonstantin Ananyev 				m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
364fcae1808SAndrew Rybchenko 			rte_errno = EINVAL;
365baf3bbaeSKonstantin Ananyev 			return i;
366baf3bbaeSKonstantin Ananyev 		}
3678fb18591SRonak Doshi 		/* TSO packet cannot occupy more than
3688fb18591SRonak Doshi 		 * VMXNET3_MAX_TSO_TXD_PER_PKT TX descriptors.
3698fb18591SRonak Doshi 		 */
3708fb18591SRonak Doshi 		if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0 &&
3718fb18591SRonak Doshi 				m->nb_segs > VMXNET3_MAX_TSO_TXD_PER_PKT) {
3728fb18591SRonak Doshi 			rte_errno = EINVAL;
3738fb18591SRonak Doshi 			return i;
3748fb18591SRonak Doshi 		}
375baf3bbaeSKonstantin Ananyev 
376baf3bbaeSKonstantin Ananyev 		/* check that only supported TX offloads are requested. */
377baf3bbaeSKonstantin Ananyev 		if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
378daa02b5cSOlivier Matz 				(ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
379daa02b5cSOlivier Matz 				RTE_MBUF_F_TX_SCTP_CKSUM) {
380fcae1808SAndrew Rybchenko 			rte_errno = ENOTSUP;
381baf3bbaeSKonstantin Ananyev 			return i;
382baf3bbaeSKonstantin Ananyev 		}
383baf3bbaeSKonstantin Ananyev 
384baf3bbaeSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG
385baf3bbaeSKonstantin Ananyev 		ret = rte_validate_tx_offload(m);
386baf3bbaeSKonstantin Ananyev 		if (ret != 0) {
387fcae1808SAndrew Rybchenko 			rte_errno = -ret;
388baf3bbaeSKonstantin Ananyev 			return i;
389baf3bbaeSKonstantin Ananyev 		}
390baf3bbaeSKonstantin Ananyev #endif
391baf3bbaeSKonstantin Ananyev 		ret = rte_net_intel_cksum_prepare(m);
392baf3bbaeSKonstantin Ananyev 		if (ret != 0) {
393fcae1808SAndrew Rybchenko 			rte_errno = -ret;
394baf3bbaeSKonstantin Ananyev 			return i;
395baf3bbaeSKonstantin Ananyev 		}
396baf3bbaeSKonstantin Ananyev 	}
397baf3bbaeSKonstantin Ananyev 
398baf3bbaeSKonstantin Ananyev 	return i;
399baf3bbaeSKonstantin Ananyev }
400baf3bbaeSKonstantin Ananyev 
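/*
 * Tx burst handler. For each packet: reclaim completed descriptors, check
 * ring space and segment limits, optionally copy small single-segment
 * packets into the Tx data ring, fill one descriptor per segment, set
 * VLAN/checksum/TSO fields on the SOP descriptor, and finally flip its
 * gen bit to hand the packet to the device. The doorbell is written once
 * the deferred count reaches the threshold advertised by the device.
 */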
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	vmxnet3_tx_queue_t *txq = tx_queue;
	struct vmxnet3_hw *hw = txq->hw;
	Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl;
	uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred);

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {
		Vmxnet3_GenericDesc *gdesc = NULL;
		vmxnet3_buf_info_t *tbi = NULL;
		uint32_t first2fill, avail, dw2;
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		struct rte_mbuf *m_seg = txm;
		int copy_size = 0;
		bool tso = (txm->ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0;
		/* # of descriptors needed for a packet. */
		unsigned count = txm->nb_segs;

		avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
		if (count > avail) {
			/* Is command ring full? */
			if (unlikely(avail == 0)) {
				PMD_TX_LOG(DEBUG, "No free ring descriptors");
				txq->stats.tx_ring_full++;
				txq->stats.drop_total += (nb_pkts - nb_tx);
				break;
			}

			/* Command ring is not full but cannot handle the
			 * multi-segmented packet. Let's try the next packet
			 * in this case.
			 */
			PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
				   "(avail %d needed %d)", avail, count);
			txq->stats.drop_total++;
			if (tso)
				txq->stats.drop_tso++;
			rte_pktmbuf_free(txm);
			nb_tx++;
			continue;
		}

		/* Drop non-TSO or TSO packet that is excessively fragmented */
		if (unlikely((!tso && count > VMXNET3_MAX_TXD_PER_PKT) ||
			     (tso && count > VMXNET3_MAX_TSO_TXD_PER_PKT))) {
			PMD_TX_LOG(ERR, "Non-TSO or TSO packet cannot occupy more than "
				   "%d or %d tx descriptors respectively. Packet dropped.",
				   VMXNET3_MAX_TXD_PER_PKT, VMXNET3_MAX_TSO_TXD_PER_PKT);
			txq->stats.drop_too_many_segs++;
			txq->stats.drop_total++;
			rte_pktmbuf_free(txm);
			nb_tx++;
			continue;
		}

		/* Skip empty packets */
		if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) {
			txq->stats.drop_total++;
			rte_pktmbuf_free(txm);
			nb_tx++;
			continue;
		}

		if (txm->nb_segs == 1 &&
		    rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
			struct Vmxnet3_TxDataDesc *tdd;

			tdd = (struct Vmxnet3_TxDataDesc *)
				((uint8 *)txq->data_ring.base +
				 txq->cmd_ring.next2fill *
				 txq->txdata_desc_size);
			copy_size = rte_pktmbuf_pkt_len(txm);
			rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
		}

		/* use the previous gen bit for the SOP desc */
		dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
		first2fill = txq->cmd_ring.next2fill;
		do {
			/* Skip empty segments */
			if (unlikely(m_seg->data_len == 0))
				continue;

			/* Remember the transmit buffer for cleanup */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;

			/* NB: the following assumes that the VMXNET3 maximum
			 * transmit buffer size (16K) is greater than the
			 * maximum mbuf segment size.
			 */
			gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;

			if (copy_size) {
				uint64 offset =
					(uint64)txq->cmd_ring.next2fill *
							txq->txdata_desc_size;
				gdesc->txd.addr =
					rte_cpu_to_le_64(txq->data_ring.basePA +
							 offset);
			} else {
				gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
			}

			gdesc->dword[2] = dw2 | m_seg->data_len;
			gdesc->dword[3] = 0;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);

			/* use the right gen for non-SOP desc */
			dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
		} while ((m_seg = m_seg->next) != NULL);
		/* We must have executed the complete preceding loop at least
		 * once without skipping an empty segment, as we can't have
		 * a packet with only empty segments.
		 * Thus, tbi and gdesc have been initialized.
		 */

		/* set the last buf_info for the pkt */
		tbi->m = txm;
		/* Update the EOP descriptor */
		gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;

		/* Add VLAN tag if present */
		gdesc = txq->cmd_ring.base + first2fill;
		if (txm->ol_flags & RTE_MBUF_F_TX_VLAN) {
			gdesc->txd.ti = 1;
			gdesc->txd.tci = txm->vlan_tci;
		}

		if (tso) {
			uint16_t mss = txm->tso_segsz;

			RTE_ASSERT(mss > 0);

			gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
			gdesc->txd.om = VMXNET3_OM_TSO;
			gdesc->txd.msscof = mss;

			deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
		} else if (txm->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.hlen = txm->l2_len + txm->l3_len;

			switch (txm->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
			case RTE_MBUF_F_TX_TCP_CKSUM:
				gdesc->txd.msscof = gdesc->txd.hlen +
					offsetof(struct rte_tcp_hdr, cksum);
				break;
			case RTE_MBUF_F_TX_UDP_CKSUM:
				gdesc->txd.msscof = gdesc->txd.hlen +
					offsetof(struct rte_udp_hdr,
						dgram_cksum);
				break;
			default:
				PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
					   txm->ol_flags & RTE_MBUF_F_TX_L4_MASK);
				abort();
			}
			deferred++;
		} else {
			gdesc->txd.hlen = 0;
			gdesc->txd.om = VMXNET3_OM_NONE;
			gdesc->txd.msscof = 0;
			deferred++;
		}

		/* flip the GEN bit on the SOP */
		rte_compiler_barrier();
		gdesc->dword[2] ^= VMXNET3_TXD_GEN;

		txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred);
		nb_tx++;
	}

	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold));

	if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) {
		txq_ctrl->txNumDeferred = 0;
		/* Notify vSwitch that packets are available. */
		VMXNET3_WRITE_BAR0_REG(hw, (hw->tx_prod_offset + txq->queue_id * VMXNET3_REG_ALIGN),
				       txq->cmd_ring.next2fill);
	}

	return nb_tx;
}

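/*
 * Post one mbuf to the given Rx command ring: record it in buf_info, point
 * the Rx descriptor at the buffer's physical address, and flip the gen bit
 * last to pass ownership to the device.
 */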
static inline void
vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
		   struct rte_mbuf *mbuf)
{
	uint32_t val;
	struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
	struct Vmxnet3_RxDesc *rxd =
		(struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
	vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];

	if (ring_id == 0) {
		/* Usually one HEAD-type buf per packet:
		 * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
		 * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
		 */

		/* We use a single buffer per packet, so all HEADs here */
		val = VMXNET3_RXD_BTYPE_HEAD;
	} else {
		/* All BODY-type buffers for the 2nd ring */
		val = VMXNET3_RXD_BTYPE_BODY;
	}

	/*
	 * Load the mbuf pointer into buf_info[ring->next2fill];
	 * the buf_info entry plays the role of the virtio-virtqueue cookie.
	 */
	buf_info->m = mbuf;
	buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
	buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);

	/* Load Rx Descriptor with the buffer's GPA */
	rxd->addr = buf_info->bufPA;

	/* After this point rxd->addr MUST not be NULL */
	rxd->btype = val;
	rxd->len = buf_info->len;
	/* Flip gen bit at the end to change ownership */
	rxd->gen = ring->gen;

	vmxnet3_cmd_ring_adv_next2fill(ring);
}
/*
 *  Allocates mbufs and clusters, and posts Rx descriptors with the buffer
 *  details so that the device can receive packets into those buffers.
 *  Ring layout:
 *      Among the two rings, the 1st ring contains buffers of type 0 and type 1.
 *      bufs_per_pkt is set such that for non-LRO cases all the buffers required
 *      by a frame fit in the 1st ring (1st buf of type 0, rest of type 1).
 *      The 2nd ring contains type-1 buffers only and is used mostly for LRO.
 */
static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
	int err = 0;
	uint32_t i = 0;
	struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];

	while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
		struct rte_mbuf *mbuf;

		/* Allocate blank mbuf for the current Rx Descriptor */
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(mbuf == NULL)) {
			PMD_RX_LOG(ERR, "Error allocating mbuf");
			rxq->stats.rx_buf_alloc_failure++;
			err = ENOMEM;
			break;
		}

		vmxnet3_renew_desc(rxq, ring_id, mbuf);
		i++;
	}

	/* Return error only if no buffers are posted at present */
	if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
		return -err;
	else
		return i;
}

/* MSS not provided by vmxnet3, guess one with available information */
static uint16_t
vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
		struct rte_mbuf *rxm)
{
	uint32_t hlen, slen;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	char *ptr;
	uint8_t segs;

	RTE_ASSERT(rcd->tcp);

	ptr = rte_pktmbuf_mtod(rxm, char *);
	slen = rte_pktmbuf_data_len(rxm);
	hlen = sizeof(struct rte_ether_hdr);

	if (rcd->v4) {
		if (unlikely(slen < hlen + sizeof(struct rte_ipv4_hdr)))
			return hw->mtu - sizeof(struct rte_ipv4_hdr)
					- sizeof(struct rte_tcp_hdr);

		ipv4_hdr = (struct rte_ipv4_hdr *)(ptr + hlen);
		hlen += rte_ipv4_hdr_len(ipv4_hdr);
	} else if (rcd->v6) {
		if (unlikely(slen < hlen + sizeof(struct rte_ipv6_hdr)))
			return hw->mtu - sizeof(struct rte_ipv6_hdr) -
					sizeof(struct rte_tcp_hdr);

		ipv6_hdr = (struct rte_ipv6_hdr *)(ptr + hlen);
		hlen += sizeof(struct rte_ipv6_hdr);
		if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) {
			int frag;

			rte_net_skip_ip6_ext(ipv6_hdr->proto, rxm,
					&hlen, &frag);
		}
	}

	if (unlikely(slen < hlen + sizeof(struct rte_tcp_hdr)))
		return hw->mtu - hlen - sizeof(struct rte_tcp_hdr) +
				sizeof(struct rte_ether_hdr);

	tcp_hdr = (struct rte_tcp_hdr *)(ptr + hlen);
	hlen += (tcp_hdr->data_off & 0xf0) >> 2;

	segs = *vmxnet3_segs_dynfield(rxm);
	if (segs > 1)
		return (rte_pktmbuf_pkt_len(rxm) - hlen + segs - 1) / segs;
	else
		return hw->mtu - hlen + sizeof(struct rte_ether_hdr);
}

/* Receive side checksum and other offloads */
static inline void
vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
		struct rte_mbuf *rxm, const uint8_t sop)
{
	uint64_t ol_flags = rxm->ol_flags;
	uint32_t packet_type = rxm->packet_type;

	/* Offloads set in sop */
	if (sop) {
		/* Set packet type */
		packet_type |= RTE_PTYPE_L2_ETHER;

		/* Check large packet receive */
		if (VMXNET3_VERSION_GE_2(hw) &&
		    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
			const Vmxnet3_RxCompDescExt *rcde =
					(const Vmxnet3_RxCompDescExt *)rcd;

			rxm->tso_segsz = rcde->mss;
			*vmxnet3_segs_dynfield(rxm) = rcde->segCnt;
			ol_flags |= RTE_MBUF_F_RX_LRO;
		}
	} else { /* Offloads set in eop */
		/* Check for RSS */
		if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
			ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
			rxm->hash.rss = rcd->rssHash;
		}

		/* Check for hardware stripped VLAN tag */
		if (rcd->ts) {
			ol_flags |= (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
			rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
		}

		/* Check packet type, checksum errors, etc. */
		if (rcd->cnc) {
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;

			if (rcd->v4) {
				packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
				if (rcd->tcp)
					packet_type |= RTE_PTYPE_L4_TCP;
				else if (rcd->udp)
					packet_type |= RTE_PTYPE_L4_UDP;
			} else if (rcd->v6) {
				packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
				if (rcd->tcp)
					packet_type |= RTE_PTYPE_L4_TCP;
				else if (rcd->udp)
					packet_type |= RTE_PTYPE_L4_UDP;
			} else {
				packet_type |= RTE_PTYPE_UNKNOWN;
			}

		} else {
			if (rcd->v4) {
				packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;

				if (rcd->ipc)
					ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				else
					ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;

				if (rcd->tuc) {
					ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
					if (rcd->tcp)
						packet_type |= RTE_PTYPE_L4_TCP;
					else
						packet_type |= RTE_PTYPE_L4_UDP;
				} else {
					if (rcd->tcp) {
						packet_type |= RTE_PTYPE_L4_TCP;
						ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
					} else if (rcd->udp) {
						packet_type |= RTE_PTYPE_L4_UDP;
						ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
					}
				}
			} else if (rcd->v6) {
				packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;

				if (rcd->tuc) {
					ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
					if (rcd->tcp)
						packet_type |= RTE_PTYPE_L4_TCP;
					else
						packet_type |= RTE_PTYPE_L4_UDP;
				} else {
					if (rcd->tcp) {
						packet_type |= RTE_PTYPE_L4_TCP;
						ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
					} else if (rcd->udp) {
						packet_type |= RTE_PTYPE_L4_UDP;
						ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
					}
				}
			} else {
				packet_type |= RTE_PTYPE_UNKNOWN;
			}

			/* Old variants of vmxnet3 do not provide MSS */
			if ((ol_flags & RTE_MBUF_F_RX_LRO) && rxm->tso_segsz == 0)
				rxm->tso_segsz = vmxnet3_guess_mss(hw,
						rcd, rxm);
		}
	}

	rxm->ol_flags = ol_flags;
	rxm->packet_type = packet_type;
}

/*
 * Process the Rx Completion Ring of given vmxnet3_rx_queue
 * for nb_pkts burst and return the number of packets received
 */
uint16_t
vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_rx;
	uint32_t nb_rxd, idx;
	uint8_t ring_idx;
	vmxnet3_rx_queue_t *rxq;
	Vmxnet3_RxCompDesc *rcd;
	vmxnet3_buf_info_t *rbi;
	Vmxnet3_RxDesc *rxd;
	struct rte_mbuf *rxm = NULL;
	struct vmxnet3_hw *hw;

	nb_rx = 0;
	ring_idx = 0;
	nb_rxd = 0;
	idx = 0;

	rxq = rx_queue;
	hw = rxq->hw;

	rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;

	if (unlikely(rxq->stopped)) {
		PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
		return 0;
	}

	while (rcd->gen == rxq->comp_ring.gen) {
		struct rte_mbuf *newm;

		if (nb_rx >= nb_pkts)
			break;

		newm = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(newm == NULL)) {
			PMD_RX_LOG(ERR, "Error allocating mbuf");
			rxq->stats.rx_buf_alloc_failure++;
			break;
		}

		idx = rcd->rxdIdx;
		ring_idx = vmxnet3_get_ring_idx(hw, rcd->rqID);
		rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
		RTE_SET_USED(rxd); /* used only for assert when enabled */
		rbi = rxq->cmd_ring[ring_idx].buf_info + idx;

		PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);

		RTE_ASSERT(rcd->len <= rxd->len);
		RTE_ASSERT(rbi->m);

		/* Get the packet buffer pointer from buf_info */
		rxm = rbi->m;

		/* Clear descriptor associated buf_info to be reused */
		rbi->m = NULL;
		rbi->bufPA = 0;

		/* Update the index that we received a packet */
		rxq->cmd_ring[ring_idx].next2comp = idx;

		/* For RCD with EOP set, check if there is frame error */
		if (unlikely(rcd->eop && rcd->err)) {
			rxq->stats.drop_total++;
			rxq->stats.drop_err++;

			if (!rcd->fcs) {
				rxq->stats.drop_fcs++;
				PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
			}
			PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
				   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
					 rxq->comp_ring.base), rcd->rxdIdx);
			rte_pktmbuf_free_seg(rxm);
			if (rxq->start_seg) {
				struct rte_mbuf *start = rxq->start_seg;

				rxq->start_seg = NULL;
				rte_pktmbuf_free(start);
			}
			goto rcd_done;
		}

		/* Initialize newly received packet buffer */
		rxm->port = rxq->port_id;
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = (uint16_t)rcd->len;
		rxm->data_len = (uint16_t)rcd->len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->packet_type = 0;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet
		 * Otherwise, update the total length and the number of segments
		 * of the current scattered packet, and update the pointer to
		 * the last mbuf of the current packet.
		 */
		if (rcd->sop) {
			RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);

			if (unlikely(rcd->len == 0)) {
				RTE_ASSERT(rcd->eop);

				PMD_RX_LOG(DEBUG,
					   "Rx buf was skipped. rxring[%d][%d])",
					   ring_idx, idx);
				rte_pktmbuf_free_seg(rxm);
				goto rcd_done;
			}

			if (vmxnet3_rx_data_ring(hw, rcd->rqID)) {
				uint8_t *rdd = rxq->data_ring.base +
					idx * rxq->data_desc_size;

				RTE_ASSERT(VMXNET3_VERSION_GE_3(hw));
				rte_memcpy(rte_pktmbuf_mtod(rxm, char *),
					   rdd, rcd->len);
			}

			rxq->start_seg = rxm;
			rxq->last_seg = rxm;
			vmxnet3_rx_offload(hw, rcd, rxm, 1);
		} else {
			struct rte_mbuf *start = rxq->start_seg;

			RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);

			if (likely(start && rxm->data_len > 0)) {
				start->pkt_len += rxm->data_len;
				start->nb_segs++;

				rxq->last_seg->next = rxm;
				rxq->last_seg = rxm;
			} else {
				PMD_RX_LOG(ERR, "Received an empty or out-of-order frame.");
				rxq->stats.drop_total++;
				rxq->stats.drop_err++;

				rte_pktmbuf_free_seg(rxm);
			}
998595d08d1SDidier Pallard 		}
9992fdd835fSStephen Hemminger 
10002fdd835fSStephen Hemminger 		if (rcd->eop) {
1001ce82dc6bSJohn Guzik 			struct rte_mbuf *start = rxq->start_seg;
1002ce82dc6bSJohn Guzik 
10035e5ac26fSDidier Pallard 			vmxnet3_rx_offload(hw, rcd, start, 0);
1004ce82dc6bSJohn Guzik 			rx_pkts[nb_rx++] = start;
10052fdd835fSStephen Hemminger 			rxq->start_seg = NULL;
10062fdd835fSStephen Hemminger 		}
10072fdd835fSStephen Hemminger 
100809452c07SBruce Richardson rcd_done:
100909452c07SBruce Richardson 		rxq->cmd_ring[ring_idx].next2comp = idx;
1010c346551eSYong Wang 		VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
1011c346551eSYong Wang 					  rxq->cmd_ring[ring_idx].size);
101209452c07SBruce Richardson 
10138fce14b7SStefan Puiu 		/* It's time to renew descriptors */
10148fce14b7SStefan Puiu 		vmxnet3_renew_desc(rxq, ring_idx, newm);
1015d8b0e3c6SRonak Doshi 		if (unlikely(rxq->shared->ctrl.updateRxProd &&
1016d8b0e3c6SRonak Doshi 			 (rxq->cmd_ring[ring_idx].next2fill & 0xf) == 0)) {
1017bfb405b0SRonak Doshi 			VMXNET3_WRITE_BAR0_REG(hw, hw->rx_prod_offset[ring_idx] +
1018bfb405b0SRonak Doshi 					       (rxq->queue_id * VMXNET3_REG_ALIGN),
101909452c07SBruce Richardson 					       rxq->cmd_ring[ring_idx].next2fill);
102009452c07SBruce Richardson 		}
102109452c07SBruce Richardson 
102209452c07SBruce Richardson 		/* Advance to the next descriptor in comp_ring */
102309452c07SBruce Richardson 		vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);
102409452c07SBruce Richardson 
102509452c07SBruce Richardson 		rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
102609452c07SBruce Richardson 		nb_rxd++;
102709452c07SBruce Richardson 		if (nb_rxd > rxq->cmd_ring[0].size) {
1028c346551eSYong Wang 			PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
102909452c07SBruce Richardson 				   " relinquish control.");
103009452c07SBruce Richardson 			break;
103109452c07SBruce Richardson 		}
103209452c07SBruce Richardson 	}
103309452c07SBruce Richardson 
1034c740ba20SDavid Harton 	if (unlikely(nb_rxd == 0)) {
1035c740ba20SDavid Harton 		uint32_t avail;
1036d8b0e3c6SRonak Doshi 		uint32_t posted = 0;
1037c740ba20SDavid Harton 		for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
1038c740ba20SDavid Harton 			avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[ring_idx]);
1039c740ba20SDavid Harton 			if (unlikely(avail > 0)) {
1040c740ba20SDavid Harton 				/* try to alloc new buf and renew descriptors */
1041d8b0e3c6SRonak Doshi 				if (vmxnet3_post_rx_bufs(rxq, ring_idx) > 0)
1042d8b0e3c6SRonak Doshi 					posted |= (1 << ring_idx);
1043c740ba20SDavid Harton 			}
1044c740ba20SDavid Harton 		}
1045c740ba20SDavid Harton 		if (unlikely(rxq->shared->ctrl.updateRxProd)) {
1046c740ba20SDavid Harton 			for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
1047d8b0e3c6SRonak Doshi 				if (posted & (1 << ring_idx))
1048bfb405b0SRonak Doshi 					VMXNET3_WRITE_BAR0_REG(hw, hw->rx_prod_offset[ring_idx] +
1049bfb405b0SRonak Doshi 							       (rxq->queue_id * VMXNET3_REG_ALIGN),
1050c740ba20SDavid Harton 							       rxq->cmd_ring[ring_idx].next2fill);
1051c740ba20SDavid Harton 			}
1052c740ba20SDavid Harton 		}
1053c740ba20SDavid Harton 	}
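	/*
	 * Starvation recovery: if no completion was processed above, the
	 * command rings may have run completely out of posted buffers, so
	 * try to replenish them here and ring the doorbell only for the
	 * rings that actually received new buffers (tracked in the
	 * 'posted' bitmask).
	 */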
1054c740ba20SDavid Harton 
105509452c07SBruce Richardson 	return nb_rx;
105609452c07SBruce Richardson }
105709452c07SBruce Richardson 
105844362052SPankaj Gupta uint32_t
105944362052SPankaj Gupta vmxnet3_dev_rx_queue_count(void *rx_queue)
106044362052SPankaj Gupta {
106144362052SPankaj Gupta 	const vmxnet3_rx_queue_t *rxq;
106244362052SPankaj Gupta 	const Vmxnet3_RxCompDesc *rcd;
106344362052SPankaj Gupta 	uint32_t idx, nb_rxd = 0;
106444362052SPankaj Gupta 	uint8_t gen;
106544362052SPankaj Gupta 
106644362052SPankaj Gupta 	rxq = rx_queue;
106744362052SPankaj Gupta 	if (unlikely(rxq->stopped)) {
106844362052SPankaj Gupta 		PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
106944362052SPankaj Gupta 		return 0;
107044362052SPankaj Gupta 	}
107144362052SPankaj Gupta 
107244362052SPankaj Gupta 	gen = rxq->comp_ring.gen;
107344362052SPankaj Gupta 	idx = rxq->comp_ring.next2proc;
107444362052SPankaj Gupta 	rcd = &rxq->comp_ring.base[idx].rcd;
107544362052SPankaj Gupta 	while (rcd->gen == gen) {
107644362052SPankaj Gupta 		if (rcd->eop)
107744362052SPankaj Gupta 			++nb_rxd;
107844362052SPankaj Gupta 		if (++idx == rxq->comp_ring.size) {
107944362052SPankaj Gupta 			idx = 0;
108044362052SPankaj Gupta 			gen ^= 1;
108144362052SPankaj Gupta 		}
108244362052SPankaj Gupta 		rcd = &rxq->comp_ring.base[idx].rcd;
108344362052SPankaj Gupta 	}
108444362052SPankaj Gupta 
108544362052SPankaj Gupta 	return nb_rxd;
108644362052SPankaj Gupta }
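/*
 * Illustrative usage (hypothetical application code, not part of this
 * driver): the callback above backs the generic ethdev API
 *
 *     int n = rte_eth_rx_queue_count(port_id, queue_id);
 *
 * which reports the number of complete packets (EOP completion
 * descriptors) currently waiting in the completion ring.
 */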
108744362052SPankaj Gupta 
108809452c07SBruce Richardson int
108909452c07SBruce Richardson vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
109009452c07SBruce Richardson 			   uint16_t queue_idx,
109109452c07SBruce Richardson 			   uint16_t nb_desc,
109209452c07SBruce Richardson 			   unsigned int socket_id,
1093a4996bd8SWei Dai 			   const struct rte_eth_txconf *tx_conf __rte_unused)
109409452c07SBruce Richardson {
109509452c07SBruce Richardson 	struct vmxnet3_hw *hw = dev->data->dev_private;
109609452c07SBruce Richardson 	const struct rte_memzone *mz;
109709452c07SBruce Richardson 	struct vmxnet3_tx_queue *txq;
109809452c07SBruce Richardson 	struct vmxnet3_cmd_ring *ring;
109909452c07SBruce Richardson 	struct vmxnet3_comp_ring *comp_ring;
110009452c07SBruce Richardson 	struct vmxnet3_data_ring *data_ring;
110109452c07SBruce Richardson 	int size;
110209452c07SBruce Richardson 
110309452c07SBruce Richardson 	PMD_INIT_FUNC_TRACE();
110409452c07SBruce Richardson 
1105c346551eSYong Wang 	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
1106c346551eSYong Wang 			  RTE_CACHE_LINE_SIZE);
110709452c07SBruce Richardson 	if (txq == NULL) {
110809452c07SBruce Richardson 		PMD_INIT_LOG(ERR, "Cannot allocate Tx queue structure");
110909452c07SBruce Richardson 		return -ENOMEM;
111009452c07SBruce Richardson 	}
111109452c07SBruce Richardson 
111209452c07SBruce Richardson 	txq->queue_id = queue_idx;
111309452c07SBruce Richardson 	txq->port_id = dev->data->port_id;
11143e5810f3SChas Williams 	txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
111509452c07SBruce Richardson 	txq->hw = hw;
111609452c07SBruce Richardson 	txq->qid = queue_idx;
111709452c07SBruce Richardson 	txq->stopped = TRUE;
111801fef6e3SShrikrishna Khare 	txq->txdata_desc_size = hw->txdata_desc_size;
111909452c07SBruce Richardson 
112009452c07SBruce Richardson 	ring = &txq->cmd_ring;
112109452c07SBruce Richardson 	comp_ring = &txq->comp_ring;
112209452c07SBruce Richardson 	data_ring = &txq->data_ring;
112309452c07SBruce Richardson 
112409452c07SBruce Richardson 	/* Tx vmxnet ring size must be between 512 and 4096 */
112509452c07SBruce Richardson 	if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
112609452c07SBruce Richardson 		PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
112709452c07SBruce Richardson 			     VMXNET3_DEF_TX_RING_SIZE);
		/* don't leak the queue structure on failure */
		rte_free(txq);
112809452c07SBruce Richardson 		return -EINVAL;
112909452c07SBruce Richardson 	} else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
113009452c07SBruce Richardson 		PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
113109452c07SBruce Richardson 			     VMXNET3_TX_RING_MAX_SIZE);
		rte_free(txq);
113209452c07SBruce Richardson 		return -EINVAL;
113309452c07SBruce Richardson 	} else {
113409452c07SBruce Richardson 		ring->size = nb_desc;
11356e5ee1b6SRonak Doshi 		if (VMXNET3_VERSION_GE_7(hw))
11366e5ee1b6SRonak Doshi 			ring->size = rte_align32prevpow2(nb_desc);
113709452c07SBruce Richardson 		ring->size &= ~VMXNET3_RING_SIZE_MASK;
113809452c07SBruce Richardson 	}
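	/*
	 * Worked example (assuming VMXNET3_RING_SIZE_MASK == 31, i.e. ring
	 * sizes are multiples of 32): for nb_desc = 1000 on a pre-v7 device,
	 * ring->size = 1000 & ~31 = 992; on a v7+ device the size is first
	 * rounded down to a power of two, rte_align32prevpow2(1000) = 512,
	 * which the mask then leaves unchanged.
	 */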
113909452c07SBruce Richardson 	comp_ring->size = data_ring->size = ring->size;
114009452c07SBruce Richardson 
114109452c07SBruce Richardson 	/* Tx vmxnet rings structure initialization*/
114209452c07SBruce Richardson 	ring->next2fill = 0;
114309452c07SBruce Richardson 	ring->next2comp = 0;
114409452c07SBruce Richardson 	ring->gen = VMXNET3_INIT_GEN;
114509452c07SBruce Richardson 	comp_ring->next2proc = 0;
114609452c07SBruce Richardson 	comp_ring->gen = VMXNET3_INIT_GEN;
114709452c07SBruce Richardson 
114809452c07SBruce Richardson 	size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
114909452c07SBruce Richardson 	size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
115001fef6e3SShrikrishna Khare 	size += txq->txdata_desc_size * data_ring->size;
115109452c07SBruce Richardson 
115204df93d1SChas Williams 	mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
115304df93d1SChas Williams 				      VMXNET3_RING_BA_ALIGN, socket_id);
115409452c07SBruce Richardson 	if (mz == NULL) {
115509452c07SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to reserve memzone for Tx queue descriptors");
		rte_free(txq);
115609452c07SBruce Richardson 		return -ENOMEM;
115709452c07SBruce Richardson 	}
115804df93d1SChas Williams 	txq->mz = mz;
115909452c07SBruce Richardson 	memset(mz->addr, 0, mz->len);
116009452c07SBruce Richardson 
116109452c07SBruce Richardson 	/* cmd_ring initialization */
116209452c07SBruce Richardson 	ring->base = mz->addr;
1163f17ca787SThomas Monjalon 	ring->basePA = mz->iova;
116409452c07SBruce Richardson 
116509452c07SBruce Richardson 	/* comp_ring initialization */
116609452c07SBruce Richardson 	comp_ring->base = ring->base + ring->size;
116709452c07SBruce Richardson 	comp_ring->basePA = ring->basePA +
116809452c07SBruce Richardson 		(sizeof(struct Vmxnet3_TxDesc) * ring->size);
116909452c07SBruce Richardson 
117009452c07SBruce Richardson 	/* data_ring initialization */
117109452c07SBruce Richardson 	data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
117209452c07SBruce Richardson 	data_ring->basePA = comp_ring->basePA +
117309452c07SBruce Richardson 			(sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);
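	/*
	 * The three Tx rings live back-to-back in the single memzone
	 * reserved above:
	 *
	 *   [TxDesc * size | TxCompDesc * size | txdata_desc_size * size]
	 *
	 * so each basePA is the previous ring's basePA plus that ring's
	 * length in bytes.
	 */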
117409452c07SBruce Richardson 
117509452c07SBruce Richardson 	/* cmd_ring0 buf_info allocation */
117609452c07SBruce Richardson 	ring->buf_info = rte_zmalloc("tx_ring_buf_info",
117709452c07SBruce Richardson 				     ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
117809452c07SBruce Richardson 	if (ring->buf_info == NULL) {
117909452c07SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to allocate tx_ring_buf_info");
		rte_free(txq);
118009452c07SBruce Richardson 		return -ENOMEM;
118109452c07SBruce Richardson 	}
118209452c07SBruce Richardson 
118309452c07SBruce Richardson 	/* Update the data portion with txq */
118409452c07SBruce Richardson 	dev->data->tx_queues[queue_idx] = txq;
118509452c07SBruce Richardson 
118609452c07SBruce Richardson 	return 0;
118709452c07SBruce Richardson }
118809452c07SBruce Richardson 
118909452c07SBruce Richardson int
119009452c07SBruce Richardson vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
119109452c07SBruce Richardson 			   uint16_t queue_idx,
119209452c07SBruce Richardson 			   uint16_t nb_desc,
119309452c07SBruce Richardson 			   unsigned int socket_id,
1194c346551eSYong Wang 			   __rte_unused const struct rte_eth_rxconf *rx_conf,
119509452c07SBruce Richardson 			   struct rte_mempool *mp)
119609452c07SBruce Richardson {
119709452c07SBruce Richardson 	const struct rte_memzone *mz;
119809452c07SBruce Richardson 	struct vmxnet3_rx_queue *rxq;
119909452c07SBruce Richardson 	struct vmxnet3_hw *hw = dev->data->dev_private;
120009452c07SBruce Richardson 	struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
120109452c07SBruce Richardson 	struct vmxnet3_comp_ring *comp_ring;
1202c4be1a65SShrikrishna Khare 	struct vmxnet3_rx_data_ring *data_ring;
120309452c07SBruce Richardson 	int size;
120409452c07SBruce Richardson 	uint8_t i;
120509452c07SBruce Richardson 	char mem_name[32];
120609452c07SBruce Richardson 
120709452c07SBruce Richardson 	PMD_INIT_FUNC_TRACE();
120809452c07SBruce Richardson 
1209c346551eSYong Wang 	rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
1210c346551eSYong Wang 			  RTE_CACHE_LINE_SIZE);
121109452c07SBruce Richardson 	if (rxq == NULL) {
121209452c07SBruce Richardson 		PMD_INIT_LOG(ERR, "Cannot allocate Rx queue structure");
121309452c07SBruce Richardson 		return -ENOMEM;
121409452c07SBruce Richardson 	}
121509452c07SBruce Richardson 
121609452c07SBruce Richardson 	rxq->mp = mp;
12176e5ee1b6SRonak Doshi 	/* Remember buffer size for initialization in dev start. */
12186e5ee1b6SRonak Doshi 	hw->rxdata_buf_size =
12196e5ee1b6SRonak Doshi 		rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
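	/*
	 * Example: a pool created with RTE_MBUF_DEFAULT_BUF_SIZE has a data
	 * room of 2048 + RTE_PKTMBUF_HEADROOM bytes, giving an
	 * rxdata_buf_size of 2048.
	 */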
122009452c07SBruce Richardson 	rxq->queue_id = queue_idx;
122109452c07SBruce Richardson 	rxq->port_id = dev->data->port_id;
12223e5810f3SChas Williams 	rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
122309452c07SBruce Richardson 	rxq->hw = hw;
122409452c07SBruce Richardson 	rxq->qid1 = queue_idx;
122509452c07SBruce Richardson 	rxq->qid2 = queue_idx + hw->num_rx_queues;
1226c4be1a65SShrikrishna Khare 	rxq->data_ring_qid = queue_idx + 2 * hw->num_rx_queues;
1227c4be1a65SShrikrishna Khare 	rxq->data_desc_size = hw->rxdata_desc_size;
122809452c07SBruce Richardson 	rxq->stopped = TRUE;
122909452c07SBruce Richardson 
123009452c07SBruce Richardson 	ring0 = &rxq->cmd_ring[0];
123109452c07SBruce Richardson 	ring1 = &rxq->cmd_ring[1];
123209452c07SBruce Richardson 	comp_ring = &rxq->comp_ring;
1233c4be1a65SShrikrishna Khare 	data_ring = &rxq->data_ring;
123409452c07SBruce Richardson 
123509452c07SBruce Richardson 	/* Rx vmxnet ring size must be between 256 and 4096 */
123609452c07SBruce Richardson 	if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
123709452c07SBruce Richardson 		PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: %u",
			     VMXNET3_DEF_RX_RING_SIZE);
		rte_free(rxq);
123809452c07SBruce Richardson 		return -EINVAL;
123909452c07SBruce Richardson 	} else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
124009452c07SBruce Richardson 		PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: %u",
			     VMXNET3_RX_RING_MAX_SIZE);
		rte_free(rxq);
124109452c07SBruce Richardson 		return -EINVAL;
124209452c07SBruce Richardson 	} else {
124309452c07SBruce Richardson 		ring0->size = nb_desc;
12446e5ee1b6SRonak Doshi 		if (VMXNET3_VERSION_GE_7(hw))
12456e5ee1b6SRonak Doshi 			ring0->size = rte_align32prevpow2(nb_desc);
124609452c07SBruce Richardson 		ring0->size &= ~VMXNET3_RING_SIZE_MASK;
124709452c07SBruce Richardson 		ring1->size = ring0->size;
124809452c07SBruce Richardson 	}
124909452c07SBruce Richardson 
125009452c07SBruce Richardson 	comp_ring->size = ring0->size + ring1->size;
1251c4be1a65SShrikrishna Khare 	data_ring->size = ring0->size;
125209452c07SBruce Richardson 
125309452c07SBruce Richardson 	/* Rx vmxnet rings structure initialization */
125409452c07SBruce Richardson 	ring0->next2fill = 0;
125509452c07SBruce Richardson 	ring1->next2fill = 0;
125609452c07SBruce Richardson 	ring0->next2comp = 0;
125709452c07SBruce Richardson 	ring1->next2comp = 0;
125809452c07SBruce Richardson 	ring0->gen = VMXNET3_INIT_GEN;
125909452c07SBruce Richardson 	ring1->gen = VMXNET3_INIT_GEN;
126009452c07SBruce Richardson 	comp_ring->next2proc = 0;
126109452c07SBruce Richardson 	comp_ring->gen = VMXNET3_INIT_GEN;
126209452c07SBruce Richardson 
126309452c07SBruce Richardson 	size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
126409452c07SBruce Richardson 	size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
1265c4be1a65SShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
1266c4be1a65SShrikrishna Khare 		size += rxq->data_desc_size * data_ring->size;
126709452c07SBruce Richardson 
126804df93d1SChas Williams 	mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
126904df93d1SChas Williams 				      VMXNET3_RING_BA_ALIGN, socket_id);
127009452c07SBruce Richardson 	if (mz == NULL) {
127109452c07SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to reserve memzone for Rx queue descriptors");
		rte_free(rxq);
127209452c07SBruce Richardson 		return -ENOMEM;
127309452c07SBruce Richardson 	}
127404df93d1SChas Williams 	rxq->mz = mz;
127509452c07SBruce Richardson 	memset(mz->addr, 0, mz->len);
127609452c07SBruce Richardson 
127709452c07SBruce Richardson 	/* cmd_ring0 initialization */
127809452c07SBruce Richardson 	ring0->base = mz->addr;
1279f17ca787SThomas Monjalon 	ring0->basePA = mz->iova;
128009452c07SBruce Richardson 
128109452c07SBruce Richardson 	/* cmd_ring1 initialization */
128209452c07SBruce Richardson 	ring1->base = ring0->base + ring0->size;
128309452c07SBruce Richardson 	ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;
128409452c07SBruce Richardson 
128509452c07SBruce Richardson 	/* comp_ring initialization */
128609452c07SBruce Richardson 	comp_ring->base = ring1->base + ring1->size;
128709452c07SBruce Richardson 	comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
128809452c07SBruce Richardson 		ring1->size;
128909452c07SBruce Richardson 
1290c4be1a65SShrikrishna Khare 	/* data_ring initialization */
1291c4be1a65SShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size) {
1292c4be1a65SShrikrishna Khare 		data_ring->base =
1293c4be1a65SShrikrishna Khare 			(uint8_t *)(comp_ring->base + comp_ring->size);
1294c4be1a65SShrikrishna Khare 		data_ring->basePA = comp_ring->basePA +
1295c4be1a65SShrikrishna Khare 			sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
1296c4be1a65SShrikrishna Khare 	}
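	/*
	 * The Rx memzone layout mirrors the Tx side, all rings back-to-back:
	 *
	 *   [RxDesc ring0 | RxDesc ring1 | RxCompDesc comp_ring | data ring]
	 *
	 * with the data ring present only on v3+ devices configured with a
	 * non-zero Rx data descriptor size.
	 */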
1297c4be1a65SShrikrishna Khare 
129809452c07SBruce Richardson 	/* cmd_ring0-cmd_ring1 buf_info allocation */
129909452c07SBruce Richardson 	for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
130009452c07SBruce Richardson 
130109452c07SBruce Richardson 		ring = &rxq->cmd_ring[i];
130209452c07SBruce Richardson 		ring->rid = i;
130309452c07SBruce Richardson 		snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
130409452c07SBruce Richardson 
1305c346551eSYong Wang 		ring->buf_info = rte_zmalloc(mem_name,
1306c346551eSYong Wang 					     ring->size * sizeof(vmxnet3_buf_info_t),
1307c346551eSYong Wang 					     RTE_CACHE_LINE_SIZE);
130809452c07SBruce Richardson 		if (ring->buf_info == NULL) {
130909452c07SBruce Richardson 			PMD_INIT_LOG(ERR, "Failed to allocate %s", mem_name);
			/* free buf_info of any ring allocated earlier */
			while (i--)
				rte_free(rxq->cmd_ring[i].buf_info);
			rte_free(rxq);
131009452c07SBruce Richardson 			return -ENOMEM;
131109452c07SBruce Richardson 		}
131209452c07SBruce Richardson 	}
131309452c07SBruce Richardson 
131409452c07SBruce Richardson 	/* Update the data portion with rxq */
131509452c07SBruce Richardson 	dev->data->rx_queues[queue_idx] = rxq;
131609452c07SBruce Richardson 
131709452c07SBruce Richardson 	return 0;
131809452c07SBruce Richardson }
131909452c07SBruce Richardson 
132009452c07SBruce Richardson /*
132109452c07SBruce Richardson  * Initializes the receive unit and
132209452c07SBruce Richardson  * loads mbufs into the Rx queues in advance.
132309452c07SBruce Richardson  */
132409452c07SBruce Richardson int
132509452c07SBruce Richardson vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
132609452c07SBruce Richardson {
132709452c07SBruce Richardson 	struct vmxnet3_hw *hw = dev->data->dev_private;
132809452c07SBruce Richardson 
132909452c07SBruce Richardson 	int i, ret;
133009452c07SBruce Richardson 	uint8_t j;
133109452c07SBruce Richardson 
133209452c07SBruce Richardson 	PMD_INIT_FUNC_TRACE();
133309452c07SBruce Richardson 
133409452c07SBruce Richardson 	for (i = 0; i < hw->num_rx_queues; i++) {
133509452c07SBruce Richardson 		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
133609452c07SBruce Richardson 
133709452c07SBruce Richardson 		for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
133809452c07SBruce Richardson 			/* Fill every free slot in ring j; returns buffers posted or negative errno */
133909452c07SBruce Richardson 			ret = vmxnet3_post_rx_bufs(rxq, j);
13408a2de735SKaijun Zeng 
13418a2de735SKaijun Zeng 			/* Zero buffers posted means the Rx queue was configured with no descriptors */
13428a2de735SKaijun Zeng 			if (ret == 0) {
1343c346551eSYong Wang 				PMD_INIT_LOG(ERR,
1344*f665790aSDavid Marchand 					"Invalid configuration in Rx queue: %d, buffers ring: %d",
1345c346551eSYong Wang 					i, j);
13468a2de735SKaijun Zeng 				return -EINVAL;
13478a2de735SKaijun Zeng 			}
13488a2de735SKaijun Zeng 			/* Propagate any error from buffer posting */
13498a2de735SKaijun Zeng 			if (ret < 0) {
13508a2de735SKaijun Zeng 				PMD_INIT_LOG(ERR, "Failed to post buffers for Rxq: %d, ring: %d", i, j);
13518a2de735SKaijun Zeng 				return ret;
135209452c07SBruce Richardson 			}
1353c346551eSYong Wang 			/*
1354c346551eSYong Wang 			 * Updating device with the index:next2fill to fill the
1355c346551eSYong Wang 			 * mbufs for coming packets.
1356c346551eSYong Wang 			 */
135709452c07SBruce Richardson 			if (unlikely(rxq->shared->ctrl.updateRxProd)) {
1358bfb405b0SRonak Doshi 				VMXNET3_WRITE_BAR0_REG(hw, hw->rx_prod_offset[j] +
1359bfb405b0SRonak Doshi 						       (rxq->queue_id * VMXNET3_REG_ALIGN),
136009452c07SBruce Richardson 						       rxq->cmd_ring[j].next2fill);
136109452c07SBruce Richardson 			}
136209452c07SBruce Richardson 		}
136309452c07SBruce Richardson 		rxq->stopped = FALSE;
13642fdd835fSStephen Hemminger 		rxq->start_seg = NULL;
136509452c07SBruce Richardson 	}
136609452c07SBruce Richardson 
136709452c07SBruce Richardson 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
136809452c07SBruce Richardson 		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
136909452c07SBruce Richardson 
137009452c07SBruce Richardson 		txq->stopped = FALSE;
137109452c07SBruce Richardson 	}
137209452c07SBruce Richardson 
137309452c07SBruce Richardson 	return 0;
137409452c07SBruce Richardson }
137509452c07SBruce Richardson 
137609452c07SBruce Richardson static uint8_t rss_intel_key[40] = {
137709452c07SBruce Richardson 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
137809452c07SBruce Richardson 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
137909452c07SBruce Richardson 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
138009452c07SBruce Richardson 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
138109452c07SBruce Richardson 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
138209452c07SBruce Richardson };
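/*
 * Applications may override this default key through the generic ethdev
 * configuration; vmxnet3_rss_configure() below copies whatever key is
 * present in dev_conf. A hypothetical sketch ('my_40_byte_key', 'port_id',
 * 'nb_rxq' and 'nb_txq' are placeholders):
 *
 *     struct rte_eth_conf conf = { 0 };
 *     conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 *     conf.rx_adv_conf.rss_conf.rss_key = my_40_byte_key;
 *     conf.rx_adv_conf.rss_conf.rss_key_len = VMXNET3_RSS_MAX_KEY_SIZE;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */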
138309452c07SBruce Richardson 
138409452c07SBruce Richardson /*
1385643fba77SEduard Serra  * Additional RSS configurations based on vmxnet v4+ APIs
1386643fba77SEduard Serra  */
1387643fba77SEduard Serra int
1388643fba77SEduard Serra vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
1389643fba77SEduard Serra {
1390643fba77SEduard Serra 	struct vmxnet3_hw *hw = dev->data->dev_private;
1391643fba77SEduard Serra 	Vmxnet3_DriverShared *shared = hw->shared;
1392643fba77SEduard Serra 	Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
1393643fba77SEduard Serra 	struct rte_eth_rss_conf *port_rss_conf;
1394643fba77SEduard Serra 	uint64_t rss_hf;
1395643fba77SEduard Serra 	uint32_t ret;
1396643fba77SEduard Serra 
1397643fba77SEduard Serra 	PMD_INIT_FUNC_TRACE();
1398643fba77SEduard Serra 
1399643fba77SEduard Serra 	cmdInfo->setRSSFields = 0;
1400643fba77SEduard Serra 	port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
140152ec00fdSEduard Serra 
140252ec00fdSEduard Serra 	if ((port_rss_conf->rss_hf & VMXNET3_MANDATORY_V4_RSS) !=
140352ec00fdSEduard Serra 	    VMXNET3_MANDATORY_V4_RSS) {
140452ec00fdSEduard Serra 		PMD_INIT_LOG(WARNING, "RSS: IPv4/6 TCP is required for vmxnet3 v4 RSS, "
140552ec00fdSEduard Serra 			     "automatically setting it");
140652ec00fdSEduard Serra 		port_rss_conf->rss_hf |= VMXNET3_MANDATORY_V4_RSS;
140752ec00fdSEduard Serra 	}
140852ec00fdSEduard Serra 
1409643fba77SEduard Serra 	rss_hf = port_rss_conf->rss_hf &
1410643fba77SEduard Serra 		(VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
1411643fba77SEduard Serra 
1412295968d1SFerruh Yigit 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
1413643fba77SEduard Serra 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
1414295968d1SFerruh Yigit 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
1415643fba77SEduard Serra 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
1416295968d1SFerruh Yigit 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
1417643fba77SEduard Serra 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
1418295968d1SFerruh Yigit 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
1419643fba77SEduard Serra 		cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
1420643fba77SEduard Serra 
1421643fba77SEduard Serra 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1422643fba77SEduard Serra 			       VMXNET3_CMD_SET_RSS_FIELDS);
1423643fba77SEduard Serra 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1424643fba77SEduard Serra 
1425643fba77SEduard Serra 	if (ret != VMXNET3_SUCCESS) {
1426643fba77SEduard Serra 		PMD_DRV_LOG(ERR, "Set RSS fields (v4) failed: %d", ret);
1427643fba77SEduard Serra 	}
1428643fba77SEduard Serra 
1429643fba77SEduard Serra 	return ret;
1430643fba77SEduard Serra }
1431643fba77SEduard Serra 
1432643fba77SEduard Serra /*
143309452c07SBruce Richardson  * Configure RSS feature
143409452c07SBruce Richardson  */
143509452c07SBruce Richardson int
143609452c07SBruce Richardson vmxnet3_rss_configure(struct rte_eth_dev *dev)
143709452c07SBruce Richardson {
143809452c07SBruce Richardson 	struct vmxnet3_hw *hw = dev->data->dev_private;
143909452c07SBruce Richardson 	struct VMXNET3_RSSConf *dev_rss_conf;
144009452c07SBruce Richardson 	struct rte_eth_rss_conf *port_rss_conf;
144109452c07SBruce Richardson 	uint64_t rss_hf;
144209452c07SBruce Richardson 	uint8_t i, j;
144309452c07SBruce Richardson 
144409452c07SBruce Richardson 	PMD_INIT_FUNC_TRACE();
144509452c07SBruce Richardson 
144609452c07SBruce Richardson 	dev_rss_conf = hw->rss_conf;
144709452c07SBruce Richardson 	port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
144809452c07SBruce Richardson 
144909452c07SBruce Richardson 	/* loading hashFunc */
145009452c07SBruce Richardson 	dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
145109452c07SBruce Richardson 	/* loading hashKeySize */
145209452c07SBruce Richardson 	dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
145309452c07SBruce Richardson 	/* loading indTableSize: must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128) */
1454b1584dd0SPankaj Gupta 	dev_rss_conf->indTableSize = (uint16_t)((MAX_RX_QUEUES(hw)) * 4);
145509452c07SBruce Richardson 
145609452c07SBruce Richardson 	if (port_rss_conf->rss_key == NULL) {
145709452c07SBruce Richardson 		/* Default hash key */
145809452c07SBruce Richardson 		port_rss_conf->rss_key = rss_intel_key;
145909452c07SBruce Richardson 	}
146009452c07SBruce Richardson 
146109452c07SBruce Richardson 	/* loading hashKey */
1462c346551eSYong Wang 	memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
1463c346551eSYong Wang 	       dev_rss_conf->hashKeySize);
146409452c07SBruce Richardson 
146509452c07SBruce Richardson 	/* loading indTable */
146609452c07SBruce Richardson 	for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
146709452c07SBruce Richardson 		if (j == dev->data->nb_rx_queues)
146809452c07SBruce Richardson 			j = 0;
146909452c07SBruce Richardson 		dev_rss_conf->indTable[i] = j;
147009452c07SBruce Richardson 	}
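	/*
	 * Example: with 3 Rx queues and indTableSize = 8, the loop above
	 * yields the round-robin table {0, 1, 2, 0, 1, 2, 0, 1}.
	 */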
147109452c07SBruce Richardson 
147209452c07SBruce Richardson 	/* loading hashType */
147309452c07SBruce Richardson 	dev_rss_conf->hashType = 0;
147409452c07SBruce Richardson 	rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
1475295968d1SFerruh Yigit 	if (rss_hf & RTE_ETH_RSS_IPV4)
147609452c07SBruce Richardson 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
1477295968d1SFerruh Yigit 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
147809452c07SBruce Richardson 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
1479295968d1SFerruh Yigit 	if (rss_hf & RTE_ETH_RSS_IPV6)
148009452c07SBruce Richardson 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
1481295968d1SFerruh Yigit 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
148209452c07SBruce Richardson 		dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
148309452c07SBruce Richardson 
148409452c07SBruce Richardson 	return VMXNET3_SUCCESS;
148509452c07SBruce Richardson }
1486