/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
 * All rights reserved.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <strings.h>
#include <malloc.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_bitmap.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <dev_driver.h>
#include <rte_net.h>
#include <bus_vmbus_driver.h>
#include <rte_spinlock.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_rndis.h"
#include "hn_nvs.h"
#include "ndis.h"

#define HN_NVS_SEND_MSG_SIZE \
	(sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))

#define HN_TXD_CACHE_SIZE	32 /* per cpu tx_descriptor pool cache */
#define HN_RXQ_EVENT_DEFAULT	2048

struct hn_rxinfo {
	uint32_t	vlan_info;
	uint32_t	csum_info;
	uint32_t	hash_info;
	uint32_t	hash_value;
};

#define HN_RXINFO_VLAN			0x0001
#define HN_RXINFO_CSUM			0x0002
#define HN_RXINFO_HASHINF		0x0004
#define HN_RXINFO_HASHVAL		0x0008
#define HN_RXINFO_ALL			\
	(HN_RXINFO_VLAN |		\
	 HN_RXINFO_CSUM |		\
	 HN_RXINFO_HASHINF |		\
	 HN_RXINFO_HASHVAL)

#define HN_NDIS_VLAN_INFO_INVALID	0xffffffff
#define HN_NDIS_RXCSUM_INFO_INVALID	0
#define HN_NDIS_HASH_INFO_INVALID	0

/*
 * Per-transmit bookkeeping.
 * A slot in the transmit ring (chim_index) is reserved for each transmit.
 *
 * There are two types of transmit:
 *   - buffered transmit where the chimney buffer is used and the RNDIS
 *     header is in the buffer. mbuf == NULL for this case.
 *
 *   - direct transmit where the RNDIS header is in rndis_pkt;
 *     the mbuf is freed after transmit.
 *
 * Descriptors come from a per-port pool which is used
 * to limit the number of outstanding requests per device.
 */
struct hn_txdesc {
	struct rte_mbuf *m;

	uint16_t	queue_id;
	uint32_t	chim_index;
	uint32_t	chim_size;
	uint32_t	data_size;
	uint32_t	packets;

	struct rndis_packet_msg *rndis_pkt;
};

#define HN_RNDIS_PKT_LEN				\
	(sizeof(struct rndis_packet_msg) +		\
	 RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE))

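/*
 * HN_RNDIS_PKT_LEN above is the worst-case RNDIS header: the base packet
 * message plus all four per-packet info fields (hash value, VLAN, LSOv2
 * and Tx checksum).  Each descriptor's header slot in tx_rndis is padded
 * to a cache line so adjacent slots never share a line.
 */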
#define HN_RNDIS_PKT_ALIGNED	RTE_ALIGN(HN_RNDIS_PKT_LEN, RTE_CACHE_LINE_SIZE)

/* Minimum space required for a packet */
#define HN_PKTSIZE_MIN(align) \
	RTE_ALIGN(RTE_ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)

#define DEFAULT_TX_FREE_THRESH 32

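/*
 * Update per-queue packet statistics.
 * Size histogram bins, as computed by the branches below:
 *   bin 0: <64      bin 1: 64        bin 2: 65-127     bin 3: 128-255
 *   bin 4: 256-511  bin 5: 512-1023  bin 6: 1024-1518  bin 7: >1518
 * The multicast/broadcast update relies on 'broadcast' immediately
 * following 'multicast' in struct hn_stats (enforced by the
 * RTE_BUILD_BUG_ON), so (&stats->multicast)[1] aliases stats->broadcast
 * and the increment is branchless.
 */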
static void
hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
{
	uint32_t s = m->pkt_len;
	const struct rte_ether_addr *ea;

	if (s >= 1024)
		stats->size_bins[6 + (s > 1518)]++;
	else if (s <= 64)
		stats->size_bins[s >> 6]++;
	else
		stats->size_bins[32UL - rte_clz32(s) - 5]++;

	ea = rte_pktmbuf_mtod(m, const struct rte_ether_addr *);
	RTE_BUILD_BUG_ON(offsetof(struct hn_stats, broadcast) !=
			offsetof(struct hn_stats, multicast) + sizeof(uint64_t));
	if (unlikely(rte_is_multicast_ether_addr(ea)))
		(&stats->multicast)[rte_is_broadcast_ether_addr(ea)]++;
}

static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt)
{
	return pkt->pktinfooffset + pkt->pktinfolen;
}

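/*
 * RNDIS packet message offsets are expressed relative to the start of
 * the dataoffset field rather than the start of the message (see the
 * matching RNDIS_PACKET_MSG_OFFSET_ABS conversion on receive); convert
 * an absolute offset into that representation.
 */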
static inline uint32_t
hn_rndis_pktmsg_offset(uint32_t ofs)
{
	return ofs - offsetof(struct rndis_packet_msg, dataoffset);
}

static void hn_txd_init(struct rte_mempool *mp __rte_unused,
			void *opaque, void *obj, unsigned int idx)
{
	struct hn_tx_queue *txq = opaque;
	struct hn_txdesc *txd = obj;

	memset(txd, 0, sizeof(*txd));

	txd->queue_id = txq->queue_id;
	txd->chim_index = NVS_CHIM_IDX_INVALID;
	txd->rndis_pkt = (struct rndis_packet_msg *)((char *)txq->tx_rndis
		+ idx * HN_RNDIS_PKT_ALIGNED);
}

int
hn_chim_init(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint32_t i, chim_bmp_size;

	rte_spinlock_init(&hv->chim_lock);
	chim_bmp_size = rte_bitmap_get_memory_footprint(hv->chim_cnt);
	hv->chim_bmem = rte_zmalloc("hn_chim_bitmap", chim_bmp_size,
				    RTE_CACHE_LINE_SIZE);
	if (hv->chim_bmem == NULL) {
		PMD_INIT_LOG(ERR, "failed to allocate bitmap size %u",
			     chim_bmp_size);
		return -1;
	}

	hv->chim_bmap = rte_bitmap_init(hv->chim_cnt,
					hv->chim_bmem, chim_bmp_size);
	if (hv->chim_bmap == NULL) {
		PMD_INIT_LOG(ERR, "failed to init chim bitmap");
		return -1;
	}

	for (i = 0; i < hv->chim_cnt; i++)
		rte_bitmap_set(hv->chim_bmap, i);

	return 0;
}

void
hn_chim_uninit(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	rte_bitmap_free(hv->chim_bmap);
	rte_free(hv->chim_bmem);
	hv->chim_bmem = NULL;
}

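/*
 * Allocate a chimney (host send buffer) section.  Free sections are
 * tracked as set bits in the bitmap; scan for one, convert the slab
 * bit position into a section index, and mark it busy.  Returns
 * NVS_CHIM_IDX_INVALID if no section is free.
 */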
static uint32_t hn_chim_alloc(struct hn_data *hv)
{
	uint32_t index = NVS_CHIM_IDX_INVALID;
	uint64_t slab = 0;

	rte_spinlock_lock(&hv->chim_lock);
	if (rte_bitmap_scan(hv->chim_bmap, &index, &slab)) {
		index += rte_bsf64(slab);
		rte_bitmap_clear(hv->chim_bmap, index);
	}
	rte_spinlock_unlock(&hv->chim_lock);

	return index;
}

static void hn_chim_free(struct hn_data *hv, uint32_t chim_idx)
{
	if (chim_idx >= hv->chim_cnt) {
		PMD_DRV_LOG(ERR, "Invalid chimney index %u", chim_idx);
	} else {
		rte_spinlock_lock(&hv->chim_lock);
		rte_bitmap_set(hv->chim_bmap, chim_idx);
		rte_spinlock_unlock(&hv->chim_lock);
	}
}

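/*
 * Reset the per-queue transmit aggregation state: agg_szleft and
 * agg_pktleft are the bytes and packets that may still be coalesced
 * into the current chimney buffer before it must be flushed.
 */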
static void hn_reset_txagg(struct hn_tx_queue *txq)
{
	txq->agg_szleft = txq->agg_szmax;
	txq->agg_pktleft = txq->agg_pktmax;
	txq->agg_txd = NULL;
	txq->agg_prevpkt = NULL;
}

static void
hn_rx_queue_free_common(struct hn_rx_queue *rxq)
{
	if (!rxq)
		return;

	rte_free(rxq->rxbuf_info);
	rte_free(rxq->event_buf);
	rte_free(rxq);
}

int
hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
		      uint16_t queue_idx, uint16_t nb_desc,
		      unsigned int socket_id,
		      const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct hn_tx_queue *txq;
	struct hn_rx_queue *rxq = NULL;
	char name[RTE_MEMPOOL_NAMESIZE];
	uint32_t tx_free_thresh;
	int err = -ENOMEM;

	PMD_INIT_FUNC_TRACE();

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh = RTE_MIN(nb_desc / 4,
					 DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh + 3 >= nb_desc) {
		PMD_INIT_LOG(ERR,
			     "tx_free_thresh must be less than the number of TX entries minus 3(%u)."
			     " (tx_free_thresh=%u port=%u queue=%u)",
			     nb_desc - 3,
			     tx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (!txq)
		return -ENOMEM;

	txq->hv = hv;
	txq->chan = hv->channels[queue_idx];
	txq->port_id = dev->data->port_id;
	txq->queue_id = queue_idx;
	txq->free_thresh = tx_free_thresh;

	snprintf(name, sizeof(name),
		 "hn_txd_%u_%u", dev->data->port_id, queue_idx);

	PMD_INIT_LOG(DEBUG, "TX descriptor pool %s n=%u size=%zu",
		     name, nb_desc, sizeof(struct hn_txdesc));

	txq->tx_rndis_mz = rte_memzone_reserve_aligned(name,
			nb_desc * HN_RNDIS_PKT_ALIGNED, rte_socket_id(),
			RTE_MEMZONE_IOVA_CONTIG, HN_RNDIS_PKT_ALIGNED);
	if (!txq->tx_rndis_mz) {
		err = -rte_errno;
		goto error;
	}
	txq->tx_rndis = txq->tx_rndis_mz->addr;
	txq->tx_rndis_iova = txq->tx_rndis_mz->iova;

	txq->txdesc_pool = rte_mempool_create(name, nb_desc,
					      sizeof(struct hn_txdesc),
					      0, 0, NULL, NULL,
					      hn_txd_init, txq,
					      dev->device->numa_node, 0);
	if (txq->txdesc_pool == NULL) {
		PMD_DRV_LOG(ERR,
			    "mempool %s create failed: %d", name, rte_errno);
		goto error;
	}

	/*
	 * If there are more Tx queues than Rx queues, allocate an Rx queue
	 * with an event buffer so that Tx completion messages can still be
	 * received.
	 */
	if (queue_idx >= dev->data->nb_rx_queues) {
		rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);

		if (!rxq) {
			err = -ENOMEM;
			goto error;
		}

		/*
		 * Don't allocate mbuf pool or rx ring.  RSS is always configured
		 * to ensure packets aren't received by this Rx queue.
		 */
		rxq->mb_pool = NULL;
		rxq->rx_ring = NULL;
	}

	txq->agg_szmax  = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size);
	txq->agg_pktmax = hv->rndis_agg_pkts;
	txq->agg_align  = hv->rndis_agg_align;

	hn_reset_txagg(txq);

	err = hn_vf_tx_queue_setup(dev, queue_idx, nb_desc,
				     socket_id, tx_conf);
	if (err == 0) {
		dev->data->tx_queues[queue_idx] = txq;
		if (rxq != NULL)
			dev->data->rx_queues[queue_idx] = rxq;
		return 0;
	}

error:
	rte_mempool_free(txq->txdesc_pool);
	rte_memzone_free(txq->tx_rndis_mz);
	hn_rx_queue_free_common(rxq);
	rte_free(txq);
	return err;
}

void
hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
		     struct rte_eth_txq_info *qinfo)
{
	struct hn_tx_queue *txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->txdesc_pool->size;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
}

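/*
 * Get a transmit descriptor from the per-queue mempool and reset its
 * per-packet fields.  A failed get means all descriptors are still
 * pending completion by the host, so it is counted as ring_full.
 */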
static struct hn_txdesc *hn_txd_get(struct hn_tx_queue *txq)
{
	struct hn_txdesc *txd;

	if (rte_mempool_get(txq->txdesc_pool, (void **)&txd)) {
		++txq->stats.ring_full;
		PMD_TX_LOG(DEBUG, "tx pool exhausted!");
		return NULL;
	}

	txd->m = NULL;
	txd->packets = 0;
	txd->data_size = 0;
	txd->chim_size = 0;

	return txd;
}

static void hn_txd_put(struct hn_tx_queue *txq, struct hn_txdesc *txd)
{
	rte_mempool_put(txq->txdesc_pool, txd);
}

void
hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct hn_tx_queue *txq = dev->data->tx_queues[qid];

	PMD_INIT_FUNC_TRACE();

	if (!txq)
		return;
	/*
	 * Free any Rx queues allocated for a Tx queue without a corresponding
	 * Rx queue
	 */
	if (qid >= dev->data->nb_rx_queues)
		hn_rx_queue_free_common(dev->data->rx_queues[qid]);

	rte_mempool_free(txq->txdesc_pool);

	rte_memzone_free(txq->tx_rndis_mz);
	rte_free(txq);
}

/*
 * Check the status of a Tx descriptor in the queue.
 *
 * returns:
 *  - -EINVAL              - offset outside of tx_descriptor pool.
 *  - RTE_ETH_TX_DESC_FULL - descriptor is not acknowledged by host.
 *  - RTE_ETH_TX_DESC_DONE - descriptor is available.
 */
int hn_dev_tx_descriptor_status(void *arg, uint16_t offset)
{
	const struct hn_tx_queue *txq = arg;

	hn_process_events(txq->hv, txq->queue_id, 0);

	if (offset >= rte_mempool_avail_count(txq->txdesc_pool))
		return -EINVAL;

	if (offset < rte_mempool_in_use_count(txq->txdesc_pool))
		return RTE_ETH_TX_DESC_FULL;
	else
		return RTE_ETH_TX_DESC_DONE;
}

static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
		      unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
{
	struct hn_data *hv = dev->data->dev_private;
	struct hn_txdesc *txd = (struct hn_txdesc *)xactid;
	struct hn_tx_queue *txq;

	/* Control packets are sent with xactid == 0 */
	if (!txd)
		return;

	txq = dev->data->tx_queues[queue_id];
	if (likely(ack->status == NVS_STATUS_OK)) {
		PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u",
			   txq->port_id, txq->queue_id, txd->chim_index,
			   txd->packets, txd->data_size);
		txq->stats.bytes += txd->data_size;
		txq->stats.packets += txd->packets;
	} else {
		PMD_DRV_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
			    txq->port_id, txq->queue_id, txd->chim_index, ack->status);
		++txq->stats.errors;
	}

	if (txd->chim_index != NVS_CHIM_IDX_INVALID) {
		hn_chim_free(hv, txd->chim_index);
		txd->chim_index = NVS_CHIM_IDX_INVALID;
	}

	rte_pktmbuf_free(txd->m);
	hn_txd_put(txq, txd);
}

/* Handle transmit completion events */
static void
hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id,
		   const struct vmbus_chanpkt_hdr *pkt,
		   const void *data)
{
	const struct hn_nvs_hdr *hdr = data;

	switch (hdr->type) {
	case NVS_TYPE_RNDIS_ACK:
		hn_nvs_send_completed(dev, queue_id, pkt->xactid, data);
		break;

	default:
		PMD_DRV_LOG(NOTICE, "unexpected send completion type %u",
			   hdr->type);
	}
}

/* Parse per-packet info (meta data) */
static int
hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen,
		struct hn_rxinfo *info)
{
	const struct rndis_pktinfo *pi = info_data;
	uint32_t mask = 0;

	while (info_dlen != 0) {
		const void *data;
		uint32_t dlen;

		if (unlikely(info_dlen < sizeof(*pi)))
			return -EINVAL;

		if (unlikely(info_dlen < pi->size))
			return -EINVAL;
		info_dlen -= pi->size;

		if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK))
			return -EINVAL;
		if (unlikely(pi->size < pi->offset))
			return -EINVAL;

		dlen = pi->size - pi->offset;
		data = pi->data;

		switch (pi->type) {
		case NDIS_PKTINFO_TYPE_VLAN:
			if (unlikely(dlen < NDIS_VLAN_INFO_SIZE))
				return -EINVAL;
			info->vlan_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_VLAN;
			break;

		case NDIS_PKTINFO_TYPE_CSUM:
			if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE))
				return -EINVAL;
			info->csum_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_CSUM;
			break;

		case NDIS_PKTINFO_TYPE_HASHVAL:
			if (unlikely(dlen < NDIS_HASH_VALUE_SIZE))
				return -EINVAL;
			info->hash_value = *((const uint32_t *)data);
			mask |= HN_RXINFO_HASHVAL;
			break;

		case NDIS_PKTINFO_TYPE_HASHINF:
			if (unlikely(dlen < NDIS_HASH_INFO_SIZE))
				return -EINVAL;
			info->hash_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_HASHINF;
			break;

		default:
			goto next;
		}

		if (mask == HN_RXINFO_ALL)
			break; /* All found; done */
next:
		pi = (const struct rndis_pktinfo *)
		    ((const uint8_t *)pi + pi->size);
	}

	/*
	 * Final fixup.
	 * - If there is no hash value, invalidate the hash info.
	 */
	if (!(mask & HN_RXINFO_HASHVAL))
		info->hash_info = HN_NDIS_HASH_INFO_INVALID;
	return 0;
}

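/*
 * Callback invoked when the last mbuf referencing an external receive
 * buffer section is freed; decrement the outstanding count and ack the
 * section back to the host so it can be reused.
 */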
static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
{
	struct hn_rx_bufinfo *rxb = opaque;
	struct hn_rx_queue *rxq = rxb->rxq;

	rte_atomic32_dec(&rxq->rxbuf_outstanding);
	hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
}

static struct hn_rx_bufinfo *hn_rx_buf_init(struct hn_rx_queue *rxq,
					    const struct vmbus_chanpkt_rxbuf *pkt)
{
	struct hn_rx_bufinfo *rxb;

	rxb = rxq->rxbuf_info + pkt->hdr.xactid;
	rxb->chan = rxq->chan;
	rxb->xactid = pkt->hdr.xactid;
	rxb->rxq = rxq;

	rxb->shinfo.free_cb = hn_rx_buf_free_cb;
	rxb->shinfo.fcb_opaque = rxb;
	rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1);
	return rxb;
}

static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
		     uint8_t *data, unsigned int headroom, unsigned int dlen,
		     const struct hn_rxinfo *info)
{
	struct hn_data *hv = rxq->hv;
	struct rte_mbuf *m = NULL;
	bool use_extbuf = false;

	if (likely(rxq->mb_pool != NULL))
		m = rte_pktmbuf_alloc(rxq->mb_pool);

	if (unlikely(!m)) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[rxq->port_id];

		dev->data->rx_mbuf_alloc_failed++;
		return;
	}

	/*
	 * For large packets, avoid the copy if possible, but keep some
	 * space available in the receive area for later packets.
	 */
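	/*
	 * Zero-copy is used only when rx_extmbuf_enable is set, the packet
	 * is larger than rx_copybreak, and fewer than half of the receive
	 * buffer sections are outstanding; otherwise the data is copied so
	 * the shared receive buffer section can be acked promptly.
	 */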
	if (hv->rx_extmbuf_enable && dlen > hv->rx_copybreak &&
	    (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
			hv->rxbuf_section_cnt / 2) {
		struct rte_mbuf_ext_shared_info *shinfo;
		const void *rxbuf;
		rte_iova_t iova;

		/*
		 * Build an external mbuf that points to the receive area.
		 * Use the refcount to handle multiple packets in the same
		 * receive buffer section.
		 */
		rxbuf = hv->rxbuf_res.addr;
		iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
		shinfo = &rxb->shinfo;

		/* shinfo is already set to 1 by the caller */
		if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 2)
			rte_atomic32_inc(&rxq->rxbuf_outstanding);

		rte_pktmbuf_attach_extbuf(m, data, iova,
					  dlen + headroom, shinfo);
		m->data_off = headroom;
		use_extbuf = true;
	} else {
		/* Mbufs in the pool must be large enough to hold small packets */
		if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
			rte_pktmbuf_free_seg(m);
			++rxq->stats.errors;
			return;
		}
		rte_memcpy(rte_pktmbuf_mtod(m, void *),
			   data + headroom, dlen);
	}

	m->port = rxq->port_id;
	m->pkt_len = dlen;
	m->data_len = dlen;
	m->packet_type = rte_net_get_ptype(m, NULL,
					   RTE_PTYPE_L2_MASK |
					   RTE_PTYPE_L3_MASK |
					   RTE_PTYPE_L4_MASK);

	if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
		m->vlan_tci = RTE_VLAN_TCI_MAKE(NDIS_VLAN_INFO_ID(info->vlan_info),
						NDIS_VLAN_INFO_PRI(info->vlan_info),
						NDIS_VLAN_INFO_CFI(info->vlan_info));
		m->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;

		/* NDIS always strips the tag; put it back if necessary */
		if (!hv->vlan_strip && rte_vlan_insert(&m)) {
			PMD_DRV_LOG(DEBUG, "vlan insert failed");
			++rxq->stats.errors;
			if (use_extbuf)
				rte_pktmbuf_detach_extbuf(m);
			rte_pktmbuf_free(m);
			return;
		}
	}

	if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
		if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK)
			m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

		if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
				       | NDIS_RXCSUM_INFO_TCPCS_OK))
			m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED
					    | NDIS_RXCSUM_INFO_UDPCS_FAILED))
			m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	}

	if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
		m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		m->hash.rss = info->hash_value;
	}

	PMD_RX_LOG(DEBUG,
		   "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64,
		   rxq->port_id, rxq->queue_id, rxb->xactid,
		   m->pkt_len, m->packet_type, m->ol_flags);

	++rxq->stats.packets;
	rxq->stats.bytes += m->pkt_len;
	hn_update_packet_stats(&rxq->stats, m);

	if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
		++rxq->stats.ring_full;
		PMD_RX_LOG(DEBUG, "rx ring full");
		if (use_extbuf)
			rte_pktmbuf_detach_extbuf(m);
		rte_pktmbuf_free(m);
	}
}

static void hn_rndis_rx_data(struct hn_rx_queue *rxq,
			     struct hn_rx_bufinfo *rxb,
			     void *data, uint32_t dlen)
{
	unsigned int data_off, data_len;
	unsigned int pktinfo_off, pktinfo_len;
	const struct rndis_packet_msg *pkt = data;
	struct hn_rxinfo info = {
		.vlan_info = HN_NDIS_VLAN_INFO_INVALID,
		.csum_info = HN_NDIS_RXCSUM_INFO_INVALID,
		.hash_info = HN_NDIS_HASH_INFO_INVALID,
	};
	int err;

	hn_rndis_dump(pkt);

	if (unlikely(dlen < sizeof(*pkt)))
		goto error;

	if (unlikely(dlen < pkt->len))
		goto error; /* truncated RNDIS from host */

	if (unlikely(pkt->len < pkt->datalen
		     + pkt->oobdatalen + pkt->pktinfolen))
		goto error;

	if (unlikely(pkt->datalen == 0))
		goto error;

	/* Check offsets. */
	if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN))
		goto error;

	if (likely(pkt->pktinfooffset > 0) &&
	    unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN ||
		     (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK)))
		goto error;

	data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
	data_len = pkt->datalen;
	pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset);
	pktinfo_len = pkt->pktinfolen;

	if (likely(pktinfo_len > 0)) {
		err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off,
				      pktinfo_len, &info);
		if (err)
			goto error;
	}

	/* overflow check */
	if (data_len > data_len + data_off || data_len + data_off > pkt->len)
		goto error;

	if (unlikely(data_len < RTE_ETHER_HDR_LEN))
		goto error;

	hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
	return;
error:
	++rxq->stats.errors;
}

static void
hn_rndis_receive(struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
		 struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
{
	const struct rndis_msghdr *hdr = buf;

	switch (hdr->type) {
	case RNDIS_PACKET_MSG:
		if (dev->data->dev_started)
			hn_rndis_rx_data(rxq, rxb, buf, len);
		break;

	case RNDIS_INDICATE_STATUS_MSG:
		hn_rndis_link_status(dev, buf);
		break;

	case RNDIS_INITIALIZE_CMPLT:
	case RNDIS_QUERY_CMPLT:
	case RNDIS_SET_CMPLT:
		hn_rndis_receive_response(rxq->hv, buf, len);
		break;

	default:
		PMD_DRV_LOG(NOTICE,
			    "unexpected RNDIS message (type %#x len %u)",
			    hdr->type, len);
		break;
	}
}

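/*
 * Handle a receive-buffer channel packet: the host passes a batch of
 * (offset, length) ranges within the shared receive buffer, each
 * holding one RNDIS message.  The xactid identifies the buffer section,
 * which is acked back to the host once every range has been consumed
 * and no external mbuf still holds a reference to it.
 */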
static void
hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
		    struct hn_data *hv,
		    struct hn_rx_queue *rxq,
		    const struct vmbus_chanpkt_hdr *hdr,
		    const void *buf)
{
	const struct vmbus_chanpkt_rxbuf *pkt;
	const struct hn_nvs_hdr *nvs_hdr = buf;
	uint32_t rxbuf_sz = hv->rxbuf_res.len;
	char *rxbuf = hv->rxbuf_res.addr;
	unsigned int i, hlen, count;
	struct hn_rx_bufinfo *rxb;

	/* At minimum we need type header */
	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) {
		PMD_RX_LOG(ERR, "invalid receive nvs RNDIS");
		return;
	}

	/* Make sure that this is a RNDIS message. */
	if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) {
		PMD_RX_LOG(ERR, "nvs type %u, not RNDIS",
			   nvs_hdr->type);
		return;
	}

	hlen = vmbus_chanpkt_getlen(hdr->hlen);
	if (unlikely(hlen < sizeof(*pkt))) {
		PMD_RX_LOG(ERR, "invalid rxbuf chanpkt");
		return;
	}

	pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr);
	if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) {
		PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x",
			   pkt->rxbuf_id);
		return;
	}

	count = pkt->rxbuf_cnt;
	if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf,
				     rxbuf[count]))) {
		PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count);
		return;
	}

	if (pkt->hdr.xactid > hv->rxbuf_section_cnt) {
		PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64,
			   pkt->hdr.xactid);
		return;
	}

	/* Setup receive buffer info to allow for callback */
	rxb = hn_rx_buf_init(rxq, pkt);

	/* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
	for (i = 0; i < count; ++i) {
		unsigned int ofs, len;

		ofs = pkt->rxbuf[i].ofs;
		len = pkt->rxbuf[i].len;

		if (unlikely(ofs + len > rxbuf_sz)) {
			PMD_RX_LOG(ERR,
				   "%uth RNDIS msg overflow ofs %u, len %u",
				   i, ofs, len);
			continue;
		}

		if (unlikely(len == 0)) {
			PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len);
			continue;
		}

		hn_rndis_receive(dev, rxq, rxb,
				 rxbuf + ofs, len);
	}

	/* Send ACK now if external mbuf not used */
	if (rte_mbuf_ext_refcnt_update(&rxb->shinfo, -1) == 0)
		hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
}

/*
 * Called when NVS inband events are received.
 * Sends a two-part message, the port_id and the NVS message,
 * over the pipe to the netvsc-vf-event control thread.
 */
static void hn_nvs_handle_notify(struct rte_eth_dev *dev,
				 const struct vmbus_chanpkt_hdr *pkt,
				 const void *data)
{
	const struct hn_nvs_hdr *hdr = data;

	switch (hdr->type) {
	case NVS_TYPE_TXTBL_NOTE:
		/* The transmit indirection table has locking problems
		 * in DPDK and is therefore not implemented
		 */
		PMD_DRV_LOG(DEBUG, "host notify of transmit indirection table");
		break;

	case NVS_TYPE_VFASSOC_NOTE:
		hn_nvs_handle_vfassoc(dev, pkt, data);
		break;

	default:
		PMD_DRV_LOG(INFO,
			    "got notify, nvs type %u", hdr->type);
	}
}

struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
				      uint16_t queue_id,
				      unsigned int socket_id)
{
	struct hn_rx_queue *rxq;

	rxq = rte_zmalloc_socket("HN_RXQ", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq)
		return NULL;

	rxq->hv = hv;
	rxq->chan = hv->channels[queue_id];
	rte_spinlock_init(&rxq->ring_lock);
	rxq->port_id = hv->port_id;
	rxq->queue_id = queue_id;
	rxq->event_sz = HN_RXQ_EVENT_DEFAULT;
	rxq->event_buf = rte_malloc_socket("HN_EVENTS", HN_RXQ_EVENT_DEFAULT,
					   RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->event_buf) {
		rte_free(rxq);
		return NULL;
	}

	/* setup rxbuf_info for non-primary queue */
	if (queue_id) {
		rxq->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
					hv->rxbuf_section_cnt,
					sizeof(*rxq->rxbuf_info),
					RTE_CACHE_LINE_SIZE);

		if (!rxq->rxbuf_info) {
			PMD_DRV_LOG(ERR,
				"Could not allocate rxbuf info for queue %d",
				queue_id);
			rte_free(rxq->event_buf);
			rte_free(rxq);
			return NULL;
		}
	}

	return rxq;
}

void
hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
		     struct rte_eth_rxq_info *qinfo)
{
	struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->nb_desc = rxq->rx_ring->size;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

int
hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
		      uint16_t queue_idx, uint16_t nb_desc,
		      unsigned int socket_id,
		      const struct rte_eth_rxconf *rx_conf,
		      struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	char ring_name[RTE_RING_NAMESIZE];
	struct hn_rx_queue *rxq;
	unsigned int count;
	int error = -ENOMEM;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx == 0) {
		rxq = hv->primary;
	} else {
		/*
		 * If the number of Tx queues was previously greater than the
		 * number of Rx queues, we may already have allocated an rxq.
		 */
		if (!dev->data->rx_queues[queue_idx])
			rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);
		else
			rxq = dev->data->rx_queues[queue_idx];

		if (!rxq)
			return -ENOMEM;
	}

	rxq->mb_pool = mp;
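	/*
	 * Default the ring size to this queue's share of the mempool;
	 * a larger request is clamped so the ring can always be refilled.
	 */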
	count = rte_mempool_avail_count(mp) / dev->data->nb_rx_queues;
	if (nb_desc == 0 || nb_desc > count)
		nb_desc = count;

	/*
	 * Staging ring from the receive event logic to rx_pkts.
	 * rx_pkts assumes the caller handles any multi-threading;
	 * the event logic has its own locking.
	 */
	snprintf(ring_name, sizeof(ring_name),
		 "hn_rx_%u_%u", dev->data->port_id, queue_idx);
	rxq->rx_ring = rte_ring_create(ring_name,
				       rte_align32pow2(nb_desc),
				       socket_id, 0);
	if (!rxq->rx_ring)
		goto fail;

	error = hn_vf_rx_queue_setup(dev, queue_idx, nb_desc,
				     socket_id, rx_conf, mp);
	if (error)
		goto fail;

	dev->data->rx_queues[queue_idx] = rxq;
	return 0;

fail:
	rte_ring_free(rxq->rx_ring);
	/* Only free rxq if it was created in this function. */
	if (!dev->data->rx_queues[queue_idx])
		hn_rx_queue_free_common(rxq);

	return error;
}

static void
hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
{
	if (!rxq)
		return;

	rte_ring_free(rxq->rx_ring);
	rxq->rx_ring = NULL;
	rxq->mb_pool = NULL;

	hn_vf_rx_queue_release(rxq->hv, rxq->queue_id);

	/* Keep primary queue to allow for control operations */
	if (keep_primary && rxq == rxq->hv->primary)
		return;

	hn_rx_queue_free_common(rxq);
}

void
hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct hn_rx_queue *rxq = dev->data->rx_queues[qid];

	PMD_INIT_FUNC_TRACE();

	hn_rx_queue_free(rxq, true);
}

/*
 * Get the number of used descriptors in an Rx queue.
 * For this device that means how many packets are pending in the ring.
 */
1059a41ef8eeSStephen Hemminger uint32_t
10608d7d4fcdSKonstantin Ananyev hn_dev_rx_queue_count(void *rx_queue)
1061a41ef8eeSStephen Hemminger {
10628d7d4fcdSKonstantin Ananyev 	struct hn_rx_queue *rxq = rx_queue;
1063a41ef8eeSStephen Hemminger 
1064a41ef8eeSStephen Hemminger 	return rte_ring_count(rxq->rx_ring);
1065a41ef8eeSStephen Hemminger }
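
/*
 * Illustrative usage sketch (hypothetical port_id/queue_id): an
 * application reaches the callback above through the generic API:
 *
 *	int pending = rte_eth_rx_queue_count(port_id, queue_id);
 *	if (pending > 0)
 *		... packets are already staged and a poll will succeed ...
 */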
1066a41ef8eeSStephen Hemminger 
1067a41ef8eeSStephen Hemminger /*
1068a41ef8eeSStephen Hemminger  * Check the status of an Rx descriptor in the queue
1069a41ef8eeSStephen Hemminger  *
1070a41ef8eeSStephen Hemminger  * returns:
1071a41ef8eeSStephen Hemminger  *  - -EINVAL               - offset outside of ring
1072a41ef8eeSStephen Hemminger  *  - RTE_ETH_RX_DESC_AVAIL - no data available yet
10737be78d02SJosh Soref  *  - RTE_ETH_RX_DESC_DONE  - data is waiting in staging ring
1074a41ef8eeSStephen Hemminger  */
1075a41ef8eeSStephen Hemminger int hn_dev_rx_queue_status(void *arg, uint16_t offset)
1076a41ef8eeSStephen Hemminger {
1077a41ef8eeSStephen Hemminger 	const struct hn_rx_queue *rxq = arg;
1078a41ef8eeSStephen Hemminger 
1079a41ef8eeSStephen Hemminger 	hn_process_events(rxq->hv, rxq->queue_id, 0);
1080a41ef8eeSStephen Hemminger 	if (offset >= rxq->rx_ring->capacity)
1081a41ef8eeSStephen Hemminger 		return -EINVAL;
1082a41ef8eeSStephen Hemminger 
1083a41ef8eeSStephen Hemminger 	if (offset < rte_ring_count(rxq->rx_ring))
1084a41ef8eeSStephen Hemminger 		return RTE_ETH_RX_DESC_DONE;
1085a41ef8eeSStephen Hemminger 	else
1086a41ef8eeSStephen Hemminger 		return RTE_ETH_RX_DESC_AVAIL;
1087a41ef8eeSStephen Hemminger }
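
/*
 * Illustrative usage sketch (hypothetical port_id/queue_id): the
 * callback above is reached via rte_eth_rx_descriptor_status():
 *
 *	switch (rte_eth_rx_descriptor_status(port_id, queue_id, 0)) {
 *	case RTE_ETH_RX_DESC_DONE:
 *		... at least one packet waits in the staging ring ...
 *		break;
 *	case RTE_ETH_RX_DESC_AVAIL:
 *		... the slot exists but no data has arrived yet ...
 *		break;
 *	}
 */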
1088a41ef8eeSStephen Hemminger 
10897a866f0dSStephen Hemminger int
10907a866f0dSStephen Hemminger hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
10917a866f0dSStephen Hemminger {
10927a866f0dSStephen Hemminger 	struct hn_tx_queue *txq = arg;
10937a866f0dSStephen Hemminger 
10947a866f0dSStephen Hemminger 	return hn_process_events(txq->hv, txq->queue_id, free_cnt);
10957a866f0dSStephen Hemminger }
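
/*
 * Illustrative usage sketch (hypothetical port_id/queue_id): requesting
 * completion processing through the generic API lands in the callback
 * above, which in turn drains pending channel events:
 *
 *	ret = rte_eth_tx_done_cleanup(port_id, queue_id, 32);
 */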
10967a866f0dSStephen Hemminger 
10974e9c73e9SStephen Hemminger /*
10984e9c73e9SStephen Hemminger  * Process pending events on the channel.
10994e9c73e9SStephen Hemminger  * Called from both Rx queue poll and Tx cleanup
11004e9c73e9SStephen Hemminger  */
11017a866f0dSStephen Hemminger uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
11027a866f0dSStephen Hemminger 			   uint32_t tx_limit)
11034e9c73e9SStephen Hemminger {
11044e9c73e9SStephen Hemminger 	struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
11054e9c73e9SStephen Hemminger 	struct hn_rx_queue *rxq;
1106530af95aSStephen Hemminger 	uint32_t bytes_read = 0;
11077a866f0dSStephen Hemminger 	uint32_t tx_done = 0;
11084e9c73e9SStephen Hemminger 	int ret = 0;
11094e9c73e9SStephen Hemminger 
11104e9c73e9SStephen Hemminger 	rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];
11114e9c73e9SStephen Hemminger 
11124e9c73e9SStephen Hemminger 	/*
11134e9c73e9SStephen Hemminger 	 * The channel is shared between the Rx and Tx queues, so a lock
11144e9c73e9SStephen Hemminger 	 * is needed because DPDK does not force Rx and Tx onto the same CPU.
11154e9c73e9SStephen Hemminger 	 */
11164e9c73e9SStephen Hemminger 	if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock)))
11177a866f0dSStephen Hemminger 		return 0;
11184e9c73e9SStephen Hemminger 
11194e9c73e9SStephen Hemminger 	for (;;) {
11204e9c73e9SStephen Hemminger 		const struct vmbus_chanpkt_hdr *pkt;
11211f2766b7SStephen Hemminger 		uint32_t len = rxq->event_sz;
11224e9c73e9SStephen Hemminger 		const void *data;
11234e9c73e9SStephen Hemminger 
11241f2766b7SStephen Hemminger retry:
11254e9c73e9SStephen Hemminger 		ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
11264e9c73e9SStephen Hemminger 		if (ret == -EAGAIN)
11274e9c73e9SStephen Hemminger 			break;	/* ring is empty */
11284e9c73e9SStephen Hemminger 
11291f2766b7SStephen Hemminger 		if (unlikely(ret == -ENOBUFS)) {
11301f2766b7SStephen Hemminger 			/* event buffer not large enough to read ring */
11311f2766b7SStephen Hemminger 
11321f2766b7SStephen Hemminger 			PMD_DRV_LOG(DEBUG,
11331f2766b7SStephen Hemminger 				    "event buffer expansion (need %u)", len);
11341f2766b7SStephen Hemminger 			rxq->event_sz = len + len / 4;
11351f2766b7SStephen Hemminger 			rxq->event_buf = rte_realloc(rxq->event_buf, rxq->event_sz,
11361f2766b7SStephen Hemminger 						     RTE_CACHE_LINE_SIZE);
11371f2766b7SStephen Hemminger 			if (rxq->event_buf)
11381f2766b7SStephen Hemminger 				goto retry;
11391f2766b7SStephen Hemminger 			/* out of memory, no more events now */
11401f2766b7SStephen Hemminger 			rxq->event_sz = 0;
11411f2766b7SStephen Hemminger 			break;
11421f2766b7SStephen Hemminger 		}
11431f2766b7SStephen Hemminger 
11441f2766b7SStephen Hemminger 		if (unlikely(ret <= 0)) {
11451f2766b7SStephen Hemminger 			/* This indicates a failure to communicate (or worse) */
1146530af95aSStephen Hemminger 			rte_exit(EXIT_FAILURE,
1147530af95aSStephen Hemminger 				 "vmbus ring buffer error: %d", ret);
11481f2766b7SStephen Hemminger 		}
11494e9c73e9SStephen Hemminger 
1150530af95aSStephen Hemminger 		bytes_read += ret;
11514e9c73e9SStephen Hemminger 		pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
11524e9c73e9SStephen Hemminger 		data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen);
11534e9c73e9SStephen Hemminger 
11544e9c73e9SStephen Hemminger 		switch (pkt->type) {
11554e9c73e9SStephen Hemminger 		case VMBUS_CHANPKT_TYPE_COMP:
11567a866f0dSStephen Hemminger 			++tx_done;
11574e9c73e9SStephen Hemminger 			hn_nvs_handle_comp(dev, queue_id, pkt, data);
11584e9c73e9SStephen Hemminger 			break;
11594e9c73e9SStephen Hemminger 
11604e9c73e9SStephen Hemminger 		case VMBUS_CHANPKT_TYPE_RXBUF:
11614e9c73e9SStephen Hemminger 			hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data);
11624e9c73e9SStephen Hemminger 			break;
11634e9c73e9SStephen Hemminger 
11644e9c73e9SStephen Hemminger 		case VMBUS_CHANPKT_TYPE_INBAND:
1165dc7680e8SStephen Hemminger 			hn_nvs_handle_notify(dev, pkt, data);
11664e9c73e9SStephen Hemminger 			break;
11674e9c73e9SStephen Hemminger 
11684e9c73e9SStephen Hemminger 		default:
11694e9c73e9SStephen Hemminger 			PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type);
11704e9c73e9SStephen Hemminger 			break;
11714e9c73e9SStephen Hemminger 		}
11727e6c8243SStephen Hemminger 
11737a866f0dSStephen Hemminger 		if (tx_limit && tx_done >= tx_limit)
11747a866f0dSStephen Hemminger 			break;
11754e9c73e9SStephen Hemminger 	}
1176530af95aSStephen Hemminger 
1177530af95aSStephen Hemminger 	if (bytes_read > 0)
1178530af95aSStephen Hemminger 		rte_vmbus_chan_signal_read(rxq->chan, bytes_read);
1179530af95aSStephen Hemminger 
11804e9c73e9SStephen Hemminger 	rte_spinlock_unlock(&rxq->ring_lock);
11817a866f0dSStephen Hemminger 
11827a866f0dSStephen Hemminger 	return tx_done;
11834e9c73e9SStephen Hemminger }
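
/*
 * A minimal sketch of the trylock-or-bail pattern used above, assuming
 * two lcores (one polling Rx, one cleaning up Tx) can race for the same
 * vmbus channel; the loser returns immediately and retries later:
 *
 *	if (!rte_spinlock_trylock(&rxq->ring_lock))
 *		return 0;	(another context is already draining)
 *	... read packets and completions from the channel ...
 *	rte_spinlock_unlock(&rxq->ring_lock);
 */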
11844e9c73e9SStephen Hemminger 
11854e9c73e9SStephen Hemminger static void hn_append_to_chim(struct hn_tx_queue *txq,
11864e9c73e9SStephen Hemminger 			      struct rndis_packet_msg *pkt,
11874e9c73e9SStephen Hemminger 			      const struct rte_mbuf *m)
11884e9c73e9SStephen Hemminger {
11894e9c73e9SStephen Hemminger 	struct hn_txdesc *txd = txq->agg_txd;
11904e9c73e9SStephen Hemminger 	uint8_t *buf = (uint8_t *)pkt;
11914e9c73e9SStephen Hemminger 	unsigned int data_offs;
11924e9c73e9SStephen Hemminger 
11934e9c73e9SStephen Hemminger 	hn_rndis_dump(pkt);
11944e9c73e9SStephen Hemminger 
11954e9c73e9SStephen Hemminger 	data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
11964e9c73e9SStephen Hemminger 	txd->chim_size += pkt->len;
11974e9c73e9SStephen Hemminger 	txd->data_size += m->pkt_len;
11984e9c73e9SStephen Hemminger 	++txd->packets;
11994e9c73e9SStephen Hemminger 	hn_update_packet_stats(&txq->stats, m);
12004e9c73e9SStephen Hemminger 
12014e9c73e9SStephen Hemminger 	for (; m; m = m->next) {
12024e9c73e9SStephen Hemminger 		uint16_t len = rte_pktmbuf_data_len(m);
12034e9c73e9SStephen Hemminger 
12044e9c73e9SStephen Hemminger 		rte_memcpy(buf + data_offs,
12054e9c73e9SStephen Hemminger 			   rte_pktmbuf_mtod(m, const char *), len);
12064e9c73e9SStephen Hemminger 		data_offs += len;
12074e9c73e9SStephen Hemminger 	}
12084e9c73e9SStephen Hemminger }
12094e9c73e9SStephen Hemminger 
12104e9c73e9SStephen Hemminger /*
12114e9c73e9SStephen Hemminger  * Send pending aggregated data in chimney buffer (if any).
12124e9c73e9SStephen Hemminger  * Returns an error if the send failed because the channel ring buffer
12134e9c73e9SStephen Hemminger  * was full.
12144e9c73e9SStephen Hemminger  */
12154e9c73e9SStephen Hemminger static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig)
12174e9c73e9SStephen Hemminger {
12184e9c73e9SStephen Hemminger 	struct hn_txdesc *txd = txq->agg_txd;
12194e9c73e9SStephen Hemminger 	struct hn_nvs_rndis rndis;
12204e9c73e9SStephen Hemminger 	int ret;
12214e9c73e9SStephen Hemminger 
12224e9c73e9SStephen Hemminger 	if (!txd)
12234e9c73e9SStephen Hemminger 		return 0;
12244e9c73e9SStephen Hemminger 
12254e9c73e9SStephen Hemminger 	rndis = (struct hn_nvs_rndis) {
12264e9c73e9SStephen Hemminger 		.type = NVS_TYPE_RNDIS,
12274e9c73e9SStephen Hemminger 		.rndis_mtype = NVS_RNDIS_MTYPE_DATA,
12284e9c73e9SStephen Hemminger 		.chim_idx = txd->chim_index,
12294e9c73e9SStephen Hemminger 		.chim_sz = txd->chim_size,
12304e9c73e9SStephen Hemminger 	};
12314e9c73e9SStephen Hemminger 
12324e9c73e9SStephen Hemminger 	PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u",
12334e9c73e9SStephen Hemminger 		   txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size);
12344e9c73e9SStephen Hemminger 
12354e9c73e9SStephen Hemminger 	ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC,
12364e9c73e9SStephen Hemminger 			  &rndis, sizeof(rndis), (uintptr_t)txd, need_sig);
12374e9c73e9SStephen Hemminger 
12384e9c73e9SStephen Hemminger 	if (likely(ret == 0))
12394e9c73e9SStephen Hemminger 		hn_reset_txagg(txq);
1240b757deb8SStephen Hemminger 	else if (ret == -EAGAIN) {
1241b757deb8SStephen Hemminger 		PMD_TX_LOG(DEBUG, "port %u:%u channel full",
1242b757deb8SStephen Hemminger 			   txq->port_id, txq->queue_id);
1243b757deb8SStephen Hemminger 		++txq->stats.channel_full;
1244b757deb8SStephen Hemminger 	} else {
1245b757deb8SStephen Hemminger 		++txq->stats.errors;
12464e9c73e9SStephen Hemminger 
1247b757deb8SStephen Hemminger 		PMD_DRV_LOG(NOTICE, "port %u:%u send failed: %d",
1248b757deb8SStephen Hemminger 			   txq->port_id, txq->queue_id, ret);
1249b757deb8SStephen Hemminger 	}
12504e9c73e9SStephen Hemminger 	return ret;
12514e9c73e9SStephen Hemminger }
12524e9c73e9SStephen Hemminger 
1253cc025181SStephen Hemminger /*
1254cc025181SStephen Hemminger  * Try to find a place in a send chimney buffer to put
1255cc025181SStephen Hemminger  * the small packet. If space is available, this routine
1256cc025181SStephen Hemminger  * returns a pointer to where to place the data.
1257cc025181SStephen Hemminger  * If there is no space, the caller should try direct transmit.
1258cc025181SStephen Hemminger  */
12594e9c73e9SStephen Hemminger static void *
1260cc025181SStephen Hemminger hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq,
1261cc025181SStephen Hemminger 	     struct hn_txdesc *txd, uint32_t pktsize)
12624e9c73e9SStephen Hemminger {
12634e9c73e9SStephen Hemminger 	struct hn_txdesc *agg_txd = txq->agg_txd;
12644e9c73e9SStephen Hemminger 	struct rndis_packet_msg *pkt;
12654e9c73e9SStephen Hemminger 	void *chim;
12664e9c73e9SStephen Hemminger 
12674e9c73e9SStephen Hemminger 	if (agg_txd) {
12684e9c73e9SStephen Hemminger 		unsigned int padding, olen;
12694e9c73e9SStephen Hemminger 
12704e9c73e9SStephen Hemminger 		/*
12714e9c73e9SStephen Hemminger 		 * Update the previous RNDIS packet's total length;
12724e9c73e9SStephen Hemminger 		 * it can grow because of the mandatory alignment
12734e9c73e9SStephen Hemminger 		 * padding for this RNDIS packet. Also update the
12744e9c73e9SStephen Hemminger 		 * aggregating txdesc's chimney sending buffer size
12754e9c73e9SStephen Hemminger 		 * accordingly.
12764e9c73e9SStephen Hemminger 		 *
12774e9c73e9SStephen Hemminger 		 * Zero-out the padding, as required by the RNDIS spec.
12784e9c73e9SStephen Hemminger 		 */
12794e9c73e9SStephen Hemminger 		pkt = txq->agg_prevpkt;
12804e9c73e9SStephen Hemminger 		olen = pkt->len;
12814e9c73e9SStephen Hemminger 		padding = RTE_ALIGN(olen, txq->agg_align) - olen;
12824e9c73e9SStephen Hemminger 		if (padding > 0) {
12834e9c73e9SStephen Hemminger 			agg_txd->chim_size += padding;
12844e9c73e9SStephen Hemminger 			pkt->len += padding;
12854e9c73e9SStephen Hemminger 			memset((uint8_t *)pkt + olen, 0, padding);
12864e9c73e9SStephen Hemminger 		}
12874e9c73e9SStephen Hemminger 
12884e9c73e9SStephen Hemminger 		chim = (uint8_t *)pkt + pkt->len;
1289cc025181SStephen Hemminger 		txq->agg_prevpkt = chim;
12904e9c73e9SStephen Hemminger 		txq->agg_pktleft--;
12914e9c73e9SStephen Hemminger 		txq->agg_szleft -= pktsize;
12924e9c73e9SStephen Hemminger 		if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) {
12934e9c73e9SStephen Hemminger 			/*
12944e9c73e9SStephen Hemminger 			 * Probably can't aggregate more packets;
12954e9c73e9SStephen Hemminger 			 * flush this aggregating txdesc proactively.
12964e9c73e9SStephen Hemminger 			 */
12974e9c73e9SStephen Hemminger 			txq->agg_pktleft = 0;
12984e9c73e9SStephen Hemminger 		}
1299cc025181SStephen Hemminger 
1300cc025181SStephen Hemminger 		hn_txd_put(txq, txd);
1301cc025181SStephen Hemminger 		return chim;
1302cc025181SStephen Hemminger 	}
1303cc025181SStephen Hemminger 
1304cc025181SStephen Hemminger 	txd->chim_index = hn_chim_alloc(hv);
1305cc025181SStephen Hemminger 	if (txd->chim_index == NVS_CHIM_IDX_INVALID)
13064e9c73e9SStephen Hemminger 		return NULL;
13074e9c73e9SStephen Hemminger 
13087b1a614dSLong Li 	chim = (uint8_t *)hv->chim_res.addr
1309cc025181SStephen Hemminger 			+ txd->chim_index * hv->chim_szmax;
13104e9c73e9SStephen Hemminger 
1311cc025181SStephen Hemminger 	txq->agg_txd = txd;
13124e9c73e9SStephen Hemminger 	txq->agg_pktleft = txq->agg_pktmax - 1;
13134e9c73e9SStephen Hemminger 	txq->agg_szleft = txq->agg_szmax - pktsize;
13144e9c73e9SStephen Hemminger 	txq->agg_prevpkt = chim;
13154e9c73e9SStephen Hemminger 
13164e9c73e9SStephen Hemminger 	return chim;
13174e9c73e9SStephen Hemminger }
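
/*
 * Worked example of the alignment math above (values hypothetical):
 * with agg_align = 8 and a previous RNDIS packet of olen = 122 bytes,
 *
 *	padding = RTE_ALIGN(122, 8) - 122 = 128 - 122 = 6
 *
 * so six zero bytes are appended, the previous packet's len grows to
 * 128, and the next aggregated packet starts on an aligned boundary.
 */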
13184e9c73e9SStephen Hemminger 
13194e9c73e9SStephen Hemminger static inline void *
13204e9c73e9SStephen Hemminger hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt,
13214e9c73e9SStephen Hemminger 			uint32_t pi_dlen, uint32_t pi_type)
13224e9c73e9SStephen Hemminger {
13234e9c73e9SStephen Hemminger 	const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen);
13244e9c73e9SStephen Hemminger 	struct rndis_pktinfo *pi;
13254e9c73e9SStephen Hemminger 
13264e9c73e9SStephen Hemminger 	/*
13274e9c73e9SStephen Hemminger 	 * Per-packet-info does not move; it only grows.
13284e9c73e9SStephen Hemminger 	 *
13294e9c73e9SStephen Hemminger 	 * NOTE:
13304e9c73e9SStephen Hemminger 	 * pktinfooffset in this phase counts from the beginning
13314e9c73e9SStephen Hemminger 	 * of rndis_packet_msg.
13324e9c73e9SStephen Hemminger 	 */
13334e9c73e9SStephen Hemminger 	pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt));
13344e9c73e9SStephen Hemminger 
13354e9c73e9SStephen Hemminger 	pkt->pktinfolen += pi_size;
13364e9c73e9SStephen Hemminger 
13374e9c73e9SStephen Hemminger 	pi->size = pi_size;
13384e9c73e9SStephen Hemminger 	pi->type = pi_type;
13394e9c73e9SStephen Hemminger 	pi->offset = RNDIS_PKTINFO_OFFSET;
13404e9c73e9SStephen Hemminger 
13414e9c73e9SStephen Hemminger 	return pi->data;
13424e9c73e9SStephen Hemminger }
13434e9c73e9SStephen Hemminger 
13444e9c73e9SStephen Hemminger /* Put RNDIS header and packet info on packet */
13454e9c73e9SStephen Hemminger static void hn_encap(struct rndis_packet_msg *pkt,
13464e9c73e9SStephen Hemminger 		     uint16_t queue_id,
13474e9c73e9SStephen Hemminger 		     const struct rte_mbuf *m)
13484e9c73e9SStephen Hemminger {
13494e9c73e9SStephen Hemminger 	unsigned int hlen = m->l2_len + m->l3_len;
13504e9c73e9SStephen Hemminger 	uint32_t *pi_data;
13514e9c73e9SStephen Hemminger 	uint32_t pkt_hlen;
13524e9c73e9SStephen Hemminger 
13534e9c73e9SStephen Hemminger 	pkt->type = RNDIS_PACKET_MSG;
13544e9c73e9SStephen Hemminger 	pkt->len = m->pkt_len;
13554e9c73e9SStephen Hemminger 	pkt->dataoffset = 0;
13564e9c73e9SStephen Hemminger 	pkt->datalen = m->pkt_len;
13574e9c73e9SStephen Hemminger 	pkt->oobdataoffset = 0;
13584e9c73e9SStephen Hemminger 	pkt->oobdatalen = 0;
13594e9c73e9SStephen Hemminger 	pkt->oobdataelements = 0;
13604e9c73e9SStephen Hemminger 	pkt->pktinfooffset = sizeof(*pkt);
13614e9c73e9SStephen Hemminger 	pkt->pktinfolen = 0;
13624e9c73e9SStephen Hemminger 	pkt->vchandle = 0;
13634e9c73e9SStephen Hemminger 	pkt->reserved = 0;
13644e9c73e9SStephen Hemminger 
13654e9c73e9SStephen Hemminger 	/*
13664e9c73e9SStephen Hemminger 	 * Set the hash value for this packet to the queue_id, so that the
13674e9c73e9SStephen Hemminger 	 * TX done event for this packet arrives on the right channel.
13684e9c73e9SStephen Hemminger 	 */
13694e9c73e9SStephen Hemminger 	pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE,
13704e9c73e9SStephen Hemminger 					  NDIS_PKTINFO_TYPE_HASHVAL);
13714e9c73e9SStephen Hemminger 	*pi_data = queue_id;
13724e9c73e9SStephen Hemminger 
1373daa02b5cSOlivier Matz 	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
13744e9c73e9SStephen Hemminger 		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
13754e9c73e9SStephen Hemminger 						  NDIS_PKTINFO_TYPE_VLAN);
1376f7654c8cSAlan Elder 		*pi_data = NDIS_VLAN_INFO_MAKE(RTE_VLAN_TCI_ID(m->vlan_tci),
1377f7654c8cSAlan Elder 					       RTE_VLAN_TCI_PRI(m->vlan_tci),
1378f7654c8cSAlan Elder 					       RTE_VLAN_TCI_DEI(m->vlan_tci));
13794e9c73e9SStephen Hemminger 	}
13804e9c73e9SStephen Hemminger 
1381daa02b5cSOlivier Matz 	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
13824e9c73e9SStephen Hemminger 		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE,
13834e9c73e9SStephen Hemminger 						  NDIS_PKTINFO_TYPE_LSO);
13844e9c73e9SStephen Hemminger 
1385daa02b5cSOlivier Matz 		if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
13864e9c73e9SStephen Hemminger 			*pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen,
13874e9c73e9SStephen Hemminger 							   m->tso_segsz);
13884e9c73e9SStephen Hemminger 		} else {
13894e9c73e9SStephen Hemminger 			*pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen,
13904e9c73e9SStephen Hemminger 							   m->tso_segsz);
13914e9c73e9SStephen Hemminger 		}
1392559a1f2eSLong Li 	} else if ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
1393559a1f2eSLong Li 			RTE_MBUF_F_TX_TCP_CKSUM ||
1394559a1f2eSLong Li 		   (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
1395559a1f2eSLong Li 			RTE_MBUF_F_TX_UDP_CKSUM ||
1396559a1f2eSLong Li 		   (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)) {
13974e9c73e9SStephen Hemminger 		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
13984e9c73e9SStephen Hemminger 						  NDIS_PKTINFO_TYPE_CSUM);
13994e9c73e9SStephen Hemminger 		*pi_data = 0;
14004e9c73e9SStephen Hemminger 
1401daa02b5cSOlivier Matz 		if (m->ol_flags & RTE_MBUF_F_TX_IPV6)
14024e9c73e9SStephen Hemminger 			*pi_data |= NDIS_TXCSUM_INFO_IPV6;
1403daa02b5cSOlivier Matz 		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
14044e9c73e9SStephen Hemminger 			*pi_data |= NDIS_TXCSUM_INFO_IPV4;
14054e9c73e9SStephen Hemminger 
1406daa02b5cSOlivier Matz 			if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
14074e9c73e9SStephen Hemminger 				*pi_data |= NDIS_TXCSUM_INFO_IPCS;
14084e9c73e9SStephen Hemminger 		}
14094e9c73e9SStephen Hemminger 
1410559a1f2eSLong Li 		if ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
1411559a1f2eSLong Li 				RTE_MBUF_F_TX_TCP_CKSUM)
14124e9c73e9SStephen Hemminger 			*pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
1413559a1f2eSLong Li 		else if ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
1414559a1f2eSLong Li 				RTE_MBUF_F_TX_UDP_CKSUM)
14154e9c73e9SStephen Hemminger 			*pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
14164e9c73e9SStephen Hemminger 	}
14174e9c73e9SStephen Hemminger 
14184e9c73e9SStephen Hemminger 	pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen;
14194e9c73e9SStephen Hemminger 	/* Fixup RNDIS packet message total length */
14204e9c73e9SStephen Hemminger 	pkt->len += pkt_hlen;
14214e9c73e9SStephen Hemminger 
14224e9c73e9SStephen Hemminger 	/* Convert RNDIS packet message offsets */
14234e9c73e9SStephen Hemminger 	pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen);
14244e9c73e9SStephen Hemminger 	pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset);
14254e9c73e9SStephen Hemminger }
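
/*
 * Worked example of the length fixup above (sizes are assumptions for
 * illustration, not taken from the headers): if the fixed header is
 * 44 bytes and the mandatory hash-value packet info occupies 16 bytes,
 * then pkt_hlen = 44 + 16 = 60 and pkt->len grows by 60; dataoffset
 * and pktinfooffset are then converted from offsets counted from the
 * start of the message to the relative form the RNDIS protocol expects.
 */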
14264e9c73e9SStephen Hemminger 
14274e9c73e9SStephen Hemminger /* How many scatter-gather list elements are needed */
14284e9c73e9SStephen Hemminger static unsigned int hn_get_slots(const struct rte_mbuf *m)
14294e9c73e9SStephen Hemminger {
14304e9c73e9SStephen Hemminger 	unsigned int slots = 1; /* for RNDIS header */
14314e9c73e9SStephen Hemminger 
14324e9c73e9SStephen Hemminger 	while (m) {
14334e9c73e9SStephen Hemminger 		unsigned int size = rte_pktmbuf_data_len(m);
14344e9c73e9SStephen Hemminger 		unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
14354e9c73e9SStephen Hemminger 
1436924e6b76SThomas Monjalon 		slots += (offs + size + rte_mem_page_size() - 1) /
1437924e6b76SThomas Monjalon 				rte_mem_page_size();
14384e9c73e9SStephen Hemminger 		m = m->next;
14394e9c73e9SStephen Hemminger 	}
14404e9c73e9SStephen Hemminger 
14414e9c73e9SStephen Hemminger 	return slots;
14424e9c73e9SStephen Hemminger }
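
/*
 * Worked example (hypothetical values, 4 KB pages): one 3000-byte
 * segment whose data begins 3840 bytes into a page (offs = 0xF00)
 * needs (3840 + 3000 + 4095) / 4096 = 2 page slots, plus the slot
 * reserved above for the RNDIS header: three slots in total.
 */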
14434e9c73e9SStephen Hemminger 
14444e9c73e9SStephen Hemminger /* Build scatter gather list from chained mbuf */
14454e9c73e9SStephen Hemminger static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
14464e9c73e9SStephen Hemminger 			       const struct rte_mbuf *m)
14474e9c73e9SStephen Hemminger {
14484e9c73e9SStephen Hemminger 	unsigned int segs = 0;
14494e9c73e9SStephen Hemminger 
14504e9c73e9SStephen Hemminger 	while (m) {
14514e9c73e9SStephen Hemminger 		rte_iova_t addr = rte_mbuf_data_iova(m);
1452924e6b76SThomas Monjalon 		unsigned int page = addr / rte_mem_page_size();
14534e9c73e9SStephen Hemminger 		unsigned int offset = addr & PAGE_MASK;
14544e9c73e9SStephen Hemminger 		unsigned int len = rte_pktmbuf_data_len(m);
14554e9c73e9SStephen Hemminger 
14564e9c73e9SStephen Hemminger 		while (len > 0) {
1457924e6b76SThomas Monjalon 			unsigned int bytes = RTE_MIN(len,
1458924e6b76SThomas Monjalon 					rte_mem_page_size() - offset);
14594e9c73e9SStephen Hemminger 
14604e9c73e9SStephen Hemminger 			sg[segs].page = page;
14614e9c73e9SStephen Hemminger 			sg[segs].ofs = offset;
14624e9c73e9SStephen Hemminger 			sg[segs].len = bytes;
14634e9c73e9SStephen Hemminger 			segs++;
14644e9c73e9SStephen Hemminger 
14654e9c73e9SStephen Hemminger 			++page;
14664e9c73e9SStephen Hemminger 			offset = 0;
14674e9c73e9SStephen Hemminger 			len -= bytes;
14684e9c73e9SStephen Hemminger 		}
14694e9c73e9SStephen Hemminger 		m = m->next;
14704e9c73e9SStephen Hemminger 	}
14714e9c73e9SStephen Hemminger 
14724e9c73e9SStephen Hemminger 	return segs;
14734e9c73e9SStephen Hemminger }
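
/*
 * Continuing the example from hn_get_slots(): the same 3000-byte
 * segment becomes two vmbus_gpa entries, first the 4096 - 3840 = 256
 * bytes left in its first page, then the remaining 2744 bytes starting
 * at offset 0 of the following page.
 */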
14744e9c73e9SStephen Hemminger 
14754e9c73e9SStephen Hemminger /* Transmit directly from mbuf */
14764e9c73e9SStephen Hemminger static int hn_xmit_sg(struct hn_tx_queue *txq,
14774e9c73e9SStephen Hemminger 		      const struct hn_txdesc *txd, const struct rte_mbuf *m,
14784e9c73e9SStephen Hemminger 		      bool *need_sig)
14794e9c73e9SStephen Hemminger {
14804e9c73e9SStephen Hemminger 	struct vmbus_gpa sg[hn_get_slots(m)];
14814e9c73e9SStephen Hemminger 	struct hn_nvs_rndis nvs_rndis = {
14824e9c73e9SStephen Hemminger 		.type = NVS_TYPE_RNDIS,
14834e9c73e9SStephen Hemminger 		.rndis_mtype = NVS_RNDIS_MTYPE_DATA,
14844e9c73e9SStephen Hemminger 		.chim_sz = txd->chim_size,
14854e9c73e9SStephen Hemminger 	};
14864e9c73e9SStephen Hemminger 	rte_iova_t addr;
14874e9c73e9SStephen Hemminger 	unsigned int segs;
14884e9c73e9SStephen Hemminger 
14894e9c73e9SStephen Hemminger 	/* attach aggregation data if present */
14904e9c73e9SStephen Hemminger 	if (txd->chim_size > 0)
14914e9c73e9SStephen Hemminger 		nvs_rndis.chim_idx = txd->chim_index;
14924e9c73e9SStephen Hemminger 	else
14934e9c73e9SStephen Hemminger 		nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID;
14944e9c73e9SStephen Hemminger 
14954e9c73e9SStephen Hemminger 	hn_rndis_dump(txd->rndis_pkt);
14964e9c73e9SStephen Hemminger 
14974e9c73e9SStephen Hemminger 	/* pass IOVA of rndis header in first segment */
1498b8c3c628SLong Li 	addr = txq->tx_rndis_iova +
1499b8c3c628SLong Li 		((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
15004e9c73e9SStephen Hemminger 
1501924e6b76SThomas Monjalon 	sg[0].page = addr / rte_mem_page_size();
15024e9c73e9SStephen Hemminger 	sg[0].ofs = addr & PAGE_MASK;
15034e9c73e9SStephen Hemminger 	sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
15044e9c73e9SStephen Hemminger 	segs = 1;
15054e9c73e9SStephen Hemminger 
15064e9c73e9SStephen Hemminger 	hn_update_packet_stats(&txq->stats, m);
15074e9c73e9SStephen Hemminger 
15084e9c73e9SStephen Hemminger 	segs += hn_fill_sg(sg + 1, m);
15094e9c73e9SStephen Hemminger 
15104e9c73e9SStephen Hemminger 	PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u",
15114e9c73e9SStephen Hemminger 		   txq->port_id, txq->queue_id, txd->chim_index,
15124e9c73e9SStephen Hemminger 		   segs, nvs_rndis.chim_sz);
15134e9c73e9SStephen Hemminger 
15144e9c73e9SStephen Hemminger 	return hn_nvs_send_sglist(txq->chan, sg, segs,
15154e9c73e9SStephen Hemminger 				  &nvs_rndis, sizeof(nvs_rndis),
15164e9c73e9SStephen Hemminger 				  (uintptr_t)txd, need_sig);
15174e9c73e9SStephen Hemminger }
15184e9c73e9SStephen Hemminger 
15194e9c73e9SStephen Hemminger uint16_t
15204e9c73e9SStephen Hemminger hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
15214e9c73e9SStephen Hemminger {
15224e9c73e9SStephen Hemminger 	struct hn_tx_queue *txq = ptxq;
1523dc7680e8SStephen Hemminger 	uint16_t queue_id = txq->queue_id;
15244e9c73e9SStephen Hemminger 	struct hn_data *hv = txq->hv;
1525dc7680e8SStephen Hemminger 	struct rte_eth_dev *vf_dev;
15264e9c73e9SStephen Hemminger 	bool need_sig = false;
152733fd81cdSStephen Hemminger 	uint16_t nb_tx, tx_thresh;
15284e9c73e9SStephen Hemminger 	int ret;
15294e9c73e9SStephen Hemminger 
15304e9c73e9SStephen Hemminger 	if (unlikely(hv->closed))
15314e9c73e9SStephen Hemminger 		return 0;
15324e9c73e9SStephen Hemminger 
153333fd81cdSStephen Hemminger 	/*
153433fd81cdSStephen Hemminger 	 * Always check for events on the primary channel
153533fd81cdSStephen Hemminger 	 * because that is where hotplug notifications occur.
153633fd81cdSStephen Hemminger 	 */
153733fd81cdSStephen Hemminger 	tx_thresh = RTE_MAX(txq->free_thresh, nb_pkts);
153833fd81cdSStephen Hemminger 	if (txq->queue_id == 0 ||
153933fd81cdSStephen Hemminger 	    rte_mempool_avail_count(txq->txdesc_pool) < tx_thresh)
154033fd81cdSStephen Hemminger 		hn_process_events(hv, txq->queue_id, 0);
154133fd81cdSStephen Hemminger 
1542dc7680e8SStephen Hemminger 	/* Transmit over VF if present and up */
1543a2a23a79SLong Li 	if (hv->vf_ctx.vf_vsc_switched) {
154481938ebbSStephen Hemminger 		rte_rwlock_read_lock(&hv->vf_lock);
15454a9efcddSStephen Hemminger 		vf_dev = hn_get_vf_dev(hv);
1546a2a23a79SLong Li 		if (hv->vf_ctx.vf_vsc_switched && vf_dev &&
1547a2a23a79SLong Li 		    vf_dev->data->dev_started) {
1548dc7680e8SStephen Hemminger 			void *sub_q = vf_dev->data->tx_queues[queue_id];
1549dc7680e8SStephen Hemminger 
1550a2a23a79SLong Li 			nb_tx = (*vf_dev->tx_pkt_burst)
1551a2a23a79SLong Li 					(sub_q, tx_pkts, nb_pkts);
155281938ebbSStephen Hemminger 			rte_rwlock_read_unlock(&hv->vf_lock);
155381938ebbSStephen Hemminger 			return nb_tx;
1554dc7680e8SStephen Hemminger 		}
155581938ebbSStephen Hemminger 		rte_rwlock_read_unlock(&hv->vf_lock);
1556a2a23a79SLong Li 	}
1557dc7680e8SStephen Hemminger 
15584e9c73e9SStephen Hemminger 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
15594e9c73e9SStephen Hemminger 		struct rte_mbuf *m = tx_pkts[nb_tx];
15604e9c73e9SStephen Hemminger 		struct rndis_packet_msg *pkt;
1561cc025181SStephen Hemminger 		struct hn_txdesc *txd;
1562*06c968f9SLong Li 		uint32_t pkt_size;
1563cc025181SStephen Hemminger 
1564cc025181SStephen Hemminger 		txd = hn_txd_get(txq);
1565cc025181SStephen Hemminger 		if (txd == NULL)
1566cc025181SStephen Hemminger 			break;
15674e9c73e9SStephen Hemminger 
1568*06c968f9SLong Li 		if (!(m->ol_flags & RTE_MBUF_F_TX_VLAN)) {
1569*06c968f9SLong Li 			struct rte_ether_hdr *eh =
1570*06c968f9SLong Li 				rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1571*06c968f9SLong Li 			struct rte_vlan_hdr *vh;
1572*06c968f9SLong Li 
1573*06c968f9SLong Li 			/* Force TX VLAN offloading for 802.1Q packets */
1574*06c968f9SLong Li 			if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
1575*06c968f9SLong Li 				vh = (struct rte_vlan_hdr *)(eh + 1);
1576*06c968f9SLong Li 				m->ol_flags |= RTE_MBUF_F_TX_VLAN;
1577*06c968f9SLong Li 				m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
1578*06c968f9SLong Li 
1579*06c968f9SLong Li 				/* Copy ether header over */
1580*06c968f9SLong Li 				memmove(rte_pktmbuf_adj(m, sizeof(struct rte_vlan_hdr)),
1581*06c968f9SLong Li 					eh, 2 * RTE_ETHER_ADDR_LEN);
1582*06c968f9SLong Li 			}
1583*06c968f9SLong Li 		}
1584*06c968f9SLong Li 		pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN;
1585*06c968f9SLong Li 
15864e9c73e9SStephen Hemminger 		/* For small packets aggregate them in chimney buffer */
158774a5a666SStephen Hemminger 		if (m->pkt_len <= hv->tx_copybreak &&
158874a5a666SStephen Hemminger 		    pkt_size <= txq->agg_szmax) {
15894e9c73e9SStephen Hemminger 			/* If this packet will not fit, then flush */
15904e9c73e9SStephen Hemminger 			if (txq->agg_pktleft == 0 ||
15914e9c73e9SStephen Hemminger 			    RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
15924e9c73e9SStephen Hemminger 				if (hn_flush_txagg(txq, &need_sig))
15934e9c73e9SStephen Hemminger 					goto fail;
15944e9c73e9SStephen Hemminger 			}
15954e9c73e9SStephen Hemminger 
1597cc025181SStephen Hemminger 			pkt = hn_try_txagg(hv, txq, txd, pkt_size);
15984e9c73e9SStephen Hemminger 			if (unlikely(!pkt))
15997e6c8243SStephen Hemminger 				break;
16004e9c73e9SStephen Hemminger 
1601dc7680e8SStephen Hemminger 			hn_encap(pkt, queue_id, m);
16024e9c73e9SStephen Hemminger 			hn_append_to_chim(txq, pkt, m);
16034e9c73e9SStephen Hemminger 
16044e9c73e9SStephen Hemminger 			rte_pktmbuf_free(m);
16054e9c73e9SStephen Hemminger 
16064e9c73e9SStephen Hemminger 			/* if buffer is full, flush */
16074e9c73e9SStephen Hemminger 			if (txq->agg_pktleft == 0 &&
16084e9c73e9SStephen Hemminger 			    hn_flush_txagg(txq, &need_sig))
16094e9c73e9SStephen Hemminger 				goto fail;
16104e9c73e9SStephen Hemminger 		} else {
1611cc025181SStephen Hemminger 			/* Send any outstanding packets in buffer */
1612cc025181SStephen Hemminger 			if (txq->agg_txd && hn_flush_txagg(txq, &need_sig))
1613cc025181SStephen Hemminger 				goto fail;
16144e9c73e9SStephen Hemminger 
16154e9c73e9SStephen Hemminger 			pkt = txd->rndis_pkt;
16164e9c73e9SStephen Hemminger 			txd->m = m;
1617cc025181SStephen Hemminger 			txd->data_size = m->pkt_len;
16184e9c73e9SStephen Hemminger 			++txd->packets;
16194e9c73e9SStephen Hemminger 
1620dc7680e8SStephen Hemminger 			hn_encap(pkt, queue_id, m);
16214e9c73e9SStephen Hemminger 
16224e9c73e9SStephen Hemminger 			ret = hn_xmit_sg(txq, txd, m, &need_sig);
16234e9c73e9SStephen Hemminger 			if (unlikely(ret != 0)) {
1624b757deb8SStephen Hemminger 				if (ret == -EAGAIN) {
1625b757deb8SStephen Hemminger 					PMD_TX_LOG(DEBUG, "sg channel full");
1626b757deb8SStephen Hemminger 					++txq->stats.channel_full;
1627b757deb8SStephen Hemminger 				} else {
1628b757deb8SStephen Hemminger 					PMD_DRV_LOG(NOTICE, "sg send failed: %d", ret);
16294e9c73e9SStephen Hemminger 					++txq->stats.errors;
1630b757deb8SStephen Hemminger 				}
1631cc025181SStephen Hemminger 				hn_txd_put(txq, txd);
16324e9c73e9SStephen Hemminger 				goto fail;
16334e9c73e9SStephen Hemminger 			}
16344e9c73e9SStephen Hemminger 		}
16354e9c73e9SStephen Hemminger 	}
16364e9c73e9SStephen Hemminger 
16374e9c73e9SStephen Hemminger 	/* If a partial buffer is left, then try to send it.
16384e9c73e9SStephen Hemminger 	 * If that fails, then reuse it on the next send.
16394e9c73e9SStephen Hemminger 	 */
16404e9c73e9SStephen Hemminger 	hn_flush_txagg(txq, &need_sig);
16414e9c73e9SStephen Hemminger 
16424e9c73e9SStephen Hemminger fail:
16434e9c73e9SStephen Hemminger 	if (need_sig)
16444e9c73e9SStephen Hemminger 		rte_vmbus_chan_signal_tx(txq->chan);
16454e9c73e9SStephen Hemminger 
16464e9c73e9SStephen Hemminger 	return nb_tx;
16474e9c73e9SStephen Hemminger }
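
/*
 * Illustrative usage sketch (hypothetical values): applications enter
 * the transmit path above through the standard burst API; packets at
 * or below hv->tx_copybreak are copied into the chimney buffer, and
 * larger ones are mapped and sent as a scatter-gather list:
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 */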
16484e9c73e9SStephen Hemminger 
164905bfd4b4SStephen Hemminger static uint16_t
165005bfd4b4SStephen Hemminger hn_recv_vf(uint16_t vf_port, const struct hn_rx_queue *rxq,
165105bfd4b4SStephen Hemminger 	   struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
165205bfd4b4SStephen Hemminger {
165305bfd4b4SStephen Hemminger 	uint16_t i, n;
165405bfd4b4SStephen Hemminger 
165505bfd4b4SStephen Hemminger 	if (unlikely(nb_pkts == 0))
165605bfd4b4SStephen Hemminger 		return 0;
165705bfd4b4SStephen Hemminger 
165805bfd4b4SStephen Hemminger 	n = rte_eth_rx_burst(vf_port, rxq->queue_id, rx_pkts, nb_pkts);
165905bfd4b4SStephen Hemminger 
166005bfd4b4SStephen Hemminger 	/* relabel the received mbufs */
166105bfd4b4SStephen Hemminger 	for (i = 0; i < n; i++)
166205bfd4b4SStephen Hemminger 		rx_pkts[i]->port = rxq->port_id;
166305bfd4b4SStephen Hemminger 
166405bfd4b4SStephen Hemminger 	return n;
166505bfd4b4SStephen Hemminger }
166605bfd4b4SStephen Hemminger 
16674e9c73e9SStephen Hemminger uint16_t
16684e9c73e9SStephen Hemminger hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
16694e9c73e9SStephen Hemminger {
16704e9c73e9SStephen Hemminger 	struct hn_rx_queue *rxq = prxq;
16714e9c73e9SStephen Hemminger 	struct hn_data *hv = rxq->hv;
1672dc7680e8SStephen Hemminger 	struct rte_eth_dev *vf_dev;
1673dc7680e8SStephen Hemminger 	uint16_t nb_rcv;
16744e9c73e9SStephen Hemminger 
16754e9c73e9SStephen Hemminger 	if (unlikely(hv->closed))
16764e9c73e9SStephen Hemminger 		return 0;
16774e9c73e9SStephen Hemminger 
167833fd81cdSStephen Hemminger 	/* Check for new completions (and hotplug) */
167905bfd4b4SStephen Hemminger 	if (likely(rte_ring_count(rxq->rx_ring) < nb_pkts))
1680dc7680e8SStephen Hemminger 		hn_process_events(hv, rxq->queue_id, 0);
1681dc7680e8SStephen Hemminger 
168205bfd4b4SStephen Hemminger 	/* Always check the vmbus path for multicast and new flows */
1683dc7680e8SStephen Hemminger 	nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
168405bfd4b4SStephen Hemminger 					   (void **)rx_pkts, nb_pkts, NULL);
168505bfd4b4SStephen Hemminger 
168605bfd4b4SStephen Hemminger 	/* If VF is available, check that as well */
1687a2a23a79SLong Li 	if (hv->vf_ctx.vf_vsc_switched) {
168881938ebbSStephen Hemminger 		rte_rwlock_read_lock(&hv->vf_lock);
168933fd81cdSStephen Hemminger 		vf_dev = hn_get_vf_dev(hv);
1690a2a23a79SLong Li 		if (hv->vf_ctx.vf_vsc_switched && vf_dev &&
1691a2a23a79SLong Li 		    vf_dev->data->dev_started)
169205bfd4b4SStephen Hemminger 			nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
1693a2a23a79SLong Li 					     rx_pkts + nb_rcv,
1694a2a23a79SLong Li 					     nb_pkts - nb_rcv);
1695dc7680e8SStephen Hemminger 
169681938ebbSStephen Hemminger 		rte_rwlock_read_unlock(&hv->vf_lock);
1697a2a23a79SLong Li 	}
1698dc7680e8SStephen Hemminger 	return nb_rcv;
1699dc7680e8SStephen Hemminger }
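
/*
 * Illustrative usage sketch (hypothetical values): the receive path
 * above, including the synthetic staging ring and the optional VF fast
 * path, is reached through the standard burst API:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 */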
17008428da72SStephen Hemminger 
17018428da72SStephen Hemminger void
17028428da72SStephen Hemminger hn_dev_free_queues(struct rte_eth_dev *dev)
17038428da72SStephen Hemminger {
17048428da72SStephen Hemminger 	unsigned int i;
17058428da72SStephen Hemminger 
17068428da72SStephen Hemminger 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
17078428da72SStephen Hemminger 		struct hn_rx_queue *rxq = dev->data->rx_queues[i];
17088428da72SStephen Hemminger 
17098428da72SStephen Hemminger 		hn_rx_queue_free(rxq, false);
17108428da72SStephen Hemminger 		dev->data->rx_queues[i] = NULL;
17118428da72SStephen Hemminger 	}
17128428da72SStephen Hemminger 	dev->data->nb_rx_queues = 0;
17138428da72SStephen Hemminger 
17148428da72SStephen Hemminger 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
17157483341aSXueming Li 		hn_dev_tx_queue_release(dev, i);
17168428da72SStephen Hemminger 		dev->data->tx_queues[i] = NULL;
17178428da72SStephen Hemminger 	}
17188428da72SStephen Hemminger 	dev->data->nb_tx_queues = 0;
17198428da72SStephen Hemminger }
1720