xref: /dpdk/drivers/net/bonding/rte_eth_bond_pmd.c (revision 4da0705bf896327af062212b5a1e6cb1f1366aa5)
15566a3e3SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
25566a3e3SBruce Richardson  * Copyright(c) 2010-2017 Intel Corporation
33eb6bdd8SBruce Richardson  */
43eb6bdd8SBruce Richardson #include <stdlib.h>
58ff0003cSDavid Marchand #include <stdbool.h>
63eb6bdd8SBruce Richardson #include <netinet/in.h>
73eb6bdd8SBruce Richardson 
8191128d7SDavid Marchand #include <rte_bitops.h>
93eb6bdd8SBruce Richardson #include <rte_mbuf.h>
103eb6bdd8SBruce Richardson #include <rte_malloc.h>
11df96fd0dSBruce Richardson #include <ethdev_driver.h>
12df96fd0dSBruce Richardson #include <ethdev_vdev.h>
133eb6bdd8SBruce Richardson #include <rte_tcp.h>
143eb6bdd8SBruce Richardson #include <rte_udp.h>
153eb6bdd8SBruce Richardson #include <rte_ip.h>
161d63c314SAndriy Berestovskyy #include <rte_ip_frag.h>
173eb6bdd8SBruce Richardson #include <rte_devargs.h>
183eb6bdd8SBruce Richardson #include <rte_kvargs.h>
194851ef2bSDavid Marchand #include <bus_vdev_driver.h>
203eb6bdd8SBruce Richardson #include <rte_alarm.h>
213eb6bdd8SBruce Richardson #include <rte_cycles.h>
22c022cb40SBruce Richardson #include <rte_string_fns.h>
233eb6bdd8SBruce Richardson 
243eb6bdd8SBruce Richardson #include "rte_eth_bond.h"
25b28f28aeSDharmik Thakkar #include "eth_bond_private.h"
26b28f28aeSDharmik Thakkar #include "eth_bond_8023ad_private.h"
273eb6bdd8SBruce Richardson 
283eb6bdd8SBruce Richardson #define REORDER_PERIOD_MS 10
2968451eb6SJan Blunck #define DEFAULT_POLLING_INTERVAL_10_MS (10)
309d453d1dSAlex Kiselev #define BOND_MAX_MAC_ADDRS 16
313eb6bdd8SBruce Richardson 
323eb6bdd8SBruce Richardson #define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
333eb6bdd8SBruce Richardson 
343eb6bdd8SBruce Richardson /* Table for statistics in mode 5 TLB */
353eb6bdd8SBruce Richardson static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
363eb6bdd8SBruce Richardson 
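/*
 * Return the cumulative length of any VLAN headers (single tag or QinQ)
 * that follow the Ethernet header, and update *proto to the EtherType of
 * the encapsulated payload.  Returns 0 for untagged frames.
 */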
373eb6bdd8SBruce Richardson static inline size_t
386d13ea8eSOlivier Matz get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
393eb6bdd8SBruce Richardson {
403eb6bdd8SBruce Richardson 	size_t vlan_offset = 0;
413eb6bdd8SBruce Richardson 
4235b2d13fSOlivier Matz 	if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
4335b2d13fSOlivier Matz 		rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
446d13ea8eSOlivier Matz 		struct rte_vlan_hdr *vlan_hdr =
456d13ea8eSOlivier Matz 			(struct rte_vlan_hdr *)(eth_hdr + 1);
463eb6bdd8SBruce Richardson 
476d13ea8eSOlivier Matz 		vlan_offset = sizeof(struct rte_vlan_hdr);
483eb6bdd8SBruce Richardson 		*proto = vlan_hdr->eth_proto;
493eb6bdd8SBruce Richardson 
5035b2d13fSOlivier Matz 		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
513eb6bdd8SBruce Richardson 			vlan_hdr = vlan_hdr + 1;
523eb6bdd8SBruce Richardson 			*proto = vlan_hdr->eth_proto;
536d13ea8eSOlivier Matz 			vlan_offset += sizeof(struct rte_vlan_hdr);
543eb6bdd8SBruce Richardson 		}
553eb6bdd8SBruce Richardson 	}
563eb6bdd8SBruce Richardson 	return vlan_offset;
573eb6bdd8SBruce Richardson }
583eb6bdd8SBruce Richardson 
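/*
 * Rx burst that polls each active member in turn until nb_pkts packets have
 * been gathered or every member has been polled once.  Each call starts one
 * member further along than the previous call so members are polled fairly
 * over time.  Also reused by the ALB Rx path (bond_ethdev_rx_burst_alb).
 */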
593eb6bdd8SBruce Richardson static uint16_t
603eb6bdd8SBruce Richardson bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
613eb6bdd8SBruce Richardson {
623eb6bdd8SBruce Richardson 	struct bond_dev_private *internals;
633eb6bdd8SBruce Richardson 
643eb6bdd8SBruce Richardson 	uint16_t num_rx_total = 0;
6515e34522SLong Wu 	uint16_t member_count;
6615e34522SLong Wu 	uint16_t active_member;
673eb6bdd8SBruce Richardson 	int i;
683eb6bdd8SBruce Richardson 
694f840086SLong Wu 	/* Cast to structure containing the bonding device's port id and queue id */
703eb6bdd8SBruce Richardson 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
713eb6bdd8SBruce Richardson 	internals = bd_rx_q->dev_private;
7215e34522SLong Wu 	member_count = internals->active_member_count;
7315e34522SLong Wu 	active_member = bd_rx_q->active_member;
743eb6bdd8SBruce Richardson 
7515e34522SLong Wu 	for (i = 0; i < member_count && nb_pkts; i++) {
7615e34522SLong Wu 		uint16_t num_rx_member;
773eb6bdd8SBruce Richardson 
7815e34522SLong Wu 		/*
7915e34522SLong Wu 		 * Offset of pointer to *bufs increases as packets are received
8015e34522SLong Wu 		 * from other members.
8115e34522SLong Wu 		 */
8215e34522SLong Wu 		num_rx_member =
8315e34522SLong Wu 			rte_eth_rx_burst(internals->active_members[active_member],
84e1110e97SChas Williams 					 bd_rx_q->queue_id,
85e1110e97SChas Williams 					 bufs + num_rx_total, nb_pkts);
8615e34522SLong Wu 		num_rx_total += num_rx_member;
8715e34522SLong Wu 		nb_pkts -= num_rx_member;
8815e34522SLong Wu 		if (++active_member >= member_count)
8915e34522SLong Wu 			active_member = 0;
903eb6bdd8SBruce Richardson 	}
913eb6bdd8SBruce Richardson 
9215e34522SLong Wu 	if (++bd_rx_q->active_member >= member_count)
9315e34522SLong Wu 		bd_rx_q->active_member = 0;
943eb6bdd8SBruce Richardson 	return num_rx_total;
953eb6bdd8SBruce Richardson }
963eb6bdd8SBruce Richardson 
973eb6bdd8SBruce Richardson static uint16_t
983eb6bdd8SBruce Richardson bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
993eb6bdd8SBruce Richardson 		uint16_t nb_pkts)
1003eb6bdd8SBruce Richardson {
1013eb6bdd8SBruce Richardson 	struct bond_dev_private *internals;
1023eb6bdd8SBruce Richardson 
1034f840086SLong Wu 	/* Cast to structure containing the bonding device's port id and queue id */
1043eb6bdd8SBruce Richardson 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
1053eb6bdd8SBruce Richardson 
1063eb6bdd8SBruce Richardson 	internals = bd_rx_q->dev_private;
1073eb6bdd8SBruce Richardson 
1083eb6bdd8SBruce Richardson 	return rte_eth_rx_burst(internals->current_primary_port,
1093eb6bdd8SBruce Richardson 			bd_rx_q->queue_id, bufs, nb_pkts);
1103eb6bdd8SBruce Richardson }
1113eb6bdd8SBruce Richardson 
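/*
 * A received frame is treated as a LACP/marker "slow protocol" packet only
 * when it is not VLAN tagged and its EtherType is the slow protocols type
 * with a LACPDU or marker subtype.  Used by the mode 4 Rx path to decide
 * which frames are handed to the LACP state machine.
 */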
1129ed2c8a2SHaifeng Lin static inline uint8_t
113ae61e472SGanghui Zeng is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
1149ed2c8a2SHaifeng Lin {
11535b2d13fSOlivier Matz 	const uint16_t ether_type_slow_be =
11635b2d13fSOlivier Matz 		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
1179ed2c8a2SHaifeng Lin 
118daa02b5cSOlivier Matz 	return !((mbuf->ol_flags & RTE_MBUF_F_RX_VLAN) ? mbuf->vlan_tci : 0) &&
119ae61e472SGanghui Zeng 		(ethertype == ether_type_slow_be &&
1209ed2c8a2SHaifeng Lin 		(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
1219ed2c8a2SHaifeng Lin }
1229ed2c8a2SHaifeng Lin 
123112891cdSTomasz Kulasek /*****************************************************************************
124112891cdSTomasz Kulasek  * Flow director's setup for mode 4 optimization
125112891cdSTomasz Kulasek  */
126112891cdSTomasz Kulasek 
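/*
 * The flow rule built from the items below matches any Ethernet frame whose
 * EtherType is the IEEE 802.3 slow protocols type (RTE_ETHER_TYPE_SLOW) and
 * is used to steer LACP traffic to a dedicated Rx queue on each member port.
 */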
127112891cdSTomasz Kulasek static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
1288275d5fcSThomas Monjalon 	.hdr.dst_addr.addr_bytes = { 0 },
1298275d5fcSThomas Monjalon 	.hdr.src_addr.addr_bytes = { 0 },
1308275d5fcSThomas Monjalon 	.hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
131112891cdSTomasz Kulasek };
132112891cdSTomasz Kulasek 
133112891cdSTomasz Kulasek static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
1348275d5fcSThomas Monjalon 	.hdr.dst_addr.addr_bytes = { 0 },
1358275d5fcSThomas Monjalon 	.hdr.src_addr.addr_bytes = { 0 },
1368275d5fcSThomas Monjalon 	.hdr.ether_type = 0xFFFF,
137112891cdSTomasz Kulasek };
138112891cdSTomasz Kulasek 
139112891cdSTomasz Kulasek static struct rte_flow_item flow_item_8023ad[] = {
140112891cdSTomasz Kulasek 	{
141112891cdSTomasz Kulasek 		.type = RTE_FLOW_ITEM_TYPE_ETH,
142112891cdSTomasz Kulasek 		.spec = &flow_item_eth_type_8023ad,
143112891cdSTomasz Kulasek 		.last = NULL,
144112891cdSTomasz Kulasek 		.mask = &flow_item_eth_mask_type_8023ad,
145112891cdSTomasz Kulasek 	},
146112891cdSTomasz Kulasek 	{
147112891cdSTomasz Kulasek 		.type = RTE_FLOW_ITEM_TYPE_END,
148112891cdSTomasz Kulasek 		.spec = NULL,
149112891cdSTomasz Kulasek 		.last = NULL,
150112891cdSTomasz Kulasek 		.mask = NULL,
151112891cdSTomasz Kulasek 	}
152112891cdSTomasz Kulasek };
153112891cdSTomasz Kulasek 
154112891cdSTomasz Kulasek const struct rte_flow_attr flow_attr_8023ad = {
155112891cdSTomasz Kulasek 	.group = 0,
156112891cdSTomasz Kulasek 	.priority = 0,
157112891cdSTomasz Kulasek 	.ingress = 1,
158112891cdSTomasz Kulasek 	.egress = 0,
159112891cdSTomasz Kulasek 	.reserved = 0,
160112891cdSTomasz Kulasek };
161112891cdSTomasz Kulasek 
162112891cdSTomasz Kulasek int
163112891cdSTomasz Kulasek bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
16415e34522SLong Wu 		uint16_t member_port) {
16515e34522SLong Wu 	struct rte_eth_dev_info member_info;
166112891cdSTomasz Kulasek 	struct rte_flow_error error;
1670b07f6e9SStephen Hemminger 	struct bond_dev_private *internals = bond_dev->data->dev_private;
168112891cdSTomasz Kulasek 
1699f78433aSTomasz Kulasek 	const struct rte_flow_action_queue lacp_queue_conf = {
1709f78433aSTomasz Kulasek 		.index = 0,
171112891cdSTomasz Kulasek 	};
172112891cdSTomasz Kulasek 
173112891cdSTomasz Kulasek 	const struct rte_flow_action actions[] = {
174112891cdSTomasz Kulasek 		{
175112891cdSTomasz Kulasek 			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
176112891cdSTomasz Kulasek 			.conf = &lacp_queue_conf
177112891cdSTomasz Kulasek 		},
178112891cdSTomasz Kulasek 		{
179112891cdSTomasz Kulasek 			.type = RTE_FLOW_ACTION_TYPE_END,
180112891cdSTomasz Kulasek 		}
181112891cdSTomasz Kulasek 	};
182112891cdSTomasz Kulasek 
18315e34522SLong Wu 	int ret = rte_flow_validate(member_port, &flow_attr_8023ad,
184112891cdSTomasz Kulasek 			flow_item_8023ad, actions, &error);
1859f78433aSTomasz Kulasek 	if (ret < 0) {
18615e34522SLong Wu 		RTE_BOND_LOG(ERR, "%s: %s (member_port=%d queue_id=%d)",
18715e34522SLong Wu 				__func__, error.message, member_port,
1889f78433aSTomasz Kulasek 				internals->mode4.dedicated_queues.rx_qid);
189112891cdSTomasz Kulasek 		return -1;
1909f78433aSTomasz Kulasek 	}
1919f78433aSTomasz Kulasek 
19215e34522SLong Wu 	ret = rte_eth_dev_info_get(member_port, &member_info);
193fab23451SIvan Ilchenko 	if (ret != 0) {
194fab23451SIvan Ilchenko 		RTE_BOND_LOG(ERR,
195f665790aSDavid Marchand 			"%s: Error during getting device (port %u) info: %s",
19615e34522SLong Wu 			__func__, member_port, strerror(-ret));
197fab23451SIvan Ilchenko 
198fab23451SIvan Ilchenko 		return ret;
199fab23451SIvan Ilchenko 	}
200fab23451SIvan Ilchenko 
20115e34522SLong Wu 	if (member_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
20215e34522SLong Wu 			member_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
2039f78433aSTomasz Kulasek 		RTE_BOND_LOG(ERR,
20415e34522SLong Wu 			"%s: Member %d capabilities don't allow allocating additional queues",
20515e34522SLong Wu 			__func__, member_port);
2069f78433aSTomasz Kulasek 		return -1;
2079f78433aSTomasz Kulasek 	}
208112891cdSTomasz Kulasek 
209112891cdSTomasz Kulasek 	return 0;
210112891cdSTomasz Kulasek }
211112891cdSTomasz Kulasek 
212112891cdSTomasz Kulasek int
213f8244c63SZhiyong Yang bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
214112891cdSTomasz Kulasek 	struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
2150b07f6e9SStephen Hemminger 	struct bond_dev_private *internals = bond_dev->data->dev_private;
2169f78433aSTomasz Kulasek 	struct rte_eth_dev_info bond_info;
217f8244c63SZhiyong Yang 	uint16_t idx;
218fab23451SIvan Ilchenko 	int ret;
219112891cdSTomasz Kulasek 
22015e34522SLong Wu 	/* Verify that all members in the bonding device support flow director */
22115e34522SLong Wu 	if (internals->member_count > 0) {
222fab23451SIvan Ilchenko 		ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
223fab23451SIvan Ilchenko 		if (ret != 0) {
224fab23451SIvan Ilchenko 			RTE_BOND_LOG(ERR,
225f665790aSDavid Marchand 				"%s: Error during getting device (port %u) info: %s",
226fab23451SIvan Ilchenko 				__func__, bond_dev->data->port_id,
227fab23451SIvan Ilchenko 				strerror(-ret));
228fab23451SIvan Ilchenko 
229fab23451SIvan Ilchenko 			return ret;
230fab23451SIvan Ilchenko 		}
231112891cdSTomasz Kulasek 
232112891cdSTomasz Kulasek 		internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
233112891cdSTomasz Kulasek 		internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;
234112891cdSTomasz Kulasek 
23515e34522SLong Wu 		for (idx = 0; idx < internals->member_count; idx++) {
236112891cdSTomasz Kulasek 			if (bond_ethdev_8023ad_flow_verify(bond_dev,
23715e34522SLong Wu 					internals->members[idx].port_id) != 0)
238112891cdSTomasz Kulasek 				return -1;
239112891cdSTomasz Kulasek 		}
240112891cdSTomasz Kulasek 	}
241112891cdSTomasz Kulasek 
242112891cdSTomasz Kulasek 	return 0;
243112891cdSTomasz Kulasek }
244112891cdSTomasz Kulasek 
245112891cdSTomasz Kulasek int
24615e34522SLong Wu bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t member_port) {
247112891cdSTomasz Kulasek 
248112891cdSTomasz Kulasek 	struct rte_flow_error error;
2490b07f6e9SStephen Hemminger 	struct bond_dev_private *internals = bond_dev->data->dev_private;
250112891cdSTomasz Kulasek 	struct rte_flow_action_queue lacp_queue_conf = {
251112891cdSTomasz Kulasek 		.index = internals->mode4.dedicated_queues.rx_qid,
252112891cdSTomasz Kulasek 	};
253112891cdSTomasz Kulasek 
254112891cdSTomasz Kulasek 	const struct rte_flow_action actions[] = {
255112891cdSTomasz Kulasek 		{
256112891cdSTomasz Kulasek 			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
257112891cdSTomasz Kulasek 			.conf = &lacp_queue_conf
258112891cdSTomasz Kulasek 		},
259112891cdSTomasz Kulasek 		{
260112891cdSTomasz Kulasek 			.type = RTE_FLOW_ACTION_TYPE_END,
261112891cdSTomasz Kulasek 		}
262112891cdSTomasz Kulasek 	};
263112891cdSTomasz Kulasek 
26415e34522SLong Wu 	internals->mode4.dedicated_queues.flow[member_port] = rte_flow_create(member_port,
265112891cdSTomasz Kulasek 			&flow_attr_8023ad, flow_item_8023ad, actions, &error);
26615e34522SLong Wu 	if (internals->mode4.dedicated_queues.flow[member_port] == NULL) {
267112891cdSTomasz Kulasek 		RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
26815e34522SLong Wu 				"(member_port=%d queue_id=%d)",
26915e34522SLong Wu 				error.message, member_port,
270112891cdSTomasz Kulasek 				internals->mode4.dedicated_queues.rx_qid);
271112891cdSTomasz Kulasek 		return -1;
272112891cdSTomasz Kulasek 	}
273112891cdSTomasz Kulasek 
274112891cdSTomasz Kulasek 	return 0;
275112891cdSTomasz Kulasek }
276112891cdSTomasz Kulasek 
27730bfba52SHuisong Li static bool
27830bfba52SHuisong Li is_bond_mac_addr(const struct rte_ether_addr *ea,
27930bfba52SHuisong Li 		 const struct rte_ether_addr *mac_addrs, uint32_t max_mac_addrs)
28030bfba52SHuisong Li {
28130bfba52SHuisong Li 	uint32_t i;
28230bfba52SHuisong Li 
28330bfba52SHuisong Li 	for (i = 0; i < max_mac_addrs; i++) {
28430bfba52SHuisong Li 		/* skip zero address */
28530bfba52SHuisong Li 		if (rte_is_zero_ether_addr(&mac_addrs[i]))
28630bfba52SHuisong Li 			continue;
28730bfba52SHuisong Li 
28830bfba52SHuisong Li 		if (rte_is_same_ether_addr(ea, &mac_addrs[i]))
28930bfba52SHuisong Li 			return true;
29030bfba52SHuisong Li 	}
29130bfba52SHuisong Li 
29230bfba52SHuisong Li 	return false;
29330bfba52SHuisong Li }
29430bfba52SHuisong Li 
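/*
 * Common Rx path for 802.3ad (mode 4): packets are read round-robin from the
 * active members; slow protocol frames are handed to the mode 4 state machine
 * (unless a dedicated Rx queue already filters them in hardware), and frames
 * from members that are not collecting, or that do not match the bonding
 * interface's MAC/promiscuous/allmulti configuration, are dropped.
 */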
29558729b54SDavid Marchand static inline uint16_t
29658729b54SDavid Marchand rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
29758729b54SDavid Marchand 		bool dedicated_rxq)
2983eb6bdd8SBruce Richardson {
2994f840086SLong Wu 	/* Cast to structure containing the bonding device's port id and queue id */
3003eb6bdd8SBruce Richardson 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
3013eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = bd_rx_q->dev_private;
3024f840086SLong Wu 	struct rte_eth_dev *bonding_eth_dev =
3034a5bc4e2SChas Williams 					&rte_eth_devices[internals->port_id];
3044f840086SLong Wu 	struct rte_ether_addr *bond_mac = bonding_eth_dev->data->mac_addrs;
3056d13ea8eSOlivier Matz 	struct rte_ether_hdr *hdr;
3063eb6bdd8SBruce Richardson 
30735b2d13fSOlivier Matz 	const uint16_t ether_type_slow_be =
30835b2d13fSOlivier Matz 		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
3093eb6bdd8SBruce Richardson 	uint16_t num_rx_total = 0;	/* Total number of received packets */
31015e34522SLong Wu 	uint16_t members[RTE_MAX_ETHPORTS];
31115e34522SLong Wu 	uint16_t member_count, idx;
3123eb6bdd8SBruce Richardson 
31315e34522SLong Wu 	uint8_t collecting;  /* current member collecting status */
31468218b87SDavid Marchand 	const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
31568218b87SDavid Marchand 	const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
3169ed2c8a2SHaifeng Lin 	uint8_t subtype;
3171d6cab8aSDavid Marchand 	uint16_t i;
318646d3f20SDavid Marchand 	uint16_t j;
319646d3f20SDavid Marchand 	uint16_t k;
3203eb6bdd8SBruce Richardson 
32115e34522SLong Wu 	/* Copy member list to protect against member up/down changes during rx
3223eb6bdd8SBruce Richardson 	 * bursting */
32315e34522SLong Wu 	member_count = internals->active_member_count;
32415e34522SLong Wu 	memcpy(members, internals->active_members,
32515e34522SLong Wu 			sizeof(internals->active_members[0]) * member_count);
3263eb6bdd8SBruce Richardson 
32715e34522SLong Wu 	idx = bd_rx_q->active_member;
32815e34522SLong Wu 	if (idx >= member_count) {
32915e34522SLong Wu 		bd_rx_q->active_member = 0;
330ae2a0486SKeith Wiles 		idx = 0;
331ae2a0486SKeith Wiles 	}
33215e34522SLong Wu 	for (i = 0; i < member_count && num_rx_total < nb_pkts; i++) {
3333eb6bdd8SBruce Richardson 		j = num_rx_total;
33415e34522SLong Wu 		collecting = ACTOR_STATE(&bond_mode_8023ad_ports[members[idx]],
335ae2a0486SKeith Wiles 					 COLLECTING);
3363eb6bdd8SBruce Richardson 
33715e34522SLong Wu 		/* Read packets from this member */
33815e34522SLong Wu 		num_rx_total += rte_eth_rx_burst(members[idx], bd_rx_q->queue_id,
3393eb6bdd8SBruce Richardson 				&bufs[num_rx_total], nb_pkts - num_rx_total);
3403eb6bdd8SBruce Richardson 
3413eb6bdd8SBruce Richardson 		for (k = j; k < 2 && k < num_rx_total; k++)
3423eb6bdd8SBruce Richardson 			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));
3433eb6bdd8SBruce Richardson 
3443eb6bdd8SBruce Richardson 		/* Handle slow protocol packets. */
3453eb6bdd8SBruce Richardson 		while (j < num_rx_total) {
3463eb6bdd8SBruce Richardson 			if (j + 3 < num_rx_total)
3473eb6bdd8SBruce Richardson 				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
3483eb6bdd8SBruce Richardson 
3496d13ea8eSOlivier Matz 			hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
3509ed2c8a2SHaifeng Lin 			subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
3519ed2c8a2SHaifeng Lin 
35358729b54SDavid Marchand 			/* Remove packet from array if:
35458729b54SDavid Marchand 			 * - it is a slow packet but no dedicated rxq is present,
35415e34522SLong Wu 			 * - the member is not in collecting state,
35530bfba52SHuisong Li 			 * - the bonding interface is not in promiscuous mode,
35630bfba52SHuisong Li 			 *   the packet's address isn't in the mac_addrs array and:
35730bfba52SHuisong Li 			 *   - the packet is unicast, or
35868218b87SDavid Marchand 			 *   - the packet is multicast and the bonding interface
35968218b87SDavid Marchand 			 *     is not in allmulti mode.
36058729b54SDavid Marchand 			 */
36158729b54SDavid Marchand 			if (unlikely(
36258729b54SDavid Marchand 				(!dedicated_rxq &&
36358729b54SDavid Marchand 				 is_lacp_packets(hdr->ether_type, subtype,
36458729b54SDavid Marchand 						 bufs[j])) ||
3654a5bc4e2SChas Williams 				!collecting ||
3664a5bc4e2SChas Williams 				(!promisc &&
36730bfba52SHuisong Li 				 !is_bond_mac_addr(&hdr->dst_addr, bond_mac,
36830bfba52SHuisong Li 						   BOND_MAX_MAC_ADDRS) &&
36930bfba52SHuisong Li 				 (rte_is_unicast_ether_addr(&hdr->dst_addr) ||
37030bfba52SHuisong Li 				  !allmulti)))) {
3713eb6bdd8SBruce Richardson 				if (hdr->ether_type == ether_type_slow_be) {
372ae2a0486SKeith Wiles 					bond_mode_8023ad_handle_slow_pkt(
37315e34522SLong Wu 					    internals, members[idx], bufs[j]);
3743eb6bdd8SBruce Richardson 				} else
3753eb6bdd8SBruce Richardson 					rte_pktmbuf_free(bufs[j]);
3763eb6bdd8SBruce Richardson 
3773eb6bdd8SBruce Richardson 				/* Packet is managed by mode 4 or dropped, shift the array */
3783eb6bdd8SBruce Richardson 				num_rx_total--;
3793eb6bdd8SBruce Richardson 				if (j < num_rx_total) {
3803eb6bdd8SBruce Richardson 					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
3813eb6bdd8SBruce Richardson 						(num_rx_total - j));
3823eb6bdd8SBruce Richardson 				}
3833eb6bdd8SBruce Richardson 			} else
3843eb6bdd8SBruce Richardson 				j++;
3853eb6bdd8SBruce Richardson 		}
38615e34522SLong Wu 		if (unlikely(++idx == member_count))
387ae2a0486SKeith Wiles 			idx = 0;
3883eb6bdd8SBruce Richardson 	}
3893eb6bdd8SBruce Richardson 
39015e34522SLong Wu 	if (++bd_rx_q->active_member >= member_count)
39115e34522SLong Wu 		bd_rx_q->active_member = 0;
392e1110e97SChas Williams 
3933eb6bdd8SBruce Richardson 	return num_rx_total;
3943eb6bdd8SBruce Richardson }
3953eb6bdd8SBruce Richardson 
39658729b54SDavid Marchand static uint16_t
39758729b54SDavid Marchand bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
39858729b54SDavid Marchand 		uint16_t nb_pkts)
39958729b54SDavid Marchand {
40058729b54SDavid Marchand 	return rx_burst_8023ad(queue, bufs, nb_pkts, false);
40158729b54SDavid Marchand }
40258729b54SDavid Marchand 
40358729b54SDavid Marchand static uint16_t
40458729b54SDavid Marchand bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
40558729b54SDavid Marchand 		uint16_t nb_pkts)
40658729b54SDavid Marchand {
40758729b54SDavid Marchand 	return rx_burst_8023ad(queue, bufs, nb_pkts, true);
40858729b54SDavid Marchand }
40958729b54SDavid Marchand 
4103eb6bdd8SBruce Richardson #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
4113eb6bdd8SBruce Richardson uint32_t burstnumberRX;
41215e34522SLong Wu uint32_t burst_number_TX;
4133eb6bdd8SBruce Richardson 
4143eb6bdd8SBruce Richardson #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
4153eb6bdd8SBruce Richardson 
4163eb6bdd8SBruce Richardson static void
417f4206d16SBruce Richardson arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
4183eb6bdd8SBruce Richardson {
4193eb6bdd8SBruce Richardson 	switch (arp_op) {
420e482e0faSOlivier Matz 	case RTE_ARP_OP_REQUEST:
421f9acaf84SBruce Richardson 		strlcpy(buf, "ARP Request", buf_len);
4223eb6bdd8SBruce Richardson 		return;
423e482e0faSOlivier Matz 	case RTE_ARP_OP_REPLY:
424f9acaf84SBruce Richardson 		strlcpy(buf, "ARP Reply", buf_len);
4253eb6bdd8SBruce Richardson 		return;
426e482e0faSOlivier Matz 	case RTE_ARP_OP_REVREQUEST:
427f9acaf84SBruce Richardson 		strlcpy(buf, "Reverse ARP Request", buf_len);
4283eb6bdd8SBruce Richardson 		return;
429e482e0faSOlivier Matz 	case RTE_ARP_OP_REVREPLY:
430f9acaf84SBruce Richardson 		strlcpy(buf, "Reverse ARP Reply", buf_len);
4313eb6bdd8SBruce Richardson 		return;
432e482e0faSOlivier Matz 	case RTE_ARP_OP_INVREQUEST:
433f9acaf84SBruce Richardson 		strlcpy(buf, "Peer Identify Request", buf_len);
4343eb6bdd8SBruce Richardson 		return;
435e482e0faSOlivier Matz 	case RTE_ARP_OP_INVREPLY:
436f9acaf84SBruce Richardson 		strlcpy(buf, "Peer Identify Reply", buf_len);
4373eb6bdd8SBruce Richardson 		return;
4383eb6bdd8SBruce Richardson 	default:
4393eb6bdd8SBruce Richardson 		break;
4403eb6bdd8SBruce Richardson 	}
441f9acaf84SBruce Richardson 	strlcpy(buf, "Unknown", buf_len);
4423eb6bdd8SBruce Richardson 	return;
4433eb6bdd8SBruce Richardson }
4443eb6bdd8SBruce Richardson #endif
4453eb6bdd8SBruce Richardson #define MaxIPv4String	16
4463eb6bdd8SBruce Richardson static void
4473eb6bdd8SBruce Richardson ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
4483eb6bdd8SBruce Richardson {
4493eb6bdd8SBruce Richardson 	uint32_t ipv4_addr;
4503eb6bdd8SBruce Richardson 
4513eb6bdd8SBruce Richardson 	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
4523eb6bdd8SBruce Richardson 	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
4533eb6bdd8SBruce Richardson 		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
4543eb6bdd8SBruce Richardson 		ipv4_addr & 0xFF);
4553eb6bdd8SBruce Richardson }
4563eb6bdd8SBruce Richardson 
4573eb6bdd8SBruce Richardson #define MAX_CLIENTS_NUMBER	128
4583eb6bdd8SBruce Richardson uint8_t active_clients;
4593eb6bdd8SBruce Richardson struct client_stats_t {
460398fb97fSZhiyong Yang 	uint16_t port;
4613eb6bdd8SBruce Richardson 	uint32_t ipv4_addr;
4623eb6bdd8SBruce Richardson 	uint32_t ipv4_rx_packets;
4633eb6bdd8SBruce Richardson 	uint32_t ipv4_tx_packets;
4643eb6bdd8SBruce Richardson };
4653eb6bdd8SBruce Richardson struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
4663eb6bdd8SBruce Richardson 
4673eb6bdd8SBruce Richardson static void
468398fb97fSZhiyong Yang update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
4693eb6bdd8SBruce Richardson {
4703eb6bdd8SBruce Richardson 	int i = 0;
4713eb6bdd8SBruce Richardson 
4723eb6bdd8SBruce Richardson 	for (; i < MAX_CLIENTS_NUMBER; i++)	{
4733eb6bdd8SBruce Richardson 		if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port))	{
4743eb6bdd8SBruce Richardson 			/* Just update RX packets number for this client */
4753eb6bdd8SBruce Richardson 			if (TXorRXindicator == &burstnumberRX)
4763eb6bdd8SBruce Richardson 				client_stats[i].ipv4_rx_packets++;
4773eb6bdd8SBruce Richardson 			else
4783eb6bdd8SBruce Richardson 				client_stats[i].ipv4_tx_packets++;
4793eb6bdd8SBruce Richardson 			return;
4803eb6bdd8SBruce Richardson 		}
4813eb6bdd8SBruce Richardson 	}
4823eb6bdd8SBruce Richardson 	/* We have a new client. Insert it into the table and increment its stats */
	if (active_clients >= MAX_CLIENTS_NUMBER)
		return;	/* client table is full; drop the update to avoid overflowing it */
4833eb6bdd8SBruce Richardson 	if (TXorRXindicator == &burstnumberRX)
4843eb6bdd8SBruce Richardson 		client_stats[active_clients].ipv4_rx_packets++;
4853eb6bdd8SBruce Richardson 	else
4863eb6bdd8SBruce Richardson 		client_stats[active_clients].ipv4_tx_packets++;
4873eb6bdd8SBruce Richardson 	client_stats[active_clients].ipv4_addr = addr;
4883eb6bdd8SBruce Richardson 	client_stats[active_clients].port = port;
4893eb6bdd8SBruce Richardson 	active_clients++;
4903eb6bdd8SBruce Richardson 
4913eb6bdd8SBruce Richardson }
4923eb6bdd8SBruce Richardson 
4933eb6bdd8SBruce Richardson #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
4943eb6bdd8SBruce Richardson #define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
4952b843cacSDavid Marchand 	RTE_LOG_LINE(DEBUG, BOND,				\
496c2c4f87bSAman Deep Singh 		"%s port:%d SrcMAC:" RTE_ETHER_ADDR_PRT_FMT " SrcIP:%s " \
4972b843cacSDavid Marchand 		"DstMAC:" RTE_ETHER_ADDR_PRT_FMT " DstIP:%s %s %d", \
4983eb6bdd8SBruce Richardson 		info,							\
4993eb6bdd8SBruce Richardson 		port,							\
50004d43857SDmitry Kozlyuk 		RTE_ETHER_ADDR_BYTES(&eth_h->src_addr),                  \
5013eb6bdd8SBruce Richardson 		src_ip,							\
50204d43857SDmitry Kozlyuk 		RTE_ETHER_ADDR_BYTES(&eth_h->dst_addr),                  \
5033eb6bdd8SBruce Richardson 		dst_ip,							\
504d7f4562aSStephen Hemminger 		arp_op, ++burstnumber)
5053eb6bdd8SBruce Richardson #endif
5063eb6bdd8SBruce Richardson 
5073eb6bdd8SBruce Richardson static void
508f2fc83b4SThomas Monjalon mode6_debug(const char __rte_unused *info,
5096d13ea8eSOlivier Matz 	struct rte_ether_hdr *eth_h, uint16_t port,
510f2fc83b4SThomas Monjalon 	uint32_t __rte_unused *burstnumber)
5113eb6bdd8SBruce Richardson {
512a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ipv4_h;
5133eb6bdd8SBruce Richardson #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
514f2745bfeSOlivier Matz 	struct rte_arp_hdr *arp_h;
5153eb6bdd8SBruce Richardson 	char dst_ip[16];
5163eb6bdd8SBruce Richardson 	char ArpOp[24];
5173eb6bdd8SBruce Richardson 	char buf[16];
5183eb6bdd8SBruce Richardson #endif
5193eb6bdd8SBruce Richardson 	char src_ip[16];
5203eb6bdd8SBruce Richardson 
5213eb6bdd8SBruce Richardson 	uint16_t ether_type = eth_h->ether_type;
5223eb6bdd8SBruce Richardson 	uint16_t offset = get_vlan_offset(eth_h, &ether_type);
5233eb6bdd8SBruce Richardson 
5243eb6bdd8SBruce Richardson #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
525c022cb40SBruce Richardson 	strlcpy(buf, info, 16);
5263eb6bdd8SBruce Richardson #endif
5273eb6bdd8SBruce Richardson 
5280c9da755SDavid Marchand 	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
529a7c528e5SOlivier Matz 		ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
5303eb6bdd8SBruce Richardson 		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
5313eb6bdd8SBruce Richardson #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
5323eb6bdd8SBruce Richardson 		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
5333eb6bdd8SBruce Richardson 		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
5343eb6bdd8SBruce Richardson #endif
5353eb6bdd8SBruce Richardson 		update_client_stats(ipv4_h->src_addr, port, burstnumber);
5363eb6bdd8SBruce Richardson 	}
5373eb6bdd8SBruce Richardson #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
53835b2d13fSOlivier Matz 	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
539f2745bfeSOlivier Matz 		arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
5403eb6bdd8SBruce Richardson 		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
5413eb6bdd8SBruce Richardson 		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
542f2745bfeSOlivier Matz 		arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
543f4206d16SBruce Richardson 				ArpOp, sizeof(ArpOp));
5443eb6bdd8SBruce Richardson 		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
5453eb6bdd8SBruce Richardson 	}
5463eb6bdd8SBruce Richardson #endif
5473eb6bdd8SBruce Richardson }
5483eb6bdd8SBruce Richardson #endif
5493eb6bdd8SBruce Richardson 
5503eb6bdd8SBruce Richardson static uint16_t
5513eb6bdd8SBruce Richardson bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
5523eb6bdd8SBruce Richardson {
55399fb0a03SDongsheng Rong 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
55499fb0a03SDongsheng Rong 	struct bond_dev_private *internals = bd_rx_q->dev_private;
5556d13ea8eSOlivier Matz 	struct rte_ether_hdr *eth_h;
5563eb6bdd8SBruce Richardson 	uint16_t ether_type, offset;
5573eb6bdd8SBruce Richardson 	uint16_t nb_recv_pkts;
5583eb6bdd8SBruce Richardson 	int i;
5593eb6bdd8SBruce Richardson 
5603eb6bdd8SBruce Richardson 	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
5613eb6bdd8SBruce Richardson 
5623eb6bdd8SBruce Richardson 	for (i = 0; i < nb_recv_pkts; i++) {
5636d13ea8eSOlivier Matz 		eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
5643eb6bdd8SBruce Richardson 		ether_type = eth_h->ether_type;
5653eb6bdd8SBruce Richardson 		offset = get_vlan_offset(eth_h, &ether_type);
5663eb6bdd8SBruce Richardson 
56735b2d13fSOlivier Matz 		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
5683eb6bdd8SBruce Richardson #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
5693eb6bdd8SBruce Richardson 			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
5703eb6bdd8SBruce Richardson #endif
5713eb6bdd8SBruce Richardson 			bond_mode_alb_arp_recv(eth_h, offset, internals);
5723eb6bdd8SBruce Richardson 		}
5733eb6bdd8SBruce Richardson #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
5740c9da755SDavid Marchand 		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
5753eb6bdd8SBruce Richardson 			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
5763eb6bdd8SBruce Richardson #endif
5773eb6bdd8SBruce Richardson 	}
5783eb6bdd8SBruce Richardson 
5793eb6bdd8SBruce Richardson 	return nb_recv_pkts;
5803eb6bdd8SBruce Richardson }
5813eb6bdd8SBruce Richardson 
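/*
 * Round-robin Tx burst: packets are dealt one by one to per-member buffers,
 * starting from a persistent member index, and each buffer is then prepared
 * and transmitted on its member.  Packets a member fails to send are moved
 * to the tail of bufs so the caller can retry them.
 */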
5823eb6bdd8SBruce Richardson static uint16_t
5833eb6bdd8SBruce Richardson bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
5843eb6bdd8SBruce Richardson 		uint16_t nb_pkts)
5853eb6bdd8SBruce Richardson {
5863eb6bdd8SBruce Richardson 	struct bond_dev_private *internals;
5873eb6bdd8SBruce Richardson 	struct bond_tx_queue *bd_tx_q;
5883eb6bdd8SBruce Richardson 
58915e34522SLong Wu 	struct rte_mbuf *member_bufs[RTE_MAX_ETHPORTS][nb_pkts];
59015e34522SLong Wu 	uint16_t member_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
5913eb6bdd8SBruce Richardson 
59215e34522SLong Wu 	uint16_t num_of_members;
59315e34522SLong Wu 	uint16_t members[RTE_MAX_ETHPORTS];
5943eb6bdd8SBruce Richardson 
59515e34522SLong Wu 	uint16_t num_tx_total = 0, num_tx_member;
5963eb6bdd8SBruce Richardson 
59715e34522SLong Wu 	static int member_idx;
59815e34522SLong Wu 	int i, cmember_idx = 0, tx_fail_total = 0;
5993eb6bdd8SBruce Richardson 
6003eb6bdd8SBruce Richardson 	bd_tx_q = (struct bond_tx_queue *)queue;
6013eb6bdd8SBruce Richardson 	internals = bd_tx_q->dev_private;
6023eb6bdd8SBruce Richardson 
60315e34522SLong Wu 	/* Copy member list to protect against member up/down changes during tx
6043eb6bdd8SBruce Richardson 	 * bursting */
60515e34522SLong Wu 	num_of_members = internals->active_member_count;
60615e34522SLong Wu 	memcpy(members, internals->active_members,
60715e34522SLong Wu 			sizeof(internals->active_members[0]) * num_of_members);
6083eb6bdd8SBruce Richardson 
60915e34522SLong Wu 	if (num_of_members < 1)
6103eb6bdd8SBruce Richardson 		return num_tx_total;
6113eb6bdd8SBruce Richardson 
61215e34522SLong Wu 	/* Populate members mbuf with which packets are to be sent on it  */
6133eb6bdd8SBruce Richardson 	for (i = 0; i < nb_pkts; i++) {
61415e34522SLong Wu 		cmember_idx = (member_idx + i) % num_of_members;
61515e34522SLong Wu 		member_bufs[cmember_idx][(member_nb_pkts[cmember_idx])++] = bufs[i];
6163eb6bdd8SBruce Richardson 	}
6173eb6bdd8SBruce Richardson 
61815e34522SLong Wu 	/*
61915e34522SLong Wu 	 * increment current member index so the next call to tx burst starts on the
62015e34522SLong Wu 	 * next member.
62115e34522SLong Wu 	 */
62215e34522SLong Wu 	member_idx = ++cmember_idx;
6233eb6bdd8SBruce Richardson 
62415e34522SLong Wu 	/* Send packet burst on each member device */
62515e34522SLong Wu 	for (i = 0; i < num_of_members; i++) {
62615e34522SLong Wu 		if (member_nb_pkts[i] > 0) {
62715e34522SLong Wu 			num_tx_member = rte_eth_tx_prepare(members[i],
62815e34522SLong Wu 					bd_tx_q->queue_id, member_bufs[i],
62915e34522SLong Wu 					member_nb_pkts[i]);
63015e34522SLong Wu 			num_tx_member = rte_eth_tx_burst(members[i], bd_tx_q->queue_id,
63115e34522SLong Wu 					member_bufs[i], num_tx_member);
6323eb6bdd8SBruce Richardson 
6333eb6bdd8SBruce Richardson 			/* if tx burst fails move packets to end of bufs */
63415e34522SLong Wu 			if (unlikely(num_tx_member < member_nb_pkts[i])) {
63515e34522SLong Wu 				int tx_fail_member = member_nb_pkts[i] - num_tx_member;
6363eb6bdd8SBruce Richardson 
63715e34522SLong Wu 				tx_fail_total += tx_fail_member;
6383eb6bdd8SBruce Richardson 
6393eb6bdd8SBruce Richardson 				memcpy(&bufs[nb_pkts - tx_fail_total],
64015e34522SLong Wu 				       &member_bufs[i][num_tx_member],
64115e34522SLong Wu 				       tx_fail_member * sizeof(bufs[0]));
6423eb6bdd8SBruce Richardson 			}
64315e34522SLong Wu 			num_tx_total += num_tx_member;
6443eb6bdd8SBruce Richardson 		}
6453eb6bdd8SBruce Richardson 	}
6463eb6bdd8SBruce Richardson 
6473eb6bdd8SBruce Richardson 	return num_tx_total;
6483eb6bdd8SBruce Richardson }
6493eb6bdd8SBruce Richardson 
6503eb6bdd8SBruce Richardson static uint16_t
6513eb6bdd8SBruce Richardson bond_ethdev_tx_burst_active_backup(void *queue,
6523eb6bdd8SBruce Richardson 		struct rte_mbuf **bufs, uint16_t nb_pkts)
6533eb6bdd8SBruce Richardson {
6543eb6bdd8SBruce Richardson 	struct bond_dev_private *internals;
6553eb6bdd8SBruce Richardson 	struct bond_tx_queue *bd_tx_q;
656c622735dSChengwen Feng 	uint16_t nb_prep_pkts;
6573eb6bdd8SBruce Richardson 
6583eb6bdd8SBruce Richardson 	bd_tx_q = (struct bond_tx_queue *)queue;
6593eb6bdd8SBruce Richardson 	internals = bd_tx_q->dev_private;
6603eb6bdd8SBruce Richardson 
66115e34522SLong Wu 	if (internals->active_member_count < 1)
6623eb6bdd8SBruce Richardson 		return 0;
6633eb6bdd8SBruce Richardson 
664c622735dSChengwen Feng 	nb_prep_pkts = rte_eth_tx_prepare(internals->current_primary_port,
665c622735dSChengwen Feng 				bd_tx_q->queue_id, bufs, nb_pkts);
666c622735dSChengwen Feng 
6673eb6bdd8SBruce Richardson 	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
668c622735dSChengwen Feng 			bufs, nb_prep_pkts);
6693eb6bdd8SBruce Richardson }
6703eb6bdd8SBruce Richardson 
6713eb6bdd8SBruce Richardson static inline uint16_t
6726d13ea8eSOlivier Matz ether_hash(struct rte_ether_hdr *eth_hdr)
6733eb6bdd8SBruce Richardson {
6747621d6a8SCyril Chemparathy 	unaligned_uint16_t *word_src_addr =
67504d43857SDmitry Kozlyuk 		(unaligned_uint16_t *)eth_hdr->src_addr.addr_bytes;
6767621d6a8SCyril Chemparathy 	unaligned_uint16_t *word_dst_addr =
67704d43857SDmitry Kozlyuk 		(unaligned_uint16_t *)eth_hdr->dst_addr.addr_bytes;
6783eb6bdd8SBruce Richardson 
6793eb6bdd8SBruce Richardson 	return (word_src_addr[0] ^ word_dst_addr[0]) ^
6803eb6bdd8SBruce Richardson 			(word_src_addr[1] ^ word_dst_addr[1]) ^
6813eb6bdd8SBruce Richardson 			(word_src_addr[2] ^ word_dst_addr[2]);
6823eb6bdd8SBruce Richardson }
6833eb6bdd8SBruce Richardson 
6843eb6bdd8SBruce Richardson static inline uint32_t
685a7c528e5SOlivier Matz ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
6863eb6bdd8SBruce Richardson {
687693f715dSHuawei Xie 	return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
6883eb6bdd8SBruce Richardson }
6893eb6bdd8SBruce Richardson 
6903eb6bdd8SBruce Richardson static inline uint32_t
691a7c528e5SOlivier Matz ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
6923eb6bdd8SBruce Richardson {
69389b5642dSRobin Jarry 	unaligned_uint32_t *word_src_addr = (unaligned_uint32_t *)&ipv6_hdr->src_addr;
69489b5642dSRobin Jarry 	unaligned_uint32_t *word_dst_addr = (unaligned_uint32_t *)&ipv6_hdr->dst_addr;
6953eb6bdd8SBruce Richardson 
6963eb6bdd8SBruce Richardson 	return (word_src_addr[0] ^ word_dst_addr[0]) ^
6973eb6bdd8SBruce Richardson 			(word_src_addr[1] ^ word_dst_addr[1]) ^
6983eb6bdd8SBruce Richardson 			(word_src_addr[2] ^ word_dst_addr[2]) ^
6993eb6bdd8SBruce Richardson 			(word_src_addr[3] ^ word_dst_addr[3]);
7003eb6bdd8SBruce Richardson }
7013eb6bdd8SBruce Richardson 
70209150784SDeclan Doherty 
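/*
 * Layer 2 transmit hash policy: XOR the 16-bit words of the source and
 * destination MAC addresses, fold the result, and take it modulo
 * member_count to pick the member for each packet.
 */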
70309150784SDeclan Doherty void
70409150784SDeclan Doherty burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
70515e34522SLong Wu 		uint16_t member_count, uint16_t *members)
7063eb6bdd8SBruce Richardson {
7076d13ea8eSOlivier Matz 	struct rte_ether_hdr *eth_hdr;
70809150784SDeclan Doherty 	uint32_t hash;
70909150784SDeclan Doherty 	int i;
7103eb6bdd8SBruce Richardson 
71109150784SDeclan Doherty 	for (i = 0; i < nb_pkts; i++) {
7126d13ea8eSOlivier Matz 		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
7133eb6bdd8SBruce Richardson 
7143eb6bdd8SBruce Richardson 		hash = ether_hash(eth_hdr);
7153eb6bdd8SBruce Richardson 
71615e34522SLong Wu 		members[i] = (hash ^= hash >> 8) % member_count;
71709150784SDeclan Doherty 	}
71809150784SDeclan Doherty }
71909150784SDeclan Doherty 
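/*
 * Layer 2+3 transmit hash policy: combine the MAC-address hash with an
 * IPv4/IPv6 source/destination address hash (skipping over any VLAN tags),
 * fold with shifts and take the result modulo member_count.
 */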
72009150784SDeclan Doherty void
72109150784SDeclan Doherty burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
72215e34522SLong Wu 		uint16_t member_count, uint16_t *members)
72309150784SDeclan Doherty {
72409150784SDeclan Doherty 	uint16_t i;
7256d13ea8eSOlivier Matz 	struct rte_ether_hdr *eth_hdr;
72609150784SDeclan Doherty 	uint16_t proto;
72709150784SDeclan Doherty 	size_t vlan_offset;
72809150784SDeclan Doherty 	uint32_t hash, l3hash;
72909150784SDeclan Doherty 
73009150784SDeclan Doherty 	for (i = 0; i < nb_pkts; i++) {
7316d13ea8eSOlivier Matz 		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
73209150784SDeclan Doherty 		l3hash = 0;
73309150784SDeclan Doherty 
73409150784SDeclan Doherty 		proto = eth_hdr->ether_type;
73509150784SDeclan Doherty 		hash = ether_hash(eth_hdr);
73609150784SDeclan Doherty 
73709150784SDeclan Doherty 		vlan_offset = get_vlan_offset(eth_hdr, &proto);
73809150784SDeclan Doherty 
7390c9da755SDavid Marchand 		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
740a7c528e5SOlivier Matz 			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
7413eb6bdd8SBruce Richardson 					((char *)(eth_hdr + 1) + vlan_offset);
7423eb6bdd8SBruce Richardson 			l3hash = ipv4_hash(ipv4_hdr);
7433eb6bdd8SBruce Richardson 
7440c9da755SDavid Marchand 		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
745a7c528e5SOlivier Matz 			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
7463eb6bdd8SBruce Richardson 					((char *)(eth_hdr + 1) + vlan_offset);
7473eb6bdd8SBruce Richardson 			l3hash = ipv6_hash(ipv6_hdr);
7483eb6bdd8SBruce Richardson 		}
7493eb6bdd8SBruce Richardson 
7503eb6bdd8SBruce Richardson 		hash = hash ^ l3hash;
7513eb6bdd8SBruce Richardson 		hash ^= hash >> 16;
7523eb6bdd8SBruce Richardson 		hash ^= hash >> 8;
7533eb6bdd8SBruce Richardson 
75415e34522SLong Wu 		members[i] = hash % member_count;
75509150784SDeclan Doherty 	}
7563eb6bdd8SBruce Richardson }
7573eb6bdd8SBruce Richardson 
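/*
 * Layer 3+4 transmit hash policy: hash the IP source/destination addresses
 * together with the TCP/UDP port pair.  For IPv4 the ports are ignored when
 * the packet is fragmented (no L4 header present) or when the L4 header
 * would lie past the end of the mbuf data.
 */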
75809150784SDeclan Doherty void
75909150784SDeclan Doherty burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
76015e34522SLong Wu 		uint16_t member_count, uint16_t *members)
7613eb6bdd8SBruce Richardson {
7626d13ea8eSOlivier Matz 	struct rte_ether_hdr *eth_hdr;
76309150784SDeclan Doherty 	uint16_t proto;
76409150784SDeclan Doherty 	size_t vlan_offset;
76509150784SDeclan Doherty 	int i;
7663eb6bdd8SBruce Richardson 
767e73e3547SOlivier Matz 	struct rte_udp_hdr *udp_hdr;
768f41b5156SOlivier Matz 	struct rte_tcp_hdr *tcp_hdr;
76909150784SDeclan Doherty 	uint32_t hash, l3hash, l4hash;
77009150784SDeclan Doherty 
77109150784SDeclan Doherty 	for (i = 0; i < nb_pkts; i++) {
7726d13ea8eSOlivier Matz 		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
77372615806SRadu Nicolau 		size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
77409150784SDeclan Doherty 		proto = eth_hdr->ether_type;
77509150784SDeclan Doherty 		vlan_offset = get_vlan_offset(eth_hdr, &proto);
77609150784SDeclan Doherty 		l3hash = 0;
77709150784SDeclan Doherty 		l4hash = 0;
7783eb6bdd8SBruce Richardson 
7790c9da755SDavid Marchand 		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
780a7c528e5SOlivier Matz 			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
7813eb6bdd8SBruce Richardson 					((char *)(eth_hdr + 1) + vlan_offset);
7823eb6bdd8SBruce Richardson 			size_t ip_hdr_offset;
7833eb6bdd8SBruce Richardson 
7843eb6bdd8SBruce Richardson 			l3hash = ipv4_hash(ipv4_hdr);
7853eb6bdd8SBruce Richardson 
7861d63c314SAndriy Berestovskyy 			/* there is no L4 header in fragmented packet */
78709150784SDeclan Doherty 			if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
78809150784SDeclan Doherty 								== 0)) {
78909150784SDeclan Doherty 				ip_hdr_offset = (ipv4_hdr->version_ihl
79024ac604eSOlivier Matz 					& RTE_IPV4_HDR_IHL_MASK) *
79124ac604eSOlivier Matz 					RTE_IPV4_IHL_MULTIPLIER;
7923eb6bdd8SBruce Richardson 
7933eb6bdd8SBruce Richardson 				if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
794f41b5156SOlivier Matz 					tcp_hdr = (struct rte_tcp_hdr *)
79509150784SDeclan Doherty 						((char *)ipv4_hdr +
7963eb6bdd8SBruce Richardson 							ip_hdr_offset);
79772615806SRadu Nicolau 					if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
79881683288SJun Qiu 							<= pkt_end)
7993eb6bdd8SBruce Richardson 						l4hash = HASH_L4_PORTS(tcp_hdr);
80009150784SDeclan Doherty 				} else if (ipv4_hdr->next_proto_id ==
80109150784SDeclan Doherty 								IPPROTO_UDP) {
802e73e3547SOlivier Matz 					udp_hdr = (struct rte_udp_hdr *)
80309150784SDeclan Doherty 						((char *)ipv4_hdr +
8043eb6bdd8SBruce Richardson 							ip_hdr_offset);
80572615806SRadu Nicolau 					if ((size_t)udp_hdr + sizeof(*udp_hdr)
80672615806SRadu Nicolau 							< pkt_end)
8073eb6bdd8SBruce Richardson 						l4hash = HASH_L4_PORTS(udp_hdr);
8083eb6bdd8SBruce Richardson 				}
8091d63c314SAndriy Berestovskyy 			}
8100c9da755SDavid Marchand 		} else if  (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
811a7c528e5SOlivier Matz 			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
8123eb6bdd8SBruce Richardson 					((char *)(eth_hdr + 1) + vlan_offset);
8133eb6bdd8SBruce Richardson 			l3hash = ipv6_hash(ipv6_hdr);
8143eb6bdd8SBruce Richardson 
8153eb6bdd8SBruce Richardson 			if (ipv6_hdr->proto == IPPROTO_TCP) {
816f41b5156SOlivier Matz 				tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
8173eb6bdd8SBruce Richardson 				l4hash = HASH_L4_PORTS(tcp_hdr);
8183eb6bdd8SBruce Richardson 			} else if (ipv6_hdr->proto == IPPROTO_UDP) {
819e73e3547SOlivier Matz 				udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
8203eb6bdd8SBruce Richardson 				l4hash = HASH_L4_PORTS(udp_hdr);
8213eb6bdd8SBruce Richardson 			}
8223eb6bdd8SBruce Richardson 		}
8233eb6bdd8SBruce Richardson 
8243eb6bdd8SBruce Richardson 		hash = l3hash ^ l4hash;
8253eb6bdd8SBruce Richardson 		hash ^= hash >> 16;
8263eb6bdd8SBruce Richardson 		hash ^= hash >> 8;
8273eb6bdd8SBruce Richardson 
82815e34522SLong Wu 		members[i] = hash % member_count;
82909150784SDeclan Doherty 	}
8303eb6bdd8SBruce Richardson }
8313eb6bdd8SBruce Richardson 
83215e34522SLong Wu struct bwg_member {
8333eb6bdd8SBruce Richardson 	uint64_t bwg_left_int;
8343eb6bdd8SBruce Richardson 	uint64_t bwg_left_remainder;
83515e34522SLong Wu 	uint16_t member;
8363eb6bdd8SBruce Richardson };
8373eb6bdd8SBruce Richardson 
8383eb6bdd8SBruce Richardson void
83915e34522SLong Wu bond_tlb_activate_member(struct bond_dev_private *internals) {
8403eb6bdd8SBruce Richardson 	int i;
8413eb6bdd8SBruce Richardson 
84215e34522SLong Wu 	for (i = 0; i < internals->active_member_count; i++)
84315e34522SLong Wu 		tlb_last_obytets[internals->active_members[i]] = 0;
8443eb6bdd8SBruce Richardson }
8453eb6bdd8SBruce Richardson 
8463eb6bdd8SBruce Richardson static int
8473eb6bdd8SBruce Richardson bandwidth_cmp(const void *a, const void *b)
8483eb6bdd8SBruce Richardson {
84915e34522SLong Wu 	const struct bwg_member *bwg_a = a;
85015e34522SLong Wu 	const struct bwg_member *bwg_b = b;
8513eb6bdd8SBruce Richardson 	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
8523eb6bdd8SBruce Richardson 	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
8533eb6bdd8SBruce Richardson 			(int64_t)bwg_a->bwg_left_remainder;
8543eb6bdd8SBruce Richardson 	if (diff > 0)
8553eb6bdd8SBruce Richardson 		return 1;
8563eb6bdd8SBruce Richardson 	else if (diff < 0)
8573eb6bdd8SBruce Richardson 		return -1;
8583eb6bdd8SBruce Richardson 	else if (diff2 > 0)
8593eb6bdd8SBruce Richardson 		return 1;
8603eb6bdd8SBruce Richardson 	else if (diff2 < 0)
8613eb6bdd8SBruce Richardson 		return -1;
8623eb6bdd8SBruce Richardson 	else
8633eb6bdd8SBruce Richardson 		return 0;
8643eb6bdd8SBruce Richardson }
8653eb6bdd8SBruce Richardson 
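/*
 * Estimate how much of a member's link bandwidth is left unused over the
 * current reorder window: the capacity derived from the negotiated link
 * speed is compared with the bytes actually transmitted ("load"), and the
 * quotient and remainder are stored so members can later be sorted by
 * spare capacity in bandwidth_cmp().
 */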
8663eb6bdd8SBruce Richardson static void
867f8244c63SZhiyong Yang bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
86815e34522SLong Wu 		struct bwg_member *bwg_member)
8693eb6bdd8SBruce Richardson {
8703eb6bdd8SBruce Richardson 	struct rte_eth_link link_status;
871fc1134c7SIgor Romanov 	int ret;
8723eb6bdd8SBruce Richardson 
873fc1134c7SIgor Romanov 	ret = rte_eth_link_get_nowait(port_id, &link_status);
874fc1134c7SIgor Romanov 	if (ret < 0) {
87515e34522SLong Wu 		RTE_BOND_LOG(ERR, "Member (port %u) link get failed: %s",
876fc1134c7SIgor Romanov 			     port_id, rte_strerror(-ret));
877fc1134c7SIgor Romanov 		return;
878fc1134c7SIgor Romanov 	}
8793eb6bdd8SBruce Richardson 	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
8803eb6bdd8SBruce Richardson 	if (link_bwg == 0)
8813eb6bdd8SBruce Richardson 		return;
8823eb6bdd8SBruce Richardson 	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
88315e34522SLong Wu 	bwg_member->bwg_left_int = (link_bwg - 1000 * load) / link_bwg;
88415e34522SLong Wu 	bwg_member->bwg_left_remainder = (link_bwg - 1000 * load) % link_bwg;
8853eb6bdd8SBruce Richardson }
8863eb6bdd8SBruce Richardson 
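/*
 * Periodic alarm callback for TLB (mode 5): every REORDER_PERIOD_MS it
 * measures the bytes sent on each active member since the last statistics
 * snapshot, computes the remaining bandwidth with bandwidth_left() and
 * re-sorts tlb_members_order so transmission favours the least loaded
 * members.
 */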
8873eb6bdd8SBruce Richardson static void
88815e34522SLong Wu bond_ethdev_update_tlb_member_cb(void *arg)
8893eb6bdd8SBruce Richardson {
8903eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = arg;
89115e34522SLong Wu 	struct rte_eth_stats member_stats;
89215e34522SLong Wu 	struct bwg_member bwg_array[RTE_MAX_ETHPORTS];
89315e34522SLong Wu 	uint16_t member_count;
8943eb6bdd8SBruce Richardson 	uint64_t tx_bytes;
8953eb6bdd8SBruce Richardson 
8963eb6bdd8SBruce Richardson 	uint8_t update_stats = 0;
89715e34522SLong Wu 	uint16_t member_id;
8981d6cab8aSDavid Marchand 	uint16_t i;
8993eb6bdd8SBruce Richardson 
90015e34522SLong Wu 	internals->member_update_idx++;
9013eb6bdd8SBruce Richardson 
9023eb6bdd8SBruce Richardson 
90315e34522SLong Wu 	if (internals->member_update_idx >= REORDER_PERIOD_MS)
9043eb6bdd8SBruce Richardson 		update_stats = 1;
9053eb6bdd8SBruce Richardson 
90615e34522SLong Wu 	for (i = 0; i < internals->active_member_count; i++) {
90715e34522SLong Wu 		member_id = internals->active_members[i];
90815e34522SLong Wu 		rte_eth_stats_get(member_id, &member_stats);
90915e34522SLong Wu 		tx_bytes = member_stats.obytes - tlb_last_obytets[member_id];
91015e34522SLong Wu 		bandwidth_left(member_id, tx_bytes,
91115e34522SLong Wu 				internals->member_update_idx, &bwg_array[i]);
91215e34522SLong Wu 		bwg_array[i].member = member_id;
9133eb6bdd8SBruce Richardson 
9143eb6bdd8SBruce Richardson 		if (update_stats) {
91515e34522SLong Wu 			tlb_last_obytets[member_id] = member_stats.obytes;
9163eb6bdd8SBruce Richardson 		}
9173eb6bdd8SBruce Richardson 	}
9183eb6bdd8SBruce Richardson 
9193eb6bdd8SBruce Richardson 	if (update_stats == 1)
92015e34522SLong Wu 		internals->member_update_idx = 0;
9213eb6bdd8SBruce Richardson 
92215e34522SLong Wu 	member_count = i;
92315e34522SLong Wu 	qsort(bwg_array, member_count, sizeof(bwg_array[0]), bandwidth_cmp);
92415e34522SLong Wu 	for (i = 0; i < member_count; i++)
92515e34522SLong Wu 		internals->tlb_members_order[i] = bwg_array[i].member;
9263eb6bdd8SBruce Richardson 
92715e34522SLong Wu 	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_member_cb,
9283eb6bdd8SBruce Richardson 			(struct bond_dev_private *)internals);
9293eb6bdd8SBruce Richardson }
9303eb6bdd8SBruce Richardson 
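/*
 * TLB (mode 5) Tx burst: members are tried in the spare-bandwidth order
 * maintained by bond_ethdev_update_tlb_member_cb().  Packets whose source
 * MAC is the primary port's address are rewritten to carry the transmitting
 * member's MAC address before being sent.
 */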
9313eb6bdd8SBruce Richardson static uint16_t
9323eb6bdd8SBruce Richardson bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
9333eb6bdd8SBruce Richardson {
9343eb6bdd8SBruce Richardson 	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
9353eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = bd_tx_q->dev_private;
9363eb6bdd8SBruce Richardson 
9373eb6bdd8SBruce Richardson 	struct rte_eth_dev *primary_port =
9383eb6bdd8SBruce Richardson 			&rte_eth_devices[internals->primary_port];
939c622735dSChengwen Feng 	uint16_t num_tx_total = 0, num_tx_prep;
940f8244c63SZhiyong Yang 	uint16_t i, j;
9413eb6bdd8SBruce Richardson 
94215e34522SLong Wu 	uint16_t num_of_members = internals->active_member_count;
94315e34522SLong Wu 	uint16_t members[RTE_MAX_ETHPORTS];
9443eb6bdd8SBruce Richardson 
9456d13ea8eSOlivier Matz 	struct rte_ether_hdr *ether_hdr;
94615e34522SLong Wu 	struct rte_ether_addr primary_member_addr;
94715e34522SLong Wu 	struct rte_ether_addr active_member_addr;
9483eb6bdd8SBruce Richardson 
94915e34522SLong Wu 	if (num_of_members < 1)
9503eb6bdd8SBruce Richardson 		return num_tx_total;
9513eb6bdd8SBruce Richardson 
95215e34522SLong Wu 	memcpy(members, internals->tlb_members_order,
95315e34522SLong Wu 				sizeof(internals->tlb_members_order[0]) * num_of_members);
9543eb6bdd8SBruce Richardson 
9553eb6bdd8SBruce Richardson 
95615e34522SLong Wu 	rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_member_addr);
9573eb6bdd8SBruce Richardson 
9583eb6bdd8SBruce Richardson 	if (nb_pkts > 3) {
9593eb6bdd8SBruce Richardson 		for (i = 0; i < 3; i++)
9603eb6bdd8SBruce Richardson 			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
9613eb6bdd8SBruce Richardson 	}
9623eb6bdd8SBruce Richardson 
96315e34522SLong Wu 	for (i = 0; i < num_of_members; i++) {
96415e34522SLong Wu 		rte_eth_macaddr_get(members[i], &active_member_addr);
9653eb6bdd8SBruce Richardson 		for (j = num_tx_total; j < nb_pkts; j++) {
9663eb6bdd8SBruce Richardson 			if (j + 3 < nb_pkts)
9673eb6bdd8SBruce Richardson 				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
9683eb6bdd8SBruce Richardson 
9696d13ea8eSOlivier Matz 			ether_hdr = rte_pktmbuf_mtod(bufs[j],
9706d13ea8eSOlivier Matz 						struct rte_ether_hdr *);
97104d43857SDmitry Kozlyuk 			if (rte_is_same_ether_addr(&ether_hdr->src_addr,
97215e34522SLong Wu 							&primary_member_addr))
97315e34522SLong Wu 				rte_ether_addr_copy(&active_member_addr,
97404d43857SDmitry Kozlyuk 						&ether_hdr->src_addr);
9753eb6bdd8SBruce Richardson #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
97615e34522SLong Wu 					mode6_debug("TX IPv4:", ether_hdr, members[i],
97715e34522SLong Wu 						&burst_number_TX);
9783eb6bdd8SBruce Richardson #endif
9793eb6bdd8SBruce Richardson 		}
9803eb6bdd8SBruce Richardson 
98115e34522SLong Wu 		num_tx_prep = rte_eth_tx_prepare(members[i], bd_tx_q->queue_id,
9823eb6bdd8SBruce Richardson 				bufs + num_tx_total, nb_pkts - num_tx_total);
98315e34522SLong Wu 		num_tx_total += rte_eth_tx_burst(members[i], bd_tx_q->queue_id,
984c622735dSChengwen Feng 				bufs + num_tx_total, num_tx_prep);
9853eb6bdd8SBruce Richardson 
9863eb6bdd8SBruce Richardson 		if (num_tx_total == nb_pkts)
9873eb6bdd8SBruce Richardson 			break;
9883eb6bdd8SBruce Richardson 	}
9893eb6bdd8SBruce Richardson 
9903eb6bdd8SBruce Richardson 	return num_tx_total;
9913eb6bdd8SBruce Richardson }
9923eb6bdd8SBruce Richardson 
9933eb6bdd8SBruce Richardson void
9943eb6bdd8SBruce Richardson bond_tlb_disable(struct bond_dev_private *internals)
9953eb6bdd8SBruce Richardson {
99615e34522SLong Wu 	rte_eal_alarm_cancel(bond_ethdev_update_tlb_member_cb, internals);
9973eb6bdd8SBruce Richardson }
9983eb6bdd8SBruce Richardson 
9993eb6bdd8SBruce Richardson void
10003eb6bdd8SBruce Richardson bond_tlb_enable(struct bond_dev_private *internals)
10013eb6bdd8SBruce Richardson {
100215e34522SLong Wu 	bond_ethdev_update_tlb_member_cb(internals);
10033eb6bdd8SBruce Richardson }
10043eb6bdd8SBruce Richardson 
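/*
 * ALB (mode 6) Tx burst: ARP packets are assigned to members by the mode 6
 * client table, with their source MAC rewritten to the chosen member's
 * address, while all other packets fall through to the TLB policy.  When the
 * client table has changed, ARP update packets are generated and sent on the
 * proper members.
 */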
10053eb6bdd8SBruce Richardson static uint16_t
10063eb6bdd8SBruce Richardson bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
10073eb6bdd8SBruce Richardson {
10083eb6bdd8SBruce Richardson 	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
10093eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = bd_tx_q->dev_private;
10103eb6bdd8SBruce Richardson 
10116d13ea8eSOlivier Matz 	struct rte_ether_hdr *eth_h;
10123eb6bdd8SBruce Richardson 	uint16_t ether_type, offset;
10133eb6bdd8SBruce Richardson 
10143eb6bdd8SBruce Richardson 	struct client_data *client_info;
10153eb6bdd8SBruce Richardson 
10163eb6bdd8SBruce Richardson 	/*
101715e34522SLong Wu 	 * We create transmit buffers for every member and one additional to send
10183eb6bdd8SBruce Richardson 	 * through TLB. In the worst case every packet will be sent on one port.
10193eb6bdd8SBruce Richardson 	 */
102015e34522SLong Wu 	struct rte_mbuf *member_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
102115e34522SLong Wu 	uint16_t member_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
10223eb6bdd8SBruce Richardson 
10233eb6bdd8SBruce Richardson 	/*
102498a7ea33SJerin Jacob 	 * We create separate transmit buffers for update packets as they won't
102598a7ea33SJerin Jacob 	 * be counted in num_tx_total.
10263eb6bdd8SBruce Richardson 	 */
10273eb6bdd8SBruce Richardson 	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
10283eb6bdd8SBruce Richardson 	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
10293eb6bdd8SBruce Richardson 
10303eb6bdd8SBruce Richardson 	struct rte_mbuf *upd_pkt;
10313eb6bdd8SBruce Richardson 	size_t pkt_size;
10323eb6bdd8SBruce Richardson 
10333eb6bdd8SBruce Richardson 	uint16_t num_send, num_not_send = 0;
10343eb6bdd8SBruce Richardson 	uint16_t num_tx_total = 0;
103515e34522SLong Wu 	uint16_t member_idx;
10363eb6bdd8SBruce Richardson 
10373eb6bdd8SBruce Richardson 	int i, j;
10383eb6bdd8SBruce Richardson 
10393eb6bdd8SBruce Richardson 	/* Search tx buffer for ARP packets and forward them to alb */
10403eb6bdd8SBruce Richardson 	for (i = 0; i < nb_pkts; i++) {
10416d13ea8eSOlivier Matz 		eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
10423eb6bdd8SBruce Richardson 		ether_type = eth_h->ether_type;
10433eb6bdd8SBruce Richardson 		offset = get_vlan_offset(eth_h, &ether_type);
10443eb6bdd8SBruce Richardson 
104535b2d13fSOlivier Matz 		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
104615e34522SLong Wu 			member_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
10473eb6bdd8SBruce Richardson 
10483eb6bdd8SBruce Richardson 			/* Change src mac in eth header */
104915e34522SLong Wu 			rte_eth_macaddr_get(member_idx, &eth_h->src_addr);
10503eb6bdd8SBruce Richardson 
105115e34522SLong Wu 			/* Add packet to member tx buffer */
105215e34522SLong Wu 			member_bufs[member_idx][member_bufs_pkts[member_idx]] = bufs[i];
105315e34522SLong Wu 			member_bufs_pkts[member_idx]++;
10543eb6bdd8SBruce Richardson 		} else {
10553eb6bdd8SBruce Richardson 			/* If packet is not ARP, send it with TLB policy */
105615e34522SLong Wu 			member_bufs[RTE_MAX_ETHPORTS][member_bufs_pkts[RTE_MAX_ETHPORTS]] =
10573eb6bdd8SBruce Richardson 					bufs[i];
105815e34522SLong Wu 			member_bufs_pkts[RTE_MAX_ETHPORTS]++;
10593eb6bdd8SBruce Richardson 		}
10603eb6bdd8SBruce Richardson 	}
10613eb6bdd8SBruce Richardson 
10623eb6bdd8SBruce Richardson 	/* Update connected client ARP tables */
10633eb6bdd8SBruce Richardson 	if (internals->mode6.ntt) {
10643eb6bdd8SBruce Richardson 		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
10653eb6bdd8SBruce Richardson 			client_info = &internals->mode6.client_table[i];
10663eb6bdd8SBruce Richardson 
10673eb6bdd8SBruce Richardson 			if (client_info->in_use) {
106815e34522SLong Wu 				/* Allocate new packet to send ARP update on current member */
10693eb6bdd8SBruce Richardson 				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
10703eb6bdd8SBruce Richardson 				if (upd_pkt == NULL) {
1071d7f4562aSStephen Hemminger 					RTE_BOND_LOG(ERR,
1072d7f4562aSStephen Hemminger 						     "Failed to allocate ARP packet from pool");
10733eb6bdd8SBruce Richardson 					continue;
10743eb6bdd8SBruce Richardson 				}
10756d13ea8eSOlivier Matz 				pkt_size = sizeof(struct rte_ether_hdr) +
1076f2745bfeSOlivier Matz 					sizeof(struct rte_arp_hdr) +
1077f2745bfeSOlivier Matz 					client_info->vlan_count *
10786d13ea8eSOlivier Matz 					sizeof(struct rte_vlan_hdr);
10793eb6bdd8SBruce Richardson 				upd_pkt->data_len = pkt_size;
10803eb6bdd8SBruce Richardson 				upd_pkt->pkt_len = pkt_size;
10813eb6bdd8SBruce Richardson 
108215e34522SLong Wu 				member_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
10833eb6bdd8SBruce Richardson 						internals);
10843eb6bdd8SBruce Richardson 
10853eb6bdd8SBruce Richardson 				/* Add packet to update tx buffer */
108615e34522SLong Wu 				update_bufs[member_idx][update_bufs_pkts[member_idx]] = upd_pkt;
108715e34522SLong Wu 				update_bufs_pkts[member_idx]++;
10883eb6bdd8SBruce Richardson 			}
10893eb6bdd8SBruce Richardson 		}
10903eb6bdd8SBruce Richardson 		internals->mode6.ntt = 0;
10913eb6bdd8SBruce Richardson 	}
10923eb6bdd8SBruce Richardson 
109315e34522SLong Wu 	/* Send ARP packets on proper members */
10943eb6bdd8SBruce Richardson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
109515e34522SLong Wu 		if (member_bufs_pkts[i] > 0) {
1096c622735dSChengwen Feng 			num_send = rte_eth_tx_prepare(i, bd_tx_q->queue_id,
109715e34522SLong Wu 					member_bufs[i], member_bufs_pkts[i]);
1098c622735dSChengwen Feng 			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
109915e34522SLong Wu 					member_bufs[i], num_send);
110015e34522SLong Wu 			for (j = 0; j < member_bufs_pkts[i] - num_send; j++) {
11013eb6bdd8SBruce Richardson 				bufs[nb_pkts - 1 - num_not_send - j] =
110215e34522SLong Wu 						member_bufs[i][nb_pkts - 1 - j];
11033eb6bdd8SBruce Richardson 			}
11043eb6bdd8SBruce Richardson 
11053eb6bdd8SBruce Richardson 			num_tx_total += num_send;
110615e34522SLong Wu 			num_not_send += member_bufs_pkts[i] - num_send;
11073eb6bdd8SBruce Richardson 
11083eb6bdd8SBruce Richardson #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
11093eb6bdd8SBruce Richardson 	/* Print TX stats including update packets */
111015e34522SLong Wu 			for (j = 0; j < member_bufs_pkts[i]; j++) {
111115e34522SLong Wu 				eth_h = rte_pktmbuf_mtod(member_bufs[i][j],
11126d13ea8eSOlivier Matz 							struct rte_ether_hdr *);
111315e34522SLong Wu 				mode6_debug("TX ARP:", eth_h, i, &burst_number_TX);
11143eb6bdd8SBruce Richardson 			}
11153eb6bdd8SBruce Richardson #endif
11163eb6bdd8SBruce Richardson 		}
11173eb6bdd8SBruce Richardson 	}
11183eb6bdd8SBruce Richardson 
111915e34522SLong Wu 	/* Send update packets on proper members */
11203eb6bdd8SBruce Richardson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
11213eb6bdd8SBruce Richardson 		if (update_bufs_pkts[i] > 0) {
1122c622735dSChengwen Feng 			num_send = rte_eth_tx_prepare(i, bd_tx_q->queue_id,
1123c622735dSChengwen Feng 					update_bufs[i], update_bufs_pkts[i]);
11243eb6bdd8SBruce Richardson 			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
1125c622735dSChengwen Feng 					num_send);
11263eb6bdd8SBruce Richardson 			for (j = num_send; j < update_bufs_pkts[i]; j++) {
11273eb6bdd8SBruce Richardson 				rte_pktmbuf_free(update_bufs[i][j]);
11283eb6bdd8SBruce Richardson 			}
11293eb6bdd8SBruce Richardson #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
11303eb6bdd8SBruce Richardson 			for (j = 0; j < update_bufs_pkts[i]; j++) {
11316d13ea8eSOlivier Matz 				eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
11326d13ea8eSOlivier Matz 							struct rte_ether_hdr *);
113315e34522SLong Wu 				mode6_debug("TX ARPupd:", eth_h, i, &burst_number_TX);
11343eb6bdd8SBruce Richardson 			}
11353eb6bdd8SBruce Richardson #endif
11363eb6bdd8SBruce Richardson 		}
11373eb6bdd8SBruce Richardson 	}
11383eb6bdd8SBruce Richardson 
11393eb6bdd8SBruce Richardson 	/* Send non-ARP packets using tlb policy */
114015e34522SLong Wu 	if (member_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
11413eb6bdd8SBruce Richardson 		num_send = bond_ethdev_tx_burst_tlb(queue,
114215e34522SLong Wu 				member_bufs[RTE_MAX_ETHPORTS],
114315e34522SLong Wu 				member_bufs_pkts[RTE_MAX_ETHPORTS]);
11443eb6bdd8SBruce Richardson 
114515e34522SLong Wu 		for (j = 0; j < member_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
11463eb6bdd8SBruce Richardson 			bufs[nb_pkts - 1 - num_not_send - j] =
114715e34522SLong Wu 					member_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
11483eb6bdd8SBruce Richardson 		}
11493eb6bdd8SBruce Richardson 
11503eb6bdd8SBruce Richardson 		num_tx_total += num_send;
11513eb6bdd8SBruce Richardson 	}
11523eb6bdd8SBruce Richardson 
11533eb6bdd8SBruce Richardson 	return num_tx_total;
11543eb6bdd8SBruce Richardson }
11553eb6bdd8SBruce Richardson 
11568ff0003cSDavid Marchand static inline uint16_t
11578ff0003cSDavid Marchand tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
115815e34522SLong Wu 		 uint16_t *member_port_ids, uint16_t member_count)
11593eb6bdd8SBruce Richardson {
116009150784SDeclan Doherty 	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
116109150784SDeclan Doherty 	struct bond_dev_private *internals = bd_tx_q->dev_private;
11623eb6bdd8SBruce Richardson 
116315e34522SLong Wu 	/* Array to sort mbufs for transmission on each member into */
116415e34522SLong Wu 	struct rte_mbuf *member_bufs[RTE_MAX_ETHPORTS][nb_bufs];
116515e34522SLong Wu 	/* Number of mbufs for transmission on each member */
116615e34522SLong Wu 	uint16_t member_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
116715e34522SLong Wu 	/* Mapping array generated by hash function to map mbufs to members */
116815e34522SLong Wu 	uint16_t bufs_member_port_idxs[nb_bufs];
11693eb6bdd8SBruce Richardson 
117015e34522SLong Wu 	uint16_t member_tx_count;
117109150784SDeclan Doherty 	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
11723eb6bdd8SBruce Richardson 
11736b2a47deSJia Yu 	uint16_t i;
11743eb6bdd8SBruce Richardson 
117509150784SDeclan Doherty 	/*
117715e34522SLong Wu 	 * Populate the member mbuf arrays with the packets which are to be sent on
117815e34522SLong Wu 	 * each member, selecting the output member using a hash based on the xmit policy
117809150784SDeclan Doherty 	 */
117915e34522SLong Wu 	internals->burst_xmit_hash(bufs, nb_bufs, member_count,
118015e34522SLong Wu 			bufs_member_port_idxs);
11813eb6bdd8SBruce Richardson 
118209150784SDeclan Doherty 	for (i = 0; i < nb_bufs; i++) {
118315e34522SLong Wu 		/* Populate member mbuf arrays with mbufs for that member. */
118415e34522SLong Wu 		uint16_t member_idx = bufs_member_port_idxs[i];
118509150784SDeclan Doherty 
118615e34522SLong Wu 		member_bufs[member_idx][member_nb_bufs[member_idx]++] = bufs[i];
11873eb6bdd8SBruce Richardson 	}
11883eb6bdd8SBruce Richardson 
118915e34522SLong Wu 	/* Send packet burst on each member device */
119015e34522SLong Wu 	for (i = 0; i < member_count; i++) {
119115e34522SLong Wu 		if (member_nb_bufs[i] == 0)
119209150784SDeclan Doherty 			continue;
11933eb6bdd8SBruce Richardson 
119415e34522SLong Wu 		member_tx_count = rte_eth_tx_prepare(member_port_ids[i],
119515e34522SLong Wu 				bd_tx_q->queue_id, member_bufs[i],
119615e34522SLong Wu 				member_nb_bufs[i]);
119715e34522SLong Wu 		member_tx_count = rte_eth_tx_burst(member_port_ids[i],
119815e34522SLong Wu 				bd_tx_q->queue_id, member_bufs[i],
119915e34522SLong Wu 				member_tx_count);
12003eb6bdd8SBruce Richardson 
120115e34522SLong Wu 		total_tx_count += member_tx_count;
120209150784SDeclan Doherty 
120309150784SDeclan Doherty 		/* If tx burst fails move packets to end of bufs */
120415e34522SLong Wu 		if (unlikely(member_tx_count < member_nb_bufs[i])) {
120515e34522SLong Wu 			int member_tx_fail_count = member_nb_bufs[i] -
120615e34522SLong Wu 					member_tx_count;
120715e34522SLong Wu 			total_tx_fail_count += member_tx_fail_count;
12086b2a47deSJia Yu 			memcpy(&bufs[nb_bufs - total_tx_fail_count],
120915e34522SLong Wu 			       &member_bufs[i][member_tx_count],
121015e34522SLong Wu 			       member_tx_fail_count * sizeof(bufs[0]));
121109150784SDeclan Doherty 		}
121209150784SDeclan Doherty 	}
121309150784SDeclan Doherty 
121409150784SDeclan Doherty 	return total_tx_count;
12153eb6bdd8SBruce Richardson }
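/*
 * Illustrative sketch (not part of this file): tx_burst_balance() above leaves
 * member selection entirely to internals->burst_xmit_hash, which writes one
 * member index per mbuf.  The bonding library provides its real layer-2,
 * layer-2+3 and layer-3+4 policies elsewhere; the hypothetical callback below
 * only shows the expected shape, crudely hashing on the last byte of the
 * destination MAC.
 */
#ifdef BOND_PMD_DOC_EXAMPLE
static void
example_xmit_policy(struct rte_mbuf **bufs, uint16_t nb_pkts,
		uint16_t member_count, uint16_t *member_idxs)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_ether_hdr *eth_h =
			rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);

		member_idxs[i] =
			eth_h->dst_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] %
			member_count;
	}
}
#endif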
12163eb6bdd8SBruce Richardson 
12173eb6bdd8SBruce Richardson static uint16_t
12188ff0003cSDavid Marchand bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
121909150784SDeclan Doherty 		uint16_t nb_bufs)
12203eb6bdd8SBruce Richardson {
122109150784SDeclan Doherty 	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
122209150784SDeclan Doherty 	struct bond_dev_private *internals = bd_tx_q->dev_private;
12233eb6bdd8SBruce Richardson 
122415e34522SLong Wu 	uint16_t member_port_ids[RTE_MAX_ETHPORTS];
122515e34522SLong Wu 	uint16_t member_count;
12263eb6bdd8SBruce Richardson 
12278ff0003cSDavid Marchand 	if (unlikely(nb_bufs == 0))
12288ff0003cSDavid Marchand 		return 0;
12298ff0003cSDavid Marchand 
123015e34522SLong Wu 	/* Copy member list to protect against member up/down changes during tx
12318ff0003cSDavid Marchand 	 * bursting
12328ff0003cSDavid Marchand 	 */
123315e34522SLong Wu 	member_count = internals->active_member_count;
123415e34522SLong Wu 	if (unlikely(member_count < 1))
12358ff0003cSDavid Marchand 		return 0;
12368ff0003cSDavid Marchand 
123715e34522SLong Wu 	memcpy(member_port_ids, internals->active_members,
123815e34522SLong Wu 			sizeof(member_port_ids[0]) * member_count);
123915e34522SLong Wu 	return tx_burst_balance(queue, bufs, nb_bufs, member_port_ids,
124015e34522SLong Wu 				member_count);
12418ff0003cSDavid Marchand }
12428ff0003cSDavid Marchand 
12438ff0003cSDavid Marchand static inline uint16_t
12448ff0003cSDavid Marchand tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
12458ff0003cSDavid Marchand 		bool dedicated_txq)
12468ff0003cSDavid Marchand {
12478ff0003cSDavid Marchand 	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
12488ff0003cSDavid Marchand 	struct bond_dev_private *internals = bd_tx_q->dev_private;
12498ff0003cSDavid Marchand 
125015e34522SLong Wu 	uint16_t member_port_ids[RTE_MAX_ETHPORTS];
125115e34522SLong Wu 	uint16_t member_count;
12528ff0003cSDavid Marchand 
125315e34522SLong Wu 	uint16_t dist_member_port_ids[RTE_MAX_ETHPORTS];
125415e34522SLong Wu 	uint16_t dist_member_count;
12553eb6bdd8SBruce Richardson 
125615e34522SLong Wu 	uint16_t member_tx_count;
12573eb6bdd8SBruce Richardson 
12586b2a47deSJia Yu 	uint16_t i;
125909150784SDeclan Doherty 
126015e34522SLong Wu 	/* Copy member list to protect against member up/down changes during tx
12613eb6bdd8SBruce Richardson 	 * bursting */
126215e34522SLong Wu 	member_count = internals->active_member_count;
126315e34522SLong Wu 	if (unlikely(member_count < 1))
126409150784SDeclan Doherty 		return 0;
12653eb6bdd8SBruce Richardson 
126615e34522SLong Wu 	memcpy(member_port_ids, internals->active_members,
126715e34522SLong Wu 			sizeof(member_port_ids[0]) * member_count);
12683eb6bdd8SBruce Richardson 
12698ff0003cSDavid Marchand 	if (dedicated_txq)
12708ff0003cSDavid Marchand 		goto skip_tx_ring;
12718ff0003cSDavid Marchand 
1272af91947cSChas Williams 	/* Check for LACP control packets and send if available */
127315e34522SLong Wu 	for (i = 0; i < member_count; i++) {
127415e34522SLong Wu 		struct port *port = &bond_mode_8023ad_ports[member_port_ids[i]];
1275af91947cSChas Williams 		struct rte_mbuf *ctrl_pkt = NULL;
1276af91947cSChas Williams 
1277af91947cSChas Williams 		if (likely(rte_ring_empty(port->tx_ring)))
1278af91947cSChas Williams 			continue;
1279af91947cSChas Williams 
1280af91947cSChas Williams 		if (rte_ring_dequeue(port->tx_ring,
1281af91947cSChas Williams 				     (void **)&ctrl_pkt) != -ENOENT) {
128215e34522SLong Wu 			member_tx_count = rte_eth_tx_prepare(member_port_ids[i],
1283af91947cSChas Williams 					bd_tx_q->queue_id, &ctrl_pkt, 1);
128415e34522SLong Wu 			member_tx_count = rte_eth_tx_burst(member_port_ids[i],
128515e34522SLong Wu 					bd_tx_q->queue_id, &ctrl_pkt, member_tx_count);
1286af91947cSChas Williams 			/*
1287af91947cSChas Williams 			 * re-enqueue LAG control plane packets to buffering
1288af91947cSChas Williams 			 * ring if transmission fails so the packet isn't lost.
1289af91947cSChas Williams 			 */
129015e34522SLong Wu 			if (member_tx_count != 1)
1291af91947cSChas Williams 				rte_ring_enqueue(port->tx_ring,	ctrl_pkt);
1292af91947cSChas Williams 		}
1293af91947cSChas Williams 	}
1294af91947cSChas Williams 
12958ff0003cSDavid Marchand skip_tx_ring:
1296af91947cSChas Williams 	if (unlikely(nb_bufs == 0))
1297af91947cSChas Williams 		return 0;
1298af91947cSChas Williams 
129915e34522SLong Wu 	dist_member_count = 0;
130015e34522SLong Wu 	for (i = 0; i < member_count; i++) {
130115e34522SLong Wu 		struct port *port = &bond_mode_8023ad_ports[member_port_ids[i]];
13023eb6bdd8SBruce Richardson 
13033eb6bdd8SBruce Richardson 		if (ACTOR_STATE(port, DISTRIBUTING))
130415e34522SLong Wu 			dist_member_port_ids[dist_member_count++] =
130515e34522SLong Wu 					member_port_ids[i];
13063eb6bdd8SBruce Richardson 	}
13073eb6bdd8SBruce Richardson 
130815e34522SLong Wu 	if (unlikely(dist_member_count < 1))
13098ff0003cSDavid Marchand 		return 0;
13103eb6bdd8SBruce Richardson 
131115e34522SLong Wu 	return tx_burst_balance(queue, bufs, nb_bufs, dist_member_port_ids,
131215e34522SLong Wu 				dist_member_count);
13133eb6bdd8SBruce Richardson }
131409150784SDeclan Doherty 
13158ff0003cSDavid Marchand static uint16_t
13168ff0003cSDavid Marchand bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
13178ff0003cSDavid Marchand 		uint16_t nb_bufs)
13188ff0003cSDavid Marchand {
13198ff0003cSDavid Marchand 	return tx_burst_8023ad(queue, bufs, nb_bufs, false);
13203eb6bdd8SBruce Richardson }
13213eb6bdd8SBruce Richardson 
13228ff0003cSDavid Marchand static uint16_t
13238ff0003cSDavid Marchand bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
13248ff0003cSDavid Marchand 		uint16_t nb_bufs)
13258ff0003cSDavid Marchand {
13268ff0003cSDavid Marchand 	return tx_burst_8023ad(queue, bufs, nb_bufs, true);
13273eb6bdd8SBruce Richardson }
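/*
 * Illustrative sketch (not part of this file): the "fast queue" variant above
 * can skip draining port->tx_ring because dedicated-queue mode steers LACP
 * slow-protocol frames (ethertype 0x8809) to a separate hardware queue with
 * rte_flow.  The real rules are installed by bond_ethdev_8023ad_flow_set()
 * elsewhere in this file; the snippet below is only an approximation of such
 * a rule and assumes <rte_flow.h> plus the .hdr-style rte_flow_item_eth
 * initializers of recent DPDK releases.
 */
#ifdef BOND_PMD_DOC_EXAMPLE
static struct rte_flow *
example_lacp_redirect_flow(uint16_t member_port_id, uint16_t rx_qid)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW),
	};
	struct rte_flow_item_eth eth_mask = {
		.hdr.ether_type = RTE_BE16(0xFFFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_qid };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* Steer matching frames to the dedicated slow-path Rx queue. */
	return rte_flow_create(member_port_id, &attr, pattern, actions, &error);
}
#endif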
13283eb6bdd8SBruce Richardson 
13293eb6bdd8SBruce Richardson static uint16_t
13303eb6bdd8SBruce Richardson bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
13313eb6bdd8SBruce Richardson 		uint16_t nb_pkts)
13323eb6bdd8SBruce Richardson {
13333eb6bdd8SBruce Richardson 	struct bond_dev_private *internals;
13343eb6bdd8SBruce Richardson 	struct bond_tx_queue *bd_tx_q;
13353eb6bdd8SBruce Richardson 
133615e34522SLong Wu 	uint16_t members[RTE_MAX_ETHPORTS];
13371d6cab8aSDavid Marchand 	uint8_t tx_failed_flag = 0;
133815e34522SLong Wu 	uint16_t num_of_members;
13393eb6bdd8SBruce Richardson 
13403eb6bdd8SBruce Richardson 	uint16_t max_nb_of_tx_pkts = 0;
13413eb6bdd8SBruce Richardson 
134215e34522SLong Wu 	int member_tx_total[RTE_MAX_ETHPORTS];
134315e34522SLong Wu 	int i, most_successful_tx_member = -1;
13443eb6bdd8SBruce Richardson 
13453eb6bdd8SBruce Richardson 	bd_tx_q = (struct bond_tx_queue *)queue;
13463eb6bdd8SBruce Richardson 	internals = bd_tx_q->dev_private;
13473eb6bdd8SBruce Richardson 
134815e34522SLong Wu 	/* Copy member list to protect against member up/down changes during tx
13493eb6bdd8SBruce Richardson 	 * bursting */
135015e34522SLong Wu 	num_of_members = internals->active_member_count;
135115e34522SLong Wu 	memcpy(members, internals->active_members,
135215e34522SLong Wu 			sizeof(internals->active_members[0]) * num_of_members);
13533eb6bdd8SBruce Richardson 
135415e34522SLong Wu 	if (num_of_members < 1)
13553eb6bdd8SBruce Richardson 		return 0;
13563eb6bdd8SBruce Richardson 
1357c622735dSChengwen Feng 	/* It is rare to bond ports from different PMDs together, so just call tx-prepare once */
135815e34522SLong Wu 	nb_pkts = rte_eth_tx_prepare(members[0], bd_tx_q->queue_id, bufs, nb_pkts);
1359c622735dSChengwen Feng 
13603eb6bdd8SBruce Richardson 	/* Increment reference count on mbufs */
13613eb6bdd8SBruce Richardson 	for (i = 0; i < nb_pkts; i++)
136215e34522SLong Wu 		rte_pktmbuf_refcnt_update(bufs[i], num_of_members - 1);
13633eb6bdd8SBruce Richardson 
136415e34522SLong Wu 	/* Transmit burst on each active member */
136515e34522SLong Wu 	for (i = 0; i < num_of_members; i++) {
136615e34522SLong Wu 		member_tx_total[i] = rte_eth_tx_burst(members[i], bd_tx_q->queue_id,
13673eb6bdd8SBruce Richardson 					bufs, nb_pkts);
13683eb6bdd8SBruce Richardson 
136915e34522SLong Wu 		if (unlikely(member_tx_total[i] < nb_pkts))
13703eb6bdd8SBruce Richardson 			tx_failed_flag = 1;
13713eb6bdd8SBruce Richardson 
137215e34522SLong Wu 		/* record the count and the index of the member which transmits the
13733eb6bdd8SBruce Richardson 		 * maximum number of packets */
137415e34522SLong Wu 		if (member_tx_total[i] > max_nb_of_tx_pkts) {
137515e34522SLong Wu 			max_nb_of_tx_pkts = member_tx_total[i];
137615e34522SLong Wu 			most_successful_tx_member = i;
13773eb6bdd8SBruce Richardson 		}
13783eb6bdd8SBruce Richardson 	}
13793eb6bdd8SBruce Richardson 
138015e34522SLong Wu 	/* if members fail to transmit packets from burst, the calling application
13813eb6bdd8SBruce Richardson 	 * is not expected to know about multiple references to packets so we must
138215e34522SLong Wu 	 * handle failures of all packets except those of the most successful member
13833eb6bdd8SBruce Richardson 	 */
13843eb6bdd8SBruce Richardson 	if (unlikely(tx_failed_flag))
138515e34522SLong Wu 		for (i = 0; i < num_of_members; i++)
138615e34522SLong Wu 			if (i != most_successful_tx_member)
138715e34522SLong Wu 				while (member_tx_total[i] < nb_pkts)
138815e34522SLong Wu 					rte_pktmbuf_free(bufs[member_tx_total[i]++]);
13893eb6bdd8SBruce Richardson 
13903eb6bdd8SBruce Richardson 	return max_nb_of_tx_pkts;
13913eb6bdd8SBruce Richardson }
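/*
 * Illustrative note (not part of this file): broadcast mode shares each mbuf
 * across every member, so the loop above first raises the reference count by
 * (num_of_members - 1).  Every rte_eth_tx_burst() that accepts a packet
 * eventually releases exactly one reference, and packets a member failed to
 * accept keep their extra reference until the cleanup loop frees them.  For a
 * hypothetical two-member bond:
 *
 *	rte_pktmbuf_refcnt_update(m, 1);        refcnt 1 -> 2
 *	rte_eth_tx_burst(member0, q, &m, 1);    drops one reference on completion
 *	rte_eth_tx_burst(member1, q, &m, 1);    drops the remaining reference
 */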
13923eb6bdd8SBruce Richardson 
1393d7bce005SChas Williams static void
139415e34522SLong Wu link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *member_link)
13953eb6bdd8SBruce Richardson {
1396deba8a2fSTomasz Kulasek 	struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
13973eb6bdd8SBruce Richardson 
1398deba8a2fSTomasz Kulasek 	if (bond_ctx->mode == BONDING_MODE_8023AD) {
1399deba8a2fSTomasz Kulasek 		/**
1400deba8a2fSTomasz Kulasek 		 * If in mode 4 then save the link properties of the first
140115e34522SLong Wu 		 * member; all subsequent members must match these properties
1402deba8a2fSTomasz Kulasek 		 */
140315e34522SLong Wu 		struct rte_eth_link *bond_link = &bond_ctx->mode4.member_link;
14043eb6bdd8SBruce Richardson 
140515e34522SLong Wu 		bond_link->link_autoneg = member_link->link_autoneg;
140615e34522SLong Wu 		bond_link->link_duplex = member_link->link_duplex;
140715e34522SLong Wu 		bond_link->link_speed = member_link->link_speed;
1408deba8a2fSTomasz Kulasek 	} else {
1409deba8a2fSTomasz Kulasek 		/**
1410deba8a2fSTomasz Kulasek 		 * In any other mode the link properties are set to default
1411deba8a2fSTomasz Kulasek 		 * values of AUTONEG/DUPLEX
1412deba8a2fSTomasz Kulasek 		 */
1413295968d1SFerruh Yigit 		ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
1414295968d1SFerruh Yigit 		ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
14153eb6bdd8SBruce Richardson 	}
14163eb6bdd8SBruce Richardson }
14173eb6bdd8SBruce Richardson 
1418d7bce005SChas Williams static int
1419deba8a2fSTomasz Kulasek link_properties_valid(struct rte_eth_dev *ethdev,
142015e34522SLong Wu 		struct rte_eth_link *member_link)
14213eb6bdd8SBruce Richardson {
1422deba8a2fSTomasz Kulasek 	struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1423deba8a2fSTomasz Kulasek 
1424deba8a2fSTomasz Kulasek 	if (bond_ctx->mode == BONDING_MODE_8023AD) {
142515e34522SLong Wu 		struct rte_eth_link *bond_link = &bond_ctx->mode4.member_link;
1426deba8a2fSTomasz Kulasek 
142715e34522SLong Wu 		if (bond_link->link_duplex != member_link->link_duplex ||
142815e34522SLong Wu 			bond_link->link_autoneg != member_link->link_autoneg ||
142915e34522SLong Wu 			bond_link->link_speed != member_link->link_speed)
14303eb6bdd8SBruce Richardson 			return -1;
1431deba8a2fSTomasz Kulasek 	}
14323eb6bdd8SBruce Richardson 
14333eb6bdd8SBruce Richardson 	return 0;
14343eb6bdd8SBruce Richardson }
14353eb6bdd8SBruce Richardson 
14363eb6bdd8SBruce Richardson int
14376d13ea8eSOlivier Matz mac_address_get(struct rte_eth_dev *eth_dev,
14386d13ea8eSOlivier Matz 		struct rte_ether_addr *dst_mac_addr)
14393eb6bdd8SBruce Richardson {
14406d13ea8eSOlivier Matz 	struct rte_ether_addr *mac_addr;
14413eb6bdd8SBruce Richardson 
14423eb6bdd8SBruce Richardson 	if (eth_dev == NULL) {
1443d7f4562aSStephen Hemminger 		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
14443eb6bdd8SBruce Richardson 		return -1;
14453eb6bdd8SBruce Richardson 	}
14463eb6bdd8SBruce Richardson 
14473eb6bdd8SBruce Richardson 	if (dst_mac_addr == NULL) {
1448d7f4562aSStephen Hemminger 		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
14493eb6bdd8SBruce Richardson 		return -1;
14503eb6bdd8SBruce Richardson 	}
14513eb6bdd8SBruce Richardson 
14523eb6bdd8SBruce Richardson 	mac_addr = eth_dev->data->mac_addrs;
14533eb6bdd8SBruce Richardson 
1454538da7a1SOlivier Matz 	rte_ether_addr_copy(mac_addr, dst_mac_addr);
14553eb6bdd8SBruce Richardson 	return 0;
14563eb6bdd8SBruce Richardson }
14573eb6bdd8SBruce Richardson 
14583eb6bdd8SBruce Richardson int
14596d13ea8eSOlivier Matz mac_address_set(struct rte_eth_dev *eth_dev,
14606d13ea8eSOlivier Matz 		struct rte_ether_addr *new_mac_addr)
14613eb6bdd8SBruce Richardson {
14626d13ea8eSOlivier Matz 	struct rte_ether_addr *mac_addr;
14633eb6bdd8SBruce Richardson 
14643eb6bdd8SBruce Richardson 	if (eth_dev == NULL) {
14653eb6bdd8SBruce Richardson 		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
14663eb6bdd8SBruce Richardson 		return -1;
14673eb6bdd8SBruce Richardson 	}
14683eb6bdd8SBruce Richardson 
14693eb6bdd8SBruce Richardson 	if (new_mac_addr == NULL) {
14703eb6bdd8SBruce Richardson 		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
14713eb6bdd8SBruce Richardson 		return -1;
14723eb6bdd8SBruce Richardson 	}
14733eb6bdd8SBruce Richardson 
14743eb6bdd8SBruce Richardson 	mac_addr = eth_dev->data->mac_addrs;
14753eb6bdd8SBruce Richardson 
14763eb6bdd8SBruce Richardson 	/* If new MAC is different to current MAC then update */
14773eb6bdd8SBruce Richardson 	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
14783eb6bdd8SBruce Richardson 		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
14793eb6bdd8SBruce Richardson 
14803eb6bdd8SBruce Richardson 	return 0;
14813eb6bdd8SBruce Richardson }
14823eb6bdd8SBruce Richardson 
14836d13ea8eSOlivier Matz static const struct rte_ether_addr null_mac_addr;
14849d453d1dSAlex Kiselev 
14859d453d1dSAlex Kiselev /*
148615e34522SLong Wu  * Add additional MAC addresses to the member
14879d453d1dSAlex Kiselev  */
14889d453d1dSAlex Kiselev int
14894f840086SLong Wu member_add_mac_addresses(struct rte_eth_dev *bonding_eth_dev,
149015e34522SLong Wu 		uint16_t member_port_id)
14919d453d1dSAlex Kiselev {
14929d453d1dSAlex Kiselev 	int i, ret;
14936d13ea8eSOlivier Matz 	struct rte_ether_addr *mac_addr;
14949d453d1dSAlex Kiselev 
14959d453d1dSAlex Kiselev 	for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
14964f840086SLong Wu 		mac_addr = &bonding_eth_dev->data->mac_addrs[i];
1497538da7a1SOlivier Matz 		if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
14989d453d1dSAlex Kiselev 			break;
14999d453d1dSAlex Kiselev 
150015e34522SLong Wu 		ret = rte_eth_dev_mac_addr_add(member_port_id, mac_addr, 0);
15019d453d1dSAlex Kiselev 		if (ret < 0) {
15029d453d1dSAlex Kiselev 			/* rollback */
15039d453d1dSAlex Kiselev 			for (i--; i > 0; i--)
150415e34522SLong Wu 				rte_eth_dev_mac_addr_remove(member_port_id,
15054f840086SLong Wu 					&bonding_eth_dev->data->mac_addrs[i]);
15069d453d1dSAlex Kiselev 			return ret;
15079d453d1dSAlex Kiselev 		}
15089d453d1dSAlex Kiselev 	}
15099d453d1dSAlex Kiselev 
15109d453d1dSAlex Kiselev 	return 0;
15119d453d1dSAlex Kiselev }
15129d453d1dSAlex Kiselev 
15139d453d1dSAlex Kiselev /*
151415e34522SLong Wu  * Remove additional MAC addresses from the member
15159d453d1dSAlex Kiselev  */
15169d453d1dSAlex Kiselev int
15174f840086SLong Wu member_remove_mac_addresses(struct rte_eth_dev *bonding_eth_dev,
151815e34522SLong Wu 		uint16_t member_port_id)
15199d453d1dSAlex Kiselev {
15209d453d1dSAlex Kiselev 	int i, rc, ret;
15216d13ea8eSOlivier Matz 	struct rte_ether_addr *mac_addr;
15229d453d1dSAlex Kiselev 
15239d453d1dSAlex Kiselev 	rc = 0;
15249d453d1dSAlex Kiselev 	for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
15254f840086SLong Wu 		mac_addr = &bonding_eth_dev->data->mac_addrs[i];
1526538da7a1SOlivier Matz 		if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
15279d453d1dSAlex Kiselev 			break;
15289d453d1dSAlex Kiselev 
152915e34522SLong Wu 		ret = rte_eth_dev_mac_addr_remove(member_port_id, mac_addr);
15309d453d1dSAlex Kiselev 		/* save only the first error */
15319d453d1dSAlex Kiselev 		if (ret < 0 && rc == 0)
15329d453d1dSAlex Kiselev 			rc = ret;
15339d453d1dSAlex Kiselev 	}
15349d453d1dSAlex Kiselev 
15359d453d1dSAlex Kiselev 	return rc;
15369d453d1dSAlex Kiselev }
15379d453d1dSAlex Kiselev 
15383eb6bdd8SBruce Richardson int
15394f840086SLong Wu mac_address_members_update(struct rte_eth_dev *bonding_eth_dev)
15403eb6bdd8SBruce Richardson {
15414f840086SLong Wu 	struct bond_dev_private *internals = bonding_eth_dev->data->dev_private;
15422d944002SWei Hu (Xavier) 	bool set;
15433eb6bdd8SBruce Richardson 	int i;
15443eb6bdd8SBruce Richardson 
154515e34522SLong Wu 	/* Update member devices MAC addresses */
154615e34522SLong Wu 	if (internals->member_count < 1)
15473eb6bdd8SBruce Richardson 		return -1;
15483eb6bdd8SBruce Richardson 
15493eb6bdd8SBruce Richardson 	switch (internals->mode) {
15503eb6bdd8SBruce Richardson 	case BONDING_MODE_ROUND_ROBIN:
15513eb6bdd8SBruce Richardson 	case BONDING_MODE_BALANCE:
15523eb6bdd8SBruce Richardson 	case BONDING_MODE_BROADCAST:
155315e34522SLong Wu 		for (i = 0; i < internals->member_count; i++) {
1554aa7791baSChas Williams 			if (rte_eth_dev_default_mac_addr_set(
155515e34522SLong Wu 					internals->members[i].port_id,
15564f840086SLong Wu 					bonding_eth_dev->data->mac_addrs)) {
15573eb6bdd8SBruce Richardson 				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
155815e34522SLong Wu 						internals->members[i].port_id);
15593eb6bdd8SBruce Richardson 				return -1;
15603eb6bdd8SBruce Richardson 			}
15613eb6bdd8SBruce Richardson 		}
15623eb6bdd8SBruce Richardson 		break;
15633eb6bdd8SBruce Richardson 	case BONDING_MODE_8023AD:
15644f840086SLong Wu 		bond_mode_8023ad_mac_address_update(bonding_eth_dev);
15653eb6bdd8SBruce Richardson 		break;
15663eb6bdd8SBruce Richardson 	case BONDING_MODE_ACTIVE_BACKUP:
15673eb6bdd8SBruce Richardson 	case BONDING_MODE_TLB:
15683eb6bdd8SBruce Richardson 	case BONDING_MODE_ALB:
15693eb6bdd8SBruce Richardson 	default:
15702d944002SWei Hu (Xavier) 		set = true;
157115e34522SLong Wu 		for (i = 0; i < internals->member_count; i++) {
157215e34522SLong Wu 			if (internals->members[i].port_id ==
15733eb6bdd8SBruce Richardson 					internals->current_primary_port) {
1574aa7791baSChas Williams 				if (rte_eth_dev_default_mac_addr_set(
1575edf6489eSWei Hu (Xavier) 						internals->current_primary_port,
15764f840086SLong Wu 						bonding_eth_dev->data->mac_addrs)) {
15773eb6bdd8SBruce Richardson 					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
15783eb6bdd8SBruce Richardson 							internals->current_primary_port);
15792d944002SWei Hu (Xavier) 					set = false;
15803eb6bdd8SBruce Richardson 				}
15813eb6bdd8SBruce Richardson 			} else {
1582aa7791baSChas Williams 				if (rte_eth_dev_default_mac_addr_set(
158315e34522SLong Wu 						internals->members[i].port_id,
158415e34522SLong Wu 						&internals->members[i].persisted_mac_addr)) {
15853eb6bdd8SBruce Richardson 					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
158615e34522SLong Wu 							internals->members[i].port_id);
15872d944002SWei Hu (Xavier) 				}
15882d944002SWei Hu (Xavier) 			}
15892d944002SWei Hu (Xavier) 		}
15902d944002SWei Hu (Xavier) 		if (!set)
15913eb6bdd8SBruce Richardson 			return -1;
15923eb6bdd8SBruce Richardson 	}
15933eb6bdd8SBruce Richardson 
15943eb6bdd8SBruce Richardson 	return 0;
15953eb6bdd8SBruce Richardson }
15963eb6bdd8SBruce Richardson 
15973eb6bdd8SBruce Richardson int
1598cc5097b1SYunjian Wang bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, uint8_t mode)
15993eb6bdd8SBruce Richardson {
16003eb6bdd8SBruce Richardson 	struct bond_dev_private *internals;
16013eb6bdd8SBruce Richardson 
16023eb6bdd8SBruce Richardson 	internals = eth_dev->data->dev_private;
16033eb6bdd8SBruce Richardson 
16043eb6bdd8SBruce Richardson 	switch (mode) {
16053eb6bdd8SBruce Richardson 	case BONDING_MODE_ROUND_ROBIN:
16063eb6bdd8SBruce Richardson 		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
16073eb6bdd8SBruce Richardson 		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
16083eb6bdd8SBruce Richardson 		break;
16093eb6bdd8SBruce Richardson 	case BONDING_MODE_ACTIVE_BACKUP:
16103eb6bdd8SBruce Richardson 		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
16113eb6bdd8SBruce Richardson 		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
16123eb6bdd8SBruce Richardson 		break;
16133eb6bdd8SBruce Richardson 	case BONDING_MODE_BALANCE:
16143eb6bdd8SBruce Richardson 		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
16153eb6bdd8SBruce Richardson 		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
16163eb6bdd8SBruce Richardson 		break;
16173eb6bdd8SBruce Richardson 	case BONDING_MODE_BROADCAST:
16183eb6bdd8SBruce Richardson 		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
16193eb6bdd8SBruce Richardson 		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
16203eb6bdd8SBruce Richardson 		break;
16213eb6bdd8SBruce Richardson 	case BONDING_MODE_8023AD:
16223eb6bdd8SBruce Richardson 		if (bond_mode_8023ad_enable(eth_dev) != 0)
16233eb6bdd8SBruce Richardson 			return -1;
16243eb6bdd8SBruce Richardson 
1625112891cdSTomasz Kulasek 		if (internals->mode4.dedicated_queues.enabled == 0) {
16263eb6bdd8SBruce Richardson 			eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
16273eb6bdd8SBruce Richardson 			eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
1628d7f4562aSStephen Hemminger 			RTE_BOND_LOG(WARNING,
1629112891cdSTomasz Kulasek 				"Using mode 4, it is necessary to do TX burst "
1630d7f4562aSStephen Hemminger 				"and RX burst at least every 100ms.");
1631112891cdSTomasz Kulasek 		} else {
1632112891cdSTomasz Kulasek 			/* Use flow director's optimization */
1633112891cdSTomasz Kulasek 			eth_dev->rx_pkt_burst =
1634112891cdSTomasz Kulasek 					bond_ethdev_rx_burst_8023ad_fast_queue;
1635112891cdSTomasz Kulasek 			eth_dev->tx_pkt_burst =
1636112891cdSTomasz Kulasek 					bond_ethdev_tx_burst_8023ad_fast_queue;
1637112891cdSTomasz Kulasek 		}
16383eb6bdd8SBruce Richardson 		break;
16393eb6bdd8SBruce Richardson 	case BONDING_MODE_TLB:
16403eb6bdd8SBruce Richardson 		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
16413eb6bdd8SBruce Richardson 		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
16423eb6bdd8SBruce Richardson 		break;
16433eb6bdd8SBruce Richardson 	case BONDING_MODE_ALB:
16443eb6bdd8SBruce Richardson 		if (bond_mode_alb_enable(eth_dev) != 0)
16453eb6bdd8SBruce Richardson 			return -1;
16463eb6bdd8SBruce Richardson 
16473eb6bdd8SBruce Richardson 		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
16483eb6bdd8SBruce Richardson 		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
16493eb6bdd8SBruce Richardson 		break;
16503eb6bdd8SBruce Richardson 	default:
16513eb6bdd8SBruce Richardson 		return -1;
16523eb6bdd8SBruce Richardson 	}
16533eb6bdd8SBruce Richardson 
16543eb6bdd8SBruce Richardson 	internals->mode = mode;
16553eb6bdd8SBruce Richardson 
16563eb6bdd8SBruce Richardson 	return 0;
16573eb6bdd8SBruce Richardson }
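/*
 * Illustrative sketch (not part of this file): applications normally reach
 * bond_ethdev_mode_set() through the public bonding API declared in
 * rte_eth_bond.h.  The calls below assume the current member-based names
 * (rte_eth_bond_create, rte_eth_bond_member_add); releases predating the
 * member renaming exposed the same functionality under different names.
 */
#ifdef BOND_PMD_DOC_EXAMPLE
static int
example_create_lacp_bond(uint16_t member0, uint16_t member1)
{
	int bond_port = rte_eth_bond_create("net_bonding0", BONDING_MODE_8023AD,
			rte_socket_id());

	if (bond_port < 0)
		return bond_port;

	if (rte_eth_bond_member_add(bond_port, member0) != 0 ||
			rte_eth_bond_member_add(bond_port, member1) != 0)
		return -1;

	return bond_port;
}
#endif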
16583eb6bdd8SBruce Richardson 
1659112891cdSTomasz Kulasek 
1660112891cdSTomasz Kulasek static int
16614f840086SLong Wu member_configure_slow_queue(struct rte_eth_dev *bonding_eth_dev,
166215e34522SLong Wu 		struct rte_eth_dev *member_eth_dev)
1663112891cdSTomasz Kulasek {
1664112891cdSTomasz Kulasek 	int errval = 0;
16654f840086SLong Wu 	struct bond_dev_private *internals = bonding_eth_dev->data->dev_private;
166615e34522SLong Wu 	struct port *port = &bond_mode_8023ad_ports[member_eth_dev->data->port_id];
1667112891cdSTomasz Kulasek 
1668112891cdSTomasz Kulasek 	if (port->slow_pool == NULL) {
1669112891cdSTomasz Kulasek 		char mem_name[256];
167015e34522SLong Wu 		int member_id = member_eth_dev->data->port_id;
1671112891cdSTomasz Kulasek 
167215e34522SLong Wu 		snprintf(mem_name, RTE_DIM(mem_name), "member_port%u_slow_pool",
167315e34522SLong Wu 				member_id);
1674112891cdSTomasz Kulasek 		port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
1675112891cdSTomasz Kulasek 			250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
167615e34522SLong Wu 			member_eth_dev->data->numa_node);
1677112891cdSTomasz Kulasek 
1678112891cdSTomasz Kulasek 		/* Any memory allocation failure in initialization is critical because
1679112891cdSTomasz Kulasek 		 * resources can't be freed, so reinitialization is impossible. */
1680112891cdSTomasz Kulasek 		if (port->slow_pool == NULL) {
168115e34522SLong Wu 			rte_panic("Member %u: Failed to create memory pool '%s': %s\n",
168215e34522SLong Wu 				member_id, mem_name, rte_strerror(rte_errno));
1683112891cdSTomasz Kulasek 		}
1684112891cdSTomasz Kulasek 	}
1685112891cdSTomasz Kulasek 
1686112891cdSTomasz Kulasek 	if (internals->mode4.dedicated_queues.enabled == 1) {
1687*4da0705bSLong Wu 		struct rte_eth_dev_info member_info = {};
1688*4da0705bSLong Wu 		uint16_t nb_rx_desc = SLOW_RX_QUEUE_HW_DEFAULT_SIZE;
1689*4da0705bSLong Wu 		uint16_t nb_tx_desc = SLOW_TX_QUEUE_HW_DEFAULT_SIZE;
1690112891cdSTomasz Kulasek 
1691*4da0705bSLong Wu 		errval = rte_eth_dev_info_get(member_eth_dev->data->port_id,
1692*4da0705bSLong Wu 				&member_info);
1693*4da0705bSLong Wu 		if (errval != 0) {
1694*4da0705bSLong Wu 			RTE_BOND_LOG(ERR,
1695*4da0705bSLong Wu 					"rte_eth_dev_info_get: port=%d, err (%d)",
1696*4da0705bSLong Wu 					member_eth_dev->data->port_id,
1697*4da0705bSLong Wu 					errval);
1698*4da0705bSLong Wu 			return errval;
1699*4da0705bSLong Wu 		}
1700*4da0705bSLong Wu 
1701*4da0705bSLong Wu 		if (member_info.rx_desc_lim.nb_min != 0)
1702*4da0705bSLong Wu 			nb_rx_desc = member_info.rx_desc_lim.nb_min;
1703*4da0705bSLong Wu 
1704*4da0705bSLong Wu 		/* Configure slow Rx queue */
170515e34522SLong Wu 		errval = rte_eth_rx_queue_setup(member_eth_dev->data->port_id,
1706*4da0705bSLong Wu 				internals->mode4.dedicated_queues.rx_qid, nb_rx_desc,
170715e34522SLong Wu 				rte_eth_dev_socket_id(member_eth_dev->data->port_id),
1708112891cdSTomasz Kulasek 				NULL, port->slow_pool);
1709112891cdSTomasz Kulasek 		if (errval != 0) {
1710112891cdSTomasz Kulasek 			RTE_BOND_LOG(ERR,
1711112891cdSTomasz Kulasek 					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
171215e34522SLong Wu 					member_eth_dev->data->port_id,
1713112891cdSTomasz Kulasek 					internals->mode4.dedicated_queues.rx_qid,
1714112891cdSTomasz Kulasek 					errval);
1715112891cdSTomasz Kulasek 			return errval;
1716112891cdSTomasz Kulasek 		}
1717112891cdSTomasz Kulasek 
1718*4da0705bSLong Wu 		if (member_info.tx_desc_lim.nb_min != 0)
1719*4da0705bSLong Wu 			nb_tx_desc = member_info.tx_desc_lim.nb_min;
1720*4da0705bSLong Wu 
172115e34522SLong Wu 		errval = rte_eth_tx_queue_setup(member_eth_dev->data->port_id,
1722*4da0705bSLong Wu 				internals->mode4.dedicated_queues.tx_qid, nb_tx_desc,
172315e34522SLong Wu 				rte_eth_dev_socket_id(member_eth_dev->data->port_id),
1724112891cdSTomasz Kulasek 				NULL);
1725112891cdSTomasz Kulasek 		if (errval != 0) {
1726112891cdSTomasz Kulasek 			RTE_BOND_LOG(ERR,
1727112891cdSTomasz Kulasek 				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
172815e34522SLong Wu 				member_eth_dev->data->port_id,
1729112891cdSTomasz Kulasek 				internals->mode4.dedicated_queues.tx_qid,
1730112891cdSTomasz Kulasek 				errval);
1731112891cdSTomasz Kulasek 			return errval;
1732112891cdSTomasz Kulasek 		}
1733112891cdSTomasz Kulasek 	}
1734112891cdSTomasz Kulasek 	return 0;
1735112891cdSTomasz Kulasek }
1736112891cdSTomasz Kulasek 
17373eb6bdd8SBruce Richardson int
17384f840086SLong Wu member_configure(struct rte_eth_dev *bonding_eth_dev,
173915e34522SLong Wu 		struct rte_eth_dev *member_eth_dev)
17403eb6bdd8SBruce Richardson {
1741112891cdSTomasz Kulasek 	uint16_t nb_rx_queues;
1742112891cdSTomasz Kulasek 	uint16_t nb_tx_queues;
17433eb6bdd8SBruce Richardson 
17443eb6bdd8SBruce Richardson 	int errval;
1745112891cdSTomasz Kulasek 
17464f840086SLong Wu 	struct bond_dev_private *internals = bonding_eth_dev->data->dev_private;
17473eb6bdd8SBruce Richardson 
174815e34522SLong Wu 	/* Stop member */
174915e34522SLong Wu 	errval = rte_eth_dev_stop(member_eth_dev->data->port_id);
1750fb0379bcSIvan Ilchenko 	if (errval != 0)
1751fb0379bcSIvan Ilchenko 		RTE_BOND_LOG(ERR, "rte_eth_dev_stop: port %u, err (%d)",
175215e34522SLong Wu 			     member_eth_dev->data->port_id, errval);
17533eb6bdd8SBruce Richardson 
175415e34522SLong Wu 	/* Enable interrupts on member device if supported */
175515e34522SLong Wu 	if (member_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
175615e34522SLong Wu 		member_eth_dev->data->dev_conf.intr_conf.lsc = 1;
17573eb6bdd8SBruce Richardson 
175815e34522SLong Wu 	/* If RSS is enabled for bonding, try to enable it for members  */
17594f840086SLong Wu 	if (bonding_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
17604f840086SLong Wu 		/* rss_key won't be empty if RSS is configured in bonding dev */
176115e34522SLong Wu 		member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
176283cf204aSIgor Romanov 					internals->rss_key_len;
176315e34522SLong Wu 		member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
176483cf204aSIgor Romanov 					internals->rss_key;
1765734ce47fSTomasz Kulasek 
176615e34522SLong Wu 		member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
17674f840086SLong Wu 				bonding_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
176815e34522SLong Wu 		member_eth_dev->data->dev_conf.rxmode.mq_mode =
17694f840086SLong Wu 				bonding_eth_dev->data->dev_conf.rxmode.mq_mode;
17709e0fb72cSHuisong Li 	} else {
177115e34522SLong Wu 		member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
177215e34522SLong Wu 		member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
177315e34522SLong Wu 		member_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
177415e34522SLong Wu 		member_eth_dev->data->dev_conf.rxmode.mq_mode =
17754f840086SLong Wu 				bonding_eth_dev->data->dev_conf.rxmode.mq_mode;
1776734ce47fSTomasz Kulasek 	}
1777734ce47fSTomasz Kulasek 
177815e34522SLong Wu 	member_eth_dev->data->dev_conf.rxmode.mtu =
17794f840086SLong Wu 			bonding_eth_dev->data->dev_conf.rxmode.mtu;
178015e34522SLong Wu 	member_eth_dev->data->dev_conf.link_speeds =
17814f840086SLong Wu 			bonding_eth_dev->data->dev_conf.link_speeds;
17829e6ec4eaSKiran KN 
178315e34522SLong Wu 	member_eth_dev->data->dev_conf.txmode.offloads =
17844f840086SLong Wu 			bonding_eth_dev->data->dev_conf.txmode.offloads;
178557b15654SChengchang Tang 
178615e34522SLong Wu 	member_eth_dev->data->dev_conf.rxmode.offloads =
17874f840086SLong Wu 			bonding_eth_dev->data->dev_conf.rxmode.offloads;
178857b15654SChengchang Tang 
17894f840086SLong Wu 	nb_rx_queues = bonding_eth_dev->data->nb_rx_queues;
17904f840086SLong Wu 	nb_tx_queues = bonding_eth_dev->data->nb_tx_queues;
1791112891cdSTomasz Kulasek 
1792112891cdSTomasz Kulasek 	if (internals->mode == BONDING_MODE_8023AD) {
1793112891cdSTomasz Kulasek 		if (internals->mode4.dedicated_queues.enabled == 1) {
1794112891cdSTomasz Kulasek 			nb_rx_queues++;
1795112891cdSTomasz Kulasek 			nb_tx_queues++;
1796112891cdSTomasz Kulasek 		}
1797112891cdSTomasz Kulasek 	}
1798112891cdSTomasz Kulasek 
17993eb6bdd8SBruce Richardson 	/* Configure device */
180015e34522SLong Wu 	errval = rte_eth_dev_configure(member_eth_dev->data->port_id,
1801112891cdSTomasz Kulasek 			nb_rx_queues, nb_tx_queues,
180215e34522SLong Wu 			&member_eth_dev->data->dev_conf);
18033eb6bdd8SBruce Richardson 	if (errval != 0) {
180415e34522SLong Wu 		RTE_BOND_LOG(ERR, "Cannot configure member device: port %u, err (%d)",
180515e34522SLong Wu 				member_eth_dev->data->port_id, errval);
18063eb6bdd8SBruce Richardson 		return errval;
18073eb6bdd8SBruce Richardson 	}
18083eb6bdd8SBruce Richardson 
180915e34522SLong Wu 	errval = rte_eth_dev_set_mtu(member_eth_dev->data->port_id,
18104f840086SLong Wu 				     bonding_eth_dev->data->mtu);
181120a53b19SFerruh Yigit 	if (errval != 0 && errval != -ENOTSUP) {
181220a53b19SFerruh Yigit 		RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
181315e34522SLong Wu 				member_eth_dev->data->port_id, errval);
181420a53b19SFerruh Yigit 		return errval;
181520a53b19SFerruh Yigit 	}
1816b3eaaf1dSJunjie Wan 	return 0;
1817b3eaaf1dSJunjie Wan }
1818b3eaaf1dSJunjie Wan 
1819b3eaaf1dSJunjie Wan int
18204f840086SLong Wu member_start(struct rte_eth_dev *bonding_eth_dev,
182115e34522SLong Wu 		struct rte_eth_dev *member_eth_dev)
1822b3eaaf1dSJunjie Wan {
1823b3eaaf1dSJunjie Wan 	int errval = 0;
1824b3eaaf1dSJunjie Wan 	struct bond_rx_queue *bd_rx_q;
1825b3eaaf1dSJunjie Wan 	struct bond_tx_queue *bd_tx_q;
1826b3eaaf1dSJunjie Wan 	uint16_t q_id;
1827b3eaaf1dSJunjie Wan 	struct rte_flow_error flow_error;
18284f840086SLong Wu 	struct bond_dev_private *internals = bonding_eth_dev->data->dev_private;
182915e34522SLong Wu 	uint16_t member_port_id = member_eth_dev->data->port_id;
183020a53b19SFerruh Yigit 
18313eb6bdd8SBruce Richardson 	/* Setup Rx Queues */
18324f840086SLong Wu 	for (q_id = 0; q_id < bonding_eth_dev->data->nb_rx_queues; q_id++) {
18334f840086SLong Wu 		bd_rx_q = (struct bond_rx_queue *)bonding_eth_dev->data->rx_queues[q_id];
18343eb6bdd8SBruce Richardson 
183515e34522SLong Wu 		errval = rte_eth_rx_queue_setup(member_port_id, q_id,
18363eb6bdd8SBruce Richardson 				bd_rx_q->nb_rx_desc,
183715e34522SLong Wu 				rte_eth_dev_socket_id(member_port_id),
18383eb6bdd8SBruce Richardson 				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
18393eb6bdd8SBruce Richardson 		if (errval != 0) {
18403eb6bdd8SBruce Richardson 			RTE_BOND_LOG(ERR,
18413eb6bdd8SBruce Richardson 					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
184215e34522SLong Wu 					member_port_id, q_id, errval);
18433eb6bdd8SBruce Richardson 			return errval;
18443eb6bdd8SBruce Richardson 		}
18453eb6bdd8SBruce Richardson 	}
18463eb6bdd8SBruce Richardson 
18473eb6bdd8SBruce Richardson 	/* Setup Tx Queues */
18484f840086SLong Wu 	for (q_id = 0; q_id < bonding_eth_dev->data->nb_tx_queues; q_id++) {
18494f840086SLong Wu 		bd_tx_q = (struct bond_tx_queue *)bonding_eth_dev->data->tx_queues[q_id];
18503eb6bdd8SBruce Richardson 
185115e34522SLong Wu 		errval = rte_eth_tx_queue_setup(member_port_id, q_id,
18523eb6bdd8SBruce Richardson 				bd_tx_q->nb_tx_desc,
185315e34522SLong Wu 				rte_eth_dev_socket_id(member_port_id),
18543eb6bdd8SBruce Richardson 				&bd_tx_q->tx_conf);
18553eb6bdd8SBruce Richardson 		if (errval != 0) {
18563eb6bdd8SBruce Richardson 			RTE_BOND_LOG(ERR,
18573eb6bdd8SBruce Richardson 				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
185815e34522SLong Wu 				member_port_id, q_id, errval);
18593eb6bdd8SBruce Richardson 			return errval;
18603eb6bdd8SBruce Richardson 		}
18613eb6bdd8SBruce Richardson 	}
18623eb6bdd8SBruce Richardson 
1863112891cdSTomasz Kulasek 	if (internals->mode == BONDING_MODE_8023AD &&
1864112891cdSTomasz Kulasek 			internals->mode4.dedicated_queues.enabled == 1) {
18654f840086SLong Wu 		if (member_configure_slow_queue(bonding_eth_dev, member_eth_dev)
1866112891cdSTomasz Kulasek 				!= 0)
1867112891cdSTomasz Kulasek 			return errval;
1868112891cdSTomasz Kulasek 
18694f840086SLong Wu 		errval = bond_ethdev_8023ad_flow_verify(bonding_eth_dev,
187015e34522SLong Wu 				member_port_id);
1871cb8dc97fSMartin Havlik 		if (errval != 0) {
1872112891cdSTomasz Kulasek 			RTE_BOND_LOG(ERR,
1873cb8dc97fSMartin Havlik 				"bond_ethdev_8023ad_flow_verify: port=%d, err (%d)",
187415e34522SLong Wu 				member_port_id, errval);
1875cb8dc97fSMartin Havlik 			return errval;
1876112891cdSTomasz Kulasek 		}
1877112891cdSTomasz Kulasek 
187815e34522SLong Wu 		if (internals->mode4.dedicated_queues.flow[member_port_id] != NULL) {
187915e34522SLong Wu 			errval = rte_flow_destroy(member_port_id,
188015e34522SLong Wu 					internals->mode4.dedicated_queues.flow[member_port_id],
1881112891cdSTomasz Kulasek 					&flow_error);
1882b3eaaf1dSJunjie Wan 			RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_destroy: port=%d, err (%d)",
188315e34522SLong Wu 				member_port_id, errval);
1884b3eaaf1dSJunjie Wan 		}
1885112891cdSTomasz Kulasek 	}
1886112891cdSTomasz Kulasek 
18873eb6bdd8SBruce Richardson 	/* Start device */
188815e34522SLong Wu 	errval = rte_eth_dev_start(member_port_id);
18893eb6bdd8SBruce Richardson 	if (errval != 0) {
18903eb6bdd8SBruce Richardson 		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
189115e34522SLong Wu 				member_port_id, errval);
18923eb6bdd8SBruce Richardson 		return -1;
18933eb6bdd8SBruce Richardson 	}
18943eb6bdd8SBruce Richardson 
1895f6632371SUsman Tanveer 	if (internals->mode == BONDING_MODE_8023AD &&
1896f6632371SUsman Tanveer 			internals->mode4.dedicated_queues.enabled == 1) {
18974f840086SLong Wu 		errval = bond_ethdev_8023ad_flow_set(bonding_eth_dev,
189815e34522SLong Wu 				member_port_id);
1899f6632371SUsman Tanveer 		if (errval != 0) {
1900f6632371SUsman Tanveer 			RTE_BOND_LOG(ERR,
1901f6632371SUsman Tanveer 				"bond_ethdev_8023ad_flow_set: port=%d, err (%d)",
190215e34522SLong Wu 				member_port_id, errval);
1903f6632371SUsman Tanveer 			return errval;
1904f6632371SUsman Tanveer 		}
1905f6632371SUsman Tanveer 	}
1906f6632371SUsman Tanveer 
1907734ce47fSTomasz Kulasek 	/* If RSS is enabled for bonding, synchronize RETA */
19084f840086SLong Wu 	if (bonding_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
1909734ce47fSTomasz Kulasek 		int i;
1910734ce47fSTomasz Kulasek 		struct bond_dev_private *internals;
1911734ce47fSTomasz Kulasek 
19124f840086SLong Wu 		internals = bonding_eth_dev->data->dev_private;
1913734ce47fSTomasz Kulasek 
191415e34522SLong Wu 		for (i = 0; i < internals->member_count; i++) {
191515e34522SLong Wu 			if (internals->members[i].port_id == member_port_id) {
1916734ce47fSTomasz Kulasek 				errval = rte_eth_dev_rss_reta_update(
191715e34522SLong Wu 						member_port_id,
1918734ce47fSTomasz Kulasek 						&internals->reta_conf[0],
191915e34522SLong Wu 						internals->members[i].reta_size);
1920734ce47fSTomasz Kulasek 				if (errval != 0) {
1921d7f4562aSStephen Hemminger 					RTE_BOND_LOG(WARNING,
192215e34522SLong Wu 						     "rte_eth_dev_rss_reta_update on member port %d fails (err %d)."
1923d7f4562aSStephen Hemminger 						     " RSS Configuration for bonding may be inconsistent.",
192415e34522SLong Wu 						     member_port_id, errval);
1925734ce47fSTomasz Kulasek 				}
1926734ce47fSTomasz Kulasek 				break;
1927734ce47fSTomasz Kulasek 			}
1928734ce47fSTomasz Kulasek 		}
1929734ce47fSTomasz Kulasek 	}
1930734ce47fSTomasz Kulasek 
193115e34522SLong Wu 	/* If lsc interrupt is set, check initial member's link status */
193215e34522SLong Wu 	if (member_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
193315e34522SLong Wu 		member_eth_dev->dev_ops->link_update(member_eth_dev, 0);
193415e34522SLong Wu 		bond_ethdev_lsc_event_callback(member_port_id,
19354f840086SLong Wu 			RTE_ETH_EVENT_INTR_LSC, &bonding_eth_dev->data->port_id,
1936d6af1a13SBernard Iremonger 			NULL);
193721090380SWei Wang 	}
1938414b2023STomasz Kulasek 
19393eb6bdd8SBruce Richardson 	return 0;
19403eb6bdd8SBruce Richardson }
19413eb6bdd8SBruce Richardson 
19423eb6bdd8SBruce Richardson void
194315e34522SLong Wu member_remove(struct bond_dev_private *internals,
194415e34522SLong Wu 		struct rte_eth_dev *member_eth_dev)
19453eb6bdd8SBruce Richardson {
19461d6cab8aSDavid Marchand 	uint16_t i;
19473eb6bdd8SBruce Richardson 
194815e34522SLong Wu 	for (i = 0; i < internals->member_count; i++)
194915e34522SLong Wu 		if (internals->members[i].port_id ==
195015e34522SLong Wu 				member_eth_dev->data->port_id)
19513eb6bdd8SBruce Richardson 			break;
19523eb6bdd8SBruce Richardson 
195315e34522SLong Wu 	if (i < (internals->member_count - 1)) {
195449dad902SMatan Azrad 		struct rte_flow *flow;
195549dad902SMatan Azrad 
195615e34522SLong Wu 		memmove(&internals->members[i], &internals->members[i + 1],
195715e34522SLong Wu 				sizeof(internals->members[0]) *
195815e34522SLong Wu 				(internals->member_count - i - 1));
195949dad902SMatan Azrad 		TAILQ_FOREACH(flow, &internals->flow_list, next) {
196049dad902SMatan Azrad 			memmove(&flow->flows[i], &flow->flows[i + 1],
196149dad902SMatan Azrad 				sizeof(flow->flows[0]) *
196215e34522SLong Wu 				(internals->member_count - i - 1));
196315e34522SLong Wu 			flow->flows[internals->member_count - 1] = NULL;
196449dad902SMatan Azrad 		}
196549dad902SMatan Azrad 	}
19663eb6bdd8SBruce Richardson 
196715e34522SLong Wu 	internals->member_count--;
1968601319aeSJan Blunck 
196915e34522SLong Wu 	/* force reconfiguration of member interfaces */
197015e34522SLong Wu 	rte_eth_dev_internal_reset(member_eth_dev);
19713eb6bdd8SBruce Richardson }
19723eb6bdd8SBruce Richardson 
19733eb6bdd8SBruce Richardson static void
197415e34522SLong Wu bond_ethdev_member_link_status_change_monitor(void *cb_arg);
19753eb6bdd8SBruce Richardson 
19763eb6bdd8SBruce Richardson void
197715e34522SLong Wu member_add(struct bond_dev_private *internals,
197815e34522SLong Wu 		struct rte_eth_dev *member_eth_dev)
19793eb6bdd8SBruce Richardson {
198015e34522SLong Wu 	struct bond_member_details *member_details =
198115e34522SLong Wu 			&internals->members[internals->member_count];
19823eb6bdd8SBruce Richardson 
198315e34522SLong Wu 	member_details->port_id = member_eth_dev->data->port_id;
198415e34522SLong Wu 	member_details->last_link_status = 0;
19853eb6bdd8SBruce Richardson 
198615e34522SLong Wu 	/* Mark member devices that don't support interrupts so we can
198744bf37a7SNelson Escobar 	 * compensate when we start the bond
198844bf37a7SNelson Escobar 	 */
198915e34522SLong Wu 	if (!(member_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))
199015e34522SLong Wu 		member_details->link_status_poll_enabled = 1;
19913eb6bdd8SBruce Richardson 
199215e34522SLong Wu 	member_details->link_status_wait_to_complete = 0;
19933eb6bdd8SBruce Richardson 	/* clean tlb_last_obytes when adding port for bonding device */
199415e34522SLong Wu 	memcpy(&member_details->persisted_mac_addr, member_eth_dev->data->mac_addrs,
19956d13ea8eSOlivier Matz 			sizeof(struct rte_ether_addr));
19963eb6bdd8SBruce Richardson }
19973eb6bdd8SBruce Richardson 
19983eb6bdd8SBruce Richardson void
19993eb6bdd8SBruce Richardson bond_ethdev_primary_set(struct bond_dev_private *internals,
200015e34522SLong Wu 		uint16_t member_port_id)
20013eb6bdd8SBruce Richardson {
20023eb6bdd8SBruce Richardson 	int i;
20033eb6bdd8SBruce Richardson 
200415e34522SLong Wu 	if (internals->active_member_count < 1)
200515e34522SLong Wu 		internals->current_primary_port = member_port_id;
20063eb6bdd8SBruce Richardson 	else
20074f840086SLong Wu 		/* Search bonding device member ports for new proposed primary port */
200815e34522SLong Wu 		for (i = 0; i < internals->active_member_count; i++) {
200915e34522SLong Wu 			if (internals->active_members[i] == member_port_id)
201015e34522SLong Wu 				internals->current_primary_port = member_port_id;
20113eb6bdd8SBruce Richardson 		}
20123eb6bdd8SBruce Richardson }
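/*
 * Illustrative note (not part of this file): this helper is normally reached
 * from the public API, e.g. (assuming the current function name)
 *
 *	rte_eth_bond_primary_set(bonding_port_id, preferred_member_port_id);
 *
 * which records a user-defined primary that bond_ethdev_start() below
 * re-applies once the member ports are running.
 */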
20133eb6bdd8SBruce Richardson 
20149039c812SAndrew Rybchenko static int
20153eb6bdd8SBruce Richardson bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
20163eb6bdd8SBruce Richardson 
20173eb6bdd8SBruce Richardson static int
20183eb6bdd8SBruce Richardson bond_ethdev_start(struct rte_eth_dev *eth_dev)
20193eb6bdd8SBruce Richardson {
20203eb6bdd8SBruce Richardson 	struct bond_dev_private *internals;
20213eb6bdd8SBruce Richardson 	int i;
20223eb6bdd8SBruce Richardson 
20234f840086SLong Wu 	/* member eth dev will be started by bonding device */
20244f840086SLong Wu 	if (check_for_bonding_ethdev(eth_dev)) {
202515e34522SLong Wu 		RTE_BOND_LOG(ERR, "User tried to explicitly start a member eth_dev (%d)",
20263eb6bdd8SBruce Richardson 				eth_dev->data->port_id);
20273eb6bdd8SBruce Richardson 		return -1;
20283eb6bdd8SBruce Richardson 	}
20293eb6bdd8SBruce Richardson 
2030295968d1SFerruh Yigit 	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
20313eb6bdd8SBruce Richardson 	eth_dev->data->dev_started = 1;
20323eb6bdd8SBruce Richardson 
20333eb6bdd8SBruce Richardson 	internals = eth_dev->data->dev_private;
20343eb6bdd8SBruce Richardson 
203515e34522SLong Wu 	if (internals->member_count == 0) {
203615e34522SLong Wu 		RTE_BOND_LOG(ERR, "Cannot start port since there are no member devices");
203785d3c09aSChas Williams 		goto out_err;
20383eb6bdd8SBruce Richardson 	}
20393eb6bdd8SBruce Richardson 
20403eb6bdd8SBruce Richardson 	if (internals->user_defined_mac == 0) {
20416d13ea8eSOlivier Matz 		struct rte_ether_addr *new_mac_addr = NULL;
20423eb6bdd8SBruce Richardson 
204315e34522SLong Wu 		for (i = 0; i < internals->member_count; i++)
204415e34522SLong Wu 			if (internals->members[i].port_id == internals->primary_port)
204515e34522SLong Wu 				new_mac_addr = &internals->members[i].persisted_mac_addr;
20463eb6bdd8SBruce Richardson 
20473eb6bdd8SBruce Richardson 		if (new_mac_addr == NULL)
204885d3c09aSChas Williams 			goto out_err;
20493eb6bdd8SBruce Richardson 
20503eb6bdd8SBruce Richardson 		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
20514f840086SLong Wu 			RTE_BOND_LOG(ERR, "bonding port (%d) failed to update MAC address",
20523eb6bdd8SBruce Richardson 					eth_dev->data->port_id);
205385d3c09aSChas Williams 			goto out_err;
20543eb6bdd8SBruce Richardson 		}
20553eb6bdd8SBruce Richardson 	}
20563eb6bdd8SBruce Richardson 
2057112891cdSTomasz Kulasek 	if (internals->mode == BONDING_MODE_8023AD) {
2058112891cdSTomasz Kulasek 		if (internals->mode4.dedicated_queues.enabled == 1) {
2059112891cdSTomasz Kulasek 			internals->mode4.dedicated_queues.rx_qid =
2060112891cdSTomasz Kulasek 					eth_dev->data->nb_rx_queues;
2061112891cdSTomasz Kulasek 			internals->mode4.dedicated_queues.tx_qid =
2062112891cdSTomasz Kulasek 					eth_dev->data->nb_tx_queues;
2063112891cdSTomasz Kulasek 		}
2064112891cdSTomasz Kulasek 	}
2065112891cdSTomasz Kulasek 
2066112891cdSTomasz Kulasek 
20674f840086SLong Wu 	/* Reconfigure each member device if starting bonding device */
206815e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
206915e34522SLong Wu 		struct rte_eth_dev *member_ethdev =
207015e34522SLong Wu 				&(rte_eth_devices[internals->members[i].port_id]);
207115e34522SLong Wu 		if (member_configure(eth_dev, member_ethdev) != 0) {
20723eb6bdd8SBruce Richardson 			RTE_BOND_LOG(ERR,
20734f840086SLong Wu 				"bonding port (%d) failed to reconfigure member device (%d)",
2074112891cdSTomasz Kulasek 				eth_dev->data->port_id,
207515e34522SLong Wu 				internals->members[i].port_id);
207685d3c09aSChas Williams 			goto out_err;
20773eb6bdd8SBruce Richardson 		}
207815e34522SLong Wu 		if (member_start(eth_dev, member_ethdev) != 0) {
2079b3eaaf1dSJunjie Wan 			RTE_BOND_LOG(ERR,
20804f840086SLong Wu 				"bonding port (%d) failed to start member device (%d)",
2081b3eaaf1dSJunjie Wan 				eth_dev->data->port_id,
208215e34522SLong Wu 				internals->members[i].port_id);
2083b3eaaf1dSJunjie Wan 			goto out_err;
2084b3eaaf1dSJunjie Wan 		}
208515e34522SLong Wu 		/* We will need to poll for link status if any member doesn't
208644bf37a7SNelson Escobar 		 * support interrupts
208744bf37a7SNelson Escobar 		 */
208815e34522SLong Wu 		if (internals->members[i].link_status_poll_enabled)
208944bf37a7SNelson Escobar 			internals->link_status_polling_enabled = 1;
209044bf37a7SNelson Escobar 	}
209185d3c09aSChas Williams 
209244bf37a7SNelson Escobar 	/* start polling if needed */
209344bf37a7SNelson Escobar 	if (internals->link_status_polling_enabled) {
209444bf37a7SNelson Escobar 		rte_eal_alarm_set(
209544bf37a7SNelson Escobar 			internals->link_status_polling_interval_ms * 1000,
209615e34522SLong Wu 			bond_ethdev_member_link_status_change_monitor,
209744bf37a7SNelson Escobar 			(void *)&rte_eth_devices[internals->port_id]);
20983eb6bdd8SBruce Richardson 	}
20993eb6bdd8SBruce Richardson 
210015e34522SLong Wu 	/* Update all member devices' MACs */
210115e34522SLong Wu 	if (mac_address_members_update(eth_dev) != 0)
21025922ff06SRadu Nicolau 		goto out_err;
21035922ff06SRadu Nicolau 
21043eb6bdd8SBruce Richardson 	if (internals->user_defined_primary_port)
21053eb6bdd8SBruce Richardson 		bond_ethdev_primary_set(internals, internals->primary_port);
21063eb6bdd8SBruce Richardson 
21073eb6bdd8SBruce Richardson 	if (internals->mode == BONDING_MODE_8023AD)
21083eb6bdd8SBruce Richardson 		bond_mode_8023ad_start(eth_dev);
21093eb6bdd8SBruce Richardson 
21103eb6bdd8SBruce Richardson 	if (internals->mode == BONDING_MODE_TLB ||
21113eb6bdd8SBruce Richardson 			internals->mode == BONDING_MODE_ALB)
21123eb6bdd8SBruce Richardson 		bond_tlb_enable(internals);
21133eb6bdd8SBruce Richardson 
2114419d3e33SJie Hai 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
2115419d3e33SJie Hai 		eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
2116419d3e33SJie Hai 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
2117419d3e33SJie Hai 		eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
2118419d3e33SJie Hai 
21193eb6bdd8SBruce Richardson 	return 0;
212085d3c09aSChas Williams 
212185d3c09aSChas Williams out_err:
212285d3c09aSChas Williams 	eth_dev->data->dev_started = 0;
212385d3c09aSChas Williams 	return -1;
21243eb6bdd8SBruce Richardson }
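/*
 * Illustrative usage (not part of the driver; port_conf, mbuf_pool and
 * member_port_id are placeholder names): an application reaches
 * bond_ethdev_start() through the generic ethdev start path, e.g.
 *
 *	int bond_port = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
 *	rte_eth_bond_member_add(bond_port, member_port_id);
 *	rte_eth_dev_configure(bond_port, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(bond_port, 0, 256, rte_socket_id(),
 *			NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(bond_port, 0, 256, rte_socket_id(), NULL);
 *	rte_eth_dev_start(bond_port);
 */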
21253eb6bdd8SBruce Richardson 
2126d1d1e664SBernard Iremonger static void
2127d1d1e664SBernard Iremonger bond_ethdev_free_queues(struct rte_eth_dev *dev)
2128d1d1e664SBernard Iremonger {
212988245e7eSDavid Marchand 	uint16_t i;
2130d1d1e664SBernard Iremonger 
213132b12e75SRaslan Darawsheh 	if (dev->data->rx_queues != NULL) {
2132d1d1e664SBernard Iremonger 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
2133d1d1e664SBernard Iremonger 			rte_free(dev->data->rx_queues[i]);
2134d1d1e664SBernard Iremonger 			dev->data->rx_queues[i] = NULL;
2135d1d1e664SBernard Iremonger 		}
2136d1d1e664SBernard Iremonger 		dev->data->nb_rx_queues = 0;
213732b12e75SRaslan Darawsheh 	}
2138d1d1e664SBernard Iremonger 
213932b12e75SRaslan Darawsheh 	if (dev->data->tx_queues != NULL) {
2140d1d1e664SBernard Iremonger 		for (i = 0; i < dev->data->nb_tx_queues; i++) {
2141d1d1e664SBernard Iremonger 			rte_free(dev->data->tx_queues[i]);
2142d1d1e664SBernard Iremonger 			dev->data->tx_queues[i] = NULL;
2143d1d1e664SBernard Iremonger 		}
2144d1d1e664SBernard Iremonger 		dev->data->nb_tx_queues = 0;
2145d1d1e664SBernard Iremonger 	}
214632b12e75SRaslan Darawsheh }
2147d1d1e664SBernard Iremonger 
214862024eb8SIvan Ilchenko int
21493eb6bdd8SBruce Richardson bond_ethdev_stop(struct rte_eth_dev *eth_dev)
21503eb6bdd8SBruce Richardson {
21513eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = eth_dev->data->dev_private;
21521d6cab8aSDavid Marchand 	uint16_t i;
215362024eb8SIvan Ilchenko 	int ret;
21543eb6bdd8SBruce Richardson 
21553eb6bdd8SBruce Richardson 	if (internals->mode == BONDING_MODE_8023AD) {
21563eb6bdd8SBruce Richardson 		struct port *port;
21573eb6bdd8SBruce Richardson 		void *pkt = NULL;
21583eb6bdd8SBruce Richardson 
21593eb6bdd8SBruce Richardson 		bond_mode_8023ad_stop(eth_dev);
21603eb6bdd8SBruce Richardson 
21613eb6bdd8SBruce Richardson 		/* Discard all messages to/from mode 4 state machines */
216215e34522SLong Wu 		for (i = 0; i < internals->active_member_count; i++) {
216315e34522SLong Wu 			port = &bond_mode_8023ad_ports[internals->active_members[i]];
21643eb6bdd8SBruce Richardson 
216550705e8eSThomas Monjalon 			RTE_ASSERT(port->rx_ring != NULL);
21663eb6bdd8SBruce Richardson 			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
21673eb6bdd8SBruce Richardson 				rte_pktmbuf_free(pkt);
21683eb6bdd8SBruce Richardson 
216950705e8eSThomas Monjalon 			RTE_ASSERT(port->tx_ring != NULL);
21703eb6bdd8SBruce Richardson 			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
21713eb6bdd8SBruce Richardson 				rte_pktmbuf_free(pkt);
21723eb6bdd8SBruce Richardson 		}
21733eb6bdd8SBruce Richardson 	}
21743eb6bdd8SBruce Richardson 
21753eb6bdd8SBruce Richardson 	if (internals->mode == BONDING_MODE_TLB ||
21763eb6bdd8SBruce Richardson 			internals->mode == BONDING_MODE_ALB) {
21773eb6bdd8SBruce Richardson 		bond_tlb_disable(internals);
217815e34522SLong Wu 		for (i = 0; i < internals->active_member_count; i++)
217915e34522SLong Wu 			tlb_last_obytets[internals->active_members[i]] = 0;
21803eb6bdd8SBruce Richardson 	}
21813eb6bdd8SBruce Richardson 
2182295968d1SFerruh Yigit 	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
21833eb6bdd8SBruce Richardson 	eth_dev->data->dev_started = 0;
218474863313SRadu Nicolau 
2185615bd385SDavid Marchand 	if (internals->link_status_polling_enabled) {
2186615bd385SDavid Marchand 		rte_eal_alarm_cancel(bond_ethdev_member_link_status_change_monitor,
2187615bd385SDavid Marchand 			(void *)&rte_eth_devices[internals->port_id]);
2188615bd385SDavid Marchand 	}
218974863313SRadu Nicolau 	internals->link_status_polling_enabled = 0;
219015e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
219115e34522SLong Wu 		uint16_t member_id = internals->members[i].port_id;
2192f5e72e8eSHuisong Li 
219315e34522SLong Wu 		internals->members[i].last_link_status = 0;
219415e34522SLong Wu 		ret = rte_eth_dev_stop(member_id);
219562024eb8SIvan Ilchenko 		if (ret != 0) {
219662024eb8SIvan Ilchenko 			RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
219715e34522SLong Wu 				     member_id);
219862024eb8SIvan Ilchenko 			return ret;
219962024eb8SIvan Ilchenko 		}
2200f5e72e8eSHuisong Li 
220115e34522SLong Wu 		/* Members still present in the active list need to be deactivated. */
220215e34522SLong Wu 		if (find_member_by_id(internals->active_members,
220315e34522SLong Wu 				internals->active_member_count, member_id) !=
220415e34522SLong Wu 					internals->active_member_count)
220515e34522SLong Wu 			deactivate_member(eth_dev, member_id);
22060911d4ecSRadu Nicolau 	}
220762024eb8SIvan Ilchenko 
2208419d3e33SJie Hai 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
2209419d3e33SJie Hai 		eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2210419d3e33SJie Hai 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
2211419d3e33SJie Hai 		eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2212419d3e33SJie Hai 
221362024eb8SIvan Ilchenko 	return 0;
22143eb6bdd8SBruce Richardson }
22153eb6bdd8SBruce Richardson 
2216339f1ba5SIvan Malov static void
22175847b57aSIvan Malov bond_ethdev_cfg_cleanup(struct rte_eth_dev *dev, bool remove)
22183eb6bdd8SBruce Richardson {
2219c771e4efSEric Kinzie 	struct bond_dev_private *internals = dev->data->dev_private;
2220c4fa09baSDavid Marchand 	uint16_t bond_port_id = internals->port_id;
222101c0eae7SIlya Maximets 	int skipped = 0;
222249dad902SMatan Azrad 	struct rte_flow_error ferror;
2223c771e4efSEric Kinzie 
2224df810d1bSIvan Malov 	/* Flush flows in all back-end devices before removing them */
2225df810d1bSIvan Malov 	bond_flow_ops.flush(dev, &ferror);
2226df810d1bSIvan Malov 
222715e34522SLong Wu 	while (internals->member_count != skipped) {
222815e34522SLong Wu 		uint16_t port_id = internals->members[skipped].port_id;
22295847b57aSIvan Malov 		int ret;
223001c0eae7SIlya Maximets 
22315847b57aSIvan Malov 		ret = rte_eth_dev_stop(port_id);
22325847b57aSIvan Malov 		if (ret != 0) {
2233fb0379bcSIvan Ilchenko 			RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
2234fb0379bcSIvan Ilchenko 				     port_id);
22355847b57aSIvan Malov 		}
22365847b57aSIvan Malov 
22375847b57aSIvan Malov 		if (ret != 0 || !remove) {
2238fb0379bcSIvan Ilchenko 			skipped++;
22391c5c6cd8SHuisong Li 			continue;
2240fb0379bcSIvan Ilchenko 		}
224101c0eae7SIlya Maximets 
224215e34522SLong Wu 		if (rte_eth_bond_member_remove(bond_port_id, port_id) != 0) {
2243d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR,
22444f840086SLong Wu 				     "Failed to remove port %d from bonding device %s",
2245d7f4562aSStephen Hemminger 				     port_id, dev->device->name);
224601c0eae7SIlya Maximets 			skipped++;
224701c0eae7SIlya Maximets 		}
224801c0eae7SIlya Maximets 	}
2249339f1ba5SIvan Malov }
2250339f1ba5SIvan Malov 
2251339f1ba5SIvan Malov int
2252339f1ba5SIvan Malov bond_ethdev_close(struct rte_eth_dev *dev)
2253339f1ba5SIvan Malov {
2254339f1ba5SIvan Malov 	struct bond_dev_private *internals = dev->data->dev_private;
2255339f1ba5SIvan Malov 
2256339f1ba5SIvan Malov 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2257339f1ba5SIvan Malov 		return 0;
2258339f1ba5SIvan Malov 
22594f840086SLong Wu 	RTE_BOND_LOG(INFO, "Closing bonding device %s", dev->device->name);
2260339f1ba5SIvan Malov 
22615847b57aSIvan Malov 	bond_ethdev_cfg_cleanup(dev, true);
2262339f1ba5SIvan Malov 
2263d1d1e664SBernard Iremonger 	bond_ethdev_free_queues(dev);
2264c771e4efSEric Kinzie 	rte_bitmap_reset(internals->vlan_filter_bmp);
2265171875d0SThomas Monjalon 	rte_bitmap_free(internals->vlan_filter_bmp);
2266171875d0SThomas Monjalon 	rte_free(internals->vlan_filter_bmpmem);
2267171875d0SThomas Monjalon 
2268171875d0SThomas Monjalon 	/* Try to release the mempool used in mode 6. If the bonding
2269171875d0SThomas Monjalon 	 * device is not in mode 6, freeing a NULL pointer is not a problem.
2270171875d0SThomas Monjalon 	 */
2271171875d0SThomas Monjalon 	rte_mempool_free(internals->mode6.mempool);
2272171875d0SThomas Monjalon 
2273ccf0f002SDapeng Yu 	rte_kvargs_free(internals->kvlist);
2274ccf0f002SDapeng Yu 
2275b142387bSThomas Monjalon 	return 0;
22763eb6bdd8SBruce Richardson }
22773eb6bdd8SBruce Richardson 
22783eb6bdd8SBruce Richardson /* forward declaration */
22793eb6bdd8SBruce Richardson static int bond_ethdev_configure(struct rte_eth_dev *dev);
22803eb6bdd8SBruce Richardson 
2281bdad90d1SIvan Ilchenko static int
22823eb6bdd8SBruce Richardson bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
22833eb6bdd8SBruce Richardson {
22843eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = dev->data->dev_private;
228515e34522SLong Wu 	struct bond_member_details member;
2286fab23451SIvan Ilchenko 	int ret;
2287112891cdSTomasz Kulasek 
2288acfb51e2SDeclan Doherty 	uint16_t max_nb_rx_queues = UINT16_MAX;
2289acfb51e2SDeclan Doherty 	uint16_t max_nb_tx_queues = UINT16_MAX;
22903eb6bdd8SBruce Richardson 
22919d453d1dSAlex Kiselev 	dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
22923eb6bdd8SBruce Richardson 
2293112891cdSTomasz Kulasek 	dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
2294112891cdSTomasz Kulasek 			internals->candidate_max_rx_pktlen :
229535b2d13fSOlivier Matz 			RTE_ETHER_MAX_JUMBO_FRAME_LEN;
22963eb6bdd8SBruce Richardson 
22974f840086SLong Wu 	/* Max number of tx/rx queues that the bonding device can support is the
22984f840086SLong Wu 	 * minimum of the values reported by the bonding members, as all members
2299112891cdSTomasz Kulasek 	 * must be capable of supporting the same number of tx/rx queues.
2300acfb51e2SDeclan Doherty 	 */
230115e34522SLong Wu 	if (internals->member_count > 0) {
230215e34522SLong Wu 		struct rte_eth_dev_info member_info;
23031d6cab8aSDavid Marchand 		uint16_t idx;
2304acfb51e2SDeclan Doherty 
230515e34522SLong Wu 		for (idx = 0; idx < internals->member_count; idx++) {
230615e34522SLong Wu 			member = internals->members[idx];
230715e34522SLong Wu 			ret = rte_eth_dev_info_get(member.port_id, &member_info);
2308fab23451SIvan Ilchenko 			if (ret != 0) {
2309fab23451SIvan Ilchenko 				RTE_BOND_LOG(ERR,
2310f665790aSDavid Marchand 					"%s: Error during getting device (port %u) info: %s",
2311fab23451SIvan Ilchenko 					__func__,
231215e34522SLong Wu 					member.port_id,
2313fab23451SIvan Ilchenko 					strerror(-ret));
2314fab23451SIvan Ilchenko 
2315bdad90d1SIvan Ilchenko 				return ret;
2316fab23451SIvan Ilchenko 			}
2317acfb51e2SDeclan Doherty 
231815e34522SLong Wu 			if (member_info.max_rx_queues < max_nb_rx_queues)
231915e34522SLong Wu 				max_nb_rx_queues = member_info.max_rx_queues;
2320acfb51e2SDeclan Doherty 
232115e34522SLong Wu 			if (member_info.max_tx_queues < max_nb_tx_queues)
232215e34522SLong Wu 				max_nb_tx_queues = member_info.max_tx_queues;
2323acfb51e2SDeclan Doherty 		}
2324acfb51e2SDeclan Doherty 	}
2325acfb51e2SDeclan Doherty 
2326acfb51e2SDeclan Doherty 	dev_info->max_rx_queues = max_nb_rx_queues;
2327acfb51e2SDeclan Doherty 	dev_info->max_tx_queues = max_nb_tx_queues;
23283eb6bdd8SBruce Richardson 
2329f5f93e10SIvan Malov 	memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
2330f5f93e10SIvan Malov 	       sizeof(dev_info->default_rxconf));
2331f5f93e10SIvan Malov 	memcpy(&dev_info->default_txconf, &internals->default_txconf,
2332f5f93e10SIvan Malov 	       sizeof(dev_info->default_txconf));
2333f5f93e10SIvan Malov 
2334d03c0e83SIvan Malov 	memcpy(&dev_info->rx_desc_lim, &internals->rx_desc_lim,
2335d03c0e83SIvan Malov 	       sizeof(dev_info->rx_desc_lim));
2336d03c0e83SIvan Malov 	memcpy(&dev_info->tx_desc_lim, &internals->tx_desc_lim,
2337d03c0e83SIvan Malov 	       sizeof(dev_info->tx_desc_lim));
23387a066594SIvan Malov 
2339112891cdSTomasz Kulasek 	/**
2340112891cdSTomasz Kulasek 	 * If dedicated HW queues are enabled for the link bonding device in LACP mode,
2341112891cdSTomasz Kulasek 	 * then we need to reduce the maximum number of data path queues by 1.
2342112891cdSTomasz Kulasek 	 */
2343112891cdSTomasz Kulasek 	if (internals->mode == BONDING_MODE_8023AD &&
2344112891cdSTomasz Kulasek 		internals->mode4.dedicated_queues.enabled == 1) {
2345112891cdSTomasz Kulasek 		dev_info->max_rx_queues--;
2346112891cdSTomasz Kulasek 		dev_info->max_tx_queues--;
2347112891cdSTomasz Kulasek 	}
2348112891cdSTomasz Kulasek 
23493eb6bdd8SBruce Richardson 	dev_info->min_rx_bufsize = 0;
23503eb6bdd8SBruce Richardson 
23513eb6bdd8SBruce Richardson 	dev_info->rx_offload_capa = internals->rx_offload_capa;
23523eb6bdd8SBruce Richardson 	dev_info->tx_offload_capa = internals->tx_offload_capa;
2353e8b3e1a9SFerruh Yigit 	dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
2354e8b3e1a9SFerruh Yigit 	dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
2355734ce47fSTomasz Kulasek 	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
2356734ce47fSTomasz Kulasek 
2357734ce47fSTomasz Kulasek 	dev_info->reta_size = internals->reta_size;
23586b1a001eSChengchang Tang 	dev_info->hash_key_size = internals->rss_key_len;
2359e5f18551SHuisong Li 	dev_info->speed_capa = internals->speed_capa;
2360bdad90d1SIvan Ilchenko 
2361bdad90d1SIvan Ilchenko 	return 0;
23623eb6bdd8SBruce Richardson }
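/*
 * Illustrative (not part of the driver; bond_port is a placeholder): the
 * aggregated limits computed above are what an application observes when
 * querying the bonding port, e.g.
 *
 *	struct rte_eth_dev_info info;
 *	if (rte_eth_dev_info_get(bond_port, &info) == 0)
 *		printf("bond supports %u rx / %u tx queues\n",
 *				info.max_rx_queues, info.max_tx_queues);
 */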
23633eb6bdd8SBruce Richardson 
23643eb6bdd8SBruce Richardson static int
2365c771e4efSEric Kinzie bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2366c771e4efSEric Kinzie {
2367c771e4efSEric Kinzie 	int res;
2368f8244c63SZhiyong Yang 	uint16_t i;
2369c771e4efSEric Kinzie 	struct bond_dev_private *internals = dev->data->dev_private;
2370c771e4efSEric Kinzie 
237115e34522SLong Wu 	/* don't do this while a member is being added */
2372c771e4efSEric Kinzie 	rte_spinlock_lock(&internals->lock);
2373c771e4efSEric Kinzie 
2374c771e4efSEric Kinzie 	if (on)
2375c771e4efSEric Kinzie 		rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
2376c771e4efSEric Kinzie 	else
2377c771e4efSEric Kinzie 		rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
2378c771e4efSEric Kinzie 
237915e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
238015e34522SLong Wu 		uint16_t port_id = internals->members[i].port_id;
2381c771e4efSEric Kinzie 
2382c771e4efSEric Kinzie 		res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2383c771e4efSEric Kinzie 		if (res == ENOTSUP)
2384d7f4562aSStephen Hemminger 			RTE_BOND_LOG(WARNING,
238515e34522SLong Wu 				     "Setting VLAN filter on member port %u not supported.",
2386c771e4efSEric Kinzie 				     port_id);
2387c771e4efSEric Kinzie 	}
2388c771e4efSEric Kinzie 
2389c771e4efSEric Kinzie 	rte_spinlock_unlock(&internals->lock);
2390c771e4efSEric Kinzie 	return 0;
2391c771e4efSEric Kinzie }
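/*
 * Illustrative (not part of the driver; bond_port is a placeholder): a VLAN
 * filter applied to the bonding port is mirrored to every member by the
 * handler above, e.g.
 *
 *	rte_eth_dev_vlan_filter(bond_port, 100, 1);	(accept VLAN id 100)
 */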
2392c771e4efSEric Kinzie 
2393c771e4efSEric Kinzie static int
23943eb6bdd8SBruce Richardson bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
23953eb6bdd8SBruce Richardson 		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
23963eb6bdd8SBruce Richardson 		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
23973eb6bdd8SBruce Richardson {
23983eb6bdd8SBruce Richardson 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
23993eb6bdd8SBruce Richardson 			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
240071ba84b1SBernard Iremonger 					0, dev->data->numa_node);
24013eb6bdd8SBruce Richardson 	if (bd_rx_q == NULL)
24023eb6bdd8SBruce Richardson 		return -1;
24033eb6bdd8SBruce Richardson 
24043eb6bdd8SBruce Richardson 	bd_rx_q->queue_id = rx_queue_id;
24053eb6bdd8SBruce Richardson 	bd_rx_q->dev_private = dev->data->dev_private;
24063eb6bdd8SBruce Richardson 
24073eb6bdd8SBruce Richardson 	bd_rx_q->nb_rx_desc = nb_rx_desc;
24083eb6bdd8SBruce Richardson 
24093eb6bdd8SBruce Richardson 	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
24103eb6bdd8SBruce Richardson 	bd_rx_q->mb_pool = mb_pool;
24113eb6bdd8SBruce Richardson 
24123eb6bdd8SBruce Richardson 	dev->data->rx_queues[rx_queue_id] = bd_rx_q;
24133eb6bdd8SBruce Richardson 
24143eb6bdd8SBruce Richardson 	return 0;
24153eb6bdd8SBruce Richardson }
24163eb6bdd8SBruce Richardson 
24173eb6bdd8SBruce Richardson static int
24183eb6bdd8SBruce Richardson bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
24193eb6bdd8SBruce Richardson 		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
24203eb6bdd8SBruce Richardson 		const struct rte_eth_txconf *tx_conf)
24213eb6bdd8SBruce Richardson {
24223eb6bdd8SBruce Richardson 	struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
24233eb6bdd8SBruce Richardson 			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
242471ba84b1SBernard Iremonger 					0, dev->data->numa_node);
24253eb6bdd8SBruce Richardson 
24263eb6bdd8SBruce Richardson 	if (bd_tx_q == NULL)
24273eb6bdd8SBruce Richardson 		return -1;
24283eb6bdd8SBruce Richardson 
24293eb6bdd8SBruce Richardson 	bd_tx_q->queue_id = tx_queue_id;
24303eb6bdd8SBruce Richardson 	bd_tx_q->dev_private = dev->data->dev_private;
24313eb6bdd8SBruce Richardson 
24323eb6bdd8SBruce Richardson 	bd_tx_q->nb_tx_desc = nb_tx_desc;
24333eb6bdd8SBruce Richardson 	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
24343eb6bdd8SBruce Richardson 
24353eb6bdd8SBruce Richardson 	dev->data->tx_queues[tx_queue_id] = bd_tx_q;
24363eb6bdd8SBruce Richardson 
24373eb6bdd8SBruce Richardson 	return 0;
24383eb6bdd8SBruce Richardson }
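/*
 * Note (added for clarity): the two queue setup handlers above only record
 * the application's parameters in bond_rx_queue/bond_tx_queue; the member
 * ports' queues are set up from these saved parameters when the bonding
 * port is started.
 */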
24393eb6bdd8SBruce Richardson 
24403eb6bdd8SBruce Richardson static void
24417483341aSXueming Li bond_ethdev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
24423eb6bdd8SBruce Richardson {
24437483341aSXueming Li 	void *queue = dev->data->rx_queues[queue_id];
24447483341aSXueming Li 
24453eb6bdd8SBruce Richardson 	if (queue == NULL)
24463eb6bdd8SBruce Richardson 		return;
24473eb6bdd8SBruce Richardson 
24483eb6bdd8SBruce Richardson 	rte_free(queue);
24493eb6bdd8SBruce Richardson }
24503eb6bdd8SBruce Richardson 
24513eb6bdd8SBruce Richardson static void
24527483341aSXueming Li bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
24533eb6bdd8SBruce Richardson {
245451c1b8f8SXueming Li 	void *queue = dev->data->tx_queues[queue_id];
24557483341aSXueming Li 
24563eb6bdd8SBruce Richardson 	if (queue == NULL)
24573eb6bdd8SBruce Richardson 		return;
24583eb6bdd8SBruce Richardson 
24593eb6bdd8SBruce Richardson 	rte_free(queue);
24603eb6bdd8SBruce Richardson }
24613eb6bdd8SBruce Richardson 
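/*
 * Note (added for clarity): the monitor below is armed from
 * bond_ethdev_start() for members that cannot signal link state changes via
 * interrupts. It polls each such member, emulates an LSC event on any status
 * change and re-arms itself with rte_eal_alarm_set() for as long as at least
 * one polled member remains.
 */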
24623eb6bdd8SBruce Richardson static void
246315e34522SLong Wu bond_ethdev_member_link_status_change_monitor(void *cb_arg)
24643eb6bdd8SBruce Richardson {
24654f840086SLong Wu 	struct rte_eth_dev *bonding_ethdev, *member_ethdev;
24663eb6bdd8SBruce Richardson 	struct bond_dev_private *internals;
24673eb6bdd8SBruce Richardson 
246815e34522SLong Wu 	/* Default value of polling_member_found is true, as we don't want to
24693eb6bdd8SBruce Richardson 	 * disable link status polling if we cannot take the lock */
247015e34522SLong Wu 	int i, polling_member_found = 1;
24713eb6bdd8SBruce Richardson 
24723eb6bdd8SBruce Richardson 	if (cb_arg == NULL)
24733eb6bdd8SBruce Richardson 		return;
24743eb6bdd8SBruce Richardson 
24754f840086SLong Wu 	bonding_ethdev = cb_arg;
24764f840086SLong Wu 	internals = bonding_ethdev->data->dev_private;
24773eb6bdd8SBruce Richardson 
24784f840086SLong Wu 	if (!bonding_ethdev->data->dev_started ||
24793eb6bdd8SBruce Richardson 		!internals->link_status_polling_enabled)
24803eb6bdd8SBruce Richardson 		return;
24813eb6bdd8SBruce Richardson 
248215e34522SLong Wu 	/* If the device is currently being configured then don't check the members'
24833eb6bdd8SBruce Richardson 	 * link status; wait until the next period */
24843eb6bdd8SBruce Richardson 	if (rte_spinlock_trylock(&internals->lock)) {
248515e34522SLong Wu 		if (internals->member_count > 0)
248615e34522SLong Wu 			polling_member_found = 0;
24873eb6bdd8SBruce Richardson 
248815e34522SLong Wu 		for (i = 0; i < internals->member_count; i++) {
248915e34522SLong Wu 			if (!internals->members[i].link_status_poll_enabled)
24903eb6bdd8SBruce Richardson 				continue;
24913eb6bdd8SBruce Richardson 
249215e34522SLong Wu 			member_ethdev = &rte_eth_devices[internals->members[i].port_id];
249315e34522SLong Wu 			polling_member_found = 1;
24943eb6bdd8SBruce Richardson 
249515e34522SLong Wu 			/* Update member link status */
249615e34522SLong Wu 			(*member_ethdev->dev_ops->link_update)(member_ethdev,
249715e34522SLong Wu 					internals->members[i].link_status_wait_to_complete);
24983eb6bdd8SBruce Richardson 
24993eb6bdd8SBruce Richardson 			/* if link status has changed since last checked then call lsc
25003eb6bdd8SBruce Richardson 			 * event callback */
250115e34522SLong Wu 			if (member_ethdev->data->dev_link.link_status !=
250215e34522SLong Wu 					internals->members[i].last_link_status) {
250315e34522SLong Wu 				bond_ethdev_lsc_event_callback(internals->members[i].port_id,
25043eb6bdd8SBruce Richardson 						RTE_ETH_EVENT_INTR_LSC,
25054f840086SLong Wu 						&bonding_ethdev->data->port_id,
2506d6af1a13SBernard Iremonger 						NULL);
25073eb6bdd8SBruce Richardson 			}
25083eb6bdd8SBruce Richardson 		}
25093eb6bdd8SBruce Richardson 		rte_spinlock_unlock(&internals->lock);
25103eb6bdd8SBruce Richardson 	}
25113eb6bdd8SBruce Richardson 
251215e34522SLong Wu 	if (polling_member_found)
251315e34522SLong Wu 		/* Set alarm to continue monitoring link status of member ethdevs */
25143eb6bdd8SBruce Richardson 		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
251515e34522SLong Wu 				bond_ethdev_member_link_status_change_monitor, cb_arg);
25163eb6bdd8SBruce Richardson }
25173eb6bdd8SBruce Richardson 
25183eb6bdd8SBruce Richardson static int
2519deba8a2fSTomasz Kulasek bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
25203eb6bdd8SBruce Richardson {
25214633c3b2SIgor Romanov 	int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
25223eb6bdd8SBruce Richardson 
2523deba8a2fSTomasz Kulasek 	struct bond_dev_private *bond_ctx;
252415e34522SLong Wu 	struct rte_eth_link member_link;
2525deba8a2fSTomasz Kulasek 
2526fc1134c7SIgor Romanov 	bool one_link_update_succeeded;
2527deba8a2fSTomasz Kulasek 	uint32_t idx;
2528fc1134c7SIgor Romanov 	int ret;
2529deba8a2fSTomasz Kulasek 
2530deba8a2fSTomasz Kulasek 	bond_ctx = ethdev->data->dev_private;
2531deba8a2fSTomasz Kulasek 
2532295968d1SFerruh Yigit 	ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2533deba8a2fSTomasz Kulasek 
2534deba8a2fSTomasz Kulasek 	if (ethdev->data->dev_started == 0 ||
253515e34522SLong Wu 			bond_ctx->active_member_count == 0) {
2536295968d1SFerruh Yigit 		ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
25373eb6bdd8SBruce Richardson 		return 0;
2538deba8a2fSTomasz Kulasek 	}
25393eb6bdd8SBruce Richardson 
2540295968d1SFerruh Yigit 	ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
25413eb6bdd8SBruce Richardson 
2542deba8a2fSTomasz Kulasek 	if (wait_to_complete)
2543deba8a2fSTomasz Kulasek 		link_update = rte_eth_link_get;
2544deba8a2fSTomasz Kulasek 	else
2545deba8a2fSTomasz Kulasek 		link_update = rte_eth_link_get_nowait;
2546deba8a2fSTomasz Kulasek 
2547deba8a2fSTomasz Kulasek 	switch (bond_ctx->mode) {
2548deba8a2fSTomasz Kulasek 	case BONDING_MODE_BROADCAST:
2549deba8a2fSTomasz Kulasek 		/**
2550deba8a2fSTomasz Kulasek 		 * Setting link speed to UINT32_MAX to ensure we pick up the
255115e34522SLong Wu 		 * value of the first active member
2552deba8a2fSTomasz Kulasek 		 */
2553deba8a2fSTomasz Kulasek 		ethdev->data->dev_link.link_speed = UINT32_MAX;
2554deba8a2fSTomasz Kulasek 
2555deba8a2fSTomasz Kulasek 		/**
255615e34522SLong Wu 		 * The link speed is the minimum of all the members' link speeds, as
255715e34522SLong Wu 		 * packet loss will occur on a member if transmission at rates
2558deba8a2fSTomasz Kulasek 		 * greater than this is attempted.
2559deba8a2fSTomasz Kulasek 		 */
256015e34522SLong Wu 		for (idx = 0; idx < bond_ctx->active_member_count; idx++) {
256115e34522SLong Wu 			ret = link_update(bond_ctx->active_members[idx],
256215e34522SLong Wu 					  &member_link);
2563fc1134c7SIgor Romanov 			if (ret < 0) {
2564fc1134c7SIgor Romanov 				ethdev->data->dev_link.link_speed =
2565295968d1SFerruh Yigit 					RTE_ETH_SPEED_NUM_NONE;
2566fc1134c7SIgor Romanov 				RTE_BOND_LOG(ERR,
256715e34522SLong Wu 					"Member (port %u) link get failed: %s",
256815e34522SLong Wu 					bond_ctx->active_members[idx],
2569fc1134c7SIgor Romanov 					rte_strerror(-ret));
2570fc1134c7SIgor Romanov 				return 0;
2571fc1134c7SIgor Romanov 			}
2572deba8a2fSTomasz Kulasek 
257315e34522SLong Wu 			if (member_link.link_speed <
2574deba8a2fSTomasz Kulasek 					ethdev->data->dev_link.link_speed)
2575deba8a2fSTomasz Kulasek 				ethdev->data->dev_link.link_speed =
257615e34522SLong Wu 						member_link.link_speed;
2577deba8a2fSTomasz Kulasek 		}
25783eb6bdd8SBruce Richardson 		break;
2579deba8a2fSTomasz Kulasek 	case BONDING_MODE_ACTIVE_BACKUP:
258015e34522SLong Wu 		/* Current primary member */
258115e34522SLong Wu 		ret = link_update(bond_ctx->current_primary_port, &member_link);
2582fc1134c7SIgor Romanov 		if (ret < 0) {
258315e34522SLong Wu 			RTE_BOND_LOG(ERR, "Member (port %u) link get failed: %s",
2584fc1134c7SIgor Romanov 				bond_ctx->current_primary_port,
2585fc1134c7SIgor Romanov 				rte_strerror(-ret));
2586fc1134c7SIgor Romanov 			return 0;
2587fc1134c7SIgor Romanov 		}
2588deba8a2fSTomasz Kulasek 
258915e34522SLong Wu 		ethdev->data->dev_link.link_speed = member_link.link_speed;
2590deba8a2fSTomasz Kulasek 		break;
2591deba8a2fSTomasz Kulasek 	case BONDING_MODE_8023AD:
2592deba8a2fSTomasz Kulasek 		ethdev->data->dev_link.link_autoneg =
259315e34522SLong Wu 				bond_ctx->mode4.member_link.link_autoneg;
2594deba8a2fSTomasz Kulasek 		ethdev->data->dev_link.link_duplex =
259515e34522SLong Wu 				bond_ctx->mode4.member_link.link_duplex;
259647cce54bSBruce Richardson 		/* fall through */
259747cce54bSBruce Richardson 		/* to update link speed */
2598deba8a2fSTomasz Kulasek 	case BONDING_MODE_ROUND_ROBIN:
2599deba8a2fSTomasz Kulasek 	case BONDING_MODE_BALANCE:
2600deba8a2fSTomasz Kulasek 	case BONDING_MODE_TLB:
2601deba8a2fSTomasz Kulasek 	case BONDING_MODE_ALB:
2602deba8a2fSTomasz Kulasek 	default:
2603deba8a2fSTomasz Kulasek 		/**
2604deba8a2fSTomasz Kulasek 		 * In these modes the maximum theoretical link speed is the sum
260515e34522SLong Wu 		 * of all the members' link speeds.
2606deba8a2fSTomasz Kulasek 		 */
2607295968d1SFerruh Yigit 		ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2608fc1134c7SIgor Romanov 		one_link_update_succeeded = false;
2609deba8a2fSTomasz Kulasek 
261015e34522SLong Wu 		for (idx = 0; idx < bond_ctx->active_member_count; idx++) {
261115e34522SLong Wu 			ret = link_update(bond_ctx->active_members[idx],
261215e34522SLong Wu 					&member_link);
2613fc1134c7SIgor Romanov 			if (ret < 0) {
2614fc1134c7SIgor Romanov 				RTE_BOND_LOG(ERR,
261515e34522SLong Wu 					"Member (port %u) link get failed: %s",
261615e34522SLong Wu 					bond_ctx->active_members[idx],
2617fc1134c7SIgor Romanov 					rte_strerror(-ret));
2618fc1134c7SIgor Romanov 				continue;
2619fc1134c7SIgor Romanov 			}
2620deba8a2fSTomasz Kulasek 
2621fc1134c7SIgor Romanov 			one_link_update_succeeded = true;
2622deba8a2fSTomasz Kulasek 			ethdev->data->dev_link.link_speed +=
262315e34522SLong Wu 					member_link.link_speed;
26243eb6bdd8SBruce Richardson 		}
2625fc1134c7SIgor Romanov 
2626fc1134c7SIgor Romanov 		if (!one_link_update_succeeded) {
262715e34522SLong Wu 			RTE_BOND_LOG(ERR, "All members link get failed");
2628fc1134c7SIgor Romanov 			return 0;
2629fc1134c7SIgor Romanov 		}
26303eb6bdd8SBruce Richardson 	}
26313eb6bdd8SBruce Richardson 
26323eb6bdd8SBruce Richardson 
26333eb6bdd8SBruce Richardson 	return 0;
26343eb6bdd8SBruce Richardson }
26353eb6bdd8SBruce Richardson 
2636deba8a2fSTomasz Kulasek 
2637d5b0924bSMatan Azrad static int
26383eb6bdd8SBruce Richardson bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
26393eb6bdd8SBruce Richardson {
26403eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = dev->data->dev_private;
264115e34522SLong Wu 	struct rte_eth_stats member_stats;
264255587b01STomasz Kulasek 	int i, j;
26433eb6bdd8SBruce Richardson 
264415e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
264515e34522SLong Wu 		rte_eth_stats_get(internals->members[i].port_id, &member_stats);
26463eb6bdd8SBruce Richardson 
264715e34522SLong Wu 		stats->ipackets += member_stats.ipackets;
264815e34522SLong Wu 		stats->opackets += member_stats.opackets;
264915e34522SLong Wu 		stats->ibytes += member_stats.ibytes;
265015e34522SLong Wu 		stats->obytes += member_stats.obytes;
265115e34522SLong Wu 		stats->imissed += member_stats.imissed;
265215e34522SLong Wu 		stats->ierrors += member_stats.ierrors;
265315e34522SLong Wu 		stats->oerrors += member_stats.oerrors;
265415e34522SLong Wu 		stats->rx_nombuf += member_stats.rx_nombuf;
265555587b01STomasz Kulasek 
265655587b01STomasz Kulasek 		for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
265715e34522SLong Wu 			stats->q_ipackets[j] += member_stats.q_ipackets[j];
265815e34522SLong Wu 			stats->q_opackets[j] += member_stats.q_opackets[j];
265915e34522SLong Wu 			stats->q_ibytes[j] += member_stats.q_ibytes[j];
266015e34522SLong Wu 			stats->q_obytes[j] += member_stats.q_obytes[j];
266115e34522SLong Wu 			stats->q_errors[j] += member_stats.q_errors[j];
266255587b01STomasz Kulasek 		}
266355587b01STomasz Kulasek 
26643eb6bdd8SBruce Richardson 	}
2665d5b0924bSMatan Azrad 
2666d5b0924bSMatan Azrad 	return 0;
26673eb6bdd8SBruce Richardson }
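/*
 * Illustrative (not part of the driver; bond_port is a placeholder):
 * statistics read on the bonding port are the sum over all members, e.g.
 *
 *	struct rte_eth_stats stats;
 *	rte_eth_stats_get(bond_port, &stats);
 *
 * returns the combined packet, byte and error counters of every member port.
 */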
26683eb6bdd8SBruce Richardson 
26699970a9adSIgor Romanov static int
26703eb6bdd8SBruce Richardson bond_ethdev_stats_reset(struct rte_eth_dev *dev)
26713eb6bdd8SBruce Richardson {
26723eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = dev->data->dev_private;
26733eb6bdd8SBruce Richardson 	int i;
26749970a9adSIgor Romanov 	int err;
26759970a9adSIgor Romanov 	int ret;
26763eb6bdd8SBruce Richardson 
267715e34522SLong Wu 	for (i = 0, err = 0; i < internals->member_count; i++) {
267815e34522SLong Wu 		ret = rte_eth_stats_reset(internals->members[i].port_id);
26799970a9adSIgor Romanov 		if (ret != 0)
26809970a9adSIgor Romanov 			err = ret;
26819970a9adSIgor Romanov 	}
26829970a9adSIgor Romanov 
26839970a9adSIgor Romanov 	return err;
26843eb6bdd8SBruce Richardson }
26853eb6bdd8SBruce Richardson 
26869039c812SAndrew Rybchenko static int
26873eb6bdd8SBruce Richardson bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
26883eb6bdd8SBruce Richardson {
26893eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = eth_dev->data->dev_private;
26903eb6bdd8SBruce Richardson 	int i;
2691ae9f487fSIvan Ilchenko 	int ret = 0;
2692ae9f487fSIvan Ilchenko 	uint16_t port_id;
26933eb6bdd8SBruce Richardson 
26943eb6bdd8SBruce Richardson 	switch (internals->mode) {
269515e34522SLong Wu 	/* Promiscuous mode is propagated to all members */
26963eb6bdd8SBruce Richardson 	case BONDING_MODE_ROUND_ROBIN:
26973eb6bdd8SBruce Richardson 	case BONDING_MODE_BALANCE:
26983eb6bdd8SBruce Richardson 	case BONDING_MODE_BROADCAST:
26999039c812SAndrew Rybchenko 	case BONDING_MODE_8023AD: {
270015e34522SLong Wu 		unsigned int member_ok = 0;
27019039c812SAndrew Rybchenko 
270215e34522SLong Wu 		for (i = 0; i < internals->member_count; i++) {
270315e34522SLong Wu 			port_id = internals->members[i].port_id;
270468218b87SDavid Marchand 
2705ae9f487fSIvan Ilchenko 			ret = rte_eth_promiscuous_enable(port_id);
2706ae9f487fSIvan Ilchenko 			if (ret != 0)
2707ae9f487fSIvan Ilchenko 				RTE_BOND_LOG(ERR,
2708ae9f487fSIvan Ilchenko 					"Failed to enable promiscuous mode for port %u: %s",
2709ae9f487fSIvan Ilchenko 					port_id, rte_strerror(-ret));
27109039c812SAndrew Rybchenko 			else
271115e34522SLong Wu 				member_ok++;
271268218b87SDavid Marchand 		}
27139039c812SAndrew Rybchenko 		/*
27149039c812SAndrew Rybchenko 		 * Report success if the operation is successful on at least
271515e34522SLong Wu 		 * one member. Otherwise return the last error code.
27169039c812SAndrew Rybchenko 		 */
271715e34522SLong Wu 		if (member_ok > 0)
27189039c812SAndrew Rybchenko 			ret = 0;
27193eb6bdd8SBruce Richardson 		break;
27209039c812SAndrew Rybchenko 	}
272115e34522SLong Wu 	/* Promiscuous mode is propagated only to primary member */
27223eb6bdd8SBruce Richardson 	case BONDING_MODE_ACTIVE_BACKUP:
27233eb6bdd8SBruce Richardson 	case BONDING_MODE_TLB:
27243eb6bdd8SBruce Richardson 	case BONDING_MODE_ALB:
27253eb6bdd8SBruce Richardson 	default:
272690d2eb05SHyong Youb Kim 		/* Do not touch promisc when there cannot be primary ports */
272715e34522SLong Wu 		if (internals->member_count == 0)
272890d2eb05SHyong Youb Kim 			break;
2729ae9f487fSIvan Ilchenko 		port_id = internals->current_primary_port;
2730ae9f487fSIvan Ilchenko 		ret = rte_eth_promiscuous_enable(port_id);
2731ae9f487fSIvan Ilchenko 		if (ret != 0)
2732ae9f487fSIvan Ilchenko 			RTE_BOND_LOG(ERR,
2733ae9f487fSIvan Ilchenko 				"Failed to enable promiscuous mode for port %u: %s",
2734ae9f487fSIvan Ilchenko 				port_id, rte_strerror(-ret));
27353eb6bdd8SBruce Richardson 	}
27369039c812SAndrew Rybchenko 
27379039c812SAndrew Rybchenko 	return ret;
27383eb6bdd8SBruce Richardson }
27393eb6bdd8SBruce Richardson 
27409039c812SAndrew Rybchenko static int
27413eb6bdd8SBruce Richardson bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
27423eb6bdd8SBruce Richardson {
27433eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = dev->data->dev_private;
27443eb6bdd8SBruce Richardson 	int i;
27459039c812SAndrew Rybchenko 	int ret = 0;
2746ae9f487fSIvan Ilchenko 	uint16_t port_id;
27473eb6bdd8SBruce Richardson 
27483eb6bdd8SBruce Richardson 	switch (internals->mode) {
274915e34522SLong Wu 	/* Promiscuous mode is propagated to all members */
27503eb6bdd8SBruce Richardson 	case BONDING_MODE_ROUND_ROBIN:
27513eb6bdd8SBruce Richardson 	case BONDING_MODE_BALANCE:
27523eb6bdd8SBruce Richardson 	case BONDING_MODE_BROADCAST:
27539039c812SAndrew Rybchenko 	case BONDING_MODE_8023AD: {
275415e34522SLong Wu 		unsigned int member_ok = 0;
27559039c812SAndrew Rybchenko 
275615e34522SLong Wu 		for (i = 0; i < internals->member_count; i++) {
275715e34522SLong Wu 			port_id = internals->members[i].port_id;
275868218b87SDavid Marchand 
275968218b87SDavid Marchand 			if (internals->mode == BONDING_MODE_8023AD &&
276068218b87SDavid Marchand 			    bond_mode_8023ad_ports[port_id].forced_rx_flags ==
27619039c812SAndrew Rybchenko 					BOND_8023AD_FORCED_PROMISC) {
276215e34522SLong Wu 				member_ok++;
276368218b87SDavid Marchand 				continue;
27649039c812SAndrew Rybchenko 			}
2765ae9f487fSIvan Ilchenko 			ret = rte_eth_promiscuous_disable(port_id);
2766ae9f487fSIvan Ilchenko 			if (ret != 0)
2767ae9f487fSIvan Ilchenko 				RTE_BOND_LOG(ERR,
2768ae9f487fSIvan Ilchenko 					"Failed to disable promiscuous mode for port %u: %s",
2769ae9f487fSIvan Ilchenko 					port_id, rte_strerror(-ret));
27709039c812SAndrew Rybchenko 			else
277115e34522SLong Wu 				member_ok++;
277268218b87SDavid Marchand 		}
27739039c812SAndrew Rybchenko 		/*
27749039c812SAndrew Rybchenko 		 * Report success if the operation is successful on at least
277515e34522SLong Wu 		 * one member. Otherwise return the last error code.
27769039c812SAndrew Rybchenko 		 */
277715e34522SLong Wu 		if (member_ok > 0)
27789039c812SAndrew Rybchenko 			ret = 0;
27793eb6bdd8SBruce Richardson 		break;
27809039c812SAndrew Rybchenko 	}
278115e34522SLong Wu 	/* Promiscuous mode is propagated only to primary member */
27823eb6bdd8SBruce Richardson 	case BONDING_MODE_ACTIVE_BACKUP:
27833eb6bdd8SBruce Richardson 	case BONDING_MODE_TLB:
27843eb6bdd8SBruce Richardson 	case BONDING_MODE_ALB:
27853eb6bdd8SBruce Richardson 	default:
278690d2eb05SHyong Youb Kim 		/* Do not touch promisc when there cannot be primary ports */
278715e34522SLong Wu 		if (internals->member_count == 0)
278890d2eb05SHyong Youb Kim 			break;
2789ae9f487fSIvan Ilchenko 		port_id = internals->current_primary_port;
2790ae9f487fSIvan Ilchenko 		ret = rte_eth_promiscuous_disable(port_id);
2791ae9f487fSIvan Ilchenko 		if (ret != 0)
2792ae9f487fSIvan Ilchenko 			RTE_BOND_LOG(ERR,
2793ae9f487fSIvan Ilchenko 				"Failed to disable promiscuous mode for port %u: %s",
2794ae9f487fSIvan Ilchenko 				port_id, rte_strerror(-ret));
27953eb6bdd8SBruce Richardson 	}
27969039c812SAndrew Rybchenko 
27979039c812SAndrew Rybchenko 	return ret;
27983eb6bdd8SBruce Richardson }
27993eb6bdd8SBruce Richardson 
2800ca041cd4SIvan Ilchenko static int
2801ac5341f5SMin Hu (Connor) bond_ethdev_promiscuous_update(struct rte_eth_dev *dev)
2802ac5341f5SMin Hu (Connor) {
2803ac5341f5SMin Hu (Connor) 	struct bond_dev_private *internals = dev->data->dev_private;
2804ac5341f5SMin Hu (Connor) 	uint16_t port_id = internals->current_primary_port;
28052b91fa5fSSunyang Wu 	int ret;
2806ac5341f5SMin Hu (Connor) 
2807ac5341f5SMin Hu (Connor) 	switch (internals->mode) {
2808ac5341f5SMin Hu (Connor) 	case BONDING_MODE_ROUND_ROBIN:
2809ac5341f5SMin Hu (Connor) 	case BONDING_MODE_BALANCE:
2810ac5341f5SMin Hu (Connor) 	case BONDING_MODE_BROADCAST:
2811ac5341f5SMin Hu (Connor) 	case BONDING_MODE_8023AD:
281215e34522SLong Wu 		/* As promiscuous mode is propagated to all members for these
2813ac5341f5SMin Hu (Connor) 		 * modes, there is no need to update it for the bonding device.
2814ac5341f5SMin Hu (Connor) 		 */
2815ac5341f5SMin Hu (Connor) 		break;
2816ac5341f5SMin Hu (Connor) 	case BONDING_MODE_ACTIVE_BACKUP:
2817ac5341f5SMin Hu (Connor) 	case BONDING_MODE_TLB:
2818ac5341f5SMin Hu (Connor) 	case BONDING_MODE_ALB:
2819ac5341f5SMin Hu (Connor) 	default:
282015e34522SLong Wu 		/* Promiscuous mode is propagated only to the primary member for
2821ac5341f5SMin Hu (Connor) 		 * these modes. On an active/standby switchover, the promiscuous
282215e34522SLong Wu 		 * mode of the new primary member must be set to match the bonding
2823ac5341f5SMin Hu (Connor) 		 * device.
2824ac5341f5SMin Hu (Connor) 		 */
28252b91fa5fSSunyang Wu 		if (rte_eth_promiscuous_get(internals->port_id) == 1) {
28262b91fa5fSSunyang Wu 			ret = rte_eth_promiscuous_enable(port_id);
28272b91fa5fSSunyang Wu 			if (ret != 0)
28282b91fa5fSSunyang Wu 				RTE_BOND_LOG(ERR,
28292b91fa5fSSunyang Wu 					     "Failed to enable promiscuous mode for port %u: %s",
28302b91fa5fSSunyang Wu 					     port_id, rte_strerror(-ret));
28312b91fa5fSSunyang Wu 		} else {
28322b91fa5fSSunyang Wu 			ret = rte_eth_promiscuous_disable(port_id);
28332b91fa5fSSunyang Wu 			if (ret != 0)
28342b91fa5fSSunyang Wu 				RTE_BOND_LOG(ERR,
28352b91fa5fSSunyang Wu 					     "Failed to disable promiscuous mode for port %u: %s",
28362b91fa5fSSunyang Wu 					     port_id, rte_strerror(-ret));
28372b91fa5fSSunyang Wu 		}
2838ac5341f5SMin Hu (Connor) 	}
2839ac5341f5SMin Hu (Connor) 
2840ac5341f5SMin Hu (Connor) 	return 0;
2841ac5341f5SMin Hu (Connor) }
2842ac5341f5SMin Hu (Connor) 
2843ac5341f5SMin Hu (Connor) static int
284468218b87SDavid Marchand bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
284568218b87SDavid Marchand {
284668218b87SDavid Marchand 	struct bond_dev_private *internals = eth_dev->data->dev_private;
284768218b87SDavid Marchand 	int i;
2848ca041cd4SIvan Ilchenko 	int ret = 0;
284973e83bf5SIvan Ilchenko 	uint16_t port_id;
285068218b87SDavid Marchand 
285168218b87SDavid Marchand 	switch (internals->mode) {
285215e34522SLong Wu 	/* allmulti mode is propagated to all members */
285368218b87SDavid Marchand 	case BONDING_MODE_ROUND_ROBIN:
285468218b87SDavid Marchand 	case BONDING_MODE_BALANCE:
285568218b87SDavid Marchand 	case BONDING_MODE_BROADCAST:
2856ca041cd4SIvan Ilchenko 	case BONDING_MODE_8023AD: {
285715e34522SLong Wu 		unsigned int member_ok = 0;
2858ca041cd4SIvan Ilchenko 
285915e34522SLong Wu 		for (i = 0; i < internals->member_count; i++) {
286015e34522SLong Wu 			port_id = internals->members[i].port_id;
286168218b87SDavid Marchand 
286273e83bf5SIvan Ilchenko 			ret = rte_eth_allmulticast_enable(port_id);
286373e83bf5SIvan Ilchenko 			if (ret != 0)
286473e83bf5SIvan Ilchenko 				RTE_BOND_LOG(ERR,
286573e83bf5SIvan Ilchenko 					"Failed to enable allmulti mode for port %u: %s",
286673e83bf5SIvan Ilchenko 					port_id, rte_strerror(-ret));
2867ca041cd4SIvan Ilchenko 			else
286815e34522SLong Wu 				member_ok++;
286968218b87SDavid Marchand 		}
2870ca041cd4SIvan Ilchenko 		/*
2871ca041cd4SIvan Ilchenko 		 * Report success if the operation is successful on at least
287215e34522SLong Wu 		 * one member. Otherwise return the last error code.
2873ca041cd4SIvan Ilchenko 		 */
287415e34522SLong Wu 		if (member_ok > 0)
2875ca041cd4SIvan Ilchenko 			ret = 0;
287668218b87SDavid Marchand 		break;
2877ca041cd4SIvan Ilchenko 	}
287815e34522SLong Wu 	/* allmulti mode is propagated only to primary member */
287968218b87SDavid Marchand 	case BONDING_MODE_ACTIVE_BACKUP:
288068218b87SDavid Marchand 	case BONDING_MODE_TLB:
288168218b87SDavid Marchand 	case BONDING_MODE_ALB:
288268218b87SDavid Marchand 	default:
288368218b87SDavid Marchand 		/* Do not touch allmulti when there cannot be primary ports */
288415e34522SLong Wu 		if (internals->member_count == 0)
288568218b87SDavid Marchand 			break;
288673e83bf5SIvan Ilchenko 		port_id = internals->current_primary_port;
288773e83bf5SIvan Ilchenko 		ret = rte_eth_allmulticast_enable(port_id);
288873e83bf5SIvan Ilchenko 		if (ret != 0)
288973e83bf5SIvan Ilchenko 			RTE_BOND_LOG(ERR,
289073e83bf5SIvan Ilchenko 				"Failed to enable allmulti mode for port %u: %s",
289173e83bf5SIvan Ilchenko 				port_id, rte_strerror(-ret));
289268218b87SDavid Marchand 	}
2893ca041cd4SIvan Ilchenko 
2894ca041cd4SIvan Ilchenko 	return ret;
289568218b87SDavid Marchand }
289668218b87SDavid Marchand 
2897ca041cd4SIvan Ilchenko static int
289868218b87SDavid Marchand bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
289968218b87SDavid Marchand {
290068218b87SDavid Marchand 	struct bond_dev_private *internals = eth_dev->data->dev_private;
290168218b87SDavid Marchand 	int i;
2902ca041cd4SIvan Ilchenko 	int ret = 0;
290373e83bf5SIvan Ilchenko 	uint16_t port_id;
290468218b87SDavid Marchand 
290568218b87SDavid Marchand 	switch (internals->mode) {
290615e34522SLong Wu 	/* allmulti mode is propagated to all members */
290768218b87SDavid Marchand 	case BONDING_MODE_ROUND_ROBIN:
290868218b87SDavid Marchand 	case BONDING_MODE_BALANCE:
290968218b87SDavid Marchand 	case BONDING_MODE_BROADCAST:
2910ca041cd4SIvan Ilchenko 	case BONDING_MODE_8023AD: {
291115e34522SLong Wu 		unsigned int member_ok = 0;
2912ca041cd4SIvan Ilchenko 
291315e34522SLong Wu 		for (i = 0; i < internals->member_count; i++) {
291415e34522SLong Wu 			uint16_t port_id = internals->members[i].port_id;
291568218b87SDavid Marchand 
291668218b87SDavid Marchand 			if (internals->mode == BONDING_MODE_8023AD &&
291768218b87SDavid Marchand 			    bond_mode_8023ad_ports[port_id].forced_rx_flags ==
291868218b87SDavid Marchand 					BOND_8023AD_FORCED_ALLMULTI)
291968218b87SDavid Marchand 				continue;
292073e83bf5SIvan Ilchenko 
292173e83bf5SIvan Ilchenko 			ret = rte_eth_allmulticast_disable(port_id);
292273e83bf5SIvan Ilchenko 			if (ret != 0)
292373e83bf5SIvan Ilchenko 				RTE_BOND_LOG(ERR,
292473e83bf5SIvan Ilchenko 					"Failed to disable allmulti mode for port %u: %s",
292573e83bf5SIvan Ilchenko 					port_id, rte_strerror(-ret));
2926ca041cd4SIvan Ilchenko 			else
292715e34522SLong Wu 				member_ok++;
292868218b87SDavid Marchand 		}
2929ca041cd4SIvan Ilchenko 		/*
2930ca041cd4SIvan Ilchenko 		 * Report success if the operation is successful on at least
293115e34522SLong Wu 		 * one member. Otherwise return the last error code.
2932ca041cd4SIvan Ilchenko 		 */
293315e34522SLong Wu 		if (member_ok > 0)
2934ca041cd4SIvan Ilchenko 			ret = 0;
293568218b87SDavid Marchand 		break;
2936ca041cd4SIvan Ilchenko 	}
293715e34522SLong Wu 	/* allmulti mode is propagated only to primary member */
293868218b87SDavid Marchand 	case BONDING_MODE_ACTIVE_BACKUP:
293968218b87SDavid Marchand 	case BONDING_MODE_TLB:
294068218b87SDavid Marchand 	case BONDING_MODE_ALB:
294168218b87SDavid Marchand 	default:
294268218b87SDavid Marchand 		/* Do not touch allmulti when there cannot be primary ports */
294315e34522SLong Wu 		if (internals->member_count == 0)
294468218b87SDavid Marchand 			break;
294573e83bf5SIvan Ilchenko 		port_id = internals->current_primary_port;
294673e83bf5SIvan Ilchenko 		ret = rte_eth_allmulticast_disable(port_id);
294773e83bf5SIvan Ilchenko 		if (ret != 0)
294873e83bf5SIvan Ilchenko 			RTE_BOND_LOG(ERR,
294973e83bf5SIvan Ilchenko 				"Failed to disable allmulti mode for port %u: %s",
295073e83bf5SIvan Ilchenko 				port_id, rte_strerror(-ret));
295168218b87SDavid Marchand 	}
2952ca041cd4SIvan Ilchenko 
2953ca041cd4SIvan Ilchenko 	return ret;
295468218b87SDavid Marchand }
295568218b87SDavid Marchand 
2956ac5341f5SMin Hu (Connor) static int
2957ac5341f5SMin Hu (Connor) bond_ethdev_allmulticast_update(struct rte_eth_dev *dev)
2958ac5341f5SMin Hu (Connor) {
2959ac5341f5SMin Hu (Connor) 	struct bond_dev_private *internals = dev->data->dev_private;
2960ac5341f5SMin Hu (Connor) 	uint16_t port_id = internals->current_primary_port;
2961ac5341f5SMin Hu (Connor) 
2962ac5341f5SMin Hu (Connor) 	switch (internals->mode) {
2963ac5341f5SMin Hu (Connor) 	case BONDING_MODE_ROUND_ROBIN:
2964ac5341f5SMin Hu (Connor) 	case BONDING_MODE_BALANCE:
2965ac5341f5SMin Hu (Connor) 	case BONDING_MODE_BROADCAST:
2966ac5341f5SMin Hu (Connor) 	case BONDING_MODE_8023AD:
296715e34522SLong Wu 		/* As allmulticast mode is propagated to all members for these
2968ac5341f5SMin Hu (Connor) 		 * modes, there is no need to update it for the bonding device.
2969ac5341f5SMin Hu (Connor) 		 */
2970ac5341f5SMin Hu (Connor) 		break;
2971ac5341f5SMin Hu (Connor) 	case BONDING_MODE_ACTIVE_BACKUP:
2972ac5341f5SMin Hu (Connor) 	case BONDING_MODE_TLB:
2973ac5341f5SMin Hu (Connor) 	case BONDING_MODE_ALB:
2974ac5341f5SMin Hu (Connor) 	default:
297515e34522SLong Wu 		/* Allmulticast mode is propagated only to the primary member for
2976ac5341f5SMin Hu (Connor) 		 * these modes. On an active/standby switchover, the allmulticast
297715e34522SLong Wu 		 * mode of the new primary member must be set to match the bonding
2978ac5341f5SMin Hu (Connor) 		 * device.
2979ac5341f5SMin Hu (Connor) 		 */
2980ac5341f5SMin Hu (Connor) 		if (rte_eth_allmulticast_get(internals->port_id) == 1)
2981ac5341f5SMin Hu (Connor) 			rte_eth_allmulticast_enable(port_id);
2982ac5341f5SMin Hu (Connor) 		else
2983ac5341f5SMin Hu (Connor) 			rte_eth_allmulticast_disable(port_id);
2984ac5341f5SMin Hu (Connor) 	}
2985ac5341f5SMin Hu (Connor) 
2986ac5341f5SMin Hu (Connor) 	return 0;
2987ac5341f5SMin Hu (Connor) }
2988ac5341f5SMin Hu (Connor) 
298968218b87SDavid Marchand static void
29903eb6bdd8SBruce Richardson bond_ethdev_delayed_lsc_propagation(void *arg)
29913eb6bdd8SBruce Richardson {
29923eb6bdd8SBruce Richardson 	if (arg == NULL)
29933eb6bdd8SBruce Richardson 		return;
29943eb6bdd8SBruce Richardson 
29955723fbedSFerruh Yigit 	rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2996cebe3d7bSThomas Monjalon 			RTE_ETH_EVENT_INTR_LSC, NULL);
29973eb6bdd8SBruce Richardson }
29983eb6bdd8SBruce Richardson 
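/*
 * Note (added for clarity): this callback is registered on every member
 * port. On link up it activates the member, possibly promoting it to primary
 * and inheriting its link properties; on link down it deactivates the member
 * and re-elects a primary. The resulting bonding-port LSC event is raised
 * immediately or deferred via rte_eal_alarm_set() when link up/down delays
 * are configured.
 */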
2999d6af1a13SBernard Iremonger int
3000f8244c63SZhiyong Yang bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
3001d6af1a13SBernard Iremonger 		void *param, void *ret_param __rte_unused)
30023eb6bdd8SBruce Richardson {
30034f840086SLong Wu 	struct rte_eth_dev *bonding_eth_dev;
30043eb6bdd8SBruce Richardson 	struct bond_dev_private *internals;
30053eb6bdd8SBruce Richardson 	struct rte_eth_link link;
3006d6af1a13SBernard Iremonger 	int rc = -1;
3007fc1134c7SIgor Romanov 	int ret;
30083eb6bdd8SBruce Richardson 
30093eb6bdd8SBruce Richardson 	uint8_t lsc_flag = 0;
301015e34522SLong Wu 	int valid_member = 0;
301115e34522SLong Wu 	uint16_t active_pos, member_idx;
30121d6cab8aSDavid Marchand 	uint16_t i;
30133eb6bdd8SBruce Richardson 
30143eb6bdd8SBruce Richardson 	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
3015d6af1a13SBernard Iremonger 		return rc;
30163eb6bdd8SBruce Richardson 
30174f840086SLong Wu 	bonding_eth_dev = &rte_eth_devices[*(uint16_t *)param];
30183eb6bdd8SBruce Richardson 
30194f840086SLong Wu 	if (check_for_bonding_ethdev(bonding_eth_dev))
3020d6af1a13SBernard Iremonger 		return rc;
30213eb6bdd8SBruce Richardson 
30224f840086SLong Wu 	internals = bonding_eth_dev->data->dev_private;
30233eb6bdd8SBruce Richardson 
30243eb6bdd8SBruce Richardson 	/* If the device isn't started don't handle interrupts */
30254f840086SLong Wu 	if (!bonding_eth_dev->data->dev_started)
3026d6af1a13SBernard Iremonger 		return rc;
30273eb6bdd8SBruce Richardson 
30284f840086SLong Wu 	/* verify that port_id is a valid member of bonding port */
302915e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
303015e34522SLong Wu 		if (internals->members[i].port_id == port_id) {
303115e34522SLong Wu 			valid_member = 1;
303215e34522SLong Wu 			member_idx = i;
30333eb6bdd8SBruce Richardson 			break;
30343eb6bdd8SBruce Richardson 		}
30353eb6bdd8SBruce Richardson 	}
30363eb6bdd8SBruce Richardson 
303715e34522SLong Wu 	if (!valid_member)
3038d6af1a13SBernard Iremonger 		return rc;
30393eb6bdd8SBruce Richardson 
304059056833SMatan Azrad 	/* Synchronize parallel LSC callback calls, whether triggered by a real
304115e34522SLong Wu 	 * link event from the member PMDs or by the bonding PMD itself.
304259056833SMatan Azrad 	 */
304359056833SMatan Azrad 	rte_spinlock_lock(&internals->lsc_lock);
304459056833SMatan Azrad 
30453eb6bdd8SBruce Richardson 	/* Search for port in active port list */
304615e34522SLong Wu 	active_pos = find_member_by_id(internals->active_members,
304715e34522SLong Wu 			internals->active_member_count, port_id);
30483eb6bdd8SBruce Richardson 
3049fc1134c7SIgor Romanov 	ret = rte_eth_link_get_nowait(port_id, &link);
3050fc1134c7SIgor Romanov 	if (ret < 0)
305115e34522SLong Wu 		RTE_BOND_LOG(ERR, "Member (port %u) link get failed", port_id);
3052fc1134c7SIgor Romanov 
3053fc1134c7SIgor Romanov 	if (ret == 0 && link.link_status) {
305415e34522SLong Wu 		if (active_pos < internals->active_member_count)
30550e677a35SChas Williams 			goto link_update;
30563eb6bdd8SBruce Richardson 
30574f840086SLong Wu 		/* check link state properties if bonding link is up */
30584f840086SLong Wu 		if (bonding_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
30594f840086SLong Wu 			if (link_properties_valid(bonding_eth_dev, &link) != 0)
306094421f9eSRadu Nicolau 				RTE_BOND_LOG(ERR, "Invalid link properties "
306115e34522SLong Wu 					     "for member %d in bonding mode %d",
306294421f9eSRadu Nicolau 					     port_id, internals->mode);
306394421f9eSRadu Nicolau 		} else {
306415e34522SLong Wu 			/* inherit member link properties */
30654f840086SLong Wu 			link_properties_set(bonding_eth_dev, &link);
306694421f9eSRadu Nicolau 		}
306794421f9eSRadu Nicolau 
306815e34522SLong Wu 		/* If no active member ports then set this port to be
3069d7bce005SChas Williams 		 * the primary port.
3070d7bce005SChas Williams 		 */
307115e34522SLong Wu 		if (internals->active_member_count < 1) {
307215e34522SLong Wu 			/* If first active member, then change link status */
30734f840086SLong Wu 			bonding_eth_dev->data->dev_link.link_status =
3074295968d1SFerruh Yigit 								RTE_ETH_LINK_UP;
3075d7bce005SChas Williams 			internals->current_primary_port = port_id;
3076d7bce005SChas Williams 			lsc_flag = 1;
3077d7bce005SChas Williams 
30784f840086SLong Wu 			mac_address_members_update(bonding_eth_dev);
30794f840086SLong Wu 			bond_ethdev_promiscuous_update(bonding_eth_dev);
30804f840086SLong Wu 			bond_ethdev_allmulticast_update(bonding_eth_dev);
3081d7bce005SChas Williams 		}
3082d7bce005SChas Williams 
30834f840086SLong Wu 		activate_member(bonding_eth_dev, port_id);
30843eb6bdd8SBruce Richardson 
3085d7bce005SChas Williams 		/* If the user has defined the primary port then default to
3086d7bce005SChas Williams 		 * using it.
3087d7bce005SChas Williams 		 */
30883eb6bdd8SBruce Richardson 		if (internals->user_defined_primary_port &&
30893eb6bdd8SBruce Richardson 				internals->primary_port == port_id)
30903eb6bdd8SBruce Richardson 			bond_ethdev_primary_set(internals, port_id);
30913eb6bdd8SBruce Richardson 	} else {
309215e34522SLong Wu 		if (active_pos == internals->active_member_count)
30930e677a35SChas Williams 			goto link_update;
30943eb6bdd8SBruce Richardson 
309515e34522SLong Wu 		/* Remove from active member list */
30964f840086SLong Wu 		deactivate_member(bonding_eth_dev, port_id);
30973eb6bdd8SBruce Richardson 
309815e34522SLong Wu 		if (internals->active_member_count < 1)
30995b1b672dSTomasz Kulasek 			lsc_flag = 1;
31005b1b672dSTomasz Kulasek 
310115e34522SLong Wu 		/* Update the primary port id: take the first active member from the
31023eb6bdd8SBruce Richardson 		 * list, or fall back to the configured primary port if none are available */
31033eb6bdd8SBruce Richardson 		if (port_id == internals->current_primary_port) {
310415e34522SLong Wu 			if (internals->active_member_count > 0)
31053eb6bdd8SBruce Richardson 				bond_ethdev_primary_set(internals,
310615e34522SLong Wu 						internals->active_members[0]);
31073eb6bdd8SBruce Richardson 			else
31083eb6bdd8SBruce Richardson 				internals->current_primary_port = internals->primary_port;
31094f840086SLong Wu 			mac_address_members_update(bonding_eth_dev);
31104f840086SLong Wu 			bond_ethdev_promiscuous_update(bonding_eth_dev);
31114f840086SLong Wu 			bond_ethdev_allmulticast_update(bonding_eth_dev);
31123eb6bdd8SBruce Richardson 		}
31133eb6bdd8SBruce Richardson 	}
31143eb6bdd8SBruce Richardson 
31150e677a35SChas Williams link_update:
3116deba8a2fSTomasz Kulasek 	/**
31174f840086SLong Wu 	 * Update bonding device link properties after any change to active
311815e34522SLong Wu 	 * members
3119deba8a2fSTomasz Kulasek 	 */
31204f840086SLong Wu 	bond_ethdev_link_update(bonding_eth_dev, 0);
312115e34522SLong Wu 	internals->members[member_idx].last_link_status = link.link_status;
3122deba8a2fSTomasz Kulasek 
31233eb6bdd8SBruce Richardson 	if (lsc_flag) {
31243eb6bdd8SBruce Richardson 		/* Cancel any possible outstanding interrupts if delays are enabled */
31253eb6bdd8SBruce Richardson 		if (internals->link_up_delay_ms > 0 ||
31263eb6bdd8SBruce Richardson 			internals->link_down_delay_ms > 0)
31273eb6bdd8SBruce Richardson 			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
31284f840086SLong Wu 					bonding_eth_dev);
31293eb6bdd8SBruce Richardson 
31304f840086SLong Wu 		if (bonding_eth_dev->data->dev_link.link_status) {
31313eb6bdd8SBruce Richardson 			if (internals->link_up_delay_ms > 0)
31323eb6bdd8SBruce Richardson 				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
31333eb6bdd8SBruce Richardson 						bond_ethdev_delayed_lsc_propagation,
31344f840086SLong Wu 						(void *)bonding_eth_dev);
31353eb6bdd8SBruce Richardson 			else
31364f840086SLong Wu 				rte_eth_dev_callback_process(bonding_eth_dev,
3137d6af1a13SBernard Iremonger 						RTE_ETH_EVENT_INTR_LSC,
3138cebe3d7bSThomas Monjalon 						NULL);
31393eb6bdd8SBruce Richardson 
31403eb6bdd8SBruce Richardson 		} else {
31413eb6bdd8SBruce Richardson 			if (internals->link_down_delay_ms > 0)
31423eb6bdd8SBruce Richardson 				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
31433eb6bdd8SBruce Richardson 						bond_ethdev_delayed_lsc_propagation,
31444f840086SLong Wu 						(void *)bonding_eth_dev);
31453eb6bdd8SBruce Richardson 			else
31464f840086SLong Wu 				rte_eth_dev_callback_process(bonding_eth_dev,
3147d6af1a13SBernard Iremonger 						RTE_ETH_EVENT_INTR_LSC,
3148cebe3d7bSThomas Monjalon 						NULL);
31493eb6bdd8SBruce Richardson 		}
31503eb6bdd8SBruce Richardson 	}
315159056833SMatan Azrad 
315259056833SMatan Azrad 	rte_spinlock_unlock(&internals->lsc_lock);
315359056833SMatan Azrad 
31540e677a35SChas Williams 	return rc;
31553eb6bdd8SBruce Richardson }
31563eb6bdd8SBruce Richardson 
3157734ce47fSTomasz Kulasek static int
3158734ce47fSTomasz Kulasek bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
3159734ce47fSTomasz Kulasek 		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
3160734ce47fSTomasz Kulasek {
3161734ce47fSTomasz Kulasek 	unsigned i, j;
3162734ce47fSTomasz Kulasek 	int result = 0;
316315e34522SLong Wu 	int member_reta_size;
3164734ce47fSTomasz Kulasek 	unsigned reta_count;
3165734ce47fSTomasz Kulasek 	struct bond_dev_private *internals = dev->data->dev_private;
3166734ce47fSTomasz Kulasek 
3167734ce47fSTomasz Kulasek 	if (reta_size != internals->reta_size)
3168734ce47fSTomasz Kulasek 		return -EINVAL;
3169734ce47fSTomasz Kulasek 
3170734ce47fSTomasz Kulasek 	 /* Copy RETA table */
3171295968d1SFerruh Yigit 	reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
3172295968d1SFerruh Yigit 			RTE_ETH_RETA_GROUP_SIZE;
3173734ce47fSTomasz Kulasek 
3174734ce47fSTomasz Kulasek 	for (i = 0; i < reta_count; i++) {
3175734ce47fSTomasz Kulasek 		internals->reta_conf[i].mask = reta_conf[i].mask;
3176295968d1SFerruh Yigit 		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
3177734ce47fSTomasz Kulasek 			if ((reta_conf[i].mask >> j) & 0x01)
3178734ce47fSTomasz Kulasek 				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
3179734ce47fSTomasz Kulasek 	}
3180734ce47fSTomasz Kulasek 
3181734ce47fSTomasz Kulasek 	/* Fill rest of array */
3182734ce47fSTomasz Kulasek 	for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
3183734ce47fSTomasz Kulasek 		memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
3184734ce47fSTomasz Kulasek 				sizeof(internals->reta_conf[0]) * reta_count);
3185734ce47fSTomasz Kulasek 
318615e34522SLong Wu 	/* Propagate RETA over members */
318715e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
318815e34522SLong Wu 		member_reta_size = internals->members[i].reta_size;
318915e34522SLong Wu 		result = rte_eth_dev_rss_reta_update(internals->members[i].port_id,
319015e34522SLong Wu 				&internals->reta_conf[0], member_reta_size);
3191734ce47fSTomasz Kulasek 		if (result < 0)
3192734ce47fSTomasz Kulasek 			return result;
3193734ce47fSTomasz Kulasek 	}
3194734ce47fSTomasz Kulasek 
3195734ce47fSTomasz Kulasek 	return 0;
3196734ce47fSTomasz Kulasek }
3197734ce47fSTomasz Kulasek 
3198734ce47fSTomasz Kulasek static int
3199734ce47fSTomasz Kulasek bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
3200734ce47fSTomasz Kulasek 		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
3201734ce47fSTomasz Kulasek {
3202734ce47fSTomasz Kulasek 	int i, j;
3203734ce47fSTomasz Kulasek 	struct bond_dev_private *internals = dev->data->dev_private;
3204734ce47fSTomasz Kulasek 
3205734ce47fSTomasz Kulasek 	if (reta_size != internals->reta_size)
3206734ce47fSTomasz Kulasek 		return -EINVAL;
3207734ce47fSTomasz Kulasek 
3208734ce47fSTomasz Kulasek 	 /* Copy RETA table */
3209295968d1SFerruh Yigit 	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
3210295968d1SFerruh Yigit 		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
3211734ce47fSTomasz Kulasek 			if ((reta_conf[i].mask >> j) & 0x01)
3212734ce47fSTomasz Kulasek 				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
3213734ce47fSTomasz Kulasek 
3214734ce47fSTomasz Kulasek 	return 0;
3215734ce47fSTomasz Kulasek }
3216734ce47fSTomasz Kulasek 
3217734ce47fSTomasz Kulasek static int
3218734ce47fSTomasz Kulasek bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
3219734ce47fSTomasz Kulasek 		struct rte_eth_rss_conf *rss_conf)
3220734ce47fSTomasz Kulasek {
3221734ce47fSTomasz Kulasek 	int i, result = 0;
3222734ce47fSTomasz Kulasek 	struct bond_dev_private *internals = dev->data->dev_private;
3223734ce47fSTomasz Kulasek 	struct rte_eth_rss_conf bond_rss_conf;
3224734ce47fSTomasz Kulasek 
3225734ce47fSTomasz Kulasek 	memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
3226734ce47fSTomasz Kulasek 
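	/* Restrict the requested hash functions to those the bonding device advertises */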
3227734ce47fSTomasz Kulasek 	bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
3228734ce47fSTomasz Kulasek 
3229734ce47fSTomasz Kulasek 	if (bond_rss_conf.rss_hf != 0)
3230734ce47fSTomasz Kulasek 		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
3231734ce47fSTomasz Kulasek 
32326b1a001eSChengchang Tang 	if (bond_rss_conf.rss_key) {
32336b1a001eSChengchang Tang 		if (bond_rss_conf.rss_key_len < internals->rss_key_len)
32346b1a001eSChengchang Tang 			return -EINVAL;
32356b1a001eSChengchang Tang 		else if (bond_rss_conf.rss_key_len > internals->rss_key_len)
32366b1a001eSChengchang Tang 			RTE_BOND_LOG(WARNING, "rss_key will be truncated");
32376b1a001eSChengchang Tang 
3238734ce47fSTomasz Kulasek 		memcpy(internals->rss_key, bond_rss_conf.rss_key,
3239734ce47fSTomasz Kulasek 				internals->rss_key_len);
32406b1a001eSChengchang Tang 		bond_rss_conf.rss_key_len = internals->rss_key_len;
3241734ce47fSTomasz Kulasek 	}
3242734ce47fSTomasz Kulasek 
324315e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
324415e34522SLong Wu 		result = rte_eth_dev_rss_hash_update(internals->members[i].port_id,
3245734ce47fSTomasz Kulasek 				&bond_rss_conf);
3246734ce47fSTomasz Kulasek 		if (result < 0)
3247734ce47fSTomasz Kulasek 			return result;
3248734ce47fSTomasz Kulasek 	}
3249734ce47fSTomasz Kulasek 
3250734ce47fSTomasz Kulasek 	return 0;
3251734ce47fSTomasz Kulasek }
3252734ce47fSTomasz Kulasek 
3253734ce47fSTomasz Kulasek static int
3254734ce47fSTomasz Kulasek bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
3255734ce47fSTomasz Kulasek 		struct rte_eth_rss_conf *rss_conf)
3256734ce47fSTomasz Kulasek {
3257734ce47fSTomasz Kulasek 	struct bond_dev_private *internals = dev->data->dev_private;
3258734ce47fSTomasz Kulasek 
3259734ce47fSTomasz Kulasek 	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
3260734ce47fSTomasz Kulasek 	rss_conf->rss_key_len = internals->rss_key_len;
3261734ce47fSTomasz Kulasek 	if (rss_conf->rss_key)
3262734ce47fSTomasz Kulasek 		memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
3263734ce47fSTomasz Kulasek 
3264734ce47fSTomasz Kulasek 	return 0;
3265734ce47fSTomasz Kulasek }
3266734ce47fSTomasz Kulasek 
326755b58a73SSharmila Podury static int
326855b58a73SSharmila Podury bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
326955b58a73SSharmila Podury {
327015e34522SLong Wu 	struct rte_eth_dev *member_eth_dev;
327155b58a73SSharmila Podury 	struct bond_dev_private *internals = dev->data->dev_private;
327255b58a73SSharmila Podury 	int ret, i;
327355b58a73SSharmila Podury 
327455b58a73SSharmila Podury 	rte_spinlock_lock(&internals->lock);
327555b58a73SSharmila Podury 
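	/* Verify that every member implements mtu_set before changing any of them */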
327615e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
327715e34522SLong Wu 		member_eth_dev = &rte_eth_devices[internals->members[i].port_id];
327815e34522SLong Wu 		if (*member_eth_dev->dev_ops->mtu_set == NULL) {
327955b58a73SSharmila Podury 			rte_spinlock_unlock(&internals->lock);
328055b58a73SSharmila Podury 			return -ENOTSUP;
328155b58a73SSharmila Podury 		}
328255b58a73SSharmila Podury 	}
328315e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
328415e34522SLong Wu 		ret = rte_eth_dev_set_mtu(internals->members[i].port_id, mtu);
328555b58a73SSharmila Podury 		if (ret < 0) {
328655b58a73SSharmila Podury 			rte_spinlock_unlock(&internals->lock);
328755b58a73SSharmila Podury 			return ret;
328855b58a73SSharmila Podury 		}
328955b58a73SSharmila Podury 	}
329055b58a73SSharmila Podury 
329155b58a73SSharmila Podury 	rte_spinlock_unlock(&internals->lock);
329255b58a73SSharmila Podury 	return 0;
329355b58a73SSharmila Podury }
329455b58a73SSharmila Podury 
3295caccf8b3SOlivier Matz static int
32966d13ea8eSOlivier Matz bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
32976d13ea8eSOlivier Matz 			struct rte_ether_addr *addr)
32981e4a3cf6SRadu Nicolau {
3299caccf8b3SOlivier Matz 	if (mac_address_set(dev, addr)) {
33001e4a3cf6SRadu Nicolau 		RTE_BOND_LOG(ERR, "Failed to update MAC address");
3301caccf8b3SOlivier Matz 		return -EINVAL;
3302caccf8b3SOlivier Matz 	}
3303caccf8b3SOlivier Matz 
3304caccf8b3SOlivier Matz 	return 0;
33051e4a3cf6SRadu Nicolau }
33061e4a3cf6SRadu Nicolau 
330749dad902SMatan Azrad static int
3308fb7ad441SThomas Monjalon bond_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
3309fb7ad441SThomas Monjalon 		  const struct rte_flow_ops **ops)
331049dad902SMatan Azrad {
3311fb7ad441SThomas Monjalon 	*ops = &bond_flow_ops;
331249dad902SMatan Azrad 	return 0;
331349dad902SMatan Azrad }
331449dad902SMatan Azrad 
33159d453d1dSAlex Kiselev static int
33166d13ea8eSOlivier Matz bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
33176d13ea8eSOlivier Matz 			struct rte_ether_addr *mac_addr,
33189d453d1dSAlex Kiselev 			__rte_unused uint32_t index, uint32_t vmdq)
33199d453d1dSAlex Kiselev {
332015e34522SLong Wu 	struct rte_eth_dev *member_eth_dev;
33219d453d1dSAlex Kiselev 	struct bond_dev_private *internals = dev->data->dev_private;
33229d453d1dSAlex Kiselev 	int ret, i;
33239d453d1dSAlex Kiselev 
33249d453d1dSAlex Kiselev 	rte_spinlock_lock(&internals->lock);
33259d453d1dSAlex Kiselev 
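	/* Verify that every member implements MAC address add/remove before modifying any of them */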
332615e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
332715e34522SLong Wu 		member_eth_dev = &rte_eth_devices[internals->members[i].port_id];
332815e34522SLong Wu 		if (*member_eth_dev->dev_ops->mac_addr_add == NULL ||
332915e34522SLong Wu 			 *member_eth_dev->dev_ops->mac_addr_remove == NULL) {
33309d453d1dSAlex Kiselev 			ret = -ENOTSUP;
33319d453d1dSAlex Kiselev 			goto end;
33329d453d1dSAlex Kiselev 		}
33339d453d1dSAlex Kiselev 	}
33349d453d1dSAlex Kiselev 
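	/* Add the MAC address on each member, rolling back already-updated members on failure */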
333515e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
333615e34522SLong Wu 		ret = rte_eth_dev_mac_addr_add(internals->members[i].port_id,
33379d453d1dSAlex Kiselev 				mac_addr, vmdq);
33389d453d1dSAlex Kiselev 		if (ret < 0) {
33399d453d1dSAlex Kiselev 			/* rollback */
33409d453d1dSAlex Kiselev 			for (i--; i >= 0; i--)
33419d453d1dSAlex Kiselev 				rte_eth_dev_mac_addr_remove(
334215e34522SLong Wu 					internals->members[i].port_id, mac_addr);
33439d453d1dSAlex Kiselev 			goto end;
33449d453d1dSAlex Kiselev 		}
33459d453d1dSAlex Kiselev 	}
33469d453d1dSAlex Kiselev 
33479d453d1dSAlex Kiselev 	ret = 0;
33489d453d1dSAlex Kiselev end:
33499d453d1dSAlex Kiselev 	rte_spinlock_unlock(&internals->lock);
33509d453d1dSAlex Kiselev 	return ret;
33519d453d1dSAlex Kiselev }
33529d453d1dSAlex Kiselev 
33539d453d1dSAlex Kiselev static void
33549d453d1dSAlex Kiselev bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
33559d453d1dSAlex Kiselev {
335615e34522SLong Wu 	struct rte_eth_dev *member_eth_dev;
33579d453d1dSAlex Kiselev 	struct bond_dev_private *internals = dev->data->dev_private;
33589d453d1dSAlex Kiselev 	int i;
33599d453d1dSAlex Kiselev 
33609d453d1dSAlex Kiselev 	rte_spinlock_lock(&internals->lock);
33619d453d1dSAlex Kiselev 
336215e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
336315e34522SLong Wu 		member_eth_dev = &rte_eth_devices[internals->members[i].port_id];
336415e34522SLong Wu 		if (*member_eth_dev->dev_ops->mac_addr_remove == NULL)
33659d453d1dSAlex Kiselev 			goto end;
33669d453d1dSAlex Kiselev 	}
33679d453d1dSAlex Kiselev 
33686d13ea8eSOlivier Matz 	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
33699d453d1dSAlex Kiselev 
337015e34522SLong Wu 	for (i = 0; i < internals->member_count; i++)
337115e34522SLong Wu 		rte_eth_dev_mac_addr_remove(internals->members[i].port_id,
33729d453d1dSAlex Kiselev 				mac_addr);
33739d453d1dSAlex Kiselev 
33749d453d1dSAlex Kiselev end:
33759d453d1dSAlex Kiselev 	rte_spinlock_unlock(&internals->lock);
33769d453d1dSAlex Kiselev }
33779d453d1dSAlex Kiselev 
337829e89fb1SChengwen Feng static const char *
337929e89fb1SChengwen Feng bond_mode_name(uint8_t mode)
338029e89fb1SChengwen Feng {
338129e89fb1SChengwen Feng 	switch (mode) {
338229e89fb1SChengwen Feng 	case BONDING_MODE_ROUND_ROBIN:
338329e89fb1SChengwen Feng 		return "ROUND_ROBIN";
338429e89fb1SChengwen Feng 	case BONDING_MODE_ACTIVE_BACKUP:
338529e89fb1SChengwen Feng 		return "ACTIVE_BACKUP";
338629e89fb1SChengwen Feng 	case BONDING_MODE_BALANCE:
338729e89fb1SChengwen Feng 		return "BALANCE";
338829e89fb1SChengwen Feng 	case BONDING_MODE_BROADCAST:
338929e89fb1SChengwen Feng 		return "BROADCAST";
339029e89fb1SChengwen Feng 	case BONDING_MODE_8023AD:
339129e89fb1SChengwen Feng 		return "8023AD";
339229e89fb1SChengwen Feng 	case BONDING_MODE_TLB:
339329e89fb1SChengwen Feng 		return "TLB";
339429e89fb1SChengwen Feng 	case BONDING_MODE_ALB:
339529e89fb1SChengwen Feng 		return "ALB";
339629e89fb1SChengwen Feng 	default:
339729e89fb1SChengwen Feng 		return "Unknown";
339829e89fb1SChengwen Feng 	}
339929e89fb1SChengwen Feng }
340029e89fb1SChengwen Feng 
3401b00119fcSChengwen Feng static void
3402b00119fcSChengwen Feng dump_basic(const struct rte_eth_dev *dev, FILE *f)
340329e89fb1SChengwen Feng {
340429e89fb1SChengwen Feng 	struct bond_dev_private instant_priv;
340529e89fb1SChengwen Feng 	const struct bond_dev_private *internals = &instant_priv;
340629e89fb1SChengwen Feng 	int mode, i;
340729e89fb1SChengwen Feng 
340829e89fb1SChengwen Feng 	/* Obtain an instance of dev_private to prevent the data from being modified. */
340929e89fb1SChengwen Feng 	memcpy(&instant_priv, dev->data->dev_private, sizeof(struct bond_dev_private));
341029e89fb1SChengwen Feng 	mode = internals->mode;
341129e89fb1SChengwen Feng 
341229e89fb1SChengwen Feng 	fprintf(f, "  - Dev basic:\n");
341329e89fb1SChengwen Feng 	fprintf(f, "\tBonding mode: %s(%d)\n", bond_mode_name(mode), mode);
341429e89fb1SChengwen Feng 
341529e89fb1SChengwen Feng 	if (mode == BONDING_MODE_BALANCE || mode == BONDING_MODE_8023AD) {
341629e89fb1SChengwen Feng 		fprintf(f, "\tBalance Xmit Policy: ");
341729e89fb1SChengwen Feng 		switch (internals->balance_xmit_policy) {
341829e89fb1SChengwen Feng 		case BALANCE_XMIT_POLICY_LAYER2:
341929e89fb1SChengwen Feng 			fprintf(f, "BALANCE_XMIT_POLICY_LAYER2");
342029e89fb1SChengwen Feng 			break;
342129e89fb1SChengwen Feng 		case BALANCE_XMIT_POLICY_LAYER23:
342229e89fb1SChengwen Feng 			fprintf(f, "BALANCE_XMIT_POLICY_LAYER23");
342329e89fb1SChengwen Feng 			break;
342429e89fb1SChengwen Feng 		case BALANCE_XMIT_POLICY_LAYER34:
342529e89fb1SChengwen Feng 			fprintf(f, "BALANCE_XMIT_POLICY_LAYER34");
342629e89fb1SChengwen Feng 			break;
342729e89fb1SChengwen Feng 		default:
342829e89fb1SChengwen Feng 			fprintf(f, "Unknown");
342929e89fb1SChengwen Feng 		}
343029e89fb1SChengwen Feng 		fprintf(f, "\n");
343129e89fb1SChengwen Feng 	}
343229e89fb1SChengwen Feng 
343329e89fb1SChengwen Feng 	if (mode == BONDING_MODE_8023AD) {
343429e89fb1SChengwen Feng 		fprintf(f, "\tIEEE802.3AD Aggregator Mode: ");
343529e89fb1SChengwen Feng 		switch (internals->mode4.agg_selection) {
343629e89fb1SChengwen Feng 		case AGG_BANDWIDTH:
343729e89fb1SChengwen Feng 			fprintf(f, "bandwidth");
343829e89fb1SChengwen Feng 			break;
343929e89fb1SChengwen Feng 		case AGG_STABLE:
344029e89fb1SChengwen Feng 			fprintf(f, "stable");
344129e89fb1SChengwen Feng 			break;
344229e89fb1SChengwen Feng 		case AGG_COUNT:
344329e89fb1SChengwen Feng 			fprintf(f, "count");
344429e89fb1SChengwen Feng 			break;
344529e89fb1SChengwen Feng 		default:
344629e89fb1SChengwen Feng 			fprintf(f, "unknown");
344729e89fb1SChengwen Feng 		}
344829e89fb1SChengwen Feng 		fprintf(f, "\n");
344929e89fb1SChengwen Feng 	}
345029e89fb1SChengwen Feng 
345115e34522SLong Wu 	if (internals->member_count > 0) {
345215e34522SLong Wu 		fprintf(f, "\tMembers (%u): [", internals->member_count);
345315e34522SLong Wu 		for (i = 0; i < internals->member_count - 1; i++)
345415e34522SLong Wu 			fprintf(f, "%u ", internals->members[i].port_id);
345529e89fb1SChengwen Feng 
345615e34522SLong Wu 		fprintf(f, "%u]\n", internals->members[internals->member_count - 1].port_id);
345729e89fb1SChengwen Feng 	} else {
345815e34522SLong Wu 		fprintf(f, "\tMembers: []\n");
345929e89fb1SChengwen Feng 	}
346029e89fb1SChengwen Feng 
346115e34522SLong Wu 	if (internals->active_member_count > 0) {
346215e34522SLong Wu 		fprintf(f, "\tActive Members (%u): [", internals->active_member_count);
346315e34522SLong Wu 		for (i = 0; i < internals->active_member_count - 1; i++)
346415e34522SLong Wu 			fprintf(f, "%u ", internals->active_members[i]);
346529e89fb1SChengwen Feng 
346615e34522SLong Wu 		fprintf(f, "%u]\n", internals->active_members[internals->active_member_count - 1]);
346729e89fb1SChengwen Feng 
346829e89fb1SChengwen Feng 	} else {
346915e34522SLong Wu 		fprintf(f, "\tActive Members: []\n");
347029e89fb1SChengwen Feng 	}
347129e89fb1SChengwen Feng 
347229e89fb1SChengwen Feng 	if (internals->user_defined_primary_port)
347329e89fb1SChengwen Feng 		fprintf(f, "\tUser Defined Primary: [%u]\n", internals->primary_port);
347415e34522SLong Wu 	if (internals->member_count > 0)
347529e89fb1SChengwen Feng 		fprintf(f, "\tCurrent Primary: [%u]\n", internals->current_primary_port);
3476b00119fcSChengwen Feng }
3477b00119fcSChengwen Feng 
3478b00119fcSChengwen Feng static void
3479b00119fcSChengwen Feng dump_lacp_conf(const struct rte_eth_bond_8023ad_conf *conf, FILE *f)
3480b00119fcSChengwen Feng {
3481b00119fcSChengwen Feng 	fprintf(f, "\tfast period: %u ms\n", conf->fast_periodic_ms);
3482b00119fcSChengwen Feng 	fprintf(f, "\tslow period: %u ms\n", conf->slow_periodic_ms);
3483b00119fcSChengwen Feng 	fprintf(f, "\tshort timeout: %u ms\n", conf->short_timeout_ms);
3484b00119fcSChengwen Feng 	fprintf(f, "\tlong timeout: %u ms\n", conf->long_timeout_ms);
3485b00119fcSChengwen Feng 	fprintf(f, "\taggregate wait timeout: %u ms\n",
3486b00119fcSChengwen Feng 			conf->aggregate_wait_timeout_ms);
3487b00119fcSChengwen Feng 	fprintf(f, "\ttx period: %u ms\n", conf->tx_period_ms);
3488b00119fcSChengwen Feng 	fprintf(f, "\trx marker period: %u ms\n", conf->rx_marker_period_ms);
3489b00119fcSChengwen Feng 	fprintf(f, "\tupdate timeout: %u ms\n", conf->update_timeout_ms);
3490b00119fcSChengwen Feng 	switch (conf->agg_selection) {
3491b00119fcSChengwen Feng 	case AGG_BANDWIDTH:
3492b00119fcSChengwen Feng 		fprintf(f, "\taggregation mode: bandwidth\n");
3493b00119fcSChengwen Feng 		break;
3494b00119fcSChengwen Feng 	case AGG_STABLE:
3495b00119fcSChengwen Feng 		fprintf(f, "\taggregation mode: stable\n");
3496b00119fcSChengwen Feng 		break;
3497b00119fcSChengwen Feng 	case AGG_COUNT:
3498b00119fcSChengwen Feng 		fprintf(f, "\taggregation mode: count\n");
3499b00119fcSChengwen Feng 		break;
3500b00119fcSChengwen Feng 	default:
3501b00119fcSChengwen Feng 		fprintf(f, "\taggregation mode: invalid\n");
3502b00119fcSChengwen Feng 		break;
3503b00119fcSChengwen Feng 	}
3504b00119fcSChengwen Feng 	fprintf(f, "\n");
3505b00119fcSChengwen Feng }
3506b00119fcSChengwen Feng 
3507b00119fcSChengwen Feng static void
3508b00119fcSChengwen Feng dump_lacp_port_param(const struct port_params *params, FILE *f)
3509b00119fcSChengwen Feng {
3510b00119fcSChengwen Feng 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
3511b00119fcSChengwen Feng 	fprintf(f, "\t\tsystem priority: %u\n", params->system_priority);
3512b00119fcSChengwen Feng 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &params->system);
3513b00119fcSChengwen Feng 	fprintf(f, "\t\tsystem mac address: %s\n", buf);
3514b00119fcSChengwen Feng 	fprintf(f, "\t\tport key: %u\n", params->key);
3515b00119fcSChengwen Feng 	fprintf(f, "\t\tport priority: %u\n", params->port_priority);
3516b00119fcSChengwen Feng 	fprintf(f, "\t\tport number: %u\n", params->port_number);
3517b00119fcSChengwen Feng }
3518b00119fcSChengwen Feng 
3519b00119fcSChengwen Feng static void
352015e34522SLong Wu dump_lacp_member(const struct rte_eth_bond_8023ad_member_info *info, FILE *f)
3521b00119fcSChengwen Feng {
3522b00119fcSChengwen Feng 	char a_state[256] = { 0 };
3523b00119fcSChengwen Feng 	char p_state[256] = { 0 };
3524b00119fcSChengwen Feng 	int a_len = 0;
3525b00119fcSChengwen Feng 	int p_len = 0;
3526b00119fcSChengwen Feng 	uint32_t i;
3527b00119fcSChengwen Feng 
3528b00119fcSChengwen Feng 	static const char * const state[] = {
3529b00119fcSChengwen Feng 		"ACTIVE",
3530b00119fcSChengwen Feng 		"TIMEOUT",
3531b00119fcSChengwen Feng 		"AGGREGATION",
3532b00119fcSChengwen Feng 		"SYNCHRONIZATION",
3533b00119fcSChengwen Feng 		"COLLECTING",
3534b00119fcSChengwen Feng 		"DISTRIBUTING",
3535b00119fcSChengwen Feng 		"DEFAULTED",
3536b00119fcSChengwen Feng 		"EXPIRED"
3537b00119fcSChengwen Feng 	};
3538b00119fcSChengwen Feng 	static const char * const selection[] = {
3539b00119fcSChengwen Feng 		"UNSELECTED",
3540b00119fcSChengwen Feng 		"STANDBY",
3541b00119fcSChengwen Feng 		"SELECTED"
3542b00119fcSChengwen Feng 	};
3543b00119fcSChengwen Feng 
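	/* Collect the names of the actor and partner state flags that are set */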
3544b00119fcSChengwen Feng 	for (i = 0; i < RTE_DIM(state); i++) {
3545b00119fcSChengwen Feng 		if ((info->actor_state >> i) & 1)
3546b00119fcSChengwen Feng 			a_len += snprintf(&a_state[a_len],
3547b00119fcSChengwen Feng 						RTE_DIM(a_state) - a_len, "%s ",
3548b00119fcSChengwen Feng 						state[i]);
3549b00119fcSChengwen Feng 
3550b00119fcSChengwen Feng 		if ((info->partner_state >> i) & 1)
3551b00119fcSChengwen Feng 			p_len += snprintf(&p_state[p_len],
3552b00119fcSChengwen Feng 						RTE_DIM(p_state) - p_len, "%s ",
3553b00119fcSChengwen Feng 						state[i]);
3554b00119fcSChengwen Feng 	}
3555b00119fcSChengwen Feng 	fprintf(f, "\tAggregator port id: %u\n", info->agg_port_id);
3556b00119fcSChengwen Feng 	fprintf(f, "\tselection: %s\n", selection[info->selected]);
3557b00119fcSChengwen Feng 	fprintf(f, "\tActor detail info:\n");
3558b00119fcSChengwen Feng 	dump_lacp_port_param(&info->actor, f);
3559b00119fcSChengwen Feng 	fprintf(f, "\t\tport state: %s\n", a_state);
3560b00119fcSChengwen Feng 	fprintf(f, "\tPartner detail info:\n");
3561b00119fcSChengwen Feng 	dump_lacp_port_param(&info->partner, f);
3562b00119fcSChengwen Feng 	fprintf(f, "\t\tport state: %s\n", p_state);
3563b00119fcSChengwen Feng 	fprintf(f, "\n");
3564b00119fcSChengwen Feng }
3565b00119fcSChengwen Feng 
3566b00119fcSChengwen Feng static void
3567b00119fcSChengwen Feng dump_lacp(uint16_t port_id, FILE *f)
3568b00119fcSChengwen Feng {
356915e34522SLong Wu 	struct rte_eth_bond_8023ad_member_info member_info;
3570b00119fcSChengwen Feng 	struct rte_eth_bond_8023ad_conf port_conf;
357115e34522SLong Wu 	uint16_t members[RTE_MAX_ETHPORTS];
357215e34522SLong Wu 	int num_active_members;
3573b00119fcSChengwen Feng 	int i, ret;
3574b00119fcSChengwen Feng 
3575b00119fcSChengwen Feng 	fprintf(f, "  - Lacp info:\n");
3576b00119fcSChengwen Feng 
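	/* Dump the 802.3ad configuration of the bonding port and the LACP state of each active member */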
357715e34522SLong Wu 	num_active_members = rte_eth_bond_active_members_get(port_id, members,
3578b00119fcSChengwen Feng 			RTE_MAX_ETHPORTS);
357915e34522SLong Wu 	if (num_active_members < 0) {
358015e34522SLong Wu 		fprintf(f, "\tFailed to get active member list for port %u\n",
3581b00119fcSChengwen Feng 				port_id);
3582b00119fcSChengwen Feng 		return;
3583b00119fcSChengwen Feng 	}
3584b00119fcSChengwen Feng 
3585b00119fcSChengwen Feng 	fprintf(f, "\tIEEE802.3 port: %u\n", port_id);
3586b00119fcSChengwen Feng 	ret = rte_eth_bond_8023ad_conf_get(port_id, &port_conf);
3587b00119fcSChengwen Feng 	if (ret) {
35884f840086SLong Wu 		fprintf(f, "\tGet bonding device %u 8023ad config failed\n",
3589b00119fcSChengwen Feng 			port_id);
3590b00119fcSChengwen Feng 		return;
3591b00119fcSChengwen Feng 	}
3592b00119fcSChengwen Feng 	dump_lacp_conf(&port_conf, f);
3593b00119fcSChengwen Feng 
359415e34522SLong Wu 	for (i = 0; i < num_active_members; i++) {
359515e34522SLong Wu 		ret = rte_eth_bond_8023ad_member_info(port_id, members[i],
359615e34522SLong Wu 				&member_info);
3597b00119fcSChengwen Feng 		if (ret) {
359815e34522SLong Wu 			fprintf(f, "\tGet member device %u 8023ad info failed\n",
359915e34522SLong Wu 				members[i]);
3600b00119fcSChengwen Feng 			return;
3601b00119fcSChengwen Feng 		}
360215e34522SLong Wu 		fprintf(f, "\tMember Port: %u\n", members[i]);
360315e34522SLong Wu 		dump_lacp_member(&member_info, f);
3604b00119fcSChengwen Feng 	}
3605b00119fcSChengwen Feng }
3606b00119fcSChengwen Feng 
3607b00119fcSChengwen Feng static int
3608b00119fcSChengwen Feng bond_ethdev_priv_dump(struct rte_eth_dev *dev, FILE *f)
3609b00119fcSChengwen Feng {
3610b00119fcSChengwen Feng 	const struct bond_dev_private *internals = dev->data->dev_private;
3611b00119fcSChengwen Feng 
3612b00119fcSChengwen Feng 	dump_basic(dev, f);
3613b00119fcSChengwen Feng 	if (internals->mode == BONDING_MODE_8023AD)
3614b00119fcSChengwen Feng 		dump_lacp(dev->data->port_id, f);
361529e89fb1SChengwen Feng 
361629e89fb1SChengwen Feng 	return 0;
361729e89fb1SChengwen Feng }
361829e89fb1SChengwen Feng 
361989b890dfSStephen Hemminger const struct eth_dev_ops default_dev_ops = {
36203eb6bdd8SBruce Richardson 	.dev_start            = bond_ethdev_start,
36213eb6bdd8SBruce Richardson 	.dev_stop             = bond_ethdev_stop,
36223eb6bdd8SBruce Richardson 	.dev_close            = bond_ethdev_close,
36233eb6bdd8SBruce Richardson 	.dev_configure        = bond_ethdev_configure,
36243eb6bdd8SBruce Richardson 	.dev_infos_get        = bond_ethdev_info,
3625c771e4efSEric Kinzie 	.vlan_filter_set      = bond_ethdev_vlan_filter_set,
36263eb6bdd8SBruce Richardson 	.rx_queue_setup       = bond_ethdev_rx_queue_setup,
36273eb6bdd8SBruce Richardson 	.tx_queue_setup       = bond_ethdev_tx_queue_setup,
36283eb6bdd8SBruce Richardson 	.rx_queue_release     = bond_ethdev_rx_queue_release,
36293eb6bdd8SBruce Richardson 	.tx_queue_release     = bond_ethdev_tx_queue_release,
36303eb6bdd8SBruce Richardson 	.link_update          = bond_ethdev_link_update,
36313eb6bdd8SBruce Richardson 	.stats_get            = bond_ethdev_stats_get,
36323eb6bdd8SBruce Richardson 	.stats_reset          = bond_ethdev_stats_reset,
36333eb6bdd8SBruce Richardson 	.promiscuous_enable   = bond_ethdev_promiscuous_enable,
3634734ce47fSTomasz Kulasek 	.promiscuous_disable  = bond_ethdev_promiscuous_disable,
363568218b87SDavid Marchand 	.allmulticast_enable  = bond_ethdev_allmulticast_enable,
363668218b87SDavid Marchand 	.allmulticast_disable = bond_ethdev_allmulticast_disable,
3637734ce47fSTomasz Kulasek 	.reta_update          = bond_ethdev_rss_reta_update,
3638734ce47fSTomasz Kulasek 	.reta_query           = bond_ethdev_rss_reta_query,
3639734ce47fSTomasz Kulasek 	.rss_hash_update      = bond_ethdev_rss_hash_update,
364055b58a73SSharmila Podury 	.rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get,
36411e4a3cf6SRadu Nicolau 	.mtu_set              = bond_ethdev_mtu_set,
364249dad902SMatan Azrad 	.mac_addr_set         = bond_ethdev_mac_address_set,
36439d453d1dSAlex Kiselev 	.mac_addr_add         = bond_ethdev_mac_addr_add,
36449d453d1dSAlex Kiselev 	.mac_addr_remove      = bond_ethdev_mac_addr_remove,
364529e89fb1SChengwen Feng 	.flow_ops_get         = bond_flow_ops_get,
364629e89fb1SChengwen Feng 	.eth_dev_priv_dump    = bond_ethdev_priv_dump,
36473eb6bdd8SBruce Richardson };
36483eb6bdd8SBruce Richardson 
36493eb6bdd8SBruce Richardson static int
365068451eb6SJan Blunck bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
365168451eb6SJan Blunck {
365268451eb6SJan Blunck 	const char *name = rte_vdev_device_name(dev);
365385e6be63SZerun Fu 	int socket_id = dev->device.numa_node;
365468451eb6SJan Blunck 	struct bond_dev_private *internals = NULL;
365568451eb6SJan Blunck 	struct rte_eth_dev *eth_dev = NULL;
365668451eb6SJan Blunck 	uint32_t vlan_filter_bmp_size;
365768451eb6SJan Blunck 
365868451eb6SJan Blunck 	/* now do all data allocation - for the eth_dev structure
365968451eb6SJan Blunck 	 * and internal (private) data
366068451eb6SJan Blunck 	 */
366168451eb6SJan Blunck 
366268451eb6SJan Blunck 	/* reserve an ethdev entry */
3663050fe6e9SJan Blunck 	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
366468451eb6SJan Blunck 	if (eth_dev == NULL) {
366568451eb6SJan Blunck 		RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
366668451eb6SJan Blunck 		goto err;
366768451eb6SJan Blunck 	}
366868451eb6SJan Blunck 
3669050fe6e9SJan Blunck 	internals = eth_dev->data->dev_private;
367068451eb6SJan Blunck 	eth_dev->data->nb_rx_queues = (uint16_t)1;
367168451eb6SJan Blunck 	eth_dev->data->nb_tx_queues = (uint16_t)1;
367268451eb6SJan Blunck 
36739d453d1dSAlex Kiselev 	/* Allocate memory for storing MAC addresses */
367435b2d13fSOlivier Matz 	eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
36759d453d1dSAlex Kiselev 			BOND_MAX_MAC_ADDRS, 0, socket_id);
367668451eb6SJan Blunck 	if (eth_dev->data->mac_addrs == NULL) {
36779d453d1dSAlex Kiselev 		RTE_BOND_LOG(ERR,
36789d453d1dSAlex Kiselev 			     "Failed to allocate %u bytes needed to store MAC addresses",
367935b2d13fSOlivier Matz 			     RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
368068451eb6SJan Blunck 		goto err;
368168451eb6SJan Blunck 	}
368268451eb6SJan Blunck 
368368451eb6SJan Blunck 	eth_dev->dev_ops = &default_dev_ops;
3684f30e69b4SFerruh Yigit 	eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
3685f30e69b4SFerruh Yigit 					RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
368668451eb6SJan Blunck 
368768451eb6SJan Blunck 	rte_spinlock_init(&internals->lock);
368859056833SMatan Azrad 	rte_spinlock_init(&internals->lsc_lock);
368968451eb6SJan Blunck 
369068451eb6SJan Blunck 	internals->port_id = eth_dev->data->port_id;
369168451eb6SJan Blunck 	internals->mode = BONDING_MODE_INVALID;
369268451eb6SJan Blunck 	internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
369368451eb6SJan Blunck 	internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
369409150784SDeclan Doherty 	internals->burst_xmit_hash = burst_xmit_l2_hash;
369568451eb6SJan Blunck 	internals->user_defined_mac = 0;
369668451eb6SJan Blunck 
369768451eb6SJan Blunck 	internals->link_status_polling_enabled = 0;
369868451eb6SJan Blunck 
369968451eb6SJan Blunck 	internals->link_status_polling_interval_ms =
370068451eb6SJan Blunck 		DEFAULT_POLLING_INTERVAL_10_MS;
370168451eb6SJan Blunck 	internals->link_down_delay_ms = 0;
370268451eb6SJan Blunck 	internals->link_up_delay_ms = 0;
370368451eb6SJan Blunck 
370415e34522SLong Wu 	internals->member_count = 0;
370515e34522SLong Wu 	internals->active_member_count = 0;
370668451eb6SJan Blunck 	internals->rx_offload_capa = 0;
370768451eb6SJan Blunck 	internals->tx_offload_capa = 0;
3708e8b3e1a9SFerruh Yigit 	internals->rx_queue_offload_capa = 0;
3709e8b3e1a9SFerruh Yigit 	internals->tx_queue_offload_capa = 0;
371068451eb6SJan Blunck 	internals->candidate_max_rx_pktlen = 0;
371168451eb6SJan Blunck 	internals->max_rx_pktlen = 0;
371268451eb6SJan Blunck 
371368451eb6SJan Blunck 	/* Initially allow to choose any offload type */
3714295968d1SFerruh Yigit 	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
371568451eb6SJan Blunck 
3716f5f93e10SIvan Malov 	memset(&internals->default_rxconf, 0,
3717f5f93e10SIvan Malov 	       sizeof(internals->default_rxconf));
3718f5f93e10SIvan Malov 	memset(&internals->default_txconf, 0,
3719f5f93e10SIvan Malov 	       sizeof(internals->default_txconf));
3720f5f93e10SIvan Malov 
37217a066594SIvan Malov 	memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
37227a066594SIvan Malov 	memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
37237a066594SIvan Malov 
3724d03c0e83SIvan Malov 	/*
3725d03c0e83SIvan Malov 	 * Do not restrict descriptor counts until
3726d03c0e83SIvan Malov 	 * the first back-end device gets attached.
3727d03c0e83SIvan Malov 	 */
3728d03c0e83SIvan Malov 	internals->rx_desc_lim.nb_max = UINT16_MAX;
3729d03c0e83SIvan Malov 	internals->tx_desc_lim.nb_max = UINT16_MAX;
3730550e8d6dSIvan Malov 	internals->rx_desc_lim.nb_align = 1;
3731550e8d6dSIvan Malov 	internals->tx_desc_lim.nb_align = 1;
3732d03c0e83SIvan Malov 
373315e34522SLong Wu 	memset(internals->active_members, 0, sizeof(internals->active_members));
373415e34522SLong Wu 	memset(internals->members, 0, sizeof(internals->members));
373568451eb6SJan Blunck 
373649dad902SMatan Azrad 	TAILQ_INIT(&internals->flow_list);
373749dad902SMatan Azrad 	internals->flow_isolated_valid = 0;
373849dad902SMatan Azrad 
373968451eb6SJan Blunck 	/* Set mode 4 default configuration */
374068451eb6SJan Blunck 	bond_mode_8023ad_setup(eth_dev, NULL);
374168451eb6SJan Blunck 	if (bond_ethdev_mode_set(eth_dev, mode)) {
37424f840086SLong Wu 		RTE_BOND_LOG(ERR, "Failed to set bonding device %u mode to %u",
374368451eb6SJan Blunck 				 eth_dev->data->port_id, mode);
374468451eb6SJan Blunck 		goto err;
374568451eb6SJan Blunck 	}
374668451eb6SJan Blunck 
374768451eb6SJan Blunck 	vlan_filter_bmp_size =
374835b2d13fSOlivier Matz 		rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
374968451eb6SJan Blunck 	internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
375068451eb6SJan Blunck 						   RTE_CACHE_LINE_SIZE);
375168451eb6SJan Blunck 	if (internals->vlan_filter_bmpmem == NULL) {
375268451eb6SJan Blunck 		RTE_BOND_LOG(ERR,
37534f840086SLong Wu 			     "Failed to allocate vlan bitmap for bonding device %u",
375468451eb6SJan Blunck 			     eth_dev->data->port_id);
375568451eb6SJan Blunck 		goto err;
375668451eb6SJan Blunck 	}
375768451eb6SJan Blunck 
375835b2d13fSOlivier Matz 	internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
375968451eb6SJan Blunck 			internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
376068451eb6SJan Blunck 	if (internals->vlan_filter_bmp == NULL) {
376168451eb6SJan Blunck 		RTE_BOND_LOG(ERR,
37624f840086SLong Wu 			     "Failed to init vlan bitmap for bonding device %u",
376368451eb6SJan Blunck 			     eth_dev->data->port_id);
376468451eb6SJan Blunck 		rte_free(internals->vlan_filter_bmpmem);
376568451eb6SJan Blunck 		goto err;
376668451eb6SJan Blunck 	}
376768451eb6SJan Blunck 
376868451eb6SJan Blunck 	return eth_dev->data->port_id;
376968451eb6SJan Blunck 
377068451eb6SJan Blunck err:
377168451eb6SJan Blunck 	rte_free(internals);
3772e16adf08SThomas Monjalon 	if (eth_dev != NULL)
3773e16adf08SThomas Monjalon 		eth_dev->data->dev_private = NULL;
377468451eb6SJan Blunck 	rte_eth_dev_release_port(eth_dev);
377568451eb6SJan Blunck 	return -1;
377668451eb6SJan Blunck }
377768451eb6SJan Blunck 
377868451eb6SJan Blunck static int
37795d2aa461SJan Blunck bond_probe(struct rte_vdev_device *dev)
37803eb6bdd8SBruce Richardson {
37815d2aa461SJan Blunck 	const char *name;
37823eb6bdd8SBruce Richardson 	struct bond_dev_private *internals;
37833eb6bdd8SBruce Richardson 	struct rte_kvargs *kvlist;
3784f294e048SChengchang Tang 	uint8_t bonding_mode;
37853eb6bdd8SBruce Richardson 	int arg_count, port_id;
3786f294e048SChengchang Tang 	int socket_id;
37877ff0ec78SDaniel Mrzyglod 	uint8_t agg_mode;
3788ee27edbeSJianfeng Tan 	struct rte_eth_dev *eth_dev;
37893eb6bdd8SBruce Richardson 
379068451eb6SJan Blunck 	if (!dev)
379168451eb6SJan Blunck 		return -EINVAL;
379268451eb6SJan Blunck 
37935d2aa461SJan Blunck 	name = rte_vdev_device_name(dev);
3794d7f4562aSStephen Hemminger 	RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
37953eb6bdd8SBruce Richardson 
37964852aa8fSQi Zhang 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
3797ee27edbeSJianfeng Tan 		eth_dev = rte_eth_dev_attach_secondary(name);
3798ee27edbeSJianfeng Tan 		if (!eth_dev) {
3799d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR, "Failed to probe %s", name);
3800ee27edbeSJianfeng Tan 			return -1;
3801ee27edbeSJianfeng Tan 		}
3802ee27edbeSJianfeng Tan 		/* TODO: request info from primary to set up Rx and Tx */
3803ee27edbeSJianfeng Tan 		eth_dev->dev_ops = &default_dev_ops;
3804d1c3ab22SFerruh Yigit 		eth_dev->device = &dev->device;
3805fbe90cddSThomas Monjalon 		rte_eth_dev_probing_finish(eth_dev);
3806ee27edbeSJianfeng Tan 		return 0;
3807ee27edbeSJianfeng Tan 	}
3808ee27edbeSJianfeng Tan 
38095d2aa461SJan Blunck 	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
38105d2aa461SJan Blunck 		pmd_bond_init_valid_arguments);
38116f65049eSJunjie Wan 	if (kvlist == NULL) {
38126f65049eSJunjie Wan 		RTE_BOND_LOG(ERR, "Invalid args in %s", rte_vdev_device_args(dev));
38133eb6bdd8SBruce Richardson 		return -1;
38146f65049eSJunjie Wan 	}
38153eb6bdd8SBruce Richardson 
38163eb6bdd8SBruce Richardson 	/* Parse link bonding mode */
38173eb6bdd8SBruce Richardson 	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
38183eb6bdd8SBruce Richardson 		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
381915e34522SLong Wu 				&bond_ethdev_parse_member_mode_kvarg,
38203eb6bdd8SBruce Richardson 				&bonding_mode) != 0) {
38214f840086SLong Wu 			RTE_BOND_LOG(ERR, "Invalid mode for bonding device %s",
38223eb6bdd8SBruce Richardson 					name);
38233eb6bdd8SBruce Richardson 			goto parse_error;
38243eb6bdd8SBruce Richardson 		}
38253eb6bdd8SBruce Richardson 	} else {
38264f840086SLong Wu 		RTE_BOND_LOG(ERR, "Mode must be specified only once for bonding "
3827d7f4562aSStephen Hemminger 				"device %s", name);
38283eb6bdd8SBruce Richardson 		goto parse_error;
38293eb6bdd8SBruce Richardson 	}
38303eb6bdd8SBruce Richardson 
38313eb6bdd8SBruce Richardson 	/* Parse socket id to create bonding device on */
38323eb6bdd8SBruce Richardson 	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
38333eb6bdd8SBruce Richardson 	if (arg_count == 1) {
38343eb6bdd8SBruce Richardson 		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
38353eb6bdd8SBruce Richardson 				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
38363eb6bdd8SBruce Richardson 				!= 0) {
3837d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
38384f840086SLong Wu 					"bonding device %s", name);
38393eb6bdd8SBruce Richardson 			goto parse_error;
38403eb6bdd8SBruce Richardson 		}
38413eb6bdd8SBruce Richardson 	} else if (arg_count > 1) {
3842d7f4562aSStephen Hemminger 		RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
38434f840086SLong Wu 				"bonding device %s", name);
38443eb6bdd8SBruce Richardson 		goto parse_error;
38453eb6bdd8SBruce Richardson 	} else {
38463eb6bdd8SBruce Richardson 		socket_id = rte_socket_id();
38473eb6bdd8SBruce Richardson 	}
38483eb6bdd8SBruce Richardson 
384968451eb6SJan Blunck 	dev->device.numa_node = socket_id;
385068451eb6SJan Blunck 
38513eb6bdd8SBruce Richardson 	/* Create link bonding eth device */
385268451eb6SJan Blunck 	port_id = bond_alloc(dev, bonding_mode);
38533eb6bdd8SBruce Richardson 	if (port_id < 0) {
3854d7f4562aSStephen Hemminger 		RTE_BOND_LOG(ERR, "Failed to create bonding device %s in mode %u on "
385585e6be63SZerun Fu 				"socket %d.",	name, bonding_mode, socket_id);
38563eb6bdd8SBruce Richardson 		goto parse_error;
38573eb6bdd8SBruce Richardson 	}
38583eb6bdd8SBruce Richardson 	internals = rte_eth_devices[port_id].data->dev_private;
38593eb6bdd8SBruce Richardson 	internals->kvlist = kvlist;
38603eb6bdd8SBruce Richardson 
38617ff0ec78SDaniel Mrzyglod 	if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
38627ff0ec78SDaniel Mrzyglod 		if (rte_kvargs_process(kvlist,
38637ff0ec78SDaniel Mrzyglod 				PMD_BOND_AGG_MODE_KVARG,
386415e34522SLong Wu 				&bond_ethdev_parse_member_agg_mode_kvarg,
38657ff0ec78SDaniel Mrzyglod 				&agg_mode) != 0) {
3866d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR,
38674f840086SLong Wu 					"Failed to parse agg selection mode for bonding device %s",
38687ff0ec78SDaniel Mrzyglod 					name);
38697ff0ec78SDaniel Mrzyglod 			goto parse_error;
38707ff0ec78SDaniel Mrzyglod 		}
38717ff0ec78SDaniel Mrzyglod 
38727ff0ec78SDaniel Mrzyglod 		if (internals->mode == BONDING_MODE_8023AD)
38737dc58bc7SRadu Nicolau 			internals->mode4.agg_selection = agg_mode;
38747ff0ec78SDaniel Mrzyglod 	} else {
38757dc58bc7SRadu Nicolau 		internals->mode4.agg_selection = AGG_STABLE;
38767ff0ec78SDaniel Mrzyglod 	}
38777ff0ec78SDaniel Mrzyglod 
38787dc58bc7SRadu Nicolau 	rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
38794f840086SLong Wu 	RTE_BOND_LOG(INFO, "Create bonding device %s on port %d in mode %u on "
38804f840086SLong Wu 			"socket %u.",	name, port_id, bonding_mode, socket_id);
38813eb6bdd8SBruce Richardson 	return 0;
38823eb6bdd8SBruce Richardson 
38833eb6bdd8SBruce Richardson parse_error:
38843eb6bdd8SBruce Richardson 	rte_kvargs_free(kvlist);
38853eb6bdd8SBruce Richardson 
38863eb6bdd8SBruce Richardson 	return -1;
38873eb6bdd8SBruce Richardson }
38883eb6bdd8SBruce Richardson 
38898d30fe7fSBernard Iremonger static int
38905d2aa461SJan Blunck bond_remove(struct rte_vdev_device *dev)
38918d30fe7fSBernard Iremonger {
389268451eb6SJan Blunck 	struct rte_eth_dev *eth_dev;
389368451eb6SJan Blunck 	struct bond_dev_private *internals;
38945d2aa461SJan Blunck 	const char *name;
389562024eb8SIvan Ilchenko 	int ret = 0;
38968d30fe7fSBernard Iremonger 
38975d2aa461SJan Blunck 	if (!dev)
38988d30fe7fSBernard Iremonger 		return -EINVAL;
38998d30fe7fSBernard Iremonger 
39005d2aa461SJan Blunck 	name = rte_vdev_device_name(dev);
3901d7f4562aSStephen Hemminger 	RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
39028d30fe7fSBernard Iremonger 
390368451eb6SJan Blunck 	/* find an ethdev entry */
390468451eb6SJan Blunck 	eth_dev = rte_eth_dev_allocated(name);
390568451eb6SJan Blunck 	if (eth_dev == NULL)
3906171875d0SThomas Monjalon 		return 0; /* port already released */
390768451eb6SJan Blunck 
39084852aa8fSQi Zhang 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3909662dbc32SThomas Monjalon 		return rte_eth_dev_release_port(eth_dev);
39104852aa8fSQi Zhang 
391168451eb6SJan Blunck 	RTE_ASSERT(eth_dev->device == &dev->device);
391268451eb6SJan Blunck 
391368451eb6SJan Blunck 	internals = eth_dev->data->dev_private;
391415e34522SLong Wu 	if (internals->member_count != 0)
391568451eb6SJan Blunck 		return -EBUSY;
391668451eb6SJan Blunck 
391768451eb6SJan Blunck 	if (eth_dev->data->dev_started == 1) {
391862024eb8SIvan Ilchenko 		ret = bond_ethdev_stop(eth_dev);
391968451eb6SJan Blunck 		bond_ethdev_close(eth_dev);
392068451eb6SJan Blunck 	}
392168451eb6SJan Blunck 	rte_eth_dev_release_port(eth_dev);
392268451eb6SJan Blunck 
392362024eb8SIvan Ilchenko 	return ret;
39248d30fe7fSBernard Iremonger }
39258d30fe7fSBernard Iremonger 
392615e34522SLong Wu /* This part will resolve the member port ids after all the other pdevs and vdevs
39273eb6bdd8SBruce Richardson  * have been allocated. */
39283eb6bdd8SBruce Richardson static int
39293eb6bdd8SBruce Richardson bond_ethdev_configure(struct rte_eth_dev *dev)
39303eb6bdd8SBruce Richardson {
39314be4659aSFerruh Yigit 	const char *name = dev->device->name;
39323eb6bdd8SBruce Richardson 	struct bond_dev_private *internals = dev->data->dev_private;
39333eb6bdd8SBruce Richardson 	struct rte_kvargs *kvlist = internals->kvlist;
39343eb6bdd8SBruce Richardson 	int arg_count;
3935f8244c63SZhiyong Yang 	uint16_t port_id = dev - rte_eth_devices;
3936e5f18551SHuisong Li 	uint32_t link_speeds;
39376d72657cSDaniel Mrzyglod 	uint8_t agg_mode;
39383eb6bdd8SBruce Richardson 
3939734ce47fSTomasz Kulasek 	static const uint8_t default_rss_key[40] = {
3940734ce47fSTomasz Kulasek 		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
3941734ce47fSTomasz Kulasek 		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3942734ce47fSTomasz Kulasek 		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
3943734ce47fSTomasz Kulasek 		0xBE, 0xAC, 0x01, 0xFA
3944734ce47fSTomasz Kulasek 	};
3945734ce47fSTomasz Kulasek 
3946734ce47fSTomasz Kulasek 	unsigned i, j;
3947734ce47fSTomasz Kulasek 
3948339f1ba5SIvan Malov 
39495847b57aSIvan Malov 	bond_ethdev_cfg_cleanup(dev, false);
3950339f1ba5SIvan Malov 
395183cf204aSIgor Romanov 	/*
395283cf204aSIgor Romanov 	 * If RSS is enabled, fill table with default values and
3953b53d106dSSean Morrissey 	 * set key to the value specified in port RSS configuration.
395483cf204aSIgor Romanov 	 * Fall back to default RSS key if the key is not specified
395583cf204aSIgor Romanov 	 */
3956295968d1SFerruh Yigit 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
39576b1a001eSChengchang Tang 		struct rte_eth_rss_conf *rss_conf =
39586b1a001eSChengchang Tang 			&dev->data->dev_conf.rx_adv_conf.rss_conf;
39594986aea2SYu Wenjun 
39604986aea2SYu Wenjun 		if (internals->rss_key_len == 0) {
39614986aea2SYu Wenjun 			internals->rss_key_len = sizeof(default_rss_key);
39624986aea2SYu Wenjun 		}
39634986aea2SYu Wenjun 
39646b1a001eSChengchang Tang 		if (rss_conf->rss_key != NULL) {
39656b1a001eSChengchang Tang 			if (internals->rss_key_len > rss_conf->rss_key_len) {
39666b1a001eSChengchang Tang 				RTE_BOND_LOG(ERR, "Invalid rss key length(%u)",
39676b1a001eSChengchang Tang 						rss_conf->rss_key_len);
39686b1a001eSChengchang Tang 				return -EINVAL;
39696b1a001eSChengchang Tang 			}
39706b1a001eSChengchang Tang 
39716b1a001eSChengchang Tang 			memcpy(internals->rss_key, rss_conf->rss_key,
397283cf204aSIgor Romanov 			       internals->rss_key_len);
397383cf204aSIgor Romanov 		} else {
39746b1a001eSChengchang Tang 			if (internals->rss_key_len > sizeof(default_rss_key)) {
397594d9c7d4SKe Zhang 				/*
397694d9c7d4SKe Zhang 				 * If the rss_key includes both the standard_rss_key and an
397794d9c7d4SKe Zhang 				 * extended_hash_key, the RSS key length will be larger than
397894d9c7d4SKe Zhang 				 * the default RSS key length, so the hash key has to be
397994d9c7d4SKe Zhang 				 * re-calculated.
398094d9c7d4SKe Zhang 				 */
398194d9c7d4SKe Zhang 				for (i = 0; i < internals->rss_key_len; i++)
398294d9c7d4SKe Zhang 					internals->rss_key[i] = (uint8_t)rte_rand();
398394d9c7d4SKe Zhang 			} else {
398483cf204aSIgor Romanov 				memcpy(internals->rss_key, default_rss_key,
398583cf204aSIgor Romanov 					internals->rss_key_len);
398683cf204aSIgor Romanov 			}
398794d9c7d4SKe Zhang 		}
3988734ce47fSTomasz Kulasek 
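		/* Build the default RETA, spreading entries across the configured Rx queues */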
3989734ce47fSTomasz Kulasek 		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
3990734ce47fSTomasz Kulasek 			internals->reta_conf[i].mask = ~0LL;
3991295968d1SFerruh Yigit 			for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
3992617d1ac2SIgor Romanov 				internals->reta_conf[i].reta[j] =
3993295968d1SFerruh Yigit 						(i * RTE_ETH_RETA_GROUP_SIZE + j) %
3994617d1ac2SIgor Romanov 						dev->data->nb_rx_queues;
3995734ce47fSTomasz Kulasek 		}
3996734ce47fSTomasz Kulasek 	}
3997734ce47fSTomasz Kulasek 
3998e5f18551SHuisong Li 	link_speeds = dev->data->dev_conf.link_speeds;
3999e5f18551SHuisong Li 	/*
4000e5f18551SHuisong Li 	 * The default value of 'link_speeds' is zero. From its definition,
4001e5f18551SHuisong Li 	 * this value actually means auto-negotiation. But not all PMDs support
4002e5f18551SHuisong Li 	 * auto-negotiation. So ignore the check for the auto-negotiation and
4003e5f18551SHuisong Li 	 * only consider fixed speed to reduce the impact on PMDs.
4004e5f18551SHuisong Li 	 */
4005e5f18551SHuisong Li 	if (link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
4006e5f18551SHuisong Li 		if ((link_speeds &
4007e5f18551SHuisong Li 		    (internals->speed_capa & ~RTE_ETH_LINK_SPEED_FIXED)) == 0) {
400815e34522SLong Wu 			RTE_BOND_LOG(ERR, "the fixed speed is not supported by all member devices.");
4009e5f18551SHuisong Li 			return -EINVAL;
4010e5f18551SHuisong Li 		}
4011e5f18551SHuisong Li 		/*
4012e5f18551SHuisong Li 		 * Two '1' in binary of 'link_speeds': bit0 and a unique
4013e5f18551SHuisong Li 		 * Exactly two bits must be set in 'link_speeds': bit 0 (the
4014e5f18551SHuisong Li 		 * fixed-speed flag) and a single unique speed bit.
4015191128d7SDavid Marchand 		if (rte_popcount64(link_speeds) != 2) {
4016e5f18551SHuisong Li 			RTE_BOND_LOG(ERR, "please set a unique speed.");
4017e5f18551SHuisong Li 			return -EINVAL;
4018e5f18551SHuisong Li 		}
4019e5f18551SHuisong Li 	}
4020e5f18551SHuisong Li 
40216cfc6a4fSEric Kinzie 	/* set the max_rx_pktlen */
40226cfc6a4fSEric Kinzie 	internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
40236cfc6a4fSEric Kinzie 
40243eb6bdd8SBruce Richardson 	/*
40254f840086SLong Wu 	 * If there is no kvlist, it means that this bonding device has been created
40263eb6bdd8SBruce Richardson 	 * through the bonding API.
40273eb6bdd8SBruce Richardson 	 */
40285847b57aSIvan Malov 	if (!kvlist || internals->kvargs_processing_is_done)
40293eb6bdd8SBruce Richardson 		return 0;
40303eb6bdd8SBruce Richardson 
40315847b57aSIvan Malov 	internals->kvargs_processing_is_done = true;
40325847b57aSIvan Malov 
40334f840086SLong Wu 	/* Parse MAC address for bonding device */
40343eb6bdd8SBruce Richardson 	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
40353eb6bdd8SBruce Richardson 	if (arg_count == 1) {
40366d13ea8eSOlivier Matz 		struct rte_ether_addr bond_mac;
40373eb6bdd8SBruce Richardson 
40383eb6bdd8SBruce Richardson 		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
40393eb6bdd8SBruce Richardson 				       &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
40404f840086SLong Wu 			RTE_BOND_LOG(INFO, "Invalid mac address for bonding device %s",
40413eb6bdd8SBruce Richardson 				     name);
40423eb6bdd8SBruce Richardson 			return -1;
40433eb6bdd8SBruce Richardson 		}
40443eb6bdd8SBruce Richardson 
40453eb6bdd8SBruce Richardson 		/* Set MAC address */
40463eb6bdd8SBruce Richardson 		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
4047d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR,
40484f840086SLong Wu 				     "Failed to set mac address on bonding device %s",
40493eb6bdd8SBruce Richardson 				     name);
40503eb6bdd8SBruce Richardson 			return -1;
40513eb6bdd8SBruce Richardson 		}
40523eb6bdd8SBruce Richardson 	} else if (arg_count > 1) {
4053d7f4562aSStephen Hemminger 		RTE_BOND_LOG(ERR,
40544f840086SLong Wu 			     "MAC address can be specified only once for bonding device %s",
40553eb6bdd8SBruce Richardson 			     name);
40563eb6bdd8SBruce Richardson 		return -1;
40573eb6bdd8SBruce Richardson 	}
40583eb6bdd8SBruce Richardson 
40593eb6bdd8SBruce Richardson 	/* Parse/set balance mode transmit policy */
40603eb6bdd8SBruce Richardson 	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
40613eb6bdd8SBruce Richardson 	if (arg_count == 1) {
40623eb6bdd8SBruce Richardson 		uint8_t xmit_policy;
40633eb6bdd8SBruce Richardson 
40643eb6bdd8SBruce Richardson 		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
40653eb6bdd8SBruce Richardson 				       &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
40663eb6bdd8SBruce Richardson 		    0) {
4067d7f4562aSStephen Hemminger 			RTE_BOND_LOG(INFO,
40684f840086SLong Wu 				     "Invalid xmit policy specified for bonding device %s",
40693eb6bdd8SBruce Richardson 				     name);
40703eb6bdd8SBruce Richardson 			return -1;
40713eb6bdd8SBruce Richardson 		}
40723eb6bdd8SBruce Richardson 
40733eb6bdd8SBruce Richardson 		/* Set balance mode transmit policy */
40743eb6bdd8SBruce Richardson 		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
4075d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR,
40764f840086SLong Wu 				     "Failed to set balance xmit policy on bonding device %s",
40773eb6bdd8SBruce Richardson 				     name);
40783eb6bdd8SBruce Richardson 			return -1;
40793eb6bdd8SBruce Richardson 		}
40803eb6bdd8SBruce Richardson 	} else if (arg_count > 1) {
4081d7f4562aSStephen Hemminger 		RTE_BOND_LOG(ERR,
40824f840086SLong Wu 			     "Transmit policy can be specified only once for bonding device %s",
4083d7f4562aSStephen Hemminger 			     name);
40843eb6bdd8SBruce Richardson 		return -1;
40853eb6bdd8SBruce Richardson 	}
40863eb6bdd8SBruce Richardson 
40876d72657cSDaniel Mrzyglod 	if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
40886d72657cSDaniel Mrzyglod 		if (rte_kvargs_process(kvlist,
40896d72657cSDaniel Mrzyglod 				       PMD_BOND_AGG_MODE_KVARG,
409015e34522SLong Wu 				       &bond_ethdev_parse_member_agg_mode_kvarg,
40916d72657cSDaniel Mrzyglod 				       &agg_mode) != 0) {
4092d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR,
40934f840086SLong Wu 				     "Failed to parse agg selection mode for bonding device %s",
40946d72657cSDaniel Mrzyglod 				     name);
40956d72657cSDaniel Mrzyglod 		}
40966f4ae7f5SLee Daly 		if (internals->mode == BONDING_MODE_8023AD) {
40976f4ae7f5SLee Daly 			int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
40986d72657cSDaniel Mrzyglod 					agg_mode);
40996f4ae7f5SLee Daly 			if (ret < 0) {
41006f4ae7f5SLee Daly 				RTE_BOND_LOG(ERR,
41014f840086SLong Wu 					"Invalid args for agg selection set for bonding device %s",
41026f4ae7f5SLee Daly 					name);
41036f4ae7f5SLee Daly 				return -1;
41046f4ae7f5SLee Daly 			}
41056f4ae7f5SLee Daly 		}
41066d72657cSDaniel Mrzyglod 	}
41076d72657cSDaniel Mrzyglod 
41084f840086SLong Wu 	/* Parse/add member ports to bonding device */
410915e34522SLong Wu 	if (rte_kvargs_count(kvlist, PMD_BOND_MEMBER_PORT_KVARG) > 0) {
411015e34522SLong Wu 		struct bond_ethdev_member_ports member_ports;
41113eb6bdd8SBruce Richardson 		unsigned i;
41123eb6bdd8SBruce Richardson 
411315e34522SLong Wu 		memset(&member_ports, 0, sizeof(member_ports));
41143eb6bdd8SBruce Richardson 
411515e34522SLong Wu 		if (rte_kvargs_process(kvlist, PMD_BOND_MEMBER_PORT_KVARG,
411615e34522SLong Wu 				       &bond_ethdev_parse_member_port_kvarg, &member_ports) != 0) {
4117d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR,
41184f840086SLong Wu 				     "Failed to parse member ports for bonding device %s",
41193eb6bdd8SBruce Richardson 				     name);
41203eb6bdd8SBruce Richardson 			return -1;
41213eb6bdd8SBruce Richardson 		}
41223eb6bdd8SBruce Richardson 
412315e34522SLong Wu 		for (i = 0; i < member_ports.member_count; i++) {
412415e34522SLong Wu 			if (rte_eth_bond_member_add(port_id, member_ports.members[i]) != 0) {
4125d7f4562aSStephen Hemminger 				RTE_BOND_LOG(ERR,
41264f840086SLong Wu 					     "Failed to add port %d as member to bonding device %s",
412715e34522SLong Wu 					     member_ports.members[i], name);
41283eb6bdd8SBruce Richardson 			}
41293eb6bdd8SBruce Richardson 		}
41303eb6bdd8SBruce Richardson 
41313eb6bdd8SBruce Richardson 	} else {
41324f840086SLong Wu 		RTE_BOND_LOG(INFO, "No members specified for bonding device %s", name);
41333eb6bdd8SBruce Richardson 		return -1;
41343eb6bdd8SBruce Richardson 	}
41353eb6bdd8SBruce Richardson 
413615e34522SLong Wu 	/* Parse/set primary member port id*/
413715e34522SLong Wu 	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_MEMBER_KVARG);
41383eb6bdd8SBruce Richardson 	if (arg_count == 1) {
413915e34522SLong Wu 		uint16_t primary_member_port_id;
41403eb6bdd8SBruce Richardson 
41413eb6bdd8SBruce Richardson 		if (rte_kvargs_process(kvlist,
414215e34522SLong Wu 				       PMD_BOND_PRIMARY_MEMBER_KVARG,
414315e34522SLong Wu 				       &bond_ethdev_parse_primary_member_port_id_kvarg,
414415e34522SLong Wu 				       &primary_member_port_id) < 0) {
4145d7f4562aSStephen Hemminger 			RTE_BOND_LOG(INFO,
41464f840086SLong Wu 				     "Invalid primary member port id specified for bonding device %s",
4147d7f4562aSStephen Hemminger 				     name);
41483eb6bdd8SBruce Richardson 			return -1;
41493eb6bdd8SBruce Richardson 		}
41503eb6bdd8SBruce Richardson 
41513eb6bdd8SBruce Richardson 		/* Set the primary member port */
415215e34522SLong Wu 		if (rte_eth_bond_primary_set(port_id, primary_member_port_id)
41533eb6bdd8SBruce Richardson 		    != 0) {
4154d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR,
41554f840086SLong Wu 				     "Failed to set primary member port %d on bonding device %s",
415615e34522SLong Wu 				     primary_member_port_id, name);
41573eb6bdd8SBruce Richardson 			return -1;
41583eb6bdd8SBruce Richardson 		}
41593eb6bdd8SBruce Richardson 	} else if (arg_count > 1) {
4160d7f4562aSStephen Hemminger 		RTE_BOND_LOG(INFO,
41614f840086SLong Wu 			     "Primary member can be specified only once for bonding device %s",
4162d7f4562aSStephen Hemminger 			     name);
41633eb6bdd8SBruce Richardson 		return -1;
41643eb6bdd8SBruce Richardson 	}
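	/*
	 * The primary member is mainly meaningful for active-backup mode,
	 * where it is the preferred port for carrying traffic while its link
	 * is up.
	 */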
41653eb6bdd8SBruce Richardson 
41663eb6bdd8SBruce Richardson 	/* Parse link status monitor polling interval */
41673eb6bdd8SBruce Richardson 	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
41683eb6bdd8SBruce Richardson 	if (arg_count == 1) {
41693eb6bdd8SBruce Richardson 		uint32_t lsc_poll_interval_ms;
41703eb6bdd8SBruce Richardson 
41713eb6bdd8SBruce Richardson 		if (rte_kvargs_process(kvlist,
41723eb6bdd8SBruce Richardson 				       PMD_BOND_LSC_POLL_PERIOD_KVARG,
41733eb6bdd8SBruce Richardson 				       &bond_ethdev_parse_time_ms_kvarg,
41743eb6bdd8SBruce Richardson 				       &lsc_poll_interval_ms) < 0) {
4175d7f4562aSStephen Hemminger 			RTE_BOND_LOG(INFO,
41764f840086SLong Wu 				     "Invalid lsc polling interval value specified for bonding"
4177d7f4562aSStephen Hemminger 				     " device %s", name);
41783eb6bdd8SBruce Richardson 			return -1;
41793eb6bdd8SBruce Richardson 		}
41803eb6bdd8SBruce Richardson 
41813eb6bdd8SBruce Richardson 		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
41823eb6bdd8SBruce Richardson 		    != 0) {
4183d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR,
41844f840086SLong Wu 				     "Failed to set lsc monitor polling interval (%u ms) on bonding device %s",
4185d7f4562aSStephen Hemminger 				     lsc_poll_interval_ms, name);
41863eb6bdd8SBruce Richardson 			return -1;
41873eb6bdd8SBruce Richardson 		}
41883eb6bdd8SBruce Richardson 	} else if (arg_count > 1) {
4189d7f4562aSStephen Hemminger 		RTE_BOND_LOG(INFO,
41904f840086SLong Wu 			     "LSC polling interval can be specified only once for bonding"
4191d7f4562aSStephen Hemminger 			     " device %s", name);
41923eb6bdd8SBruce Richardson 		return -1;
41933eb6bdd8SBruce Richardson 	}
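	/*
	 * The polling interval is used to track the link state of members
	 * whose drivers do not support link-status-change interrupts.
	 */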
41943eb6bdd8SBruce Richardson 
41953eb6bdd8SBruce Richardson 	/* Parse link up interrupt propagation delay */
41963eb6bdd8SBruce Richardson 	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
41973eb6bdd8SBruce Richardson 	if (arg_count == 1) {
41983eb6bdd8SBruce Richardson 		uint32_t link_up_delay_ms;
41993eb6bdd8SBruce Richardson 
42003eb6bdd8SBruce Richardson 		if (rte_kvargs_process(kvlist,
42013eb6bdd8SBruce Richardson 				       PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
42023eb6bdd8SBruce Richardson 				       &bond_ethdev_parse_time_ms_kvarg,
42033eb6bdd8SBruce Richardson 				       &link_up_delay_ms) < 0) {
4204d7f4562aSStephen Hemminger 			RTE_BOND_LOG(INFO,
42053eb6bdd8SBruce Richardson 				     "Invalid link up propagation delay value specified for"
42064f840086SLong Wu 				     " bonding device %s", name);
42073eb6bdd8SBruce Richardson 			return -1;
42083eb6bdd8SBruce Richardson 		}
42093eb6bdd8SBruce Richardson 
42103eb6bdd8SBruce Richardson 		/* Set link up propagation delay */
42113eb6bdd8SBruce Richardson 		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
42123eb6bdd8SBruce Richardson 		    != 0) {
4213d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR,
42144f840086SLong Wu 				     "Failed to set link up propagation delay (%u ms) on bonding"
4215d7f4562aSStephen Hemminger 				     " device %s", link_up_delay_ms, name);
42163eb6bdd8SBruce Richardson 			return -1;
42173eb6bdd8SBruce Richardson 		}
42183eb6bdd8SBruce Richardson 	} else if (arg_count > 1) {
4219d7f4562aSStephen Hemminger 		RTE_BOND_LOG(INFO,
42203eb6bdd8SBruce Richardson 			     "Link up propagation delay can be specified only once for"
42214f840086SLong Wu 			     " bonding device %s", name);
42223eb6bdd8SBruce Richardson 		return -1;
42233eb6bdd8SBruce Richardson 	}
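	/*
	 * The link up propagation delay debounces link-up events: a member is
	 * treated as usable only after its link has stayed up for this long.
	 */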
42243eb6bdd8SBruce Richardson 
42253eb6bdd8SBruce Richardson 	/* Parse link down interrupt propagation delay */
42263eb6bdd8SBruce Richardson 	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
42273eb6bdd8SBruce Richardson 	if (arg_count == 1) {
42283eb6bdd8SBruce Richardson 		uint32_t link_down_delay_ms;
42293eb6bdd8SBruce Richardson 
42303eb6bdd8SBruce Richardson 		if (rte_kvargs_process(kvlist,
42313eb6bdd8SBruce Richardson 				       PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
42323eb6bdd8SBruce Richardson 				       &bond_ethdev_parse_time_ms_kvarg,
42333eb6bdd8SBruce Richardson 				       &link_down_delay_ms) < 0) {
4234d7f4562aSStephen Hemminger 			RTE_BOND_LOG(INFO,
42353eb6bdd8SBruce Richardson 				     "Invalid link down propagation delay value specified for"
42364f840086SLong Wu 				     " bonding device %s", name);
42373eb6bdd8SBruce Richardson 			return -1;
42383eb6bdd8SBruce Richardson 		}
42393eb6bdd8SBruce Richardson 
42403eb6bdd8SBruce Richardson 		/* Set link down propagation delay */
42413eb6bdd8SBruce Richardson 		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
42423eb6bdd8SBruce Richardson 		    != 0) {
4243d7f4562aSStephen Hemminger 			RTE_BOND_LOG(ERR,
42444f840086SLong Wu 				     "Failed to set link down propagation delay (%u ms) on bonding device %s",
4245d7f4562aSStephen Hemminger 				     link_down_delay_ms, name);
42463eb6bdd8SBruce Richardson 			return -1;
42473eb6bdd8SBruce Richardson 		}
42483eb6bdd8SBruce Richardson 	} else if (arg_count > 1) {
4249d7f4562aSStephen Hemminger 		RTE_BOND_LOG(INFO,
42504f840086SLong Wu 			     "Link down propagation delay can be specified only once for bonding device %s",
4251d7f4562aSStephen Hemminger 			     name);
42523eb6bdd8SBruce Richardson 		return -1;
42533eb6bdd8SBruce Richardson 	}
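	/*
	 * The link down propagation delay is the corresponding debounce for
	 * link-down events before a member is taken out of service.
	 */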
42543eb6bdd8SBruce Richardson 
425615e34522SLong Wu 	/* Configure members so that the MTU setting can be passed to them */
425615e34522SLong Wu 	for (i = 0; i < internals->member_count; i++) {
425715e34522SLong Wu 		struct rte_eth_dev *member_ethdev =
425815e34522SLong Wu 				&(rte_eth_devices[internals->members[i].port_id]);
425915e34522SLong Wu 		if (member_configure(dev, member_ethdev) != 0) {
4260b3eaaf1dSJunjie Wan 			RTE_BOND_LOG(ERR,
42614f840086SLong Wu 				"Bonding port (%d) failed to configure member device (%d)",
4262b3eaaf1dSJunjie Wan 				dev->data->port_id,
426315e34522SLong Wu 				internals->members[i].port_id);
4264b3eaaf1dSJunjie Wan 			return -1;
4265b3eaaf1dSJunjie Wan 		}
4266b3eaaf1dSJunjie Wan 	}
42673eb6bdd8SBruce Richardson 	return 0;
42683eb6bdd8SBruce Richardson }
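
/*
 * Illustrative sketch only (not part of the driver): the devargs handled in
 * bond_ethdev_configure() roughly correspond to the public bonding API calls
 * below. The device name, mode, interval and port ids are placeholder
 * assumptions.
 *
 *	int bond_port = rte_eth_bond_create("net_bonding0",
 *					    BONDING_MODE_ACTIVE_BACKUP,
 *					    rte_socket_id());
 *	if (bond_port >= 0) {
 *		rte_eth_bond_member_add(bond_port, member_port_id);
 *		rte_eth_bond_primary_set(bond_port, member_port_id);
 *		rte_eth_bond_link_monitoring_set(bond_port, 100);
 *	}
 */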
42693eb6bdd8SBruce Richardson 
427073db5badSDavid Marchand struct rte_vdev_driver pmd_bond_drv = {
427150a3345fSShreyansh Jain 	.probe = bond_probe,
427250a3345fSShreyansh Jain 	.remove = bond_remove,
42733eb6bdd8SBruce Richardson };
42743eb6bdd8SBruce Richardson 
427573db5badSDavid Marchand RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
42769fa80cb2SJan Blunck RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
4277cb6696d2SNeil Horman 
427801f19227SShreyansh Jain RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
427915e34522SLong Wu 	"member=<ifc> "
428065eca099SPablo de Lara 	"primary=<ifc> "
4281419f2addSPablo de Lara 	"mode=[0-6] "
428265eca099SPablo de Lara 	"xmit_policy=[l2 | l23 | l34] "
42836d72657cSDaniel Mrzyglod 	"agg_mode=[count | stable | bandwidth] "
428465eca099SPablo de Lara 	"socket_id=<int> "
428565eca099SPablo de Lara 	"mac=<mac addr> "
428665eca099SPablo de Lara 	"lsc_poll_period_ms=<int> "
428765eca099SPablo de Lara 	"up_delay=<int> "
428865eca099SPablo de Lara 	"down_delay=<int>");
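
/*
 * Illustrative usage of the parameters above (the vdev name and PCI
 * addresses are placeholders):
 *   --vdev 'net_bonding0,mode=1,member=0000:02:00.0,member=0000:03:00.0,primary=0000:02:00.0'
 */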
4289d7f4562aSStephen Hemminger 
4290eeded204SDavid Marchand /* We can't use RTE_LOG_REGISTER_DEFAULT because of the forced name for
4291eeded204SDavid Marchand  * this library, see meson.build.
4292eeded204SDavid Marchand  */
4293b1641987SThomas Monjalon RTE_LOG_REGISTER(bond_logtype, pmd.net.bonding, NOTICE);
4294