xref: /dpdk/drivers/net/bonding/rte_eth_bond_api.c (revision ceb1ccd5d50c1a89ba8bdd97cc199e7f07422b98)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"

#define DEFAULT_POLLING_INTERVAL_10_MS (10)

const char pmd_bond_driver_name[] = "rte_bond_pmd";

int
check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
{
	/* Check for a valid pointer */
	if (eth_dev->data->drv_name == NULL)
		return -1;

	/* Return 0 if the driver name matches the bond PMD's name */
	return eth_dev->data->drv_name != pmd_bond_driver_name;
}

int
valid_bonded_port_id(uint8_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
}

int
valid_slave_port_id(uint8_t port_id)
{
	/* Verify that the port id is valid */
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	/* Verify that port_id refers to a non-bonded port */
	if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0)
		return -1;

	return 0;
}

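/*
 * Append a slave to the bonded device's active slave array and perform the
 * per-mode bookkeeping (802.3ad activation, TLB/ALB ordering updates).
 */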
void
activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t active_count = internals->active_slave_count;

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_activate_slave(eth_dev, port_id);

	if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB) {
		internals->tlb_slaves_order[active_count] = port_id;
	}

	RTE_VERIFY(internals->active_slave_count <
			(RTE_DIM(internals->active_slaves) - 1));

	internals->active_slaves[internals->active_slave_count] = port_id;
	internals->active_slave_count++;

	if (internals->mode == BONDING_MODE_TLB)
		bond_tlb_activate_slave(internals);
	if (internals->mode == BONDING_MODE_ALB)
		bond_mode_alb_client_list_upd(eth_dev);
}

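/*
 * Remove a slave from the bonded device's active slave array, shifting the
 * remaining entries down, and perform the per-mode stop/restart bookkeeping.
 */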
void
deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
	uint8_t slave_pos;
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t active_count = internals->active_slave_count;

	if (internals->mode == BONDING_MODE_8023AD) {
		bond_mode_8023ad_stop(eth_dev);
		bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
	} else if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB)
		bond_tlb_disable(internals);

	slave_pos = find_slave_by_id(internals->active_slaves, active_count,
			port_id);

	/* If the slave was not at the end of the list, shift the remaining
	 * active slaves up the array */
	if (slave_pos < active_count) {
		active_count--;
		memmove(internals->active_slaves + slave_pos,
				internals->active_slaves + slave_pos + 1,
				(active_count - slave_pos) *
					sizeof(internals->active_slaves[0]));
	}

	RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
	internals->active_slave_count = active_count;

	if (eth_dev->data->dev_started) {
		if (internals->mode == BONDING_MODE_8023AD) {
			bond_mode_8023ad_start(eth_dev);
		} else if (internals->mode == BONDING_MODE_TLB) {
			bond_tlb_enable(internals);
		} else if (internals->mode == BONDING_MODE_ALB) {
			bond_tlb_enable(internals);
			bond_mode_alb_client_list_upd(eth_dev);
		}
	}
}

uint8_t
number_of_sockets(void)
{
	int sockets = 0;
	int i;
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();

	for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
		if (sockets < ms[i].socket_id)
			sockets = ms[i].socket_id;
	}

	/* Number of sockets = maximum socket_id + 1 */
	return ++sockets;
}

int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
	struct bond_dev_private *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */

	if (name == NULL) {
		RTE_BOND_LOG(ERR, "Invalid name specified");
		goto err;
	}

	if (socket_id >= number_of_sockets()) {
		RTE_BOND_LOG(ERR,
				"Invalid socket id specified to create bonded device on.");
		goto err;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
	if (internals == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
		goto err;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
		goto err;
	}

	eth_dev->data->dev_private = internals;
	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	eth_dev->data->dev_link.link_status = 0;

	eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
			socket_id);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
		goto err;
	}

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->dev_ops = &default_dev_ops;
	eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
		RTE_ETH_DEV_DETACHABLE;
	eth_dev->driver = NULL;
	eth_dev->data->kdrv = RTE_KDRV_NONE;
	eth_dev->data->drv_name = pmd_bond_driver_name;
	eth_dev->data->numa_node = socket_id;

	rte_spinlock_init(&internals->lock);

	internals->port_id = eth_dev->data->port_id;
	internals->mode = BONDING_MODE_INVALID;
	internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
	internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
	internals->xmit_hash = xmit_l2_hash;
	internals->user_defined_mac = 0;
	internals->link_props_set = 0;

	internals->link_status_polling_enabled = 0;

	internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS;
	internals->link_down_delay_ms = 0;
	internals->link_up_delay_ms = 0;

	internals->slave_count = 0;
	internals->active_slave_count = 0;
	internals->rx_offload_capa = 0;
	internals->tx_offload_capa = 0;

	/* Initially allow any RSS offload type to be chosen */
	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;

	memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
	memset(internals->slaves, 0, sizeof(internals->slaves));

	/* Set mode 4 default configuration */
	bond_mode_8023ad_setup(eth_dev, NULL);
	if (bond_ethdev_mode_set(eth_dev, mode)) {
		RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
				 eth_dev->data->port_id, mode);
		goto err;
	}

	return eth_dev->data->port_id;

err:
	rte_free(internals);
	if (eth_dev != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		rte_eth_dev_release_port(eth_dev);
	}
	return -1;
}
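
/*
 * Illustrative usage sketch (not part of the driver): how an application
 * might create and later free a bonded device with this API. The device
 * name "bond0" and the choice of active-backup mode are arbitrary values
 * used only for the example.
 *
 *	int bond_port;
 *
 *	bond_port = rte_eth_bond_create("bond0", BONDING_MODE_ACTIVE_BACKUP,
 *			rte_socket_id());
 *	if (bond_port < 0)
 *		rte_exit(EXIT_FAILURE, "Failed to create bonded device\n");
 *
 *	... configure the port, add slaves and call rte_eth_dev_start() ...
 *
 *	rte_eth_bond_free("bond0");
 */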

int
rte_eth_bond_free(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct bond_dev_private *internals;

	/* now free all data allocation - for eth_dev structure,
	 * dummy pci driver and internal (private) data
	 */

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	internals = eth_dev->data->dev_private;
	if (internals->slave_count != 0)
		return -EBUSY;

	if (eth_dev->data->dev_started == 1) {
		bond_ethdev_stop(eth_dev);
		bond_ethdev_close(eth_dev);
	}

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data->mac_addrs);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static int
__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link_props;
	struct rte_eth_dev_info dev_info;

	if (valid_slave_port_id(slave_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	slave_eth_dev = &rte_eth_devices[slave_port_id];
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
		RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
		return -1;
	}

	/* Add slave details to bonded device */
	slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
	slave_add(internals, slave_eth_dev);

	rte_eth_dev_info_get(slave_port_id, &dev_info);

	/* We need to store each slave's reta_size so that the RETA can be kept
	 * synchronized across all slave devices even if their sizes differ.
	 */
	internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;

	if (internals->slave_count < 1) {
		/* if MAC is not user defined then use MAC of first slave added
		 * to the bonded device */
		if (!internals->user_defined_mac)
			mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs);

		/* Inherit eth dev link properties from first slave */
		link_properties_set(bonded_eth_dev,
				&(slave_eth_dev->data->dev_link));

		/* Make primary slave */
		internals->primary_port = slave_port_id;
		internals->current_primary_port = slave_port_id;

		/* Inherit queue settings from first slave */
		internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
		internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;

		internals->reta_size = dev_info.reta_size;

		/* Take the first dev's offload capabilities */
		internals->rx_offload_capa = dev_info.rx_offload_capa;
		internals->tx_offload_capa = dev_info.tx_offload_capa;
		internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;

	} else {
		/* If link properties have already been set, check that the new
		 * slave supports them; all slaves must be the same */
		if (internals->link_props_set) {
			if (link_properties_valid(&(bonded_eth_dev->data->dev_link),
					&(slave_eth_dev->data->dev_link))) {
				slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
				RTE_BOND_LOG(ERR,
						"Slave port %d link speed/duplex not supported",
						slave_port_id);
				return -1;
			}
		} else {
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		}
		internals->rx_offload_capa &= dev_info.rx_offload_capa;
		internals->tx_offload_capa &= dev_info.tx_offload_capa;
		internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;

		/* The bonded device's RETA size is the GCD of all slaves' RETA
		 * sizes; since every size is a power of 2, the smallest one is
		 * the GCD
		 */
		if (internals->reta_size > dev_info.reta_size)
			internals->reta_size = dev_info.reta_size;

	}

	bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
			internals->flow_type_rss_offloads;

	internals->slave_count++;

	/* Update all slave devices' MACs */
	mac_address_slaves_update(bonded_eth_dev);

	if (bonded_eth_dev->data->dev_started) {
		if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
			slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
			RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
					slave_port_id);
			return -1;
		}
	}

	/* Register link status change callback with bonded device pointer as
	 * argument */
	rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
			bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);

	/* If the bonded device is started then we can add the slave to the
	 * active slave array */
	if (bonded_eth_dev->data->dev_started) {
		rte_eth_link_get_nowait(slave_port_id, &link_props);

		if (link_props.link_status == 1) {
			if (internals->active_slave_count == 0 &&
			    !internals->user_defined_primary_port)
				bond_ethdev_primary_set(internals,
							slave_port_id);

			if (find_slave_by_id(internals->active_slaves,
					     internals->active_slave_count,
					     slave_port_id) == internals->active_slave_count)
				activate_slave(bonded_eth_dev, slave_port_id);
		}
	}
	return 0;
}

int
rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;

	int retval;

	/* Verify that the bonded port id is valid; the slave port id is
	 * validated in __eth_bond_slave_add_lock_free() */
	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	rte_spinlock_lock(&internals->lock);

	retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);

	rte_spinlock_unlock(&internals->lock);

	return retval;
}
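
/*
 * Illustrative usage sketch (not part of the driver): attaching slave ports
 * to a bonded device from application code. The port ids (bond_port, 0, 1)
 * are hypothetical.
 *
 *	if (rte_eth_bond_slave_add(bond_port, 0) != 0 ||
 *			rte_eth_bond_slave_add(bond_port, 1) != 0)
 *		rte_exit(EXIT_FAILURE, "Failed to add slaves to bonded device\n");
 *
 *	rte_eth_bond_primary_set(bond_port, 0);
 *
 *	... later, slaves can be detached with rte_eth_bond_slave_remove() ...
 */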

static int
__eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_dev *slave_eth_dev;
	int i, slave_idx;

	if (valid_slave_port_id(slave_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	/* first remove from active slave list */
	slave_idx = find_slave_by_id(internals->active_slaves,
		internals->active_slave_count, slave_port_id);

	if (slave_idx < internals->active_slave_count)
		deactivate_slave(bonded_eth_dev, slave_port_id);

	slave_idx = -1;
	/* now find in slave list */
	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id == slave_port_id) {
			slave_idx = i;
			break;
		}

	if (slave_idx < 0) {
		RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
				internals->slave_count);
		return -1;
	}

	/* Un-register link status change callback with bonded device pointer as
	 * argument */
	rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
			bond_ethdev_lsc_event_callback,
			&rte_eth_devices[bonded_port_id].data->port_id);

	/* Restore original MAC address of slave device */
	mac_address_set(&rte_eth_devices[slave_port_id],
			&(internals->slaves[slave_idx].persisted_mac_addr));

	slave_eth_dev = &rte_eth_devices[slave_port_id];
	slave_remove(internals, slave_eth_dev);
	slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);

	/* first slave in the active list will be the primary by default,
	 * otherwise use the first device in the list */
	if (internals->current_primary_port == slave_port_id) {
		if (internals->active_slave_count > 0)
			internals->current_primary_port = internals->active_slaves[0];
		else if (internals->slave_count > 0)
			internals->current_primary_port = internals->slaves[0].port_id;
		else
			internals->primary_port = 0;
	}

	if (internals->active_slave_count < 1) {
		/* reset device link properties as no slaves are active */
		link_properties_reset(&rte_eth_devices[bonded_port_id]);

		/* if no slaves remain attached to the bonded device and the MAC is
		 * not user defined then clear the MAC of the bonded device as it
		 * will be reset when a new slave is added */
		if (internals->slave_count < 1 && !internals->user_defined_mac)
			memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
					sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
	}
	if (internals->slave_count == 0) {
		internals->rx_offload_capa = 0;
		internals->tx_offload_capa = 0;
		internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
		internals->reta_size = 0;
	}
	return 0;
}

int
rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;
	int retval;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	rte_spinlock_lock(&internals->lock);

	retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);

	rte_spinlock_unlock(&internals->lock);

	return retval;
}

int
rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode)
{
	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	return bond_ethdev_mode_set(&rte_eth_devices[bonded_port_id], mode);
}

int
rte_eth_bond_mode_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	return internals->mode;
}

int
rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	if (valid_slave_port_id(slave_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	internals->user_defined_primary_port = 1;
	internals->primary_port = slave_port_id;

	bond_ethdev_primary_set(internals, slave_port_id);

	return 0;
}

int
rte_eth_bond_primary_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	if (internals->slave_count < 1)
		return -1;

	return internals->current_primary_port;
}

int
rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len)
{
	struct bond_dev_private *internals;
	uint8_t i;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	if (slaves == NULL)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	if (internals->slave_count > len)
		return -1;

	for (i = 0; i < internals->slave_count; i++)
		slaves[i] = internals->slaves[i].port_id;

	return internals->slave_count;
}

int
rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
		uint8_t len)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	if (slaves == NULL)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	if (internals->active_slave_count > len)
		return -1;

	memcpy(slaves, internals->active_slaves, internals->active_slave_count);

	return internals->active_slave_count;
}
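
/*
 * Illustrative usage sketch (not part of the driver): querying the slaves of
 * a bonded device. The bond_port variable is hypothetical; RTE_MAX_ETHPORTS
 * is used here as an upper bound on the output array size.
 *
 *	uint8_t slaves[RTE_MAX_ETHPORTS];
 *	int n, i;
 *
 *	n = rte_eth_bond_slaves_get(bond_port, slaves, RTE_MAX_ETHPORTS);
 *	for (i = 0; i < n; i++)
 *		printf("slave port %u\n", slaves[i]);
 *
 *	n = rte_eth_bond_active_slaves_get(bond_port, slaves, RTE_MAX_ETHPORTS);
 */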

int
rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
		struct ether_addr *mac_addr)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	/* Set MAC Address of Bonded Device */
	if (mac_address_set(bonded_eth_dev, mac_addr))
		return -1;

	internals->user_defined_mac = 1;

	/* Update all slave devices' MACs */
	if (internals->slave_count > 0)
		return mac_address_slaves_update(bonded_eth_dev);

	return 0;
}

int
rte_eth_bond_mac_address_reset(uint8_t bonded_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	internals->user_defined_mac = 0;

	if (internals->slave_count > 0) {
		/* Set MAC Address of Bonded Device */
		if (mac_address_set(bonded_eth_dev,
				&internals->slaves[internals->primary_port].persisted_mac_addr)
				!= 0) {
			RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
			return -1;
		}
		/* Update all slave devices' MAC addresses */
		return mac_address_slaves_update(bonded_eth_dev);
	}
	/* No need to update anything as no slaves are present */
	return 0;
}

int
rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	switch (policy) {
	case BALANCE_XMIT_POLICY_LAYER2:
		internals->balance_xmit_policy = policy;
		internals->xmit_hash = xmit_l2_hash;
		break;
	case BALANCE_XMIT_POLICY_LAYER23:
		internals->balance_xmit_policy = policy;
		internals->xmit_hash = xmit_l23_hash;
		break;
	case BALANCE_XMIT_POLICY_LAYER34:
		internals->balance_xmit_policy = policy;
		internals->xmit_hash = xmit_l34_hash;
		break;
	default:
		return -1;
	}
	return 0;
}
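
/*
 * Illustrative usage sketch (not part of the driver): selecting the transmit
 * hashing policy for a balance-mode bond. The bond_port variable is
 * hypothetical.
 *
 *	if (rte_eth_bond_xmit_policy_set(bond_port,
 *			BALANCE_XMIT_POLICY_LAYER34) != 0)
 *		RTE_LOG(ERR, USER1, "Failed to set L3/L4 xmit policy\n");
 */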

int
rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	return internals->balance_xmit_policy;
}

int
rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;
	internals->link_status_polling_interval_ms = internal_ms;

	return 0;
}

int
rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	return internals->link_status_polling_interval_ms;
}

int
rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;
	internals->link_down_delay_ms = delay_ms;

	return 0;
}

int
rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	return internals->link_down_delay_ms;
}

int
rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;
	internals->link_up_delay_ms = delay_ms;

	return 0;
}

int
rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	return internals->link_up_delay_ms;
}
849