/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "failsafe_private.h"

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}
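
/*
 * Start every sub-device that reached the DEV_ACTIVE state and promote it
 * to DEV_STARTED. The Rx interrupt proxy is installed first; each started
 * sub-device is then hooked to it.
 */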
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}
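
/*
 * Stop the given Rx queue on every active sub-device.
 * The call is considered successful if at least one sub-device stopped
 * the queue; otherwise the last error is returned.
 */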
static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = &rte_eth_devices[rxq->priv->data->port_id];
	fs_lock(dev, 0);
	if (rxq->event_fd >= 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}
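
/*
 * Set up an Rx queue on the fail-safe port and mirror it on every active
 * sub-device. A per-queue eventfd is allocated so the Rx interrupt proxy
 * service can signal Rx events to the application.
 */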
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts;
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if the proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}
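
/*
 * Disable Rx interrupts for the queue on every active sub-device and
 * drain any event already pending on the queue eventfd.
 */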
static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = &rte_eth_devices[txq->priv->data->port_id];
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	fs_unlock(dev, 0);
	return ret;
}
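
/*
 * Release every Rx and Tx queue of the fail-safe port.
 * Only reached from the dev_close path in the primary process.
 */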
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

int
failsafe_eth_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
					failsafe_eth_new_event_callback, dev);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		fs_unlock(dev, 0);
		return 0;
	}
	fs_dev_free_queues(dev);
	ret = failsafe_eal_uninit(dev);
	if (ret)
		ERROR("Error while uninitializing sub-EAL");
	failsafe_args_free(dev);
	rte_free(PRIV(dev)->subs);
	rte_free(PRIV(dev)->mcast_addrs);
	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;
	fs_unlock(dev, 0);
	ret = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
	if (ret)
		ERROR("Error while destroying hotplug mutex");
	return 0;
}

static int
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}
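
/*
 * The all-multicast handlers below follow the same apply-then-rollback
 * pattern as the promiscuous handlers above.
 */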
static int
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}
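
/*
 * Reset statistics on every active sub-device, then clear the per
 * sub-device snapshots and the fail-safe accumulator.
 */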
static int
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_stats_reset(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;

			ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);

	return 0;
}

static int
__fs_xstats_count(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	int count = 0;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0);
		if (ret < 0)
			return ret;
		count += ret;
	}

	return count;
}

static int
__fs_xstats_get_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	struct sub_device *sdev;
	unsigned int count = 0;
	uint8_t i;

	/* Caller only cares about count */
	if (!xstats_names)
		return __fs_xstats_count(dev);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_xstat_name *sub_names = xstats_names + count;
		int j, r;

		if (count >= limit)
			break;

		r = rte_eth_xstats_get_names(PORT_ID(sdev),
					     sub_names, limit - count);
		if (r < 0)
			return r;

		/* add subN_ prefix to names */
		for (j = 0; j < r; j++) {
			char *xname = sub_names[j].name;
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			if ((xname[0] == 't' || xname[0] == 'r') &&
			    xname[1] == 'x' && xname[2] == '_')
				snprintf(tmp, sizeof(tmp), "%.3ssub%u_%s",
					 xname, i, xname + 3);
			else
				snprintf(tmp, sizeof(tmp), "sub%u_%s",
					 i, xname);

			strlcpy(xname, tmp, RTE_ETH_XSTATS_NAME_SIZE);
		}
		count += r;
	}
	return count;
}

static int
fs_xstats_get_names(struct rte_eth_dev *dev,
		    struct rte_eth_xstat_name *xstats_names,
		    unsigned int limit)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get_names(dev, xstats_names, limit);
	fs_unlock(dev, 0);
	return ret;
}

static int
__fs_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats,
		unsigned int n)
{
	unsigned int count = 0;
	struct sub_device *sdev;
	uint8_t i;
	int j, ret;

	ret = __fs_xstats_count(dev);
	/*
	 * if error
	 * or caller did not give enough space
	 * or just querying
	 */
	if (ret < 0 || ret > (int)n || xstats == NULL)
		return ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n);
		if (ret < 0)
			return ret;

		if (ret > (int)n)
			return n + count;

		/* add offset to id's from sub-device */
		for (j = 0; j < ret; j++)
			xstats[j].id += count;

		xstats += ret;
		n -= ret;
		count += ret;
	}

	return count;
}

static int
fs_xstats_get(struct rte_eth_dev *dev,
	      struct rte_eth_xstat *xstats,
	      unsigned int n)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get(dev, xstats, n);
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_xstats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int r = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		r = rte_eth_xstats_reset(PORT_ID(sdev));
		if (r < 0)
			break;
	}
	fs_unlock(dev, 0);

	return r;
}
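
/*
 * Merge descriptor limits so the resulting values are valid for every
 * sub-device: keep the lowest maxima and the highest minima/alignment.
 */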
static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub devices
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;

	/*
	 * RETA size is a GCD of RETA sizes indicated by sub-devices.
	 * Each of these sizes is a power of 2, so use the lower one.
	 */
	info->reta_size = RTE_MIN(info->reta_size, sinfo->reta_size);

	info->hash_key_size = RTE_MIN(info->hash_key_size,
				      sinfo->hash_key_size);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 */
static int
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Use maximum upper bounds by default */
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;
	infos->reta_size = UINT16_MAX;
	infos->hash_key_size = UINT8_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
		ret = fs_err(sdev, ret);
		if (ret != 0)
			return ret;

		fs_dev_merge_info(infos, &sub_info);
	}

	return 0;
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit a clean AND of all ptypes.
	 * It is also incomplete by design, and we do not really care
	 * about the best possible value in this context.
	 * We just return the ptypes of the device of highest priority,
	 * usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}
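
/*
 * Apply the new MTU on every active sub-device;
 * fail on the first error reported by fs_err().
 */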
static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}
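
/*
 * Add the MAC address on every active sub-device and record its VMDq
 * pool index in the fail-safe's mac_addr_pool.
 */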
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}
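
/*
 * Only the generic flow API (rte_flow) is supported;
 * any other filter type reports -ENOTSUP.
 */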
static int
fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	return -ENOTSUP;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = failsafe_eth_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.xstats_get = fs_xstats_get,
	.xstats_get_names = fs_xstats_get_names,
	.xstats_reset = fs_xstats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};