/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "failsafe_private.h"

/*
 * Apply the fail-safe port configuration to each relevant sub-device and
 * register RMV/LSC event callbacks on the sub-devices that support them.
 */
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
			(ETH(sdev)->data->dev_flags &
			 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			if (fs_err(sdev, rte_eth_dev_stop(PORT_ID(sdev))) < 0)
				ERROR("Failed to stop sub-device %u",
				      SUB_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static int
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		ret = rte_eth_dev_stop(PORT_ID(sdev));
		if (fs_err(sdev, ret) < 0) {
			ERROR("Failed to stop device %u",
			      PORT_ID(sdev));
			PRIV(dev)->state = DEV_STARTED + 1;
			fs_unlock(dev, 0);
			return ret;
		}
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	fs_lock(dev, 0);
	if (rxq->event_fd >= 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL)
			SUBOPS(sdev, rx_queue_release)(ETH(sdev), rxq->qid);
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(dev, rx_queue_id);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(dev, rx_queue_id);
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL)
			SUBOPS(sdev, tx_queue_release)(ETH(sdev), txq->qid);
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(dev, tx_queue_id);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(dev, tx_queue_id);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

int
failsafe_eth_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int err, ret = 0;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED) {
		ret = dev->dev_ops->dev_stop(dev);
		if (ret != 0) {
			fs_unlock(dev, 0);
			return ret;
		}
	}
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		err = rte_eth_dev_close(PORT_ID(sdev));
		if (err) {
			ret = ret ? ret : err;
			ERROR("Error while closing sub-device %u",
			      PORT_ID(sdev));
		}
		sdev->state = DEV_ACTIVE - 1;
	}
	rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
					failsafe_eth_new_event_callback, dev);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		fs_unlock(dev, 0);
		return ret;
	}
	fs_dev_free_queues(dev);
	err = failsafe_eal_uninit(dev);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while uninitializing sub-EAL");
	}
	failsafe_args_free(dev);
	rte_free(PRIV(dev)->subs);
	rte_free(PRIV(dev)->mcast_addrs);
	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;
	fs_unlock(dev, 0);
	err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while destroying hotplug mutex");
	}
	return ret;
}

static int
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode enable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode disable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode disable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode enable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode enable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode disable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode disable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode enable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_stats_reset(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;

			ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);

	return 0;
}

static int
__fs_xstats_count(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	int count = 0;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0);
		if (ret < 0)
			return ret;
		count += ret;
	}

	return count;
}

static int
__fs_xstats_get_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	struct sub_device *sdev;
	unsigned int count = 0;
	uint8_t i;

	/* Caller only cares about count */
	if (!xstats_names)
		return __fs_xstats_count(dev);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_xstat_name *sub_names = xstats_names + count;
		int j, r;

		if (count >= limit)
			break;

		r = rte_eth_xstats_get_names(PORT_ID(sdev),
					     sub_names, limit - count);
		if (r < 0)
			return r;

		/* add subN_ prefix to names */
		for (j = 0; j < r; j++) {
			char *xname = sub_names[j].name;
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			if ((xname[0] == 't' || xname[0] == 'r') &&
			    xname[1] == 'x' && xname[2] == '_')
				snprintf(tmp, sizeof(tmp), "%.3ssub%u_%s",
					 xname, i, xname + 3);
			else
				snprintf(tmp, sizeof(tmp), "sub%u_%s",
					 i, xname);

			strlcpy(xname, tmp, RTE_ETH_XSTATS_NAME_SIZE);
		}
		count += r;
	}
	return count;
}

static int
fs_xstats_get_names(struct rte_eth_dev *dev,
		    struct rte_eth_xstat_name *xstats_names,
		    unsigned int limit)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get_names(dev, xstats_names, limit);
	fs_unlock(dev, 0);
	return ret;
}

static int
__fs_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats,
		unsigned int n)
{
	unsigned int count = 0;
	struct sub_device *sdev;
	uint8_t i;
	int j, ret;

	ret = __fs_xstats_count(dev);
	/*
	 * if error
	 * or caller did not give enough space
	 * or just querying
	 */
	if (ret < 0 || ret > (int)n || xstats == NULL)
		return ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n);
		if (ret < 0)
			return ret;

		if (ret > (int)n)
			return n + count;

		/* add offset to id's from sub-device */
		for (j = 0; j < ret; j++)
			xstats[j].id += count;

		xstats += ret;
		n -= ret;
		count += ret;
	}

	return count;
}

static int
fs_xstats_get(struct rte_eth_dev *dev,
	      struct rte_eth_xstat *xstats,
	      unsigned int n)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get(dev, xstats, n);
	fs_unlock(dev, 0);

	return ret;
}


static int
fs_xstats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int r = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		r = rte_eth_xstats_reset(PORT_ID(sdev));
		if (r < 0)
			break;
	}
	fs_unlock(dev, 0);

	return r;
}

static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub devices
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->min_mtu = RTE_MAX(info->min_mtu, sinfo->min_mtu);
	info->max_mtu = RTE_MIN(info->max_mtu, sinfo->max_mtu);
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;

	/*
	 * RETA size is a GCD of RETA sizes indicated by sub-devices.
	 * Each of these sizes is a power of 2, so use the lower one.
	 */
	info->reta_size = RTE_MIN(info->reta_size, sinfo->reta_size);

	info->hash_key_size = RTE_MIN(info->hash_key_size,
				      sinfo->hash_key_size);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 *
 */
static int
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Use maximum upper bounds by default */
	infos->min_mtu = RTE_ETHER_MIN_MTU;
	infos->max_mtu = UINT16_MAX;
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;
	infos->reta_size = UINT16_MAX;
	infos->hash_key_size = UINT8_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_LRO |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
		RTE_ETH_RX_OFFLOAD_SECURITY |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	infos->rx_queue_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_LRO |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
		RTE_ETH_RX_OFFLOAD_SECURITY |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	infos->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		RTE_ETH_RSS_IP |
		RTE_ETH_RSS_UDP |
		RTE_ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
		ret = fs_err(sdev, ret);
		if (ret != 0)
			return ret;

		fs_dev_merge_info(infos, &sub_info);
	}

	return 0;
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit doing a clean AND of all ptypes.
	 * It is also incomplete by design, and we do not really care
	 * to have the best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		const struct rte_flow_ops **ops)
{
	*ops = &fs_flow_ops;
	return 0;
}

/* Fail-safe PMD operations: each callback is dispatched to the sub-devices. */
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = failsafe_eth_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.xstats_get = fs_xstats_get,
	.xstats_get_names = fs_xstats_get_names,
	.xstats_reset = fs_xstats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.flow_ops_get = fs_flow_ops_get,
};