1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2017 6WIND S.A. 3 * Copyright 2017 Mellanox Technologies, Ltd 4 */ 5 6 #include <stdbool.h> 7 #include <stdint.h> 8 #include <unistd.h> 9 #ifdef RTE_EXEC_ENV_LINUX 10 #include <sys/eventfd.h> 11 #endif 12 13 #include <rte_debug.h> 14 #include <rte_atomic.h> 15 #include <ethdev_driver.h> 16 #include <rte_malloc.h> 17 #include <rte_flow.h> 18 #include <rte_cycles.h> 19 #include <rte_ethdev.h> 20 #include <rte_string_fns.h> 21 22 #include "failsafe_private.h" 23 24 static int 25 fs_dev_configure(struct rte_eth_dev *dev) 26 { 27 struct sub_device *sdev; 28 uint8_t i; 29 int ret; 30 31 ret = fs_lock(dev, 0); 32 if (ret != 0) 33 return ret; 34 FOREACH_SUBDEV(sdev, i, dev) { 35 int rmv_interrupt = 0; 36 int lsc_interrupt = 0; 37 int lsc_enabled; 38 39 if (sdev->state != DEV_PROBED && 40 !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE)) 41 continue; 42 43 rmv_interrupt = ETH(sdev)->data->dev_flags & 44 RTE_ETH_DEV_INTR_RMV; 45 if (rmv_interrupt) { 46 DEBUG("Enabling RMV interrupts for sub_device %d", i); 47 dev->data->dev_conf.intr_conf.rmv = 1; 48 } else { 49 DEBUG("sub_device %d does not support RMV event", i); 50 } 51 lsc_enabled = dev->data->dev_conf.intr_conf.lsc; 52 lsc_interrupt = lsc_enabled && 53 (ETH(sdev)->data->dev_flags & 54 RTE_ETH_DEV_INTR_LSC); 55 if (lsc_interrupt) { 56 DEBUG("Enabling LSC interrupts for sub_device %d", i); 57 dev->data->dev_conf.intr_conf.lsc = 1; 58 } else if (lsc_enabled && !lsc_interrupt) { 59 DEBUG("Disabling LSC interrupts for sub_device %d", i); 60 dev->data->dev_conf.intr_conf.lsc = 0; 61 } 62 DEBUG("Configuring sub-device %d", i); 63 ret = rte_eth_dev_configure(PORT_ID(sdev), 64 dev->data->nb_rx_queues, 65 dev->data->nb_tx_queues, 66 &dev->data->dev_conf); 67 if (ret) { 68 if (!fs_err(sdev, ret)) 69 continue; 70 ERROR("Could not configure sub_device %d", i); 71 fs_unlock(dev, 0); 72 return ret; 73 } 74 if (rmv_interrupt && sdev->rmv_callback == 0) { 75 ret = rte_eth_dev_callback_register(PORT_ID(sdev), 76 RTE_ETH_EVENT_INTR_RMV, 77 failsafe_eth_rmv_event_callback, 78 sdev); 79 if (ret) 80 WARN("Failed to register RMV callback for sub_device %d", 81 SUB_ID(sdev)); 82 else 83 sdev->rmv_callback = 1; 84 } 85 dev->data->dev_conf.intr_conf.rmv = 0; 86 if (lsc_interrupt && sdev->lsc_callback == 0) { 87 ret = rte_eth_dev_callback_register(PORT_ID(sdev), 88 RTE_ETH_EVENT_INTR_LSC, 89 failsafe_eth_lsc_event_callback, 90 dev); 91 if (ret) 92 WARN("Failed to register LSC callback for sub_device %d", 93 SUB_ID(sdev)); 94 else 95 sdev->lsc_callback = 1; 96 } 97 dev->data->dev_conf.intr_conf.lsc = lsc_enabled; 98 sdev->state = DEV_ACTIVE; 99 } 100 if (PRIV(dev)->state < DEV_ACTIVE) 101 PRIV(dev)->state = DEV_ACTIVE; 102 fs_unlock(dev, 0); 103 return 0; 104 } 105 106 static void 107 fs_set_queues_state_start(struct rte_eth_dev *dev) 108 { 109 struct rxq *rxq; 110 struct txq *txq; 111 uint16_t i; 112 113 for (i = 0; i < dev->data->nb_rx_queues; i++) { 114 __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); 115 rxq = dev->data->rx_queues[i]; 116 if (rxq != NULL && !rxq->info.conf.rx_deferred_start) 117 dev->data->rx_queue_state[i] = 118 RTE_ETH_QUEUE_STATE_STARTED; 119 } 120 for (i = 0; i < dev->data->nb_tx_queues; i++) { 121 __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); 122 txq = dev->data->tx_queues[i]; 123 if (txq != NULL && !txq->info.conf.tx_deferred_start) 124 dev->data->tx_queue_state[i] = 125 RTE_ETH_QUEUE_STATE_STARTED; 126 } 127 } 128 129 static int 130 fs_dev_start(struct rte_eth_dev *dev) 131 { 132 struct 
sub_device *sdev; 133 uint8_t i; 134 int ret; 135 136 ret = fs_lock(dev, 0); 137 if (ret != 0) 138 return ret; 139 ret = failsafe_rx_intr_install(dev); 140 if (ret) { 141 fs_unlock(dev, 0); 142 return ret; 143 } 144 FOREACH_SUBDEV(sdev, i, dev) { 145 if (sdev->state != DEV_ACTIVE) 146 continue; 147 DEBUG("Starting sub_device %d", i); 148 ret = rte_eth_dev_start(PORT_ID(sdev)); 149 if (ret) { 150 if (!fs_err(sdev, ret)) 151 continue; 152 fs_unlock(dev, 0); 153 return ret; 154 } 155 ret = failsafe_rx_intr_install_subdevice(sdev); 156 if (ret) { 157 if (!fs_err(sdev, ret)) 158 continue; 159 if (fs_err(sdev, rte_eth_dev_stop(PORT_ID(sdev))) < 0) 160 ERROR("Failed to stop sub-device %u", 161 SUB_ID(sdev)); 162 fs_unlock(dev, 0); 163 return ret; 164 } 165 sdev->state = DEV_STARTED; 166 } 167 if (PRIV(dev)->state < DEV_STARTED) { 168 PRIV(dev)->state = DEV_STARTED; 169 fs_set_queues_state_start(dev); 170 } 171 fs_switch_dev(dev, NULL); 172 fs_unlock(dev, 0); 173 return 0; 174 } 175 176 static void 177 fs_set_queues_state_stop(struct rte_eth_dev *dev) 178 { 179 uint16_t i; 180 181 for (i = 0; i < dev->data->nb_rx_queues; i++) { 182 __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); 183 if (dev->data->rx_queues[i] != NULL) 184 dev->data->rx_queue_state[i] = 185 RTE_ETH_QUEUE_STATE_STOPPED; 186 } 187 for (i = 0; i < dev->data->nb_tx_queues; i++) { 188 __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); 189 if (dev->data->tx_queues[i] != NULL) 190 dev->data->tx_queue_state[i] = 191 RTE_ETH_QUEUE_STATE_STOPPED; 192 } 193 } 194 195 static int 196 fs_dev_stop(struct rte_eth_dev *dev) 197 { 198 struct sub_device *sdev; 199 uint8_t i; 200 int ret; 201 202 ret = fs_lock(dev, 0); 203 if (ret != 0) 204 return ret; 205 PRIV(dev)->state = DEV_STARTED - 1; 206 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) { 207 ret = rte_eth_dev_stop(PORT_ID(sdev)); 208 if (fs_err(sdev, ret) < 0) { 209 ERROR("Failed to stop device %u", 210 PORT_ID(sdev)); 211 PRIV(dev)->state = DEV_STARTED + 1; 212 fs_unlock(dev, 0); 213 return ret; 214 } 215 failsafe_rx_intr_uninstall_subdevice(sdev); 216 sdev->state = DEV_STARTED - 1; 217 } 218 failsafe_rx_intr_uninstall(dev); 219 fs_set_queues_state_stop(dev); 220 fs_unlock(dev, 0); 221 222 return 0; 223 } 224 225 static int 226 fs_dev_set_link_up(struct rte_eth_dev *dev) 227 { 228 struct sub_device *sdev; 229 uint8_t i; 230 int ret; 231 232 ret = fs_lock(dev, 0); 233 if (ret != 0) 234 return ret; 235 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 236 DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i); 237 ret = rte_eth_dev_set_link_up(PORT_ID(sdev)); 238 if ((ret = fs_err(sdev, ret))) { 239 ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d" 240 " with error %d", i, ret); 241 fs_unlock(dev, 0); 242 return ret; 243 } 244 } 245 fs_unlock(dev, 0); 246 return 0; 247 } 248 249 static int 250 fs_dev_set_link_down(struct rte_eth_dev *dev) 251 { 252 struct sub_device *sdev; 253 uint8_t i; 254 int ret; 255 256 ret = fs_lock(dev, 0); 257 if (ret != 0) 258 return ret; 259 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 260 DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i); 261 ret = rte_eth_dev_set_link_down(PORT_ID(sdev)); 262 if ((ret = fs_err(sdev, ret))) { 263 ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d" 264 " with error %d", i, ret); 265 fs_unlock(dev, 0); 266 return ret; 267 } 268 } 269 fs_unlock(dev, 0); 270 return 0; 271 } 272 273 static int 274 fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 275 { 276 struct sub_device *sdev; 
277 uint8_t i; 278 int ret; 279 int err = 0; 280 bool failure = true; 281 282 ret = fs_lock(dev, 0); 283 if (ret != 0) 284 return ret; 285 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 286 uint16_t port_id = ETH(sdev)->data->port_id; 287 288 ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id); 289 ret = fs_err(sdev, ret); 290 if (ret) { 291 ERROR("Rx queue stop failed for subdevice %d", i); 292 err = ret; 293 } else { 294 failure = false; 295 } 296 } 297 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 298 fs_unlock(dev, 0); 299 /* Return 0 in case of at least one successful queue stop */ 300 return (failure) ? err : 0; 301 } 302 303 static int 304 fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 305 { 306 struct sub_device *sdev; 307 uint8_t i; 308 int ret; 309 310 ret = fs_lock(dev, 0); 311 if (ret != 0) 312 return ret; 313 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 314 uint16_t port_id = ETH(sdev)->data->port_id; 315 316 ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id); 317 ret = fs_err(sdev, ret); 318 if (ret) { 319 ERROR("Rx queue start failed for subdevice %d", i); 320 fs_rx_queue_stop(dev, rx_queue_id); 321 fs_unlock(dev, 0); 322 return ret; 323 } 324 } 325 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 326 fs_unlock(dev, 0); 327 return 0; 328 } 329 330 static int 331 fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 332 { 333 struct sub_device *sdev; 334 uint8_t i; 335 int ret; 336 int err = 0; 337 bool failure = true; 338 339 ret = fs_lock(dev, 0); 340 if (ret != 0) 341 return ret; 342 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 343 uint16_t port_id = ETH(sdev)->data->port_id; 344 345 ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id); 346 ret = fs_err(sdev, ret); 347 if (ret) { 348 ERROR("Tx queue stop failed for subdevice %d", i); 349 err = ret; 350 } else { 351 failure = false; 352 } 353 } 354 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 355 fs_unlock(dev, 0); 356 /* Return 0 in case of at least one successful queue stop */ 357 return (failure) ? 
err : 0; 358 } 359 360 static int 361 fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 362 { 363 struct sub_device *sdev; 364 uint8_t i; 365 int ret; 366 367 ret = fs_lock(dev, 0); 368 if (ret != 0) 369 return ret; 370 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 371 uint16_t port_id = ETH(sdev)->data->port_id; 372 373 ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id); 374 ret = fs_err(sdev, ret); 375 if (ret) { 376 ERROR("Tx queue start failed for subdevice %d", i); 377 fs_tx_queue_stop(dev, tx_queue_id); 378 fs_unlock(dev, 0); 379 return ret; 380 } 381 } 382 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 383 fs_unlock(dev, 0); 384 return 0; 385 } 386 387 static void 388 fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 389 { 390 struct sub_device *sdev; 391 uint8_t i; 392 struct rxq *rxq = dev->data->rx_queues[qid]; 393 394 if (rxq == NULL) 395 return; 396 if (fs_lock(dev, 0) != 0) 397 return; 398 if (rxq->event_fd >= 0) 399 close(rxq->event_fd); 400 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 401 if (ETH(sdev)->data->rx_queues != NULL && 402 ETH(sdev)->data->rx_queues[rxq->qid] != NULL) 403 SUBOPS(sdev, rx_queue_release)(ETH(sdev), rxq->qid); 404 } 405 dev->data->rx_queues[rxq->qid] = NULL; 406 rte_free(rxq); 407 fs_unlock(dev, 0); 408 } 409 410 static int 411 fs_rx_queue_setup(struct rte_eth_dev *dev, 412 uint16_t rx_queue_id, 413 uint16_t nb_rx_desc, 414 unsigned int socket_id, 415 const struct rte_eth_rxconf *rx_conf, 416 struct rte_mempool *mb_pool) 417 { 418 struct sub_device *sdev; 419 struct rxq *rxq; 420 uint8_t i; 421 int ret; 422 423 ret = fs_lock(dev, 0); 424 if (ret != 0) 425 return ret; 426 if (rx_conf->rx_deferred_start) { 427 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { 428 if (SUBOPS(sdev, rx_queue_start) == NULL) { 429 ERROR("Rx queue deferred start is not " 430 "supported for subdevice %d", i); 431 fs_unlock(dev, 0); 432 return -EINVAL; 433 } 434 } 435 } 436 rxq = dev->data->rx_queues[rx_queue_id]; 437 if (rxq != NULL) { 438 fs_rx_queue_release(dev, rx_queue_id); 439 dev->data->rx_queues[rx_queue_id] = NULL; 440 } 441 rxq = rte_zmalloc(NULL, 442 sizeof(*rxq) + 443 sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, 444 RTE_CACHE_LINE_SIZE); 445 if (rxq == NULL) { 446 fs_unlock(dev, 0); 447 return -ENOMEM; 448 } 449 FOREACH_SUBDEV(sdev, i, dev) 450 rte_atomic64_init(&rxq->refcnt[i]); 451 rxq->qid = rx_queue_id; 452 rxq->socket_id = socket_id; 453 rxq->info.mp = mb_pool; 454 rxq->info.conf = *rx_conf; 455 rxq->info.nb_desc = nb_rx_desc; 456 rxq->priv = PRIV(dev); 457 rxq->sdev = PRIV(dev)->subs; 458 #ifdef RTE_EXEC_ENV_LINUX 459 rxq->event_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); 460 if (rxq->event_fd < 0) { 461 ERROR("Failed to create an eventfd: %s", strerror(errno)); 462 fs_unlock(dev, 0); 463 return -errno; 464 } 465 #else 466 rxq->event_fd = -1; 467 #endif 468 dev->data->rx_queues[rx_queue_id] = rxq; 469 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 470 ret = rte_eth_rx_queue_setup(PORT_ID(sdev), 471 rx_queue_id, 472 nb_rx_desc, socket_id, 473 rx_conf, mb_pool); 474 if ((ret = fs_err(sdev, ret))) { 475 ERROR("RX queue setup failed for sub_device %d", i); 476 goto free_rxq; 477 } 478 } 479 fs_unlock(dev, 0); 480 return 0; 481 free_rxq: 482 fs_rx_queue_release(dev, rx_queue_id); 483 fs_unlock(dev, 0); 484 return ret; 485 } 486 487 static int 488 fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx) 489 { 490 struct rxq *rxq; 491 struct sub_device *sdev; 492 uint8_t i; 493 int ret; 494 int rc = 0; 
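	/*
	 * Note (added comment): in the interrupt enable/disable handlers
	 * below, "ret" holds the status of each individual call, while
	 * "rc" accumulates the value returned to the caller and mirrored
	 * into rte_errno.
	 */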
	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if the proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;
	if (fs_lock(dev, 0) != 0)
		return;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL)
			SUBOPS(sdev, tx_queue_release)(ETH(sdev), txq->qid);
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(dev, tx_queue_id);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
tx_queue_id, 637 nb_tx_desc, socket_id, 638 tx_conf); 639 if ((ret = fs_err(sdev, ret))) { 640 ERROR("TX queue setup failed for sub_device %d", i); 641 goto free_txq; 642 } 643 } 644 fs_unlock(dev, 0); 645 return 0; 646 free_txq: 647 fs_tx_queue_release(dev, tx_queue_id); 648 fs_unlock(dev, 0); 649 return ret; 650 } 651 652 static void 653 fs_dev_free_queues(struct rte_eth_dev *dev) 654 { 655 uint16_t i; 656 657 for (i = 0; i < dev->data->nb_rx_queues; i++) { 658 fs_rx_queue_release(dev, i); 659 dev->data->rx_queues[i] = NULL; 660 } 661 dev->data->nb_rx_queues = 0; 662 for (i = 0; i < dev->data->nb_tx_queues; i++) { 663 fs_tx_queue_release(dev, i); 664 dev->data->tx_queues[i] = NULL; 665 } 666 dev->data->nb_tx_queues = 0; 667 } 668 669 int 670 failsafe_eth_dev_close(struct rte_eth_dev *dev) 671 { 672 struct sub_device *sdev; 673 uint8_t i; 674 int err, ret = 0; 675 676 ret = fs_lock(dev, 0); 677 if (ret != 0) 678 return ret; 679 failsafe_hotplug_alarm_cancel(dev); 680 if (PRIV(dev)->state == DEV_STARTED) { 681 ret = dev->dev_ops->dev_stop(dev); 682 if (ret != 0) { 683 fs_unlock(dev, 0); 684 return ret; 685 } 686 } 687 PRIV(dev)->state = DEV_ACTIVE - 1; 688 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 689 DEBUG("Closing sub_device %d", i); 690 failsafe_eth_dev_unregister_callbacks(sdev); 691 err = rte_eth_dev_close(PORT_ID(sdev)); 692 if (err) { 693 ret = ret ? ret : err; 694 ERROR("Error while closing sub-device %u", 695 PORT_ID(sdev)); 696 } 697 sdev->state = DEV_ACTIVE - 1; 698 } 699 rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, 700 failsafe_eth_new_event_callback, dev); 701 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 702 fs_unlock(dev, 0); 703 return ret; 704 } 705 fs_dev_free_queues(dev); 706 err = failsafe_eal_uninit(dev); 707 if (err) { 708 ret = ret ? ret : err; 709 ERROR("Error while uninitializing sub-EAL"); 710 } 711 failsafe_args_free(dev); 712 rte_free(PRIV(dev)->subs); 713 rte_free(PRIV(dev)->mcast_addrs); 714 /* mac_addrs must not be freed alone because part of dev_private */ 715 dev->data->mac_addrs = NULL; 716 fs_unlock(dev, 0); 717 err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex); 718 if (err) { 719 ret = ret ? 
ret : err; 720 ERROR("Error while destroying hotplug mutex"); 721 } 722 return ret; 723 } 724 725 static int 726 fs_promiscuous_enable(struct rte_eth_dev *dev) 727 { 728 struct sub_device *sdev; 729 uint8_t i; 730 int ret = 0; 731 732 ret = fs_lock(dev, 0); 733 if (ret != 0) 734 return ret; 735 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 736 ret = rte_eth_promiscuous_enable(PORT_ID(sdev)); 737 ret = fs_err(sdev, ret); 738 if (ret != 0) { 739 ERROR("Promiscuous mode enable failed for subdevice %d", 740 PORT_ID(sdev)); 741 break; 742 } 743 } 744 if (ret != 0) { 745 /* Rollback in the case of failure */ 746 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 747 ret = rte_eth_promiscuous_disable(PORT_ID(sdev)); 748 ret = fs_err(sdev, ret); 749 if (ret != 0) 750 ERROR("Promiscuous mode disable during rollback failed for subdevice %d", 751 PORT_ID(sdev)); 752 } 753 } 754 fs_unlock(dev, 0); 755 756 return ret; 757 } 758 759 static int 760 fs_promiscuous_disable(struct rte_eth_dev *dev) 761 { 762 struct sub_device *sdev; 763 uint8_t i; 764 int ret = 0; 765 766 ret = fs_lock(dev, 0); 767 if (ret != 0) 768 return ret; 769 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 770 ret = rte_eth_promiscuous_disable(PORT_ID(sdev)); 771 ret = fs_err(sdev, ret); 772 if (ret != 0) { 773 ERROR("Promiscuous mode disable failed for subdevice %d", 774 PORT_ID(sdev)); 775 break; 776 } 777 } 778 if (ret != 0) { 779 /* Rollback in the case of failure */ 780 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 781 ret = rte_eth_promiscuous_enable(PORT_ID(sdev)); 782 ret = fs_err(sdev, ret); 783 if (ret != 0) 784 ERROR("Promiscuous mode enable during rollback failed for subdevice %d", 785 PORT_ID(sdev)); 786 } 787 } 788 fs_unlock(dev, 0); 789 790 return ret; 791 } 792 793 static int 794 fs_allmulticast_enable(struct rte_eth_dev *dev) 795 { 796 struct sub_device *sdev; 797 uint8_t i; 798 int ret = 0; 799 800 ret = fs_lock(dev, 0); 801 if (ret != 0) 802 return ret; 803 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 804 ret = rte_eth_allmulticast_enable(PORT_ID(sdev)); 805 ret = fs_err(sdev, ret); 806 if (ret != 0) { 807 ERROR("All-multicast mode enable failed for subdevice %d", 808 PORT_ID(sdev)); 809 break; 810 } 811 } 812 if (ret != 0) { 813 /* Rollback in the case of failure */ 814 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 815 ret = rte_eth_allmulticast_disable(PORT_ID(sdev)); 816 ret = fs_err(sdev, ret); 817 if (ret != 0) 818 ERROR("All-multicast mode disable during rollback failed for subdevice %d", 819 PORT_ID(sdev)); 820 } 821 } 822 fs_unlock(dev, 0); 823 824 return ret; 825 } 826 827 static int 828 fs_allmulticast_disable(struct rte_eth_dev *dev) 829 { 830 struct sub_device *sdev; 831 uint8_t i; 832 int ret = 0; 833 834 ret = fs_lock(dev, 0); 835 if (ret != 0) 836 return ret; 837 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 838 ret = rte_eth_allmulticast_disable(PORT_ID(sdev)); 839 ret = fs_err(sdev, ret); 840 if (ret != 0) { 841 ERROR("All-multicast mode disable failed for subdevice %d", 842 PORT_ID(sdev)); 843 break; 844 } 845 } 846 if (ret != 0) { 847 /* Rollback in the case of failure */ 848 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 849 ret = rte_eth_allmulticast_enable(PORT_ID(sdev)); 850 ret = fs_err(sdev, ret); 851 if (ret != 0) 852 ERROR("All-multicast mode enable during rollback failed for subdevice %d", 853 PORT_ID(sdev)); 854 } 855 } 856 fs_unlock(dev, 0); 857 858 return ret; 859 } 860 861 static int 862 fs_link_update(struct rte_eth_dev *dev, 863 int wait_to_complete) 864 { 865 
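	/*
	 * Added comment: refresh the link status of every active sub-device,
	 * then mirror the link of the Tx sub-device into the fail-safe port.
	 */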
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_stats_reset(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;

			ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);

	return 0;
}

static int
__fs_xstats_count(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	int count = 0;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0);
		if (ret < 0)
			return ret;
		count += ret;
	}

	return count;
}

static int
__fs_xstats_get_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	struct sub_device *sdev;
	unsigned int count = 0;
	uint8_t i;

	/* Caller only cares about count */
	if (!xstats_names)
		return __fs_xstats_count(dev);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_xstat_name *sub_names = xstats_names + count;
		int j, r;

		if (count >= limit)
			break;

		r = rte_eth_xstats_get_names(PORT_ID(sdev),
					     sub_names, limit - count);
		if (r < 0)
			return r;

		/* add subN_ prefix to names */
		for
(j = 0; j < r; j++) { 1011 char *xname = sub_names[j].name; 1012 char tmp[RTE_ETH_XSTATS_NAME_SIZE]; 1013 1014 if ((xname[0] == 't' || xname[0] == 'r') && 1015 xname[1] == 'x' && xname[2] == '_') 1016 snprintf(tmp, sizeof(tmp), "%.3ssub%u_%s", 1017 xname, i, xname + 3); 1018 else 1019 snprintf(tmp, sizeof(tmp), "sub%u_%s", 1020 i, xname); 1021 1022 strlcpy(xname, tmp, RTE_ETH_XSTATS_NAME_SIZE); 1023 } 1024 count += r; 1025 } 1026 return count; 1027 } 1028 1029 static int 1030 fs_xstats_get_names(struct rte_eth_dev *dev, 1031 struct rte_eth_xstat_name *xstats_names, 1032 unsigned int limit) 1033 { 1034 int ret; 1035 1036 ret = fs_lock(dev, 0); 1037 if (ret != 0) 1038 return ret; 1039 ret = __fs_xstats_get_names(dev, xstats_names, limit); 1040 fs_unlock(dev, 0); 1041 return ret; 1042 } 1043 1044 static int 1045 __fs_xstats_get(struct rte_eth_dev *dev, 1046 struct rte_eth_xstat *xstats, 1047 unsigned int n) 1048 { 1049 unsigned int count = 0; 1050 struct sub_device *sdev; 1051 uint8_t i; 1052 int j, ret; 1053 1054 ret = __fs_xstats_count(dev); 1055 /* 1056 * if error 1057 * or caller did not give enough space 1058 * or just querying 1059 */ 1060 if (ret < 0 || ret > (int)n || xstats == NULL) 1061 return ret; 1062 1063 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1064 ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n); 1065 if (ret < 0) 1066 return ret; 1067 1068 if (ret > (int)n) 1069 return n + count; 1070 1071 /* add offset to id's from sub-device */ 1072 for (j = 0; j < ret; j++) 1073 xstats[j].id += count; 1074 1075 xstats += ret; 1076 n -= ret; 1077 count += ret; 1078 } 1079 1080 return count; 1081 } 1082 1083 static int 1084 fs_xstats_get(struct rte_eth_dev *dev, 1085 struct rte_eth_xstat *xstats, 1086 unsigned int n) 1087 { 1088 int ret; 1089 1090 ret = fs_lock(dev, 0); 1091 if (ret != 0) 1092 return ret; 1093 ret = __fs_xstats_get(dev, xstats, n); 1094 fs_unlock(dev, 0); 1095 1096 return ret; 1097 } 1098 1099 1100 static int 1101 fs_xstats_reset(struct rte_eth_dev *dev) 1102 { 1103 struct sub_device *sdev; 1104 uint8_t i; 1105 int r; 1106 1107 r = fs_lock(dev, 0); 1108 if (r != 0) 1109 return r; 1110 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1111 r = rte_eth_xstats_reset(PORT_ID(sdev)); 1112 if (r < 0) 1113 break; 1114 } 1115 fs_unlock(dev, 0); 1116 1117 return r; 1118 } 1119 1120 static void 1121 fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to, 1122 const struct rte_eth_desc_lim *from) 1123 { 1124 to->nb_max = RTE_MIN(to->nb_max, from->nb_max); 1125 to->nb_min = RTE_MAX(to->nb_min, from->nb_min); 1126 to->nb_align = RTE_MAX(to->nb_align, from->nb_align); 1127 1128 to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max); 1129 to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max); 1130 } 1131 1132 /* 1133 * Merge the information from sub-devices. 
1134 * 1135 * The reported values must be the common subset of all sub devices 1136 */ 1137 static void 1138 fs_dev_merge_info(struct rte_eth_dev_info *info, 1139 const struct rte_eth_dev_info *sinfo) 1140 { 1141 info->min_mtu = RTE_MAX(info->min_mtu, sinfo->min_mtu); 1142 info->max_mtu = RTE_MIN(info->max_mtu, sinfo->max_mtu); 1143 info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen); 1144 info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues); 1145 info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues); 1146 info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs); 1147 info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs, 1148 sinfo->max_hash_mac_addrs); 1149 info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools); 1150 info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs); 1151 1152 fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim); 1153 fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim); 1154 1155 info->rx_offload_capa &= sinfo->rx_offload_capa; 1156 info->tx_offload_capa &= sinfo->tx_offload_capa; 1157 info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa; 1158 info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa; 1159 info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads; 1160 1161 /* 1162 * RETA size is a GCD of RETA sizes indicated by sub-devices. 1163 * Each of these sizes is a power of 2, so use the lower one. 1164 */ 1165 info->reta_size = RTE_MIN(info->reta_size, sinfo->reta_size); 1166 1167 info->hash_key_size = RTE_MIN(info->hash_key_size, 1168 sinfo->hash_key_size); 1169 } 1170 1171 /** 1172 * Fail-safe dev_infos_get rules: 1173 * 1174 * No sub_device: 1175 * Numerables: 1176 * Use the maximum possible values for any field, so as not 1177 * to impede any further configuration effort. 1178 * Capabilities: 1179 * Limits capabilities to those that are understood by the 1180 * fail-safe PMD. This understanding stems from the fail-safe 1181 * being capable of verifying that the related capability is 1182 * expressed within the device configuration (struct rte_eth_conf). 1183 * 1184 * At least one probed sub_device: 1185 * Numerables: 1186 * Uses values from the active probed sub_device 1187 * The rationale here is that if any sub_device is less capable 1188 * (for example concerning the number of queues) than the active 1189 * sub_device, then its subsequent configuration will fail. 1190 * It is impossible to foresee this failure when the failing sub_device 1191 * is supposed to be plugged-in later on, so the configuration process 1192 * is the single point of failure and error reporting. 1193 * Capabilities: 1194 * Uses a logical AND of RX capabilities among 1195 * all sub_devices and the default capabilities. 1196 * Uses a logical AND of TX capabilities among 1197 * the active probed sub_device and the default capabilities. 1198 * Uses a logical AND of device capabilities among 1199 * all sub_devices and the default capabilities. 
1200 * 1201 */ 1202 static int 1203 fs_dev_infos_get(struct rte_eth_dev *dev, 1204 struct rte_eth_dev_info *infos) 1205 { 1206 struct sub_device *sdev; 1207 uint8_t i; 1208 int ret; 1209 1210 /* Use maximum upper bounds by default */ 1211 infos->min_mtu = RTE_ETHER_MIN_MTU; 1212 infos->max_mtu = UINT16_MAX; 1213 infos->max_rx_pktlen = UINT32_MAX; 1214 infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT; 1215 infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT; 1216 infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR; 1217 infos->max_hash_mac_addrs = UINT32_MAX; 1218 infos->max_vfs = UINT16_MAX; 1219 infos->max_vmdq_pools = UINT16_MAX; 1220 infos->reta_size = UINT16_MAX; 1221 infos->hash_key_size = UINT8_MAX; 1222 1223 /* 1224 * Set of capabilities that can be verified upon 1225 * configuring a sub-device. 1226 */ 1227 infos->rx_offload_capa = 1228 RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 1229 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 1230 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 1231 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 1232 RTE_ETH_RX_OFFLOAD_TCP_LRO | 1233 RTE_ETH_RX_OFFLOAD_QINQ_STRIP | 1234 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1235 RTE_ETH_RX_OFFLOAD_MACSEC_STRIP | 1236 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | 1237 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | 1238 RTE_ETH_RX_OFFLOAD_SCATTER | 1239 RTE_ETH_RX_OFFLOAD_TIMESTAMP | 1240 RTE_ETH_RX_OFFLOAD_SECURITY | 1241 RTE_ETH_RX_OFFLOAD_RSS_HASH; 1242 1243 infos->rx_queue_offload_capa = 1244 RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 1245 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 1246 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 1247 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 1248 RTE_ETH_RX_OFFLOAD_TCP_LRO | 1249 RTE_ETH_RX_OFFLOAD_QINQ_STRIP | 1250 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1251 RTE_ETH_RX_OFFLOAD_MACSEC_STRIP | 1252 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | 1253 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | 1254 RTE_ETH_RX_OFFLOAD_SCATTER | 1255 RTE_ETH_RX_OFFLOAD_TIMESTAMP | 1256 RTE_ETH_RX_OFFLOAD_SECURITY | 1257 RTE_ETH_RX_OFFLOAD_RSS_HASH; 1258 1259 infos->tx_offload_capa = 1260 RTE_ETH_TX_OFFLOAD_MULTI_SEGS | 1261 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | 1262 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | 1263 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 1264 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | 1265 RTE_ETH_TX_OFFLOAD_TCP_TSO; 1266 1267 infos->flow_type_rss_offloads = 1268 RTE_ETH_RSS_IP | 1269 RTE_ETH_RSS_UDP | 1270 RTE_ETH_RSS_TCP; 1271 infos->dev_capa = 1272 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 1273 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 1274 infos->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; 1275 1276 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { 1277 struct rte_eth_dev_info sub_info; 1278 1279 ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info); 1280 ret = fs_err(sdev, ret); 1281 if (ret != 0) 1282 return ret; 1283 1284 fs_dev_merge_info(infos, &sub_info); 1285 } 1286 1287 return 0; 1288 } 1289 1290 static const uint32_t * 1291 fs_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements) 1292 { 1293 struct sub_device *sdev; 1294 struct rte_eth_dev *edev; 1295 const uint32_t *ret; 1296 1297 if (fs_lock(dev, 0) != 0) 1298 return NULL; 1299 sdev = TX_SUBDEV(dev); 1300 if (sdev == NULL) { 1301 ret = NULL; 1302 goto unlock; 1303 } 1304 edev = ETH(sdev); 1305 /* ENOTSUP: counts as no supported ptypes */ 1306 if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) { 1307 ret = NULL; 1308 goto unlock; 1309 } 1310 /* 1311 * The API does not permit to do a clean AND of all ptypes, 1312 * It is also incomplete by design and we do not really care 1313 * to have a best possible value in this context. 
1314 * We just return the ptypes of the device of highest 1315 * priority, usually the PREFERRED device. 1316 */ 1317 ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev, no_of_elements); 1318 unlock: 1319 fs_unlock(dev, 0); 1320 return ret; 1321 } 1322 1323 static int 1324 fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1325 { 1326 struct sub_device *sdev; 1327 uint8_t i; 1328 int ret; 1329 1330 ret = fs_lock(dev, 0); 1331 if (ret != 0) 1332 return ret; 1333 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1334 DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i); 1335 ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu); 1336 if ((ret = fs_err(sdev, ret))) { 1337 ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d", 1338 i, ret); 1339 fs_unlock(dev, 0); 1340 return ret; 1341 } 1342 } 1343 fs_unlock(dev, 0); 1344 return 0; 1345 } 1346 1347 static int 1348 fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1349 { 1350 struct sub_device *sdev; 1351 uint8_t i; 1352 int ret; 1353 1354 ret = fs_lock(dev, 0); 1355 if (ret != 0) 1356 return ret; 1357 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1358 DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i); 1359 ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on); 1360 if ((ret = fs_err(sdev, ret))) { 1361 ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d" 1362 " with error %d", i, ret); 1363 fs_unlock(dev, 0); 1364 return ret; 1365 } 1366 } 1367 fs_unlock(dev, 0); 1368 return 0; 1369 } 1370 1371 static int 1372 fs_flow_ctrl_get(struct rte_eth_dev *dev, 1373 struct rte_eth_fc_conf *fc_conf) 1374 { 1375 struct sub_device *sdev; 1376 int ret; 1377 1378 ret = fs_lock(dev, 0); 1379 if (ret != 0) 1380 return ret; 1381 sdev = TX_SUBDEV(dev); 1382 if (sdev == NULL) { 1383 ret = 0; 1384 goto unlock; 1385 } 1386 if (SUBOPS(sdev, flow_ctrl_get) == NULL) { 1387 ret = -ENOTSUP; 1388 goto unlock; 1389 } 1390 ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf); 1391 unlock: 1392 fs_unlock(dev, 0); 1393 return ret; 1394 } 1395 1396 static int 1397 fs_flow_ctrl_set(struct rte_eth_dev *dev, 1398 struct rte_eth_fc_conf *fc_conf) 1399 { 1400 struct sub_device *sdev; 1401 uint8_t i; 1402 int ret; 1403 1404 ret = fs_lock(dev, 0); 1405 if (ret != 0) 1406 return ret; 1407 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1408 DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i); 1409 ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf); 1410 if ((ret = fs_err(sdev, ret))) { 1411 ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d" 1412 " with error %d", i, ret); 1413 fs_unlock(dev, 0); 1414 return ret; 1415 } 1416 } 1417 fs_unlock(dev, 0); 1418 return 0; 1419 } 1420 1421 static void 1422 fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 1423 { 1424 struct sub_device *sdev; 1425 uint8_t i; 1426 1427 if (fs_lock(dev, 0) != 0) 1428 return; 1429 /* No check: already done within the rte_eth_dev_mac_addr_remove 1430 * call for the fail-safe device. 
1431 */ 1432 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 1433 rte_eth_dev_mac_addr_remove(PORT_ID(sdev), 1434 &dev->data->mac_addrs[index]); 1435 PRIV(dev)->mac_addr_pool[index] = 0; 1436 fs_unlock(dev, 0); 1437 } 1438 1439 static int 1440 fs_mac_addr_add(struct rte_eth_dev *dev, 1441 struct rte_ether_addr *mac_addr, 1442 uint32_t index, 1443 uint32_t vmdq) 1444 { 1445 struct sub_device *sdev; 1446 int ret; 1447 uint8_t i; 1448 1449 RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR); 1450 ret = fs_lock(dev, 0); 1451 if (ret != 0) 1452 return ret; 1453 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1454 ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq); 1455 if ((ret = fs_err(sdev, ret))) { 1456 ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %" 1457 PRIu8 " with error %d", i, ret); 1458 fs_unlock(dev, 0); 1459 return ret; 1460 } 1461 } 1462 if (index >= PRIV(dev)->nb_mac_addr) { 1463 DEBUG("Growing mac_addrs array"); 1464 PRIV(dev)->nb_mac_addr = index; 1465 } 1466 PRIV(dev)->mac_addr_pool[index] = vmdq; 1467 fs_unlock(dev, 0); 1468 return 0; 1469 } 1470 1471 static int 1472 fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) 1473 { 1474 struct sub_device *sdev; 1475 uint8_t i; 1476 int ret; 1477 1478 ret = fs_lock(dev, 0); 1479 if (ret != 0) 1480 return ret; 1481 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1482 ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr); 1483 ret = fs_err(sdev, ret); 1484 if (ret) { 1485 ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d", 1486 i, ret); 1487 fs_unlock(dev, 0); 1488 return ret; 1489 } 1490 } 1491 fs_unlock(dev, 0); 1492 1493 return 0; 1494 } 1495 1496 static int 1497 fs_set_mc_addr_list(struct rte_eth_dev *dev, 1498 struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr) 1499 { 1500 struct sub_device *sdev; 1501 uint8_t i; 1502 int ret; 1503 void *mcast_addrs; 1504 1505 ret = fs_lock(dev, 0); 1506 if (ret != 0) 1507 return ret; 1508 1509 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1510 ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev), 1511 mc_addr_set, nb_mc_addr); 1512 if (ret != 0) { 1513 ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d", 1514 i, ret); 1515 goto rollback; 1516 } 1517 } 1518 1519 mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs, 1520 nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0); 1521 if (mcast_addrs == NULL && nb_mc_addr > 0) { 1522 ret = -ENOMEM; 1523 goto rollback; 1524 } 1525 rte_memcpy(mcast_addrs, mc_addr_set, 1526 nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0])); 1527 PRIV(dev)->nb_mcast_addr = nb_mc_addr; 1528 PRIV(dev)->mcast_addrs = mcast_addrs; 1529 1530 fs_unlock(dev, 0); 1531 return 0; 1532 1533 rollback: 1534 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1535 int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev), 1536 PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr); 1537 if (rc != 0) { 1538 ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d", 1539 i, rc); 1540 } 1541 } 1542 1543 fs_unlock(dev, 0); 1544 return ret; 1545 } 1546 1547 static int 1548 fs_rss_hash_update(struct rte_eth_dev *dev, 1549 struct rte_eth_rss_conf *rss_conf) 1550 { 1551 struct sub_device *sdev; 1552 uint8_t i; 1553 int ret; 1554 1555 ret = fs_lock(dev, 0); 1556 if (ret != 0) 1557 return ret; 1558 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1559 ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf); 1560 ret = fs_err(sdev, ret); 1561 if (ret) { 1562 ERROR("Operation 
rte_eth_dev_rss_hash_update" 1563 " failed for sub_device %d with error %d", 1564 i, ret); 1565 fs_unlock(dev, 0); 1566 return ret; 1567 } 1568 } 1569 fs_unlock(dev, 0); 1570 1571 return 0; 1572 } 1573 1574 static int 1575 fs_flow_ops_get(struct rte_eth_dev *dev __rte_unused, 1576 const struct rte_flow_ops **ops) 1577 { 1578 *ops = &fs_flow_ops; 1579 return 0; 1580 } 1581 1582 const struct eth_dev_ops failsafe_ops = { 1583 .dev_configure = fs_dev_configure, 1584 .dev_start = fs_dev_start, 1585 .dev_stop = fs_dev_stop, 1586 .dev_set_link_down = fs_dev_set_link_down, 1587 .dev_set_link_up = fs_dev_set_link_up, 1588 .dev_close = failsafe_eth_dev_close, 1589 .promiscuous_enable = fs_promiscuous_enable, 1590 .promiscuous_disable = fs_promiscuous_disable, 1591 .allmulticast_enable = fs_allmulticast_enable, 1592 .allmulticast_disable = fs_allmulticast_disable, 1593 .link_update = fs_link_update, 1594 .stats_get = fs_stats_get, 1595 .stats_reset = fs_stats_reset, 1596 .xstats_get = fs_xstats_get, 1597 .xstats_get_names = fs_xstats_get_names, 1598 .xstats_reset = fs_xstats_reset, 1599 .dev_infos_get = fs_dev_infos_get, 1600 .dev_supported_ptypes_get = fs_dev_supported_ptypes_get, 1601 .mtu_set = fs_mtu_set, 1602 .vlan_filter_set = fs_vlan_filter_set, 1603 .rx_queue_start = fs_rx_queue_start, 1604 .rx_queue_stop = fs_rx_queue_stop, 1605 .tx_queue_start = fs_tx_queue_start, 1606 .tx_queue_stop = fs_tx_queue_stop, 1607 .rx_queue_setup = fs_rx_queue_setup, 1608 .tx_queue_setup = fs_tx_queue_setup, 1609 .rx_queue_release = fs_rx_queue_release, 1610 .tx_queue_release = fs_tx_queue_release, 1611 .rx_queue_intr_enable = fs_rx_intr_enable, 1612 .rx_queue_intr_disable = fs_rx_intr_disable, 1613 .flow_ctrl_get = fs_flow_ctrl_get, 1614 .flow_ctrl_set = fs_flow_ctrl_set, 1615 .mac_addr_remove = fs_mac_addr_remove, 1616 .mac_addr_add = fs_mac_addr_add, 1617 .mac_addr_set = fs_mac_addr_set, 1618 .set_mc_addr_list = fs_set_mc_addr_list, 1619 .rss_hash_update = fs_rss_hash_update, 1620 .flow_ops_get = fs_flow_ops_get, 1621 }; 1622