/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#ifdef RTE_EXEC_ENV_LINUX
#include <sys/eventfd.h>
#endif

#include <rte_debug.h>
#include <rte_atomic.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "failsafe_private.h"

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
			(ETH(sdev)->data->dev_flags &
			 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

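/*
 * Start the fail-safe port: install the Rx interrupt proxy, start every
 * active sub-device (and its per-queue interrupts), then let fs_switch_dev()
 * select the sub-device used by the datapath.
 */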
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			if (fs_err(sdev, rte_eth_dev_stop(PORT_ID(sdev))) < 0)
				ERROR("Failed to stop sub-device %u",
				      SUB_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static int
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		ret = rte_eth_dev_stop(PORT_ID(sdev));
		if (fs_err(sdev, ret) < 0) {
			ERROR("Failed to stop device %u",
			      PORT_ID(sdev));
			PRIV(dev)->state = DEV_STARTED + 1;
			fs_unlock(dev, 0);
			return ret;
		}
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

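/*
 * Per-queue start/stop handlers below apply the request to every active
 * sub-device. A queue stop reports success if at least one sub-device
 * stopped the queue; a queue start is rolled back on the first failure.
 */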
static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	if (fs_lock(dev, 0) != 0)
		return;
	if (rxq->event_fd >= 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL)
			SUBOPS(sdev, rx_queue_release)(ETH(sdev), rxq->qid);
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(dev, rx_queue_id);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
#ifdef RTE_EXEC_ENV_LINUX
	rxq->event_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	if (rxq->event_fd < 0) {
		ERROR("Failed to create an eventfd: %s", strerror(errno));
		fs_unlock(dev, 0);
		return -errno;
	}
#else
	rxq->event_fd = -1;
#endif
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(dev, rx_queue_id);
	fs_unlock(dev, 0);
	return ret;
}

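/*
 * Rx interrupts are proxied: the application waits on the fail-safe's own
 * eventfd while enable/disable requests are forwarded to every active
 * sub-device. Pending events are drained from the eventfd on disable.
 */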
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;
	if (fs_lock(dev, 0) != 0)
		return;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL)
			SUBOPS(sdev, tx_queue_release)(ETH(sdev), txq->qid);
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(dev, tx_queue_id);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(dev, tx_queue_id);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

int
failsafe_eth_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int err, ret = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED) {
		ret = dev->dev_ops->dev_stop(dev);
		if (ret != 0) {
			fs_unlock(dev, 0);
			return ret;
		}
	}
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		err = rte_eth_dev_close(PORT_ID(sdev));
		if (err) {
			ret = ret ? ret : err;
			ERROR("Error while closing sub-device %u",
			      PORT_ID(sdev));
		}
		sdev->state = DEV_ACTIVE - 1;
	}
	rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
					failsafe_eth_new_event_callback, dev);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		fs_unlock(dev, 0);
		return ret;
	}
	fs_dev_free_queues(dev);
	err = failsafe_eal_uninit(dev);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while uninitializing sub-EAL");
	}
	failsafe_args_free(dev);
	rte_free(PRIV(dev)->subs);
	rte_free(PRIV(dev)->mcast_addrs);
	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;
	fs_unlock(dev, 0);
	err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while destroying hotplug mutex");
	}
	return ret;
}

static int
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode enable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode disable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode disable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode enable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode enable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode disable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode disable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode enable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

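/*
 * Refresh the link status of every active sub-device, then mirror the Tx
 * sub-device link into the fail-safe port: returns 0 when the reported link
 * changed, -1 otherwise.
 */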
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_stats_reset(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;

			ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);

	return 0;
}

static int
__fs_xstats_count(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	int count = 0;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0);
		if (ret < 0)
			return ret;
		count += ret;
	}

	return count;
}

static int
__fs_xstats_get_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	struct sub_device *sdev;
	unsigned int count = 0;
	uint8_t i;

	/* Caller only cares about count */
	if (!xstats_names)
		return __fs_xstats_count(dev);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_xstat_name *sub_names = xstats_names + count;
		int j, r;

		if (count >= limit)
			break;

		r = rte_eth_xstats_get_names(PORT_ID(sdev),
					     sub_names, limit - count);
		if (r < 0)
			return r;

		/* add subN_ prefix to names */
		for (j = 0; j < r; j++) {
			char *xname = sub_names[j].name;
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			if ((xname[0] == 't' || xname[0] == 'r') &&
			    xname[1] == 'x' && xname[2] == '_')
				snprintf(tmp, sizeof(tmp), "%.3ssub%u_%s",
					 xname, i, xname + 3);
			else
				snprintf(tmp, sizeof(tmp), "sub%u_%s",
					 i, xname);

			strlcpy(xname, tmp, RTE_ETH_XSTATS_NAME_SIZE);
		}
		count += r;
	}
	return count;
}

static int
fs_xstats_get_names(struct rte_eth_dev *dev,
		    struct rte_eth_xstat_name *xstats_names,
		    unsigned int limit)
{
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	ret = __fs_xstats_get_names(dev, xstats_names, limit);
	fs_unlock(dev, 0);
	return ret;
}

static int
__fs_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats,
		unsigned int n)
{
	unsigned int count = 0;
	struct sub_device *sdev;
	uint8_t i;
	int j, ret;

	ret = __fs_xstats_count(dev);
	/*
	 * if error
	 * or caller did not give enough space
	 * or just querying
	 */
	if (ret < 0 || ret > (int)n || xstats == NULL)
		return ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n);
		if (ret < 0)
			return ret;

		if (ret > (int)n)
			return n + count;

		/* add offset to id's from sub-device */
		for (j = 0; j < ret; j++)
			xstats[j].id += count;

		xstats += ret;
		n -= ret;
		count += ret;
	}

	return count;
}

static int
fs_xstats_get(struct rte_eth_dev *dev,
	      struct rte_eth_xstat *xstats,
	      unsigned int n)
{
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	ret = __fs_xstats_get(dev, xstats, n);
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_xstats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int r;

	r = fs_lock(dev, 0);
	if (r != 0)
		return r;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		r = rte_eth_xstats_reset(PORT_ID(sdev));
		if (r < 0)
			break;
	}
	fs_unlock(dev, 0);

	return r;
}

static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub devices
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->min_mtu = RTE_MAX(info->min_mtu, sinfo->min_mtu);
	info->max_mtu = RTE_MIN(info->max_mtu, sinfo->max_mtu);
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;

	/*
	 * RETA size is a GCD of RETA sizes indicated by sub-devices.
	 * Each of these sizes is a power of 2, so use the lower one.
	 */
	info->reta_size = RTE_MIN(info->reta_size, sinfo->reta_size);

	info->hash_key_size = RTE_MIN(info->hash_key_size,
				      sinfo->hash_key_size);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 */
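/*
 * For example, max_rx_queues starts at RTE_MAX_QUEUES_PER_PORT and, once a
 * sub-device is probed, is reduced to the minimum value advertised by the
 * probed sub-devices (see fs_dev_merge_info()).
 */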
static int
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Use maximum upper bounds by default */
	infos->min_mtu = RTE_ETHER_MIN_MTU;
	infos->max_mtu = UINT16_MAX;
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;
	infos->reta_size = UINT16_MAX;
	infos->hash_key_size = UINT8_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_LRO |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
		RTE_ETH_RX_OFFLOAD_SECURITY |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	infos->rx_queue_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_LRO |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
		RTE_ETH_RX_OFFLOAD_SECURITY |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	infos->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		RTE_ETH_RSS_IP |
		RTE_ETH_RSS_UDP |
		RTE_ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
	infos->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
		ret = fs_err(sdev, ret);
		if (ret != 0)
			return ret;

		fs_dev_merge_info(infos, &sub_info);
	}

	return 0;
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	if (fs_lock(dev, 0) != 0)
		return NULL;
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not allow a clean AND of all ptypes; it is also
	 * incomplete by design, and we do not really care to have the best
	 * possible value in this context.
	 * We just return the ptypes of the device of highest priority,
	 * usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev, no_of_elements);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	if (fs_lock(dev, 0) != 0)
		return;
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		const struct rte_flow_ops **ops)
{
	*ops = &fs_flow_ops;
	return 0;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = failsafe_eth_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.xstats_get = fs_xstats_get,
	.xstats_get_names = fs_xstats_get_names,
	.xstats_reset = fs_xstats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.flow_ops_get = fs_flow_ops_get,
};