/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#ifdef RTE_EXEC_ENV_LINUX
#include <sys/eventfd.h>
#endif

#include <rte_debug.h>
#include <rte_atomic.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "failsafe_private.h"

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
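		/* Rx interrupt handling could not be installed: abort start. */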
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			if (fs_err(sdev, rte_eth_dev_stop(PORT_ID(sdev))) < 0)
				ERROR("Failed to stop sub-device %u",
				      SUB_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static int
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		ret = rte_eth_dev_stop(PORT_ID(sdev));
		if (fs_err(sdev, ret) < 0) {
			ERROR("Failed to stop device %u",
			      PORT_ID(sdev));
			PRIV(dev)->state = DEV_STARTED + 1;
			fs_unlock(dev, 0);
			return ret;
		}
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	fs_lock(dev, 0);
	if (rxq->event_fd >= 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL)
			SUBOPS(sdev, rx_queue_release)(ETH(sdev), rxq->qid);
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
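		/* Queue already configured: release it before setting it up again. */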
		fs_rx_queue_release(dev, rx_queue_id);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
#ifdef RTE_EXEC_ENV_LINUX
	rxq->event_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	if (rxq->event_fd < 0) {
		ERROR("Failed to create an eventfd: %s", strerror(errno));
		fs_unlock(dev, 0);
		return -errno;
	}
#else
	rxq->event_fd = -1;
#endif
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(dev, rx_queue_id);
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL)
			SUBOPS(sdev, tx_queue_release)(ETH(sdev), txq->qid);
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(dev, tx_queue_id);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(dev, tx_queue_id);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

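	/* Release all Rx queues, then all Tx queues. */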
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

int
failsafe_eth_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int err, ret = 0;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED) {
		ret = dev->dev_ops->dev_stop(dev);
		if (ret != 0) {
			fs_unlock(dev, 0);
			return ret;
		}
	}
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		err = rte_eth_dev_close(PORT_ID(sdev));
		if (err) {
			ret = ret ? ret : err;
			ERROR("Error while closing sub-device %u",
			      PORT_ID(sdev));
		}
		sdev->state = DEV_ACTIVE - 1;
	}
	rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
					failsafe_eth_new_event_callback, dev);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		fs_unlock(dev, 0);
		return ret;
	}
	fs_dev_free_queues(dev);
	err = failsafe_eal_uninit(dev);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while uninitializing sub-EAL");
	}
	failsafe_args_free(dev);
	rte_free(PRIV(dev)->subs);
	rte_free(PRIV(dev)->mcast_addrs);
	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;
	fs_unlock(dev, 0);
	err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while destroying hotplug mutex");
	}
	return ret;
}

static int
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode enable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode disable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode disable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode enable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode enable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode disable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode disable failed for subdevice %d",
			      PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode enable during rollback failed for subdevice %d",
				      PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_stats_reset(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;

			ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);

	return 0;
}

static int
__fs_xstats_count(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	int count = 0;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0);
		if (ret < 0)
			return ret;
		count += ret;
	}

	return count;
}

static int
__fs_xstats_get_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	struct sub_device *sdev;
	unsigned int count = 0;
	uint8_t i;

	/* Caller only cares about count */
	if (!xstats_names)
		return __fs_xstats_count(dev);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_xstat_name *sub_names = xstats_names + count;
		int j, r;

		if (count >= limit)
			break;

		r = rte_eth_xstats_get_names(PORT_ID(sdev),
					     sub_names, limit - count);
		if (r < 0)
			return r;

		/* add subN_ prefix to names */
		for (j = 0; j < r; j++) {
			char *xname = sub_names[j].name;
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			if ((xname[0] == 't' || xname[0] == 'r') &&
			    xname[1] == 'x' && xname[2] == '_')
				snprintf(tmp, sizeof(tmp), "%.3ssub%u_%s",
					 xname, i, xname + 3);
			else
				snprintf(tmp, sizeof(tmp), "sub%u_%s",
					 i, xname);

			strlcpy(xname, tmp, RTE_ETH_XSTATS_NAME_SIZE);
		}
		count += r;
	}
	return count;
}

static int
fs_xstats_get_names(struct rte_eth_dev *dev,
		    struct rte_eth_xstat_name *xstats_names,
		    unsigned int limit)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get_names(dev, xstats_names, limit);
	fs_unlock(dev, 0);
	return ret;
}

static int
__fs_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats,
		unsigned int n)
{
	unsigned int count = 0;
	struct sub_device *sdev;
	uint8_t i;
	int j, ret;

	ret = __fs_xstats_count(dev);
	/*
	 * if error
	 * or caller did not give enough space
	 * or just querying
	 */
	if (ret < 0 || ret > (int)n || xstats == NULL)
		return ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n);
		if (ret < 0)
			return ret;

		if (ret > (int)n)
			return n + count;

		/* add offset to id's from sub-device */
		for (j = 0; j < ret; j++)
			xstats[j].id += count;

		xstats += ret;
		n -= ret;
		count += ret;
	}

	return count;
}

static int
fs_xstats_get(struct rte_eth_dev *dev,
	      struct rte_eth_xstat *xstats,
	      unsigned int n)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get(dev, xstats, n);
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_xstats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int r = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		r = rte_eth_xstats_reset(PORT_ID(sdev));
		if (r < 0)
			break;
	}
	fs_unlock(dev, 0);

	return r;
}

static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub devices
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->min_mtu = RTE_MAX(info->min_mtu, sinfo->min_mtu);
	info->max_mtu = RTE_MIN(info->max_mtu, sinfo->max_mtu);
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;

	/*
	 * RETA size is a GCD of RETA sizes indicated by sub-devices.
	 * Each of these sizes is a power of 2, so use the lower one.
	 */
	info->reta_size = RTE_MIN(info->reta_size, sinfo->reta_size);

	info->hash_key_size = RTE_MIN(info->hash_key_size,
				      sinfo->hash_key_size);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 *
 */
static int
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Use maximum upper bounds by default */
	infos->min_mtu = RTE_ETHER_MIN_MTU;
	infos->max_mtu = UINT16_MAX;
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;
	infos->reta_size = UINT16_MAX;
	infos->hash_key_size = UINT8_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_LRO |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
		RTE_ETH_RX_OFFLOAD_SECURITY |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	infos->rx_queue_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_LRO |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
		RTE_ETH_RX_OFFLOAD_SECURITY |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	infos->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		RTE_ETH_RSS_IP |
		RTE_ETH_RSS_UDP |
		RTE_ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
	infos->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
		ret = fs_err(sdev, ret);
		if (ret != 0)
			return ret;

		fs_dev_merge_info(infos, &sub_info);
	}

	return 0;
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit doing a clean AND of all ptypes;
	 * it is also incomplete by design and we do not really care
	 * to have a best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
					    &dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		const struct rte_flow_ops **ops)
{
	*ops = &fs_flow_ops;
	return 0;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = failsafe_eth_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.xstats_get = fs_xstats_get,
	.xstats_get_names = fs_xstats_get_names,
	.xstats_reset = fs_xstats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.flow_ops_get = fs_flow_ops_get,
};