/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
	fs_unlock(dev, 0);
}

static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;
		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = &rte_eth_devices[rxq->priv->data->port_id];
	fs_lock(dev, 0);
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
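	/* The Rx event proxy service relays sub-device Rx interrupts to the
	 * per-queue event_fd; while it is stopped no event can be delivered.
	 */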
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = &rte_eth_devices[txq->priv->data->port_id];
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static int
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_stats_reset(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;

			ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);

	return 0;
}

static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub devices
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 *
 */
static int
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Use maximum upper bounds by default */
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
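	/*
	 * Capabilities outside this default set are never reported;
	 * fs_dev_merge_info() further masks out anything a probed
	 * sub-device does not itself support.
	 */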
	infos->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
		ret = fs_err(sdev, ret);
		if (ret != 0)
			return ret;

		fs_dev_merge_info(infos, &sub_info);
	}

	return 0;
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not allow a clean AND of the ptypes of all
	 * sub-devices. It is also incomplete by design, and we do not
	 * really care about having the best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
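	/* The ethdev layer has validated the index against the fail-safe
	 * port's mac_addrs array before invoking this callback.
	 */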
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	return -ENOTSUP;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};