/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "failsafe_private.h"

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
			(ETH(sdev)->data->dev_flags &
			 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
	fs_unlock(dev, 0);
}

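/*
 * Stop the given Rx queue on every active sub-device, then mark the
 * fail-safe queue itself as stopped. Per sub-device failures are only
 * logged; the call reports success as long as at least one sub-device
 * stopped the queue.
 */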
static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = &rte_eth_devices[rxq->priv->data->port_id];
	fs_lock(dev, 0);
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = &rte_eth_devices[txq->priv->data->port_id];
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static int
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

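/*
 * Poll link_update on every active sub-device, then mirror the link
 * status of the preferred (TX) sub-device into the fail-safe port data.
 */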
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_stats_reset(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;

			ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);

	return 0;
}

static int
__fs_xstats_count(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	int count = 0;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0);
		if (ret < 0)
			return ret;
		count += ret;
	}

	return count;
}

static int
__fs_xstats_get_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	struct sub_device *sdev;
	unsigned int count = 0;
	uint8_t i;

	/* Caller only cares about count */
	if (!xstats_names)
		return __fs_xstats_count(dev);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_xstat_name *sub_names = xstats_names + count;
		int j, r;

		if (count >= limit)
			break;

		r = rte_eth_xstats_get_names(PORT_ID(sdev),
					     sub_names, limit - count);
		if (r < 0)
			return r;

		/* add subN_ prefix to names */
		for (j = 0; j < r; j++) {
			char *xname = sub_names[j].name;
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			if ((xname[0] == 't' || xname[0] == 'r') &&
			    xname[1] == 'x' && xname[2] == '_')
				snprintf(tmp, sizeof(tmp), "%.3ssub%u_%s",
					 xname, i, xname + 3);
			else
				snprintf(tmp, sizeof(tmp), "sub%u_%s",
					 i, xname);

			strlcpy(xname, tmp, RTE_ETH_XSTATS_NAME_SIZE);
		}
		count += r;
	}
	return count;
}

static int
fs_xstats_get_names(struct rte_eth_dev *dev,
		    struct rte_eth_xstat_name *xstats_names,
		    unsigned int limit)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get_names(dev, xstats_names, limit);
	fs_unlock(dev, 0);
	return ret;
}

static int
__fs_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats,
		unsigned int n)
{
	unsigned int count = 0;
	struct sub_device *sdev;
	uint8_t i;
	int j, ret;

	ret = __fs_xstats_count(dev);
	/*
	 * if error
	 * or caller did not give enough space
	 * or just querying
	 */
	if (ret < 0 || ret > (int)n || xstats == NULL)
		return ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n);
		if (ret < 0)
			return ret;

		if (ret > (int)n)
			return n + count;

		/* add offset to id's from sub-device */
		for (j = 0; j < ret; j++)
			xstats[j].id += count;

		xstats += ret;
		n -= ret;
		count += ret;
	}

	return count;
}

static int
fs_xstats_get(struct rte_eth_dev *dev,
	      struct rte_eth_xstat *xstats,
	      unsigned int n)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get(dev, xstats, n);
	fs_unlock(dev, 0);

	return ret;
}


static int
fs_xstats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int r = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		r = rte_eth_xstats_reset(PORT_ID(sdev));
		if (r < 0)
			break;
	}
	fs_unlock(dev, 0);

	return r;
}

static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub devices
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;
	info->hash_key_size = RTE_MIN(info->hash_key_size,
				      sinfo->hash_key_size);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 *
 */
static int
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Use maximum upper bounds by default */
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;
	infos->hash_key_size = UINT8_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
		ret = fs_err(sdev, ret);
		if (ret != 0)
			return ret;

		fs_dev_merge_info(infos, &sub_info);
	}

	return 0;
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit a clean AND of all ptypes. It is also
	 * incomplete by design and we do not really care to have the best
	 * possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
				i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
				" failed for sub_device %d with error %d",
				i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

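/*
 * Only the generic flow API is handled through filter_ctrl:
 * it exposes the fail-safe rte_flow ops. Every other filter type
 * is reported as unsupported.
 */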
static int
fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	return -ENOTSUP;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.xstats_get = fs_xstats_get,
	.xstats_get_names = fs_xstats_get_names,
	.xstats_reset = fs_xstats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};