/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "failsafe_private.h"

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			if (fs_err(sdev, rte_eth_dev_stop(PORT_ID(sdev))) < 0)
				ERROR("Failed to stop sub-device %u",
				      SUB_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static int
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		ret = rte_eth_dev_stop(PORT_ID(sdev));
		if (fs_err(sdev, ret) < 0) {
			ERROR("Failed to stop device %u",
			      PORT_ID(sdev));
			PRIV(dev)->state = DEV_STARTED + 1;
			fs_unlock(dev, 0);
			return ret;
		}
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = &rte_eth_devices[rxq->priv->data->port_id];
	fs_lock(dev, 0);
	if (rxq->event_fd >= 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
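	 * The eventfd obtained this way is stored in rxq->event_fd below
	 * and closed again in fs_rx_queue_release().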
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = &rte_eth_devices[txq->priv->data->port_id];
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

int
failsafe_eth_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int err, ret = 0;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED) {
		ret = dev->dev_ops->dev_stop(dev);
		if (ret != 0) {
			fs_unlock(dev, 0);
			return ret;
		}
	}
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		err = rte_eth_dev_close(PORT_ID(sdev));
		if (err) {
			ret = ret ? ret : err;
			ERROR("Error while closing sub-device %u",
			      PORT_ID(sdev));
		}
		sdev->state = DEV_ACTIVE - 1;
	}
	rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
					failsafe_eth_new_event_callback, dev);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		fs_unlock(dev, 0);
		return ret;
	}
	fs_dev_free_queues(dev);
	err = failsafe_eal_uninit(dev);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while uninitializing sub-EAL");
	}
	failsafe_args_free(dev);
	rte_free(PRIV(dev)->subs);
	rte_free(PRIV(dev)->mcast_addrs);
	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;
	fs_unlock(dev, 0);
	err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while destroying hotplug mutex");
	}
	return ret;
}

static int
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
				  i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_stats_reset(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;

			ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);

	return 0;
}

static int
__fs_xstats_count(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	int count = 0;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0);
		if (ret < 0)
			return ret;
		count += ret;
	}

	return count;
}

static int
__fs_xstats_get_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	struct sub_device *sdev;
	unsigned int count = 0;
	uint8_t i;

	/* Caller only cares about count */
	if (!xstats_names)
		return __fs_xstats_count(dev);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_xstat_name *sub_names = xstats_names + count;
		int j, r;

		if (count >= limit)
			break;

		r = rte_eth_xstats_get_names(PORT_ID(sdev),
					     sub_names, limit - count);
		if (r < 0)
			return r;

		/* add subN_ prefix to names */
		for (j = 0; j < r; j++) {
			char *xname = sub_names[j].name;
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			if ((xname[0] == 't' || xname[0] == 'r') &&
			    xname[1] == 'x' && xname[2] == '_')
				snprintf(tmp, sizeof(tmp), "%.3ssub%u_%s",
					 xname, i, xname + 3);
			else
				snprintf(tmp, sizeof(tmp), "sub%u_%s",
					 i, xname);

			strlcpy(xname, tmp, RTE_ETH_XSTATS_NAME_SIZE);
		}
		count += r;
	}
	return count;
}

static int
fs_xstats_get_names(struct rte_eth_dev *dev,
		    struct rte_eth_xstat_name *xstats_names,
		    unsigned int limit)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get_names(dev, xstats_names, limit);
	fs_unlock(dev, 0);
	return ret;
}

static int
__fs_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats,
		unsigned int n)
{
	unsigned int count = 0;
	struct sub_device *sdev;
	uint8_t i;
	int j, ret;

	ret = __fs_xstats_count(dev);
	/*
	 * if error
	 * or caller did not give enough space
	 * or just querying
	 */
	if (ret < 0 || ret > (int)n || xstats == NULL)
		return ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n);
		if (ret < 0)
			return ret;

		if (ret > (int)n)
			return n + count;

		/* add offset to id's from sub-device */
		for (j = 0; j < ret; j++)
			xstats[j].id += count;

		xstats += ret;
		n -= ret;
		count += ret;
	}

	return count;
}

static int
fs_xstats_get(struct rte_eth_dev *dev,
	      struct rte_eth_xstat *xstats,
	      unsigned int n)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get(dev, xstats, n);
	fs_unlock(dev, 0);

	return ret;
}


static int
fs_xstats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int r = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		r = rte_eth_xstats_reset(PORT_ID(sdev));
		if (r < 0)
			break;
	}
	fs_unlock(dev, 0);

	return r;
}

static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub devices
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->min_mtu = RTE_MAX(info->min_mtu, sinfo->min_mtu);
	info->max_mtu = RTE_MIN(info->max_mtu, sinfo->max_mtu);
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;

	/*
	 * RETA size is a GCD of RETA sizes indicated by sub-devices.
	 * Each of these sizes is a power of 2, so use the lower one.
	 */
	info->reta_size = RTE_MIN(info->reta_size, sinfo->reta_size);

	info->hash_key_size = RTE_MIN(info->hash_key_size,
				      sinfo->hash_key_size);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 *
 */
static int
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Use maximum upper bounds by default */
	infos->min_mtu = RTE_ETHER_MIN_MTU;
	infos->max_mtu = UINT16_MAX;
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;
	infos->reta_size = UINT16_MAX;
	infos->hash_key_size = UINT8_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY |
		DEV_RX_OFFLOAD_RSS_HASH;

	infos->rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY |
		DEV_RX_OFFLOAD_RSS_HASH;

	infos->tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
		ret = fs_err(sdev, ret);
		if (ret != 0)
			return ret;

		fs_dev_merge_info(infos, &sub_info);
	}

	return 0;
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit a clean AND of all ptypes.
	 * It is also incomplete by design and we do not really care
	 * to have a best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
				i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	return -ENOTSUP;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = failsafe_eth_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.xstats_get = fs_xstats_get,
	.xstats_get_names = fs_xstats_get_names,
	.xstats_reset = fs_xstats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};