/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "failsafe_private.h"

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					    dev->data->nb_rx_queues,
					    dev->data->nb_tx_queues,
					    &dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}
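
/*
 * Start the fail-safe port: install the Rx interrupt proxy, start every
 * active sub-device along with its per-queue Rx interrupts, mark the
 * non-deferred queues as started, then let fs_switch_dev() re-elect the
 * sub-device used by the datapath.
 */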
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			if (fs_err(sdev, rte_eth_dev_stop(PORT_ID(sdev))) < 0)
				ERROR("Failed to stop sub-device %u",
				      SUB_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static int
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		ret = rte_eth_dev_stop(PORT_ID(sdev));
		if (fs_err(sdev, ret) < 0) {
			ERROR("Failed to stop device %u",
			      PORT_ID(sdev));
			PRIV(dev)->state = DEV_STARTED + 1;
			fs_unlock(dev, 0);
			return ret;
		}
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	fs_lock(dev, 0);
	if (rxq->event_fd >= 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL)
			SUBOPS(sdev, rx_queue_release)(ETH(sdev), rxq->qid);
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
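	 * The eventfd obtained this way is stored in rxq->event_fd below and
	 * is closed again by fs_rx_queue_release().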
	 */
	struct rte_intr_handle *intr_handle;
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
	if (intr_handle == NULL)
		return -ENOMEM;

	if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_VFIO_MSIX))
		return -rte_errno;

	if (rte_intr_efds_index_set(intr_handle, 0, -1))
		return -rte_errno;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(dev, rx_queue_id);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = rte_intr_efds_index_get(intr_handle, 0);
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
					     rx_queue_id,
					     nb_rx_desc, socket_id,
					     rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(dev, rx_queue_id);
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL)
			SUBOPS(sdev, tx_queue_release)(ETH(sdev), txq->qid);
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(dev, tx_queue_id);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
					     tx_queue_id,
					     nb_tx_desc, socket_id,
					     tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(dev, tx_queue_id);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

int
failsafe_eth_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int err, ret = 0;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED) {
		ret = dev->dev_ops->dev_stop(dev);
		if (ret != 0) {
			fs_unlock(dev, 0);
			return ret;
		}
	}
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		err = rte_eth_dev_close(PORT_ID(sdev));
		if (err) {
			ret = ret ? ret : err;
			ERROR("Error while closing sub-device %u",
				PORT_ID(sdev));
		}
		sdev->state = DEV_ACTIVE - 1;
	}
	rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
					failsafe_eth_new_event_callback, dev);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		fs_unlock(dev, 0);
		return ret;
	}
	fs_dev_free_queues(dev);
	err = failsafe_eal_uninit(dev);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while uninitializing sub-EAL");
	}
	failsafe_args_free(dev);
	rte_free(PRIV(dev)->subs);
	rte_free(PRIV(dev)->mcast_addrs);
	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;
	fs_unlock(dev, 0);
	err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while destroying hotplug mutex");
	}
	return ret;
}

static int
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}
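
/*
 * As with the promiscuous handlers above, the all-multicast handlers roll
 * the mode back on every sub-device if any of them fails, so all
 * sub-devices keep a consistent configuration.
 */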
static int
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}
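
/*
 * Reset the statistics of every active sub-device and clear both the
 * per-sub-device snapshots and the accumulator that fs_stats_get()
 * starts from.
 */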
static int
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_stats_reset(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;

			ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);

	return 0;
}

static int
__fs_xstats_count(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	int count = 0;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0);
		if (ret < 0)
			return ret;
		count += ret;
	}

	return count;
}

static int
__fs_xstats_get_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	struct sub_device *sdev;
	unsigned int count = 0;
	uint8_t i;

	/* Caller only cares about count */
	if (!xstats_names)
		return __fs_xstats_count(dev);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_xstat_name *sub_names = xstats_names + count;
		int j, r;

		if (count >= limit)
			break;

		r = rte_eth_xstats_get_names(PORT_ID(sdev),
					     sub_names, limit - count);
		if (r < 0)
			return r;

		/* add subN_ prefix to names */
		for (j = 0; j < r; j++) {
			char *xname = sub_names[j].name;
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			if ((xname[0] == 't' || xname[0] == 'r') &&
			    xname[1] == 'x' && xname[2] == '_')
				snprintf(tmp, sizeof(tmp), "%.3ssub%u_%s",
					 xname, i, xname + 3);
			else
				snprintf(tmp, sizeof(tmp), "sub%u_%s",
					 i, xname);

			strlcpy(xname, tmp, RTE_ETH_XSTATS_NAME_SIZE);
		}
		count += r;
	}
	return count;
}

static int
fs_xstats_get_names(struct rte_eth_dev *dev,
		    struct rte_eth_xstat_name *xstats_names,
		    unsigned int limit)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get_names(dev, xstats_names, limit);
	fs_unlock(dev, 0);
	return ret;
}

static int
__fs_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats,
		unsigned int n)
{
	unsigned int count = 0;
	struct sub_device *sdev;
	uint8_t i;
	int j, ret;

	ret = __fs_xstats_count(dev);
	/*
	 * if error
	 * or caller did not give enough space
	 * or just querying
	 */
	if (ret < 0 || ret > (int)n || xstats == NULL)
		return ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n);
		if (ret < 0)
			return ret;

		if (ret > (int)n)
			return n + count;

		/* add offset to id's from sub-device */
		for (j = 0; j < ret; j++)
			xstats[j].id += count;

		xstats += ret;
		n -= ret;
		count += ret;
	}

	return count;
}

static int
fs_xstats_get(struct rte_eth_dev *dev,
	      struct rte_eth_xstat *xstats,
	      unsigned int n)
{
	int ret;

	fs_lock(dev, 0);
	ret = __fs_xstats_get(dev, xstats, n);
	fs_unlock(dev, 0);

	return ret;
}


static int
fs_xstats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int r = 0;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		r = rte_eth_xstats_reset(PORT_ID(sdev));
		if (r < 0)
			break;
	}
	fs_unlock(dev, 0);

	return r;
}

static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub devices
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->min_mtu = RTE_MAX(info->min_mtu, sinfo->min_mtu);
	info->max_mtu = RTE_MIN(info->max_mtu, sinfo->max_mtu);
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;

	/*
	 * RETA size is a GCD of RETA sizes indicated by sub-devices.
	 * Each of these sizes is a power of 2, so use the lower one.
	 */
	info->reta_size = RTE_MIN(info->reta_size, sinfo->reta_size);

	info->hash_key_size = RTE_MIN(info->hash_key_size,
				      sinfo->hash_key_size);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 *
 */
static int
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Use maximum upper bounds by default */
	infos->min_mtu = RTE_ETHER_MIN_MTU;
	infos->max_mtu = UINT16_MAX;
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;
	infos->reta_size = UINT16_MAX;
	infos->hash_key_size = UINT8_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_LRO |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
		RTE_ETH_RX_OFFLOAD_SECURITY |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	infos->rx_queue_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_LRO |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
		RTE_ETH_RX_OFFLOAD_HEADER_SPLIT |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
		RTE_ETH_RX_OFFLOAD_SECURITY |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	infos->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		RTE_ETH_RSS_IP |
		RTE_ETH_RSS_UDP |
		RTE_ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
	infos->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
		ret = fs_err(sdev, ret);
		if (ret != 0)
			return ret;

		fs_dev_merge_info(infos, &sub_info);
	}

	return 0;
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit to do a clean AND of all ptypes,
	 * It is also incomplete by design and we do not really care
	 * to have a best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
				i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
				" failed for sub_device %d with error %d",
				i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		const struct rte_flow_ops **ops)
{
	*ops = &fs_flow_ops;
	return 0;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = failsafe_eth_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.xstats_get = fs_xstats_get,
	.xstats_get_names = fs_xstats_get_names,
	.xstats_reset = fs_xstats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.flow_ops_get = fs_flow_ops_get,
};