/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO,
	.flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP,
};

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
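		/*
		 * Forward the fail-safe dev_conf, with the interrupt
		 * flags adjusted above, to the sub-device.
		 */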
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_unlock(dev, 0);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
	fs_unlock(dev, 0);
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	fs_lock(dev, 0);
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating an eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts;
	 * this causes rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if the proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
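		/* This also releases the matching queue on each active sub-device. */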
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those understood by the fail-safe PMD.
 *      This understanding stems from the fail-safe being capable of
 *      verifying that the related capability is expressed within the
 *      device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Use values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing
 *      sub_device is supposed to be plugged in later on, so the
 *      configuration process is the single point of failure and
 *      error reporting.
 *   Capabilities:
 *      Use a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Use a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;
		uint64_t rss_hf_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
			rss_hf_offload_capa &=
					PRIV(dev)->infos.flow_type_rss_offloads;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit a clean AND of all ptypes;
	 * it is also incomplete by design, and we do not really care
	 * to have the best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
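	 * Here this is TX_SUBDEV(dev), i.e. the sub-device currently
	 * used for Tx.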
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};