/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"

static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO,
	.tx_offload_capa = 0x0,
	.flow_type_rss_offloads = 0x0,
};

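/*
 * Configure every probed sub-device with the fail-safe configuration.
 * The RMV and LSC bits of the shared intr_conf are toggled per sub-device
 * so that each one is only configured for the events it advertises, then
 * restored once its callbacks have been registered.
 */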
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED)
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		sdev->remove = 0;
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret)
			return ret;
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		sdev->state = DEV_STARTED - 1;
	}
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

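/*
 * Device states are ordered (DEV_PROBED < DEV_ACTIVE < DEV_STARTED in
 * failsafe_private.h), so assignments such as "DEV_STARTED - 1" above and
 * "DEV_ACTIVE - 1" below simply step a device back to the previous state.
 */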
static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if (ret) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}

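/*
 * As with the Rx queues above, each fail-safe queue object is allocated
 * with one atomic reference counter per potential sub-device appended to
 * it (hence the subs_tail-sized over-allocation); the counters are only
 * initialized here and serve to reference-count per-sub-device queue
 * usage elsewhere in the driver.
 */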
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if (ret) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}

static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint32_t rx_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not permit a clean AND of all ptypes.
	 * It is also incomplete by design, and we do not really care
	 * about having the best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

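/*
 * The setters below (MTU, VLAN filter, flow control set) simply fan the
 * request out to every active sub-device and abort on the first error.
 */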
static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if (ret) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if (ret) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if (ret) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}

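/*
 * fs_mac_addr_add records the VMDq pool of each added address in
 * mac_addr_pool[] and grows nb_mac_addr when a new index is used, keeping
 * the fail-safe view of the MAC address table in the private data.
 */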
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if (ret) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};