/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"

static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO,
	.tx_offload_capa = 0x0,
	.flow_type_rss_offloads = 0x0,
};
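
/*
 * Propagate the fail-safe configuration to every probed sub-device:
 * enable RMV/LSC interrupts only when the sub-device advertises them,
 * register the matching event callbacks, and mark each successfully
 * configured sub-device as active.
 */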
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED)
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		sdev->remove = 0;
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret)
			return ret;
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		sdev->state = DEV_STARTED - 1;
	}
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
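
/*
 * Close the fail-safe device: cancel the hotplug alarm, stop the port if
 * it is still started, close every active sub-device and release all
 * Rx/Tx queues owned by the fail-safe instance.
 */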
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}
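
/*
 * Set up an Rx queue: allocate the fail-safe queue wrapper (one reference
 * counter per potential sub-device), record the configuration so it can be
 * re-applied later, then perform the actual queue setup on every active
 * sub-device.
 */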
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if (ret) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if (ret) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}
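
/*
 * Report statistics: start from the fail-safe accumulator (counters carried
 * over from sub-devices that have gone away), then add a fresh snapshot from
 * each active sub-device, time-stamping every successful read.
 */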
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Use values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Use a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Use a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint32_t rx_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not permit a clean AND of all ptypes.
	 * It is also incomplete by design and we do not really care
	 * to have a best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}
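
/*
 * Apply the new MTU to every active sub-device; the first failure aborts
 * the operation and is reported to the caller.
 */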
static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if (ret) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if (ret) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if (ret) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}
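
/*
 * Add a MAC address on every active sub-device and remember its VMDq pool
 * in mac_addr_pool[], so the entry can be re-applied when a sub-device
 * comes up later.
 */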
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if (ret) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};