/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>

#include "failsafe_private.h"

static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO,
	.tx_offload_capa = 0x0,
	.flow_type_rss_offloads = 0x0,
};

/**
 * Check whether a specific offloading capability
 * is supported by a sub_device.
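 *
 * @param dev
 *   Fail-safe Ethernet device whose requested configuration is checked.
 * @param sdev
 *   Sub-device whose reported capabilities are checked against the
 *   requested configuration.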
 *
 * @return
 *   0: all requested capabilities are supported by the sub_device
 *   positive value: This flag at least is not supported by the sub_device
 */
static int
fs_port_offload_validate(struct rte_eth_dev *dev,
			 struct sub_device *sdev)
{
	struct rte_eth_dev_info infos = {0};
	struct rte_eth_conf *cf;
	uint32_t cap;

	cf = &dev->data->dev_conf;
	SUBOPS(sdev, dev_infos_get)(ETH(sdev), &infos);
	/* RX capabilities */
	cap = infos.rx_offload_capa;
	if (cf->rxmode.hw_vlan_strip &&
	    ((cap & DEV_RX_OFFLOAD_VLAN_STRIP) == 0)) {
		WARN("VLAN stripping offload requested but not supported by sub_device %d",
		     SUB_ID(sdev));
		return DEV_RX_OFFLOAD_VLAN_STRIP;
	}
	if (cf->rxmode.hw_ip_checksum &&
	    ((cap & (DEV_RX_OFFLOAD_IPV4_CKSUM |
		     DEV_RX_OFFLOAD_UDP_CKSUM |
		     DEV_RX_OFFLOAD_TCP_CKSUM)) !=
	     (DEV_RX_OFFLOAD_IPV4_CKSUM |
	      DEV_RX_OFFLOAD_UDP_CKSUM |
	      DEV_RX_OFFLOAD_TCP_CKSUM))) {
		WARN("IP checksum offload requested but not supported by sub_device %d",
		     SUB_ID(sdev));
		return DEV_RX_OFFLOAD_IPV4_CKSUM |
		       DEV_RX_OFFLOAD_UDP_CKSUM |
		       DEV_RX_OFFLOAD_TCP_CKSUM;
	}
	if (cf->rxmode.enable_lro &&
	    ((cap & DEV_RX_OFFLOAD_TCP_LRO) == 0)) {
		WARN("TCP LRO offload requested but not supported by sub_device %d",
		     SUB_ID(sdev));
		return DEV_RX_OFFLOAD_TCP_LRO;
	}
	if (cf->rxmode.hw_vlan_extend &&
	    ((cap & DEV_RX_OFFLOAD_QINQ_STRIP) == 0)) {
		WARN("Stacked VLAN stripping offload requested but not supported by sub_device %d",
		     SUB_ID(sdev));
		return DEV_RX_OFFLOAD_QINQ_STRIP;
	}
	/* TX capabilities */
	/* Nothing to do, no tx capa supported */
	return 0;
}

/*
 * Disable the dev_conf flag related to an offload capability flag
 * within an ethdev configuration.
 */
static int
fs_port_disable_offload(struct rte_eth_conf *cf,
			uint32_t ol_cap)
{
	switch (ol_cap) {
	case DEV_RX_OFFLOAD_VLAN_STRIP:
		INFO("Disabling VLAN stripping offload");
		cf->rxmode.hw_vlan_strip = 0;
		break;
	case DEV_RX_OFFLOAD_IPV4_CKSUM:
	case DEV_RX_OFFLOAD_UDP_CKSUM:
	case DEV_RX_OFFLOAD_TCP_CKSUM:
	case (DEV_RX_OFFLOAD_IPV4_CKSUM |
	      DEV_RX_OFFLOAD_UDP_CKSUM |
	      DEV_RX_OFFLOAD_TCP_CKSUM):
		INFO("Disabling IP checksum offload");
		cf->rxmode.hw_ip_checksum = 0;
		break;
	case DEV_RX_OFFLOAD_TCP_LRO:
		INFO("Disabling TCP LRO offload");
		cf->rxmode.enable_lro = 0;
		break;
	case DEV_RX_OFFLOAD_QINQ_STRIP:
		INFO("Disabling stacked VLAN stripping offload");
		cf->rxmode.hw_vlan_extend = 0;
		break;
	default:
		DEBUG("Unable to disable offload capability: %" PRIx32,
		      ol_cap);
		return -1;
	}
	return 0;
}

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int capa_flag;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_PROBED)
			continue;
		DEBUG("Checking capabilities for sub_device %d", i);
		while ((capa_flag = fs_port_offload_validate(dev, sdev))) {
			/*
			 * Refuse to change configuration if multiple devices
			 * are present and we already have configured at least
			 * some of them.
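			 * Disabling an offload flag now would leave the
			 * already configured sub-devices out of sync with
			 * the updated configuration.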
			 */
			if (PRIV(dev)->state >= DEV_ACTIVE &&
			    PRIV(dev)->subs_tail > 1) {
				ERROR("device already configured, cannot fix live configuration");
				return -1;
			}
			ret = fs_port_disable_offload(&dev->data->dev_conf,
						      capa_flag);
			if (ret) {
				ERROR("Unable to disable offload capability");
				return ret;
			}
		}
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED)
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		sdev->remove = 0;
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					    dev->data->nb_rx_queues,
					    dev->data->nb_tx_queues,
					    &dev->data->dev_conf);
		if (ret) {
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret)
			return ret;
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		sdev->state = DEV_STARTED - 1;
	}
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);

static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		  uint16_t rx_queue_id,
		  uint16_t nb_rx_desc,
		  unsigned int socket_id,
		  const struct rte_eth_rxconf *rx_conf,
		  struct rte_mempool *mb_pool)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
					     rx_queue_id,
					     nb_rx_desc, socket_id,
					     rx_conf, mb_pool);
		if (ret) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		  uint16_t tx_queue_id,
		  uint16_t nb_tx_desc,
		  unsigned int socket_id,
		  const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
rte_zmalloc("ethdev TX queue", 468 sizeof(*txq) + 469 sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, 470 RTE_CACHE_LINE_SIZE); 471 if (txq == NULL) 472 return -ENOMEM; 473 FOREACH_SUBDEV(sdev, i, dev) 474 rte_atomic64_init(&txq->refcnt[i]); 475 txq->qid = tx_queue_id; 476 txq->socket_id = socket_id; 477 txq->info.conf = *tx_conf; 478 txq->info.nb_desc = nb_tx_desc; 479 txq->priv = PRIV(dev); 480 dev->data->tx_queues[tx_queue_id] = txq; 481 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 482 ret = rte_eth_tx_queue_setup(PORT_ID(sdev), 483 tx_queue_id, 484 nb_tx_desc, socket_id, 485 tx_conf); 486 if (ret) { 487 ERROR("TX queue setup failed for sub_device %d", i); 488 goto free_txq; 489 } 490 } 491 return 0; 492 free_txq: 493 fs_tx_queue_release(txq); 494 return ret; 495 } 496 497 static void 498 fs_dev_free_queues(struct rte_eth_dev *dev) 499 { 500 uint16_t i; 501 502 for (i = 0; i < dev->data->nb_rx_queues; i++) { 503 fs_rx_queue_release(dev->data->rx_queues[i]); 504 dev->data->rx_queues[i] = NULL; 505 } 506 dev->data->nb_rx_queues = 0; 507 for (i = 0; i < dev->data->nb_tx_queues; i++) { 508 fs_tx_queue_release(dev->data->tx_queues[i]); 509 dev->data->tx_queues[i] = NULL; 510 } 511 dev->data->nb_tx_queues = 0; 512 } 513 514 static void 515 fs_promiscuous_enable(struct rte_eth_dev *dev) 516 { 517 struct sub_device *sdev; 518 uint8_t i; 519 520 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 521 rte_eth_promiscuous_enable(PORT_ID(sdev)); 522 } 523 524 static void 525 fs_promiscuous_disable(struct rte_eth_dev *dev) 526 { 527 struct sub_device *sdev; 528 uint8_t i; 529 530 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 531 rte_eth_promiscuous_disable(PORT_ID(sdev)); 532 } 533 534 static void 535 fs_allmulticast_enable(struct rte_eth_dev *dev) 536 { 537 struct sub_device *sdev; 538 uint8_t i; 539 540 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 541 rte_eth_allmulticast_enable(PORT_ID(sdev)); 542 } 543 544 static void 545 fs_allmulticast_disable(struct rte_eth_dev *dev) 546 { 547 struct sub_device *sdev; 548 uint8_t i; 549 550 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 551 rte_eth_allmulticast_disable(PORT_ID(sdev)); 552 } 553 554 static int 555 fs_link_update(struct rte_eth_dev *dev, 556 int wait_to_complete) 557 { 558 struct sub_device *sdev; 559 uint8_t i; 560 int ret; 561 562 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 563 DEBUG("Calling link_update on sub_device %d", i); 564 ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete); 565 if (ret && ret != -1) { 566 ERROR("Link update failed for sub_device %d with error %d", 567 i, ret); 568 return ret; 569 } 570 } 571 if (TX_SUBDEV(dev)) { 572 struct rte_eth_link *l1; 573 struct rte_eth_link *l2; 574 575 l1 = &dev->data->dev_link; 576 l2 = Ð(TX_SUBDEV(dev))->data->dev_link; 577 if (memcmp(l1, l2, sizeof(*l1))) { 578 *l1 = *l2; 579 return 0; 580 } 581 } 582 return -1; 583 } 584 585 static int 586 fs_stats_get(struct rte_eth_dev *dev, 587 struct rte_eth_stats *stats) 588 { 589 struct sub_device *sdev; 590 uint8_t i; 591 int ret; 592 593 rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats)); 594 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 595 ret = rte_eth_stats_get(PORT_ID(sdev), &sdev->stats_snapshot); 596 if (ret) { 597 ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d", 598 i, ret); 599 return ret; 600 } 601 failsafe_stats_increment(stats, &sdev->stats_snapshot); 602 } 603 return 0; 604 } 605 606 static void 607 fs_stats_reset(struct rte_eth_dev *dev) 608 { 609 struct sub_device *sdev; 
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint32_t rx_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					     &PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not permit a clean AND of all ptypes;
	 * it is also incomplete by design and we do not really care
	 * to have the best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if (ret) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if (ret) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if (ret) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
					    &dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev,
	       enum rte_filter_type type,
	       enum rte_filter_op op,
	       void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if (ret) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};