/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

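/*
 * Start all active sub-devices after installing the fail-safe Rx
 * interrupt handling. On the first transition to DEV_STARTED, queues
 * that are not deferred are marked as started.
 */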
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
	fs_unlock(dev, 0);
}

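/*
 * Stop the given Rx queue on every active sub-device.
 * Return 0 if at least one sub-device stopped the queue successfully,
 * otherwise the last recorded error.
 */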
static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = &rte_eth_devices[rxq->priv->data->port_id];
	fs_lock(dev, 0);
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

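/*
 * Create an Rx queue on the fail-safe device and replicate it on every
 * active sub-device. Deferred start is refused unless all probed
 * sub-devices implement rx_queue_start.
 */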
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts;
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	fs_unlock(dev, 0);
	return ret;
}

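/*
 * Enable Rx interrupt events for the given queue on every active
 * sub-device. Fails with -EAGAIN when the fail-safe interrupt proxy
 * service is not running.
 */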
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = &rte_eth_devices[txq->priv->data->port_id];
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	fs_unlock(dev, 0);
	return ret;
}

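/* Release every Rx and Tx queue of the fail-safe device. */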
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

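/*
 * Report statistics: start from the fail-safe stats accumulator, then
 * refresh and add the snapshot of each active sub-device. If a read
 * fails because the sub-device was removed, the previous snapshot is
 * kept and still accumulated.
 */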
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);
}

static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub-devices.
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 *
 */
static int
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Use maximum upper bounds by default */
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
		ret = fs_err(sdev, ret);
		if (ret != 0)
			return ret;

		fs_dev_merge_info(infos, &sub_info);
	}

	return 0;
}

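/* Report the ptypes of the highest-priority (Tx) sub-device only. */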
static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit a clean AND of all ptypes; it is also
	 * incomplete by design, and we do not really care to have the
	 * best possible value in this context.
	 * We just return the ptypes of the highest-priority device,
	 * usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

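/*
 * Remove the MAC address at the given index from every active sub-device
 * and release its slot in the fail-safe MAC address pool.
 */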
static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

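/* Propagate an RSS hash configuration update to every active sub-device. */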
static int
fs_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
				" failed for sub_device %d with error %d",
				i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	return -ENOTSUP;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};