/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					    dev->data->nb_rx_queues,
					    dev->data->nb_tx_queues,
					    &dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		/*
		 * intr_conf.rmv was only raised for the sub-device
		 * configuration above; the fail-safe consumes removal
		 * events through its own callback, so clear it again.
		 */
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		/* Restore the LSC setting requested by the application. */
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}
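
/*
 * The fail-safe exposes its own queue objects to the application, so the
 * per-queue started/stopped state is tracked here rather than taken from
 * any sub-device; queues configured for deferred start are left stopped.
 */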
static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* Step back one state: DEV_STARTED - 1 is DEV_ACTIVE. */
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	/* Step back below DEV_ACTIVE: DEV_ACTIVE - 1 is DEV_PROBED. */
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
	fs_unlock(dev, 0);
}
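
/*
 * Queue start/stop requests are fanned out to every active sub-device:
 * a stop is reported successful if at least one sub-device stopped the
 * queue, while a start must succeed on all of them and is rolled back
 * with a stop otherwise.
 */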
static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = &rte_eth_devices[rxq->priv->data->port_id];
	fs_lock(dev, 0);
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}
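
/*
 * A fail-safe Rx queue shadows one Rx queue on each sub-device. The
 * eventfd allocated below is the descriptor handed to the application
 * for Rx interrupts; the interrupt proxy service forwards sub-device
 * events to it.
 */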
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = &rte_eth_devices[txq->priv->data->port_id];
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}
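
/*
 * Statistics are reported as stats_accumulator plus a fresh snapshot from
 * each active sub-device. If reading a sub-device fails, its last good
 * snapshot is restored so the aggregated counters never go backwards.
 */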
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);
}

static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub devices
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	/* Use maximum upper bounds by default */
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);

		fs_dev_merge_info(infos, &sub_info);
	}
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit to do a clean AND of all ptypes,
	 * It is also incomplete by design and we do not really care
	 * to have a best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}
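
/*
 * MAC addresses added here are also recorded in mac_addr_pool/nb_mac_addr
 * so that they can be replayed onto a sub-device plugged in later.
 */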
static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	return -ENOTSUP;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};