1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2017 6WIND S.A. 3 * Copyright 2017 Mellanox Technologies, Ltd 4 */ 5 6 #include <stdbool.h> 7 #include <stdint.h> 8 #include <unistd.h> 9 10 #include <rte_debug.h> 11 #include <rte_atomic.h> 12 #include <rte_ethdev_driver.h> 13 #include <rte_malloc.h> 14 #include <rte_flow.h> 15 #include <rte_cycles.h> 16 #include <rte_ethdev.h> 17 18 #include "failsafe_private.h" 19 20 static struct rte_eth_dev_info default_infos = { 21 /* Max possible number of elements */ 22 .max_rx_pktlen = UINT32_MAX, 23 .max_rx_queues = RTE_MAX_QUEUES_PER_PORT, 24 .max_tx_queues = RTE_MAX_QUEUES_PER_PORT, 25 .max_mac_addrs = FAILSAFE_MAX_ETHADDR, 26 .max_hash_mac_addrs = UINT32_MAX, 27 .max_vfs = UINT16_MAX, 28 .max_vmdq_pools = UINT16_MAX, 29 .rx_desc_lim = { 30 .nb_max = UINT16_MAX, 31 .nb_min = 0, 32 .nb_align = 1, 33 .nb_seg_max = UINT16_MAX, 34 .nb_mtu_seg_max = UINT16_MAX, 35 }, 36 .tx_desc_lim = { 37 .nb_max = UINT16_MAX, 38 .nb_min = 0, 39 .nb_align = 1, 40 .nb_seg_max = UINT16_MAX, 41 .nb_mtu_seg_max = UINT16_MAX, 42 }, 43 /* 44 * Set of capabilities that can be verified upon 45 * configuring a sub-device. 46 */ 47 .rx_offload_capa = 48 DEV_RX_OFFLOAD_VLAN_STRIP | 49 DEV_RX_OFFLOAD_IPV4_CKSUM | 50 DEV_RX_OFFLOAD_UDP_CKSUM | 51 DEV_RX_OFFLOAD_TCP_CKSUM | 52 DEV_RX_OFFLOAD_TCP_LRO | 53 DEV_RX_OFFLOAD_QINQ_STRIP | 54 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 55 DEV_RX_OFFLOAD_MACSEC_STRIP | 56 DEV_RX_OFFLOAD_HEADER_SPLIT | 57 DEV_RX_OFFLOAD_VLAN_FILTER | 58 DEV_RX_OFFLOAD_VLAN_EXTEND | 59 DEV_RX_OFFLOAD_JUMBO_FRAME | 60 DEV_RX_OFFLOAD_SCATTER | 61 DEV_RX_OFFLOAD_TIMESTAMP | 62 DEV_RX_OFFLOAD_SECURITY, 63 .rx_queue_offload_capa = 64 DEV_RX_OFFLOAD_VLAN_STRIP | 65 DEV_RX_OFFLOAD_IPV4_CKSUM | 66 DEV_RX_OFFLOAD_UDP_CKSUM | 67 DEV_RX_OFFLOAD_TCP_CKSUM | 68 DEV_RX_OFFLOAD_TCP_LRO | 69 DEV_RX_OFFLOAD_QINQ_STRIP | 70 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 71 DEV_RX_OFFLOAD_MACSEC_STRIP | 72 DEV_RX_OFFLOAD_HEADER_SPLIT | 73 DEV_RX_OFFLOAD_VLAN_FILTER | 74 DEV_RX_OFFLOAD_VLAN_EXTEND | 75 DEV_RX_OFFLOAD_JUMBO_FRAME | 76 DEV_RX_OFFLOAD_SCATTER | 77 DEV_RX_OFFLOAD_TIMESTAMP | 78 DEV_RX_OFFLOAD_SECURITY, 79 .tx_offload_capa = 80 DEV_TX_OFFLOAD_MULTI_SEGS | 81 DEV_TX_OFFLOAD_MBUF_FAST_FREE | 82 DEV_TX_OFFLOAD_IPV4_CKSUM | 83 DEV_TX_OFFLOAD_UDP_CKSUM | 84 DEV_TX_OFFLOAD_TCP_CKSUM | 85 DEV_TX_OFFLOAD_TCP_TSO, 86 .flow_type_rss_offloads = 87 ETH_RSS_IP | 88 ETH_RSS_UDP | 89 ETH_RSS_TCP, 90 .dev_capa = 91 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 92 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, 93 }; 94 95 static int 96 fs_dev_configure(struct rte_eth_dev *dev) 97 { 98 struct sub_device *sdev; 99 uint8_t i; 100 int ret; 101 102 fs_lock(dev, 0); 103 FOREACH_SUBDEV(sdev, i, dev) { 104 int rmv_interrupt = 0; 105 int lsc_interrupt = 0; 106 int lsc_enabled; 107 108 if (sdev->state != DEV_PROBED && 109 !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE)) 110 continue; 111 112 rmv_interrupt = ETH(sdev)->data->dev_flags & 113 RTE_ETH_DEV_INTR_RMV; 114 if (rmv_interrupt) { 115 DEBUG("Enabling RMV interrupts for sub_device %d", i); 116 dev->data->dev_conf.intr_conf.rmv = 1; 117 } else { 118 DEBUG("sub_device %d does not support RMV event", i); 119 } 120 lsc_enabled = dev->data->dev_conf.intr_conf.lsc; 121 lsc_interrupt = lsc_enabled && 122 (ETH(sdev)->data->dev_flags & 123 RTE_ETH_DEV_INTR_LSC); 124 if (lsc_interrupt) { 125 DEBUG("Enabling LSC interrupts for sub_device %d", i); 126 dev->data->dev_conf.intr_conf.lsc = 1; 127 } else if (lsc_enabled && !lsc_interrupt) { 128 DEBUG("Disabling LSC interrupts for sub_device %d", i); 129 dev->data->dev_conf.intr_conf.lsc = 0; 130 } 131 DEBUG("Configuring sub-device %d", i); 132 ret = rte_eth_dev_configure(PORT_ID(sdev), 133 dev->data->nb_rx_queues, 134 dev->data->nb_tx_queues, 135 &dev->data->dev_conf); 136 if (ret) { 137 if (!fs_err(sdev, ret)) 138 continue; 139 ERROR("Could not configure sub_device %d", i); 140 fs_unlock(dev, 0); 141 return ret; 142 } 143 if (rmv_interrupt && sdev->rmv_callback == 0) { 144 ret = rte_eth_dev_callback_register(PORT_ID(sdev), 145 RTE_ETH_EVENT_INTR_RMV, 146 failsafe_eth_rmv_event_callback, 147 sdev); 148 if (ret) 149 WARN("Failed to register RMV callback for sub_device %d", 150 SUB_ID(sdev)); 151 else 152 sdev->rmv_callback = 1; 153 } 154 dev->data->dev_conf.intr_conf.rmv = 0; 155 if (lsc_interrupt && sdev->lsc_callback == 0) { 156 ret = rte_eth_dev_callback_register(PORT_ID(sdev), 157 RTE_ETH_EVENT_INTR_LSC, 158 failsafe_eth_lsc_event_callback, 159 dev); 160 if (ret) 161 WARN("Failed to register LSC callback for sub_device %d", 162 SUB_ID(sdev)); 163 else 164 sdev->lsc_callback = 1; 165 } 166 dev->data->dev_conf.intr_conf.lsc = lsc_enabled; 167 sdev->state = DEV_ACTIVE; 168 } 169 if (PRIV(dev)->state < DEV_ACTIVE) 170 PRIV(dev)->state = DEV_ACTIVE; 171 fs_unlock(dev, 0); 172 return 0; 173 } 174 175 static void 176 fs_set_queues_state_start(struct rte_eth_dev *dev) 177 { 178 struct rxq *rxq; 179 struct txq *txq; 180 uint16_t i; 181 182 for (i = 0; i < dev->data->nb_rx_queues; i++) { 183 rxq = dev->data->rx_queues[i]; 184 if (rxq != NULL && !rxq->info.conf.rx_deferred_start) 185 dev->data->rx_queue_state[i] = 186 RTE_ETH_QUEUE_STATE_STARTED; 187 } 188 for (i = 0; i < dev->data->nb_tx_queues; i++) { 189 txq = dev->data->tx_queues[i]; 190 if (txq != NULL && !txq->info.conf.tx_deferred_start) 191 dev->data->tx_queue_state[i] = 192 RTE_ETH_QUEUE_STATE_STARTED; 193 } 194 } 195 196 static int 197 fs_dev_start(struct rte_eth_dev *dev) 198 { 199 struct sub_device *sdev; 200 uint8_t i; 201 int ret; 202 203 fs_lock(dev, 0); 204 ret = failsafe_rx_intr_install(dev); 205 if (ret) { 206 fs_unlock(dev, 0); 207 return ret; 208 } 209 FOREACH_SUBDEV(sdev, i, dev) { 210 if (sdev->state != DEV_ACTIVE) 211 continue; 212 DEBUG("Starting sub_device %d", i); 213 ret = rte_eth_dev_start(PORT_ID(sdev)); 214 if (ret) { 215 if (!fs_err(sdev, ret)) 216 continue; 217 fs_unlock(dev, 0); 218 return ret; 219 } 220 ret = failsafe_rx_intr_install_subdevice(sdev); 221 if (ret) { 222 if (!fs_err(sdev, ret)) 223 continue; 224 rte_eth_dev_stop(PORT_ID(sdev)); 225 fs_unlock(dev, 0); 226 return ret; 227 } 228 sdev->state = DEV_STARTED; 229 } 230 if (PRIV(dev)->state < DEV_STARTED) { 231 PRIV(dev)->state = DEV_STARTED; 232 fs_set_queues_state_start(dev); 233 } 234 fs_switch_dev(dev, NULL); 235 fs_unlock(dev, 0); 236 return 0; 237 } 238 239 static void 240 fs_set_queues_state_stop(struct rte_eth_dev *dev) 241 { 242 uint16_t i; 243 244 for (i = 0; i < dev->data->nb_rx_queues; i++) 245 if (dev->data->rx_queues[i] != NULL) 246 dev->data->rx_queue_state[i] = 247 RTE_ETH_QUEUE_STATE_STOPPED; 248 for (i = 0; i < dev->data->nb_tx_queues; i++) 249 if (dev->data->tx_queues[i] != NULL) 250 dev->data->tx_queue_state[i] = 251 RTE_ETH_QUEUE_STATE_STOPPED; 252 } 253 254 static void 255 fs_dev_stop(struct rte_eth_dev *dev) 256 { 257 struct sub_device *sdev; 258 uint8_t i; 259 260 fs_lock(dev, 0); 261 PRIV(dev)->state = DEV_STARTED - 1; 262 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) { 263 rte_eth_dev_stop(PORT_ID(sdev)); 264 failsafe_rx_intr_uninstall_subdevice(sdev); 265 sdev->state = DEV_STARTED - 1; 266 } 267 failsafe_rx_intr_uninstall(dev); 268 fs_set_queues_state_stop(dev); 269 fs_unlock(dev, 0); 270 } 271 272 static int 273 fs_dev_set_link_up(struct rte_eth_dev *dev) 274 { 275 struct sub_device *sdev; 276 uint8_t i; 277 int ret; 278 279 fs_lock(dev, 0); 280 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 281 DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i); 282 ret = rte_eth_dev_set_link_up(PORT_ID(sdev)); 283 if ((ret = fs_err(sdev, ret))) { 284 ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d" 285 " with error %d", i, ret); 286 fs_unlock(dev, 0); 287 return ret; 288 } 289 } 290 fs_unlock(dev, 0); 291 return 0; 292 } 293 294 static int 295 fs_dev_set_link_down(struct rte_eth_dev *dev) 296 { 297 struct sub_device *sdev; 298 uint8_t i; 299 int ret; 300 301 fs_lock(dev, 0); 302 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 303 DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i); 304 ret = rte_eth_dev_set_link_down(PORT_ID(sdev)); 305 if ((ret = fs_err(sdev, ret))) { 306 ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d" 307 " with error %d", i, ret); 308 fs_unlock(dev, 0); 309 return ret; 310 } 311 } 312 fs_unlock(dev, 0); 313 return 0; 314 } 315 316 static void fs_dev_free_queues(struct rte_eth_dev *dev); 317 static void 318 fs_dev_close(struct rte_eth_dev *dev) 319 { 320 struct sub_device *sdev; 321 uint8_t i; 322 323 fs_lock(dev, 0); 324 failsafe_hotplug_alarm_cancel(dev); 325 if (PRIV(dev)->state == DEV_STARTED) 326 dev->dev_ops->dev_stop(dev); 327 PRIV(dev)->state = DEV_ACTIVE - 1; 328 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 329 DEBUG("Closing sub_device %d", i); 330 failsafe_eth_dev_unregister_callbacks(sdev); 331 rte_eth_dev_close(PORT_ID(sdev)); 332 sdev->state = DEV_ACTIVE - 1; 333 } 334 fs_dev_free_queues(dev); 335 fs_unlock(dev, 0); 336 } 337 338 static int 339 fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 340 { 341 struct sub_device *sdev; 342 uint8_t i; 343 int ret; 344 int err = 0; 345 bool failure = true; 346 347 fs_lock(dev, 0); 348 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 349 uint16_t port_id = ETH(sdev)->data->port_id; 350 351 ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id); 352 ret = fs_err(sdev, ret); 353 if (ret) { 354 ERROR("Rx queue stop failed for subdevice %d", i); 355 err = ret; 356 } else { 357 failure = false; 358 } 359 } 360 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 361 fs_unlock(dev, 0); 362 /* Return 0 in case of at least one successful queue stop */ 363 return (failure) ? err : 0; 364 } 365 366 static int 367 fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 368 { 369 struct sub_device *sdev; 370 uint8_t i; 371 int ret; 372 373 fs_lock(dev, 0); 374 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 375 uint16_t port_id = ETH(sdev)->data->port_id; 376 377 ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id); 378 ret = fs_err(sdev, ret); 379 if (ret) { 380 ERROR("Rx queue start failed for subdevice %d", i); 381 fs_rx_queue_stop(dev, rx_queue_id); 382 fs_unlock(dev, 0); 383 return ret; 384 } 385 } 386 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 387 fs_unlock(dev, 0); 388 return 0; 389 } 390 391 static int 392 fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 393 { 394 struct sub_device *sdev; 395 uint8_t i; 396 int ret; 397 int err = 0; 398 bool failure = true; 399 400 fs_lock(dev, 0); 401 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 402 uint16_t port_id = ETH(sdev)->data->port_id; 403 404 ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id); 405 ret = fs_err(sdev, ret); 406 if (ret) { 407 ERROR("Tx queue stop failed for subdevice %d", i); 408 err = ret; 409 } else { 410 failure = false; 411 } 412 } 413 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 414 fs_unlock(dev, 0); 415 /* Return 0 in case of at least one successful queue stop */ 416 return (failure) ? err : 0; 417 } 418 419 static int 420 fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 421 { 422 struct sub_device *sdev; 423 uint8_t i; 424 int ret; 425 426 fs_lock(dev, 0); 427 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 428 uint16_t port_id = ETH(sdev)->data->port_id; 429 430 ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id); 431 ret = fs_err(sdev, ret); 432 if (ret) { 433 ERROR("Tx queue start failed for subdevice %d", i); 434 fs_tx_queue_stop(dev, tx_queue_id); 435 fs_unlock(dev, 0); 436 return ret; 437 } 438 } 439 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 440 fs_unlock(dev, 0); 441 return 0; 442 } 443 444 static void 445 fs_rx_queue_release(void *queue) 446 { 447 struct rte_eth_dev *dev; 448 struct sub_device *sdev; 449 uint8_t i; 450 struct rxq *rxq; 451 452 if (queue == NULL) 453 return; 454 rxq = queue; 455 dev = rxq->priv->dev; 456 fs_lock(dev, 0); 457 if (rxq->event_fd > 0) 458 close(rxq->event_fd); 459 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 460 if (ETH(sdev)->data->rx_queues != NULL && 461 ETH(sdev)->data->rx_queues[rxq->qid] != NULL) { 462 SUBOPS(sdev, rx_queue_release) 463 (ETH(sdev)->data->rx_queues[rxq->qid]); 464 } 465 } 466 dev->data->rx_queues[rxq->qid] = NULL; 467 rte_free(rxq); 468 fs_unlock(dev, 0); 469 } 470 471 static int 472 fs_rx_queue_setup(struct rte_eth_dev *dev, 473 uint16_t rx_queue_id, 474 uint16_t nb_rx_desc, 475 unsigned int socket_id, 476 const struct rte_eth_rxconf *rx_conf, 477 struct rte_mempool *mb_pool) 478 { 479 /* 480 * FIXME: Add a proper interface in rte_eal_interrupts for 481 * allocating eventfd as an interrupt vector. 482 * For the time being, fake as if we are using MSIX interrupts, 483 * this will cause rte_intr_efd_enable to allocate an eventfd for us. 484 */ 485 struct rte_intr_handle intr_handle = { 486 .type = RTE_INTR_HANDLE_VFIO_MSIX, 487 .efds = { -1, }, 488 }; 489 struct sub_device *sdev; 490 struct rxq *rxq; 491 uint8_t i; 492 int ret; 493 494 fs_lock(dev, 0); 495 if (rx_conf->rx_deferred_start) { 496 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { 497 if (SUBOPS(sdev, rx_queue_start) == NULL) { 498 ERROR("Rx queue deferred start is not " 499 "supported for subdevice %d", i); 500 fs_unlock(dev, 0); 501 return -EINVAL; 502 } 503 } 504 } 505 rxq = dev->data->rx_queues[rx_queue_id]; 506 if (rxq != NULL) { 507 fs_rx_queue_release(rxq); 508 dev->data->rx_queues[rx_queue_id] = NULL; 509 } 510 rxq = rte_zmalloc(NULL, 511 sizeof(*rxq) + 512 sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, 513 RTE_CACHE_LINE_SIZE); 514 if (rxq == NULL) { 515 fs_unlock(dev, 0); 516 return -ENOMEM; 517 } 518 FOREACH_SUBDEV(sdev, i, dev) 519 rte_atomic64_init(&rxq->refcnt[i]); 520 rxq->qid = rx_queue_id; 521 rxq->socket_id = socket_id; 522 rxq->info.mp = mb_pool; 523 rxq->info.conf = *rx_conf; 524 rxq->info.nb_desc = nb_rx_desc; 525 rxq->priv = PRIV(dev); 526 rxq->sdev = PRIV(dev)->subs; 527 ret = rte_intr_efd_enable(&intr_handle, 1); 528 if (ret < 0) { 529 fs_unlock(dev, 0); 530 return ret; 531 } 532 rxq->event_fd = intr_handle.efds[0]; 533 dev->data->rx_queues[rx_queue_id] = rxq; 534 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 535 ret = rte_eth_rx_queue_setup(PORT_ID(sdev), 536 rx_queue_id, 537 nb_rx_desc, socket_id, 538 rx_conf, mb_pool); 539 if ((ret = fs_err(sdev, ret))) { 540 ERROR("RX queue setup failed for sub_device %d", i); 541 goto free_rxq; 542 } 543 } 544 fs_unlock(dev, 0); 545 return 0; 546 free_rxq: 547 fs_rx_queue_release(rxq); 548 fs_unlock(dev, 0); 549 return ret; 550 } 551 552 static int 553 fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx) 554 { 555 struct rxq *rxq; 556 struct sub_device *sdev; 557 uint8_t i; 558 int ret; 559 int rc = 0; 560 561 fs_lock(dev, 0); 562 if (idx >= dev->data->nb_rx_queues) { 563 rc = -EINVAL; 564 goto unlock; 565 } 566 rxq = dev->data->rx_queues[idx]; 567 if (rxq == NULL || rxq->event_fd <= 0) { 568 rc = -EINVAL; 569 goto unlock; 570 } 571 /* Fail if proxy service is nor running. */ 572 if (PRIV(dev)->rxp.sstate != SS_RUNNING) { 573 ERROR("failsafe interrupt services are not running"); 574 rc = -EAGAIN; 575 goto unlock; 576 } 577 rxq->enable_events = 1; 578 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 579 ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx); 580 ret = fs_err(sdev, ret); 581 if (ret) 582 rc = ret; 583 } 584 unlock: 585 fs_unlock(dev, 0); 586 if (rc) 587 rte_errno = -rc; 588 return rc; 589 } 590 591 static int 592 fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx) 593 { 594 struct rxq *rxq; 595 struct sub_device *sdev; 596 uint64_t u64; 597 uint8_t i; 598 int rc = 0; 599 int ret; 600 601 fs_lock(dev, 0); 602 if (idx >= dev->data->nb_rx_queues) { 603 rc = -EINVAL; 604 goto unlock; 605 } 606 rxq = dev->data->rx_queues[idx]; 607 if (rxq == NULL || rxq->event_fd <= 0) { 608 rc = -EINVAL; 609 goto unlock; 610 } 611 rxq->enable_events = 0; 612 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 613 ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx); 614 ret = fs_err(sdev, ret); 615 if (ret) 616 rc = ret; 617 } 618 /* Clear pending events */ 619 while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0) 620 ; 621 unlock: 622 fs_unlock(dev, 0); 623 if (rc) 624 rte_errno = -rc; 625 return rc; 626 } 627 628 static void 629 fs_tx_queue_release(void *queue) 630 { 631 struct rte_eth_dev *dev; 632 struct sub_device *sdev; 633 uint8_t i; 634 struct txq *txq; 635 636 if (queue == NULL) 637 return; 638 txq = queue; 639 dev = txq->priv->dev; 640 fs_lock(dev, 0); 641 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 642 if (ETH(sdev)->data->tx_queues != NULL && 643 ETH(sdev)->data->tx_queues[txq->qid] != NULL) { 644 SUBOPS(sdev, tx_queue_release) 645 (ETH(sdev)->data->tx_queues[txq->qid]); 646 } 647 } 648 dev->data->tx_queues[txq->qid] = NULL; 649 rte_free(txq); 650 fs_unlock(dev, 0); 651 } 652 653 static int 654 fs_tx_queue_setup(struct rte_eth_dev *dev, 655 uint16_t tx_queue_id, 656 uint16_t nb_tx_desc, 657 unsigned int socket_id, 658 const struct rte_eth_txconf *tx_conf) 659 { 660 struct sub_device *sdev; 661 struct txq *txq; 662 uint8_t i; 663 int ret; 664 665 fs_lock(dev, 0); 666 if (tx_conf->tx_deferred_start) { 667 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { 668 if (SUBOPS(sdev, tx_queue_start) == NULL) { 669 ERROR("Tx queue deferred start is not " 670 "supported for subdevice %d", i); 671 fs_unlock(dev, 0); 672 return -EINVAL; 673 } 674 } 675 } 676 txq = dev->data->tx_queues[tx_queue_id]; 677 if (txq != NULL) { 678 fs_tx_queue_release(txq); 679 dev->data->tx_queues[tx_queue_id] = NULL; 680 } 681 txq = rte_zmalloc("ethdev TX queue", 682 sizeof(*txq) + 683 sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, 684 RTE_CACHE_LINE_SIZE); 685 if (txq == NULL) { 686 fs_unlock(dev, 0); 687 return -ENOMEM; 688 } 689 FOREACH_SUBDEV(sdev, i, dev) 690 rte_atomic64_init(&txq->refcnt[i]); 691 txq->qid = tx_queue_id; 692 txq->socket_id = socket_id; 693 txq->info.conf = *tx_conf; 694 txq->info.nb_desc = nb_tx_desc; 695 txq->priv = PRIV(dev); 696 dev->data->tx_queues[tx_queue_id] = txq; 697 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 698 ret = rte_eth_tx_queue_setup(PORT_ID(sdev), 699 tx_queue_id, 700 nb_tx_desc, socket_id, 701 tx_conf); 702 if ((ret = fs_err(sdev, ret))) { 703 ERROR("TX queue setup failed for sub_device %d", i); 704 goto free_txq; 705 } 706 } 707 fs_unlock(dev, 0); 708 return 0; 709 free_txq: 710 fs_tx_queue_release(txq); 711 fs_unlock(dev, 0); 712 return ret; 713 } 714 715 static void 716 fs_dev_free_queues(struct rte_eth_dev *dev) 717 { 718 uint16_t i; 719 720 for (i = 0; i < dev->data->nb_rx_queues; i++) { 721 fs_rx_queue_release(dev->data->rx_queues[i]); 722 dev->data->rx_queues[i] = NULL; 723 } 724 dev->data->nb_rx_queues = 0; 725 for (i = 0; i < dev->data->nb_tx_queues; i++) { 726 fs_tx_queue_release(dev->data->tx_queues[i]); 727 dev->data->tx_queues[i] = NULL; 728 } 729 dev->data->nb_tx_queues = 0; 730 } 731 732 static void 733 fs_promiscuous_enable(struct rte_eth_dev *dev) 734 { 735 struct sub_device *sdev; 736 uint8_t i; 737 738 fs_lock(dev, 0); 739 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 740 rte_eth_promiscuous_enable(PORT_ID(sdev)); 741 fs_unlock(dev, 0); 742 } 743 744 static void 745 fs_promiscuous_disable(struct rte_eth_dev *dev) 746 { 747 struct sub_device *sdev; 748 uint8_t i; 749 750 fs_lock(dev, 0); 751 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 752 rte_eth_promiscuous_disable(PORT_ID(sdev)); 753 fs_unlock(dev, 0); 754 } 755 756 static void 757 fs_allmulticast_enable(struct rte_eth_dev *dev) 758 { 759 struct sub_device *sdev; 760 uint8_t i; 761 762 fs_lock(dev, 0); 763 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 764 rte_eth_allmulticast_enable(PORT_ID(sdev)); 765 fs_unlock(dev, 0); 766 } 767 768 static void 769 fs_allmulticast_disable(struct rte_eth_dev *dev) 770 { 771 struct sub_device *sdev; 772 uint8_t i; 773 774 fs_lock(dev, 0); 775 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 776 rte_eth_allmulticast_disable(PORT_ID(sdev)); 777 fs_unlock(dev, 0); 778 } 779 780 static int 781 fs_link_update(struct rte_eth_dev *dev, 782 int wait_to_complete) 783 { 784 struct sub_device *sdev; 785 uint8_t i; 786 int ret; 787 788 fs_lock(dev, 0); 789 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 790 DEBUG("Calling link_update on sub_device %d", i); 791 ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete); 792 if (ret && ret != -1 && sdev->remove == 0 && 793 rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) { 794 ERROR("Link update failed for sub_device %d with error %d", 795 i, ret); 796 fs_unlock(dev, 0); 797 return ret; 798 } 799 } 800 if (TX_SUBDEV(dev)) { 801 struct rte_eth_link *l1; 802 struct rte_eth_link *l2; 803 804 l1 = &dev->data->dev_link; 805 l2 = Ð(TX_SUBDEV(dev))->data->dev_link; 806 if (memcmp(l1, l2, sizeof(*l1))) { 807 *l1 = *l2; 808 fs_unlock(dev, 0); 809 return 0; 810 } 811 } 812 fs_unlock(dev, 0); 813 return -1; 814 } 815 816 static int 817 fs_stats_get(struct rte_eth_dev *dev, 818 struct rte_eth_stats *stats) 819 { 820 struct rte_eth_stats backup; 821 struct sub_device *sdev; 822 uint8_t i; 823 int ret; 824 825 fs_lock(dev, 0); 826 rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats)); 827 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 828 struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats; 829 uint64_t *timestamp = &sdev->stats_snapshot.timestamp; 830 831 rte_memcpy(&backup, snapshot, sizeof(backup)); 832 ret = rte_eth_stats_get(PORT_ID(sdev), snapshot); 833 if (ret) { 834 if (!fs_err(sdev, ret)) { 835 rte_memcpy(snapshot, &backup, sizeof(backup)); 836 goto inc; 837 } 838 ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d", 839 i, ret); 840 *timestamp = 0; 841 fs_unlock(dev, 0); 842 return ret; 843 } 844 *timestamp = rte_rdtsc(); 845 inc: 846 failsafe_stats_increment(stats, snapshot); 847 } 848 fs_unlock(dev, 0); 849 return 0; 850 } 851 852 static void 853 fs_stats_reset(struct rte_eth_dev *dev) 854 { 855 struct sub_device *sdev; 856 uint8_t i; 857 858 fs_lock(dev, 0); 859 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 860 rte_eth_stats_reset(PORT_ID(sdev)); 861 memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats)); 862 } 863 memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats)); 864 fs_unlock(dev, 0); 865 } 866 867 /** 868 * Fail-safe dev_infos_get rules: 869 * 870 * No sub_device: 871 * Numerables: 872 * Use the maximum possible values for any field, so as not 873 * to impede any further configuration effort. 874 * Capabilities: 875 * Limits capabilities to those that are understood by the 876 * fail-safe PMD. This understanding stems from the fail-safe 877 * being capable of verifying that the related capability is 878 * expressed within the device configuration (struct rte_eth_conf). 879 * 880 * At least one probed sub_device: 881 * Numerables: 882 * Uses values from the active probed sub_device 883 * The rationale here is that if any sub_device is less capable 884 * (for example concerning the number of queues) than the active 885 * sub_device, then its subsequent configuration will fail. 886 * It is impossible to foresee this failure when the failing sub_device 887 * is supposed to be plugged-in later on, so the configuration process 888 * is the single point of failure and error reporting. 889 * Capabilities: 890 * Uses a logical AND of RX capabilities among 891 * all sub_devices and the default capabilities. 892 * Uses a logical AND of TX capabilities among 893 * the active probed sub_device and the default capabilities. 894 * Uses a logical AND of device capabilities among 895 * all sub_devices and the default capabilities. 896 * 897 */ 898 static void 899 fs_dev_infos_get(struct rte_eth_dev *dev, 900 struct rte_eth_dev_info *infos) 901 { 902 struct sub_device *sdev; 903 uint8_t i; 904 905 sdev = TX_SUBDEV(dev); 906 if (sdev == NULL) { 907 DEBUG("No probed device, using default infos"); 908 rte_memcpy(&PRIV(dev)->infos, &default_infos, 909 sizeof(default_infos)); 910 } else { 911 uint64_t rx_offload_capa; 912 uint64_t rxq_offload_capa; 913 uint64_t rss_hf_offload_capa; 914 uint64_t dev_capa; 915 916 rx_offload_capa = default_infos.rx_offload_capa; 917 rxq_offload_capa = default_infos.rx_queue_offload_capa; 918 rss_hf_offload_capa = default_infos.flow_type_rss_offloads; 919 dev_capa = default_infos.dev_capa; 920 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { 921 rte_eth_dev_info_get(PORT_ID(sdev), 922 &PRIV(dev)->infos); 923 rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa; 924 rxq_offload_capa &= 925 PRIV(dev)->infos.rx_queue_offload_capa; 926 rss_hf_offload_capa &= 927 PRIV(dev)->infos.flow_type_rss_offloads; 928 dev_capa &= PRIV(dev)->infos.dev_capa; 929 } 930 sdev = TX_SUBDEV(dev); 931 rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos); 932 PRIV(dev)->infos.rx_offload_capa = rx_offload_capa; 933 PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa; 934 PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa; 935 PRIV(dev)->infos.dev_capa = dev_capa; 936 PRIV(dev)->infos.tx_offload_capa &= 937 default_infos.tx_offload_capa; 938 PRIV(dev)->infos.tx_queue_offload_capa &= 939 default_infos.tx_queue_offload_capa; 940 } 941 rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos)); 942 } 943 944 static const uint32_t * 945 fs_dev_supported_ptypes_get(struct rte_eth_dev *dev) 946 { 947 struct sub_device *sdev; 948 struct rte_eth_dev *edev; 949 const uint32_t *ret; 950 951 fs_lock(dev, 0); 952 sdev = TX_SUBDEV(dev); 953 if (sdev == NULL) { 954 ret = NULL; 955 goto unlock; 956 } 957 edev = ETH(sdev); 958 /* ENOTSUP: counts as no supported ptypes */ 959 if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) { 960 ret = NULL; 961 goto unlock; 962 } 963 /* 964 * The API does not permit to do a clean AND of all ptypes, 965 * It is also incomplete by design and we do not really care 966 * to have a best possible value in this context. 967 * We just return the ptypes of the device of highest 968 * priority, usually the PREFERRED device. 969 */ 970 ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev); 971 unlock: 972 fs_unlock(dev, 0); 973 return ret; 974 } 975 976 static int 977 fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 978 { 979 struct sub_device *sdev; 980 uint8_t i; 981 int ret; 982 983 fs_lock(dev, 0); 984 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 985 DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i); 986 ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu); 987 if ((ret = fs_err(sdev, ret))) { 988 ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d", 989 i, ret); 990 fs_unlock(dev, 0); 991 return ret; 992 } 993 } 994 fs_unlock(dev, 0); 995 return 0; 996 } 997 998 static int 999 fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1000 { 1001 struct sub_device *sdev; 1002 uint8_t i; 1003 int ret; 1004 1005 fs_lock(dev, 0); 1006 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1007 DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i); 1008 ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on); 1009 if ((ret = fs_err(sdev, ret))) { 1010 ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d" 1011 " with error %d", i, ret); 1012 fs_unlock(dev, 0); 1013 return ret; 1014 } 1015 } 1016 fs_unlock(dev, 0); 1017 return 0; 1018 } 1019 1020 static int 1021 fs_flow_ctrl_get(struct rte_eth_dev *dev, 1022 struct rte_eth_fc_conf *fc_conf) 1023 { 1024 struct sub_device *sdev; 1025 int ret; 1026 1027 fs_lock(dev, 0); 1028 sdev = TX_SUBDEV(dev); 1029 if (sdev == NULL) { 1030 ret = 0; 1031 goto unlock; 1032 } 1033 if (SUBOPS(sdev, flow_ctrl_get) == NULL) { 1034 ret = -ENOTSUP; 1035 goto unlock; 1036 } 1037 ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf); 1038 unlock: 1039 fs_unlock(dev, 0); 1040 return ret; 1041 } 1042 1043 static int 1044 fs_flow_ctrl_set(struct rte_eth_dev *dev, 1045 struct rte_eth_fc_conf *fc_conf) 1046 { 1047 struct sub_device *sdev; 1048 uint8_t i; 1049 int ret; 1050 1051 fs_lock(dev, 0); 1052 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1053 DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i); 1054 ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf); 1055 if ((ret = fs_err(sdev, ret))) { 1056 ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d" 1057 " with error %d", i, ret); 1058 fs_unlock(dev, 0); 1059 return ret; 1060 } 1061 } 1062 fs_unlock(dev, 0); 1063 return 0; 1064 } 1065 1066 static void 1067 fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 1068 { 1069 struct sub_device *sdev; 1070 uint8_t i; 1071 1072 fs_lock(dev, 0); 1073 /* No check: already done within the rte_eth_dev_mac_addr_remove 1074 * call for the fail-safe device. 1075 */ 1076 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 1077 rte_eth_dev_mac_addr_remove(PORT_ID(sdev), 1078 &dev->data->mac_addrs[index]); 1079 PRIV(dev)->mac_addr_pool[index] = 0; 1080 fs_unlock(dev, 0); 1081 } 1082 1083 static int 1084 fs_mac_addr_add(struct rte_eth_dev *dev, 1085 struct ether_addr *mac_addr, 1086 uint32_t index, 1087 uint32_t vmdq) 1088 { 1089 struct sub_device *sdev; 1090 int ret; 1091 uint8_t i; 1092 1093 RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR); 1094 fs_lock(dev, 0); 1095 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1096 ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq); 1097 if ((ret = fs_err(sdev, ret))) { 1098 ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %" 1099 PRIu8 " with error %d", i, ret); 1100 fs_unlock(dev, 0); 1101 return ret; 1102 } 1103 } 1104 if (index >= PRIV(dev)->nb_mac_addr) { 1105 DEBUG("Growing mac_addrs array"); 1106 PRIV(dev)->nb_mac_addr = index; 1107 } 1108 PRIV(dev)->mac_addr_pool[index] = vmdq; 1109 fs_unlock(dev, 0); 1110 return 0; 1111 } 1112 1113 static int 1114 fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) 1115 { 1116 struct sub_device *sdev; 1117 uint8_t i; 1118 int ret; 1119 1120 fs_lock(dev, 0); 1121 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1122 ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr); 1123 ret = fs_err(sdev, ret); 1124 if (ret) { 1125 ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d", 1126 i, ret); 1127 fs_unlock(dev, 0); 1128 return ret; 1129 } 1130 } 1131 fs_unlock(dev, 0); 1132 1133 return 0; 1134 } 1135 1136 static int 1137 fs_set_mc_addr_list(struct rte_eth_dev *dev, 1138 struct ether_addr *mc_addr_set, uint32_t nb_mc_addr) 1139 { 1140 struct sub_device *sdev; 1141 uint8_t i; 1142 int ret; 1143 void *mcast_addrs; 1144 1145 fs_lock(dev, 0); 1146 1147 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1148 ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev), 1149 mc_addr_set, nb_mc_addr); 1150 if (ret != 0) { 1151 ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d", 1152 i, ret); 1153 goto rollback; 1154 } 1155 } 1156 1157 mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs, 1158 nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0); 1159 if (mcast_addrs == NULL && nb_mc_addr > 0) { 1160 ret = -ENOMEM; 1161 goto rollback; 1162 } 1163 rte_memcpy(mcast_addrs, mc_addr_set, 1164 nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0])); 1165 PRIV(dev)->nb_mcast_addr = nb_mc_addr; 1166 PRIV(dev)->mcast_addrs = mcast_addrs; 1167 1168 fs_unlock(dev, 0); 1169 return 0; 1170 1171 rollback: 1172 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1173 int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev), 1174 PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr); 1175 if (rc != 0) { 1176 ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d", 1177 i, rc); 1178 } 1179 } 1180 1181 fs_unlock(dev, 0); 1182 return ret; 1183 } 1184 1185 static int 1186 fs_rss_hash_update(struct rte_eth_dev *dev, 1187 struct rte_eth_rss_conf *rss_conf) 1188 { 1189 struct sub_device *sdev; 1190 uint8_t i; 1191 int ret; 1192 1193 fs_lock(dev, 0); 1194 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1195 ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf); 1196 ret = fs_err(sdev, ret); 1197 if (ret) { 1198 ERROR("Operation rte_eth_dev_rss_hash_update" 1199 " failed for sub_device %d with error %d", 1200 i, ret); 1201 fs_unlock(dev, 0); 1202 return ret; 1203 } 1204 } 1205 fs_unlock(dev, 0); 1206 1207 return 0; 1208 } 1209 1210 static int 1211 fs_filter_ctrl(struct rte_eth_dev *dev, 1212 enum rte_filter_type type, 1213 enum rte_filter_op op, 1214 void *arg) 1215 { 1216 struct sub_device *sdev; 1217 uint8_t i; 1218 int ret; 1219 1220 if (type == RTE_ETH_FILTER_GENERIC && 1221 op == RTE_ETH_FILTER_GET) { 1222 *(const void **)arg = &fs_flow_ops; 1223 return 0; 1224 } 1225 fs_lock(dev, 0); 1226 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { 1227 DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i); 1228 ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg); 1229 if ((ret = fs_err(sdev, ret))) { 1230 ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d" 1231 " with error %d", i, ret); 1232 fs_unlock(dev, 0); 1233 return ret; 1234 } 1235 } 1236 fs_unlock(dev, 0); 1237 return 0; 1238 } 1239 1240 const struct eth_dev_ops failsafe_ops = { 1241 .dev_configure = fs_dev_configure, 1242 .dev_start = fs_dev_start, 1243 .dev_stop = fs_dev_stop, 1244 .dev_set_link_down = fs_dev_set_link_down, 1245 .dev_set_link_up = fs_dev_set_link_up, 1246 .dev_close = fs_dev_close, 1247 .promiscuous_enable = fs_promiscuous_enable, 1248 .promiscuous_disable = fs_promiscuous_disable, 1249 .allmulticast_enable = fs_allmulticast_enable, 1250 .allmulticast_disable = fs_allmulticast_disable, 1251 .link_update = fs_link_update, 1252 .stats_get = fs_stats_get, 1253 .stats_reset = fs_stats_reset, 1254 .dev_infos_get = fs_dev_infos_get, 1255 .dev_supported_ptypes_get = fs_dev_supported_ptypes_get, 1256 .mtu_set = fs_mtu_set, 1257 .vlan_filter_set = fs_vlan_filter_set, 1258 .rx_queue_start = fs_rx_queue_start, 1259 .rx_queue_stop = fs_rx_queue_stop, 1260 .tx_queue_start = fs_tx_queue_start, 1261 .tx_queue_stop = fs_tx_queue_stop, 1262 .rx_queue_setup = fs_rx_queue_setup, 1263 .tx_queue_setup = fs_tx_queue_setup, 1264 .rx_queue_release = fs_rx_queue_release, 1265 .tx_queue_release = fs_tx_queue_release, 1266 .rx_queue_intr_enable = fs_rx_intr_enable, 1267 .rx_queue_intr_disable = fs_rx_intr_disable, 1268 .flow_ctrl_get = fs_flow_ctrl_get, 1269 .flow_ctrl_set = fs_flow_ctrl_set, 1270 .mac_addr_remove = fs_mac_addr_remove, 1271 .mac_addr_add = fs_mac_addr_add, 1272 .mac_addr_set = fs_mac_addr_set, 1273 .set_mc_addr_list = fs_set_mc_addr_list, 1274 .rss_hash_update = fs_rss_hash_update, 1275 .filter_ctrl = fs_filter_ctrl, 1276 }; 1277