Lines Matching defs:dev (uses of struct rte_eth_dev *dev in DPDK's ethdev library, rte_ethdev.c)

763 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
767 if (rx_queue_id >= dev->data->nb_rx_queues) {
768 port_id = dev->data->port_id;
775 if (dev->data->rx_queues[rx_queue_id] == NULL) {
776 port_id = dev->data->port_id;
787 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
791 if (tx_queue_id >= dev->data->nb_tx_queues) {
792 port_id = dev->data->port_id;
799 if (dev->data->tx_queues[tx_queue_id] == NULL) {
800 port_id = dev->data->port_id;
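
The two validators above share one pattern: bounds-check the queue index against nb_rx_queues/nb_tx_queues, then reject queues that were never set up. A minimal application-side sketch of the same bound check through the public API; the helper name app_check_rx_queue is an illustrative assumption, not part of the library:

    #include <errno.h>
    #include <rte_ethdev.h>

    /* Reject queue indexes outside the queues configured on the port;
     * mirrors the bound enforced by eth_dev_validate_rx_queue(). */
    static int
    app_check_rx_queue(uint16_t port_id, uint16_t queue_id)
    {
        struct rte_eth_dev_info info;
        int ret = rte_eth_dev_info_get(port_id, &info);

        if (ret != 0)
            return ret;             /* invalid port or driver failure */
        if (queue_id >= info.nb_rx_queues)
            return -EINVAL;         /* same bound the validators enforce */
        return 0;
    }
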
813 struct rte_eth_dev *dev;
816 dev = &rte_eth_devices[port_id];
818 return eth_dev_validate_rx_queue(dev, queue_id);
824 struct rte_eth_dev *dev;
827 dev = &rte_eth_devices[port_id];
829 return eth_dev_validate_tx_queue(dev, queue_id);
835 struct rte_eth_dev *dev;
839 dev = &rte_eth_devices[port_id];
841 if (!dev->data->dev_started) {
848 ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
852 if (*dev->dev_ops->rx_queue_start == NULL)
855 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
862 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
869 ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
879 struct rte_eth_dev *dev;
883 dev = &rte_eth_devices[port_id];
885 ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
889 if (*dev->dev_ops->rx_queue_stop == NULL)
892 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
899 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
906 ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
916 struct rte_eth_dev *dev;
920 dev = &rte_eth_devices[port_id];
922 if (!dev->data->dev_started) {
929 ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
933 if (*dev->dev_ops->tx_queue_start == NULL)
936 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
943 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
950 ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
960 struct rte_eth_dev *dev;
964 dev = &rte_eth_devices[port_id];
966 ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
970 if (*dev->dev_ops->tx_queue_stop == NULL)
973 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
980 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
987 ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
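
The four blocks above (Rx/Tx queue start and stop) follow one sequence: the port must be started, the queue index valid, hairpin queues are refused, and a redundant state transition returns early. A hedged usage sketch for a deferred-start queue; the wrapper name is illustrative:

    #include <stdbool.h>
    #include <rte_ethdev.h>

    /* Start or stop one deferred-start Rx queue while the port is running;
     * the Tx variants (rte_eth_dev_tx_queue_start/stop) behave the same way.
     * Returns 0, or a negative errno value on failure. */
    static int
    app_toggle_rx_queue(uint16_t port_id, uint16_t qid, bool up)
    {
        return up ? rte_eth_dev_rx_queue_start(port_id, qid)
                  : rte_eth_dev_rx_queue_stop(port_id, qid);
    }
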
1284 struct rte_eth_dev *dev;
1292 dev = &rte_eth_devices[port_id];
1301 if (*dev->dev_ops->dev_configure == NULL)
1304 if (dev->data->dev_started) {
1316 dev->data->dev_configured = 0;
1319 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1322 * Copy the dev_conf parameter into the dev structure.
1325 if (dev_conf != &dev->data->dev_conf)
1326 memcpy(&dev->data->dev_conf, dev_conf,
1327 sizeof(dev->data->dev_conf));
1330 old_mtu = dev->data->mtu;
1407 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1409 dev->device->driver->name);
1414 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1416 dev->device->driver->name);
1422 dev->data->dev_conf.rxmode.mtu =
1427 dev->data->dev_conf.rxmode.mtu);
1431 dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
1443 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
1445 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1447 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1491 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1541 diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1550 diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1555 eth_dev_rx_queue_config(dev, 0);
1560 diag = (*dev->dev_ops->dev_configure)(dev);
1569 diag = __rte_eth_dev_profile_init(port_id, dev);
1580 dev->data->dev_conf.rxmode.offloads, "Rx",
1590 dev->data->dev_conf.txmode.offloads, "Tx",
1597 dev->data->dev_configured = 1;
1601 eth_dev_rx_queue_config(dev, 0);
1602 eth_dev_tx_queue_config(dev, 0);
1604 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1605 if (old_mtu != dev->data->mtu)
1606 dev->data->mtu = old_mtu;
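
The matches from rte_eth_dev_configure() show the configuration being copied into dev->data->dev_conf, the MTU and offload checks, and the rollback path that restores orig_conf and old_mtu on failure. A minimal caller-side sketch; queue counts, MTU and offload flags are placeholder values:

    #include <rte_ethdev.h>

    /* One Rx and one Tx queue, a standard MTU and checksum offloads;
     * rte_eth_dev_configure() copies this into dev->data->dev_conf and,
     * on failure, rolls dev_conf, MTU and the queue arrays back. */
    static int
    app_configure_port(uint16_t port_id)
    {
        struct rte_eth_conf conf = {
            .rxmode = {
                .mtu = 1500,
                .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
            },
        };

        return rte_eth_dev_configure(port_id, 1 /* Rx queues */,
                                     1 /* Tx queues */, &conf);
    }
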
1613 eth_dev_mac_restore(struct rte_eth_dev *dev,
1622 addr = &dev->data->mac_addrs[0];
1623 if (*dev->dev_ops->mac_addr_set != NULL)
1624 (*dev->dev_ops->mac_addr_set)(dev, addr);
1625 else if (*dev->dev_ops->mac_addr_add != NULL)
1626 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1628 if (*dev->dev_ops->mac_addr_add != NULL) {
1630 addr = &dev->data->mac_addrs[i];
1637 pool_mask = dev->data->mac_pool_sel[i];
1641 (*dev->dev_ops->mac_addr_add)(dev,
1651 eth_dev_promiscuous_restore(struct rte_eth_dev *dev, uint16_t port_id)
1661 *dev->dev_ops->promiscuous_enable != NULL) {
1663 (*dev->dev_ops->promiscuous_enable)(dev));
1671 *dev->dev_ops->promiscuous_disable != NULL) {
1673 (*dev->dev_ops->promiscuous_disable)(dev));
1686 eth_dev_allmulticast_restore(struct rte_eth_dev *dev, uint16_t port_id)
1696 *dev->dev_ops->allmulticast_enable != NULL) {
1698 (*dev->dev_ops->allmulticast_enable)(dev));
1706 *dev->dev_ops->allmulticast_disable != NULL) {
1708 (*dev->dev_ops->allmulticast_disable)(dev));
1721 eth_dev_config_restore(struct rte_eth_dev *dev,
1730 eth_dev_mac_restore(dev, dev_info);
1733 ret = eth_dev_promiscuous_restore(dev, port_id);
1739 ret = eth_dev_allmulticast_restore(dev, port_id);
1750 struct rte_eth_dev *dev;
1757 dev = &rte_eth_devices[port_id];
1759 if (*dev->dev_ops->dev_start == NULL)
1762 if (dev->data->dev_configured == 0) {
1769 if (dev->data->dev_started != 0) {
1780 restore_flags = rte_eth_get_restore_flags(dev, RTE_ETH_START);
1785 eth_dev_mac_restore(dev, &dev_info);
1787 diag = (*dev->dev_ops->dev_start)(dev);
1789 dev->data->dev_started = 1;
1793 ret = eth_dev_config_restore(dev, &dev_info, restore_flags, port_id);
1808 if (dev->data->dev_conf.intr_conf.lsc == 0) {
1809 if (*dev->dev_ops->link_update == NULL)
1811 (*dev->dev_ops->link_update)(dev, 0);
1815 eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);
1824 struct rte_eth_dev *dev;
1828 dev = &rte_eth_devices[port_id];
1830 if (*dev->dev_ops->dev_stop == NULL)
1833 if (dev->data->dev_started == 0) {
1843 ret = (*dev->dev_ops->dev_stop)(dev);
1845 dev->data->dev_started = 0;
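
rte_eth_dev_start() above restores MAC/promiscuous/all-multicast state, forces one link_update when link-state interrupts are off, and installs the fast-path ops; rte_eth_dev_stop() undoes dev_started. A hedged lifecycle sketch using the public API; the helper name is illustrative:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Start the port, read the link once without waiting, stop it again. */
    static int
    app_port_up_down(uint16_t port_id)
    {
        struct rte_eth_link link;
        int ret = rte_eth_dev_start(port_id);

        if (ret != 0)
            return ret;
        if (rte_eth_link_get_nowait(port_id, &link) == 0)
            printf("link is %s\n",
                   link.link_status == RTE_ETH_LINK_UP ? "up" : "down");
        return rte_eth_dev_stop(port_id);
    }
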
1854 struct rte_eth_dev *dev;
1858 dev = &rte_eth_devices[port_id];
1860 if (*dev->dev_ops->dev_set_link_up == NULL)
1862 ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1872 struct rte_eth_dev *dev;
1876 dev = &rte_eth_devices[port_id];
1878 if (*dev->dev_ops->dev_set_link_down == NULL)
1880 ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1890 struct rte_eth_dev *dev;
1893 dev = &rte_eth_devices[port_id];
1895 if (*dev->dev_ops->speed_lanes_get == NULL)
1897 return eth_err(port_id, (*dev->dev_ops->speed_lanes_get)(dev, lane));
1905 struct rte_eth_dev *dev;
1909 dev = &rte_eth_devices[port_id];
1911 if (*dev->dev_ops->speed_lanes_get_capa == NULL)
1921 ret = (*dev->dev_ops->speed_lanes_get_capa)(dev, speed_lanes_capa, num);
1929 struct rte_eth_dev *dev;
1932 dev = &rte_eth_devices[port_id];
1934 if (*dev->dev_ops->speed_lanes_set == NULL)
1936 return eth_err(port_id, (*dev->dev_ops->speed_lanes_set)(dev, speed_lanes_capa));
1942 struct rte_eth_dev *dev;
1947 dev = &rte_eth_devices[port_id];
1955 dev->data->dev_started) {
1961 if (*dev->dev_ops->dev_close == NULL)
1963 *lasterr = (*dev->dev_ops->dev_close)(dev);
1968 *lasterr = rte_eth_dev_release_port(dev);
1976 struct rte_eth_dev *dev;
1980 dev = &rte_eth_devices[port_id];
1982 if (*dev->dev_ops->dev_reset == NULL)
1991 ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));
2001 struct rte_eth_dev *dev;
2005 dev = &rte_eth_devices[port_id];
2007 if (dev->state == RTE_ETH_DEV_REMOVED)
2010 if (*dev->dev_ops->is_removed == NULL)
2013 ret = dev->dev_ops->is_removed(dev);
2016 dev->state = RTE_ETH_DEV_REMOVED;
2234 struct rte_eth_dev *dev;
2240 dev = &rte_eth_devices[port_id];
2242 if (rx_queue_id >= dev->data->nb_rx_queues) {
2247 if (*dev->dev_ops->rx_queue_setup == NULL)
2263 rx_offloads = dev->data->dev_conf.rxmode.offloads;
2352 if (dev->data->dev_started &&
2357 if (dev->data->dev_started &&
2358 (dev->data->rx_queue_state[rx_queue_id] !=
2362 eth_dev_rxq_release(dev, rx_queue_id);
2377 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2418 max_rx_pktlen = dev->data->mtu + overhead_len;
2419 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2420 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
2422 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2429 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2432 if (!dev->data->min_rx_buf_size ||
2433 dev->data->min_rx_buf_size > mbp_buf_size)
2434 dev->data->min_rx_buf_size = mbp_buf_size;
2448 struct rte_eth_dev *dev;
2454 dev = &rte_eth_devices[port_id];
2456 if (rx_queue_id >= dev->data->nb_rx_queues) {
2477 if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
2522 for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2524 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2532 if (dev->data->dev_started)
2534 eth_dev_rxq_release(dev, rx_queue_id);
2535 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2538 dev->data->rx_queue_state[rx_queue_id] =
2553 struct rte_eth_dev *dev;
2559 dev = &rte_eth_devices[port_id];
2561 if (tx_queue_id >= dev->data->nb_tx_queues) {
2566 if (*dev->dev_ops->tx_queue_setup == NULL)
2600 if (dev->data->dev_started &&
2605 if (dev->data->dev_started &&
2606 (dev->data->tx_queue_state[tx_queue_id] !=
2610 eth_dev_txq_release(dev, tx_queue_id);
2625 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2647 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
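
The Rx/Tx queue setup matches show per-queue offloads being masked against the port-level rxmode/txmode offloads before the driver callback runs. A short caller-side sketch with driver-default queue configuration; descriptor counts and the helper name are placeholders:

    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    /* One Rx queue backed by `pool` and one Tx queue, both with the driver
     * defaults (NULL per-queue conf).  Per-queue offloads, when used, only
     * need to list flags not already enabled at the port level. */
    static int
    app_setup_queues(uint16_t port_id, unsigned int socket_id,
                     struct rte_mempool *pool)
    {
        int ret = rte_eth_rx_queue_setup(port_id, 0, 1024, socket_id,
                                         NULL, pool);

        if (ret != 0)
            return ret;
        return rte_eth_tx_queue_setup(port_id, 0, 1024, socket_id, NULL);
    }
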
2656 struct rte_eth_dev *dev;
2663 dev = &rte_eth_devices[port_id];
2665 if (tx_queue_id >= dev->data->nb_tx_queues) {
2680 if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2725 for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2727 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2735 if (dev->data->dev_started)
2737 eth_dev_txq_release(dev, tx_queue_id);
2738 ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2739 (dev, tx_queue_id, nb_tx_desc, conf);
2741 dev->data->tx_queue_state[tx_queue_id] =
2754 struct rte_eth_dev *dev;
2758 dev = &rte_eth_devices[tx_port];
2760 if (dev->data->dev_started == 0) {
2765 if (*dev->dev_ops->hairpin_bind == NULL)
2767 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2781 struct rte_eth_dev *dev;
2785 dev = &rte_eth_devices[tx_port];
2787 if (dev->data->dev_started == 0) {
2792 if (*dev->dev_ops->hairpin_unbind == NULL)
2794 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2809 struct rte_eth_dev *dev;
2813 dev = &rte_eth_devices[port_id];
2829 if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
2832 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2907 struct rte_eth_dev *dev;
2911 dev = &rte_eth_devices[port_id];
2914 ret = eth_dev_validate_tx_queue(dev, queue_id);
2919 if (*dev->dev_ops->tx_done_cleanup == NULL)
2923 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2935 struct rte_eth_dev *dev;
2939 dev = &rte_eth_devices[port_id];
2941 if (dev->data->promiscuous == 1)
2944 if (*dev->dev_ops->promiscuous_enable == NULL)
2947 diag = (*dev->dev_ops->promiscuous_enable)(dev);
2948 dev->data->promiscuous = (diag == 0) ? 1 : 0;
2952 rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous,
2961 struct rte_eth_dev *dev;
2965 dev = &rte_eth_devices[port_id];
2967 if (dev->data->promiscuous == 0)
2970 if (*dev->dev_ops->promiscuous_disable == NULL)
2973 dev->data->promiscuous = 0;
2974 diag = (*dev->dev_ops->promiscuous_disable)(dev);
2976 dev->data->promiscuous = 1;
2980 rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous,
2989 struct rte_eth_dev *dev;
2992 dev = &rte_eth_devices[port_id];
2994 rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous);
2996 return dev->data->promiscuous;
3002 struct rte_eth_dev *dev;
3006 dev = &rte_eth_devices[port_id];
3008 if (dev->data->all_multicast == 1)
3011 if (*dev->dev_ops->allmulticast_enable == NULL)
3013 diag = (*dev->dev_ops->allmulticast_enable)(dev);
3014 dev->data->all_multicast = (diag == 0) ? 1 : 0;
3018 rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast,
3027 struct rte_eth_dev *dev;
3031 dev = &rte_eth_devices[port_id];
3033 if (dev->data->all_multicast == 0)
3036 if (*dev->dev_ops->allmulticast_disable == NULL)
3038 dev->data->all_multicast = 0;
3039 diag = (*dev->dev_ops->allmulticast_disable)(dev);
3041 dev->data->all_multicast = 1;
3045 rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast,
3054 struct rte_eth_dev *dev;
3057 dev = &rte_eth_devices[port_id];
3059 rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast);
3061 return dev->data->all_multicast;
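
The promiscuous and all-multicast matches show the cached flags in dev->data being updated only when the driver callback succeeds, and the getters returning that cache directly. A hedged usage sketch; the helper name is illustrative:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Accept all unicast and multicast traffic and report the cached state;
     * the getters return 1 or 0 (or -1 for an unknown port). */
    static void
    app_enable_rx_everything(uint16_t port_id)
    {
        if (rte_eth_promiscuous_enable(port_id) != 0)
            printf("promiscuous mode not supported\n");
        if (rte_eth_allmulticast_enable(port_id) != 0)
            printf("all-multicast mode not supported\n");
        printf("promisc=%d allmulti=%d\n",
               rte_eth_promiscuous_get(port_id),
               rte_eth_allmulticast_get(port_id));
    }
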
3067 struct rte_eth_dev *dev;
3070 dev = &rte_eth_devices[port_id];
3078 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
3079 rte_eth_linkstatus_get(dev, eth_link);
3081 if (*dev->dev_ops->link_update == NULL)
3083 (*dev->dev_ops->link_update)(dev, 1);
3084 *eth_link = dev->data->dev_link;
3095 struct rte_eth_dev *dev;
3098 dev = &rte_eth_devices[port_id];
3106 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
3107 rte_eth_linkstatus_get(dev, eth_link);
3109 if (*dev->dev_ops->link_update == NULL)
3111 (*dev->dev_ops->link_update)(dev, 0);
3112 *eth_link = dev->data->dev_link;
3222 struct rte_eth_dev *dev;
3226 dev = &rte_eth_devices[port_id];
3236 if (*dev->dev_ops->stats_get == NULL)
3238 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
3239 ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
3249 struct rte_eth_dev *dev;
3253 dev = &rte_eth_devices[port_id];
3255 if (*dev->dev_ops->stats_reset == NULL)
3257 ret = (*dev->dev_ops->stats_reset)(dev);
3261 dev->data->rx_mbuf_alloc_failed = 0;
3269 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
3274 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3275 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3278 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
3289 struct rte_eth_dev *dev;
3293 dev = &rte_eth_devices[port_id];
3294 if (dev->dev_ops->xstats_get_names != NULL) {
3295 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
3302 count += eth_dev_get_xstats_basic_count(dev);
3361 eth_basic_stats_get_names(struct rte_eth_dev *dev,
3375 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3378 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3389 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3413 struct rte_eth_dev *dev;
3418 dev = &rte_eth_devices[port_id];
3420 basic_count = eth_dev_get_xstats_basic_count(dev);
3437 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
3454 return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3487 eth_basic_stats_get_names(dev, xstats_names_copy);
3519 struct rte_eth_dev *dev;
3531 dev = &rte_eth_devices[port_id];
3533 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3535 if (dev->dev_ops->xstats_get_names != NULL) {
3539 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3540 dev,
3559 struct rte_eth_dev *dev;
3570 dev = &rte_eth_devices[port_id];
3572 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3573 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3583 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3620 struct rte_eth_dev *dev;
3625 dev = &rte_eth_devices[port_id];
3632 basic_count = eth_dev_get_xstats_basic_count(dev);
3645 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3646 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
3663 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3711 struct rte_eth_dev *dev;
3719 dev = &rte_eth_devices[port_id];
3721 count = eth_dev_get_xstats_basic_count(dev);
3724 if (dev->dev_ops->xstats_get != NULL) {
3728 xcount = (*dev->dev_ops->xstats_get)(dev,
3761 struct rte_eth_dev *dev;
3764 dev = &rte_eth_devices[port_id];
3767 if (dev->dev_ops->xstats_reset != NULL) {
3768 int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3783 struct rte_eth_dev *dev;
3786 dev = &rte_eth_devices[port_id];
3788 if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3791 if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3797 if (*dev->dev_ops->queue_stats_mapping_set == NULL)
3799 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
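
The stats/xstats matches show basic counters coming from stats_get plus rx_mbuf_alloc_failed, and the extended statistics being assembled from the basic set plus the driver's xstats callbacks. A caller-side sketch using the usual two-pass size query; the helper name is illustrative:

    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    /* Basic counters plus extended statistics; calling rte_eth_xstats_get()
     * with a NULL buffer first returns how many entries the driver exposes. */
    static int
    app_dump_stats(uint16_t port_id)
    {
        struct rte_eth_stats stats;
        struct rte_eth_xstat *xstats;
        int n, ret = rte_eth_stats_get(port_id, &stats);

        if (ret != 0)
            return ret;
        printf("ipackets=%" PRIu64 " imissed=%" PRIu64 " rx_nombuf=%" PRIu64 "\n",
               stats.ipackets, stats.imissed, stats.rx_nombuf);

        n = rte_eth_xstats_get(port_id, NULL, 0);       /* size query */
        if (n <= 0)
            return n;
        xstats = calloc(n, sizeof(*xstats));
        if (xstats == NULL)
            return -ENOMEM;
        n = rte_eth_xstats_get(port_id, xstats, n);     /* actual fetch */
        free(xstats);
        return n < 0 ? n : 0;
    }
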
3837 struct rte_eth_dev *dev;
3841 dev = &rte_eth_devices[port_id];
3850 if (*dev->dev_ops->fw_version_get == NULL)
3852 ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3863 struct rte_eth_dev *dev;
3874 dev = &rte_eth_devices[port_id];
3891 dev_info->device = dev->device;
3898 if (*dev->dev_ops->dev_infos_get == NULL)
3900 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3913 dev_info->driver_name = dev->device->driver->name;
3914 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3915 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3917 dev_info->dev_flags = &dev->data->dev_flags;
3927 struct rte_eth_dev *dev;
3930 dev = &rte_eth_devices[port_id];
3939 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3952 struct rte_eth_dev *dev;
3957 dev = &rte_eth_devices[port_id];
3966 if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
3968 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev,
4002 struct rte_eth_dev *dev;
4010 dev = &rte_eth_devices[port_id];
4019 if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
4020 *dev->dev_ops->dev_ptypes_set == NULL) {
4026 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
4046 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev,
4076 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
4090 struct rte_eth_dev *dev;
4103 dev = &rte_eth_devices[port_id];
4105 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
4115 struct rte_eth_dev *dev;
4118 dev = &rte_eth_devices[port_id];
4127 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
4137 struct rte_eth_dev *dev;
4140 dev = &rte_eth_devices[port_id];
4148 *mtu = dev->data->mtu;
4160 struct rte_eth_dev *dev;
4163 dev = &rte_eth_devices[port_id];
4164 if (*dev->dev_ops->mtu_set == NULL)
4171 * which relies on dev->dev_ops->dev_infos_get.
4173 if (*dev->dev_ops->dev_infos_get != NULL) {
4183 if (dev->data->dev_configured == 0) {
4190 ret = (*dev->dev_ops->mtu_set)(dev, mtu);
4192 dev->data->mtu = mtu;
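
The MTU matches show rte_eth_dev_set_mtu() validating the new value via dev_infos_get, refusing to run before dev_configure, and caching the value in dev->data->mtu on success. A minimal sketch of the get/set pair; the helper name is illustrative:

    #include <rte_ethdev.h>

    /* Change the MTU only when it differs from the current cached value. */
    static int
    app_set_mtu(uint16_t port_id, uint16_t new_mtu)
    {
        uint16_t cur;
        int ret = rte_eth_dev_get_mtu(port_id, &cur);

        if (ret != 0)
            return ret;
        return cur == new_mtu ? 0 : rte_eth_dev_set_mtu(port_id, new_mtu);
    }
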
4204 struct rte_eth_dev *dev;
4208 dev = &rte_eth_devices[port_id];
4210 if (!(dev->data->dev_conf.rxmode.offloads &
4222 if (*dev->dev_ops->vlan_filter_set == NULL)
4225 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
4231 vfc = &dev->data->vlan_filter_conf;
4252 struct rte_eth_dev *dev;
4255 dev = &rte_eth_devices[port_id];
4257 if (rx_queue_id >= dev->data->nb_rx_queues) {
4262 if (*dev->dev_ops->vlan_strip_queue_set == NULL)
4264 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
4276 struct rte_eth_dev *dev;
4280 dev = &rte_eth_devices[port_id];
4282 if (*dev->dev_ops->vlan_tpid_set == NULL)
4284 ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
4296 struct rte_eth_dev *dev;
4305 dev = &rte_eth_devices[port_id];
4308 orig_offloads = dev->data->dev_conf.rxmode.offloads;
4372 if (*dev->dev_ops->vlan_offload_set == NULL)
4374 dev->data->dev_conf.rxmode.offloads = dev_offloads;
4375 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
4378 dev->data->dev_conf.rxmode.offloads = orig_offloads;
4391 struct rte_eth_dev *dev;
4396 dev = &rte_eth_devices[port_id];
4397 dev_offloads = &dev->data->dev_conf.rxmode.offloads;
4419 struct rte_eth_dev *dev;
4423 dev = &rte_eth_devices[port_id];
4425 if (*dev->dev_ops->vlan_pvid_set == NULL)
4427 ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
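
The VLAN matches show vlan_filter_set requiring the filter offload in rxmode.offloads, and vlan_offload_set updating (and on failure restoring) those offload flags before calling the driver. A hedged sketch combining the two; the helper name is illustrative:

    #include <rte_ethdev.h>

    /* Enable VLAN stripping and filtering, then admit one VLAN ID;
     * rte_eth_dev_set_vlan_offload() updates rxmode.offloads, which
     * rte_eth_dev_vlan_filter() checks before programming the filter. */
    static int
    app_vlan_setup(uint16_t port_id, uint16_t vlan_id)
    {
        int ret = rte_eth_dev_set_vlan_offload(port_id,
                RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD);

        if (ret != 0)
            return ret;
        return rte_eth_dev_vlan_filter(port_id, vlan_id, 1 /* add */);
    }
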
4437 struct rte_eth_dev *dev;
4441 dev = &rte_eth_devices[port_id];
4450 if (*dev->dev_ops->flow_ctrl_get == NULL)
4453 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
4463 struct rte_eth_dev *dev;
4467 dev = &rte_eth_devices[port_id];
4481 if (*dev->dev_ops->flow_ctrl_set == NULL)
4483 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
4494 struct rte_eth_dev *dev;
4498 dev = &rte_eth_devices[port_id];
4513 if (*dev->dev_ops->priority_flow_ctrl_set == NULL)
4515 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
4516 (dev, pfc_conf));
4577 struct rte_eth_dev *dev;
4581 dev = &rte_eth_devices[port_id];
4589 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL)
4591 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
4592 (dev, pfc_queue_info));
4606 struct rte_eth_dev *dev;
4610 dev = &rte_eth_devices[port_id];
4665 if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL)
4667 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config)
4668 (dev, pfc_queue_conf));
4725 struct rte_eth_dev *dev;
4729 dev = &rte_eth_devices[port_id];
4752 dev->data->nb_rx_queues);
4756 mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4762 if (*dev->dev_ops->reta_update == NULL)
4764 ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
4777 struct rte_eth_dev *dev;
4781 dev = &rte_eth_devices[port_id];
4795 if (*dev->dev_ops->reta_query == NULL)
4797 ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4809 struct rte_eth_dev *dev;
4815 dev = &rte_eth_devices[port_id];
4838 mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4862 if (*dev->dev_ops->rss_hash_update == NULL)
4864 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4877 struct rte_eth_dev *dev;
4881 dev = &rte_eth_devices[port_id];
4904 if (*dev->dev_ops->rss_hash_conf_get == NULL)
4906 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
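
The RSS matches show both the RETA update and the hash update being gated on the configured mq_mode and the driver's capabilities before the callbacks run. A caller-side sketch of reading the hash configuration back and narrowing it; key size and helper name are illustrative:

    #include <rte_ethdev.h>

    /* Read the active RSS configuration and restrict the hash to the
     * IP-only set; the requested rss_hf must stay within the capability
     * mask the driver reports (flow_type_rss_offloads). */
    static int
    app_tune_rss(uint16_t port_id)
    {
        uint8_t key[64];
        struct rte_eth_rss_conf rss = {
            .rss_key = key,
            .rss_key_len = sizeof(key),
        };
        int ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss);

        if (ret != 0)
            return ret;
        rss.rss_hf &= RTE_ETH_RSS_IP;
        return rte_eth_dev_rss_hash_update(port_id, &rss);
    }
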
4947 struct rte_eth_dev *dev;
4951 dev = &rte_eth_devices[port_id];
4965 if (*dev->dev_ops->udp_tunnel_port_add == NULL)
4967 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4979 struct rte_eth_dev *dev;
4983 dev = &rte_eth_devices[port_id];
4997 if (*dev->dev_ops->udp_tunnel_port_del == NULL)
4999 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
5010 struct rte_eth_dev *dev;
5014 dev = &rte_eth_devices[port_id];
5016 if (*dev->dev_ops->dev_led_on == NULL)
5018 ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
5028 struct rte_eth_dev *dev;
5032 dev = &rte_eth_devices[port_id];
5034 if (*dev->dev_ops->dev_led_off == NULL)
5036 ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
5048 struct rte_eth_dev *dev;
5052 dev = &rte_eth_devices[port_id];
5061 if (*dev->dev_ops->fec_get_capability == NULL)
5063 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
5073 struct rte_eth_dev *dev;
5077 dev = &rte_eth_devices[port_id];
5086 if (*dev->dev_ops->fec_get == NULL)
5088 ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
5098 struct rte_eth_dev *dev;
5102 dev = &rte_eth_devices[port_id];
5109 if (*dev->dev_ops->fec_set == NULL)
5111 ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
5126 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5135 if (memcmp(addr, &dev->data->mac_addrs[i],
5148 struct rte_eth_dev *dev;
5154 dev = &rte_eth_devices[port_id];
5163 if (*dev->dev_ops->mac_addr_add == NULL)
5185 pool_mask = dev->data->mac_pool_sel[index];
5193 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
5197 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
5200 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
5213 struct rte_eth_dev *dev;
5217 dev = &rte_eth_devices[port_id];
5226 if (*dev->dev_ops->mac_addr_remove == NULL)
5239 (*dev->dev_ops->mac_addr_remove)(dev, index);
5242 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
5245 dev->data->mac_pool_sel[index] = 0;
5255 struct rte_eth_dev *dev;
5260 dev = &rte_eth_devices[port_id];
5272 if (*dev->dev_ops->mac_addr_set == NULL)
5275 /* Keep address unique in dev->data->mac_addrs[]. */
5284 ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
5289 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
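
The MAC address matches show the add/remove/set paths keeping dev->data->mac_addrs and mac_pool_sel in sync with the driver only after the callback succeeds. A short usage sketch; the address bytes and helper name are illustrative:

    #include <rte_ethdev.h>

    /* Add a locally administered secondary unicast address to MAC pool 0;
     * removing it again goes through rte_eth_dev_mac_addr_remove(). */
    static int
    app_add_secondary_mac(uint16_t port_id)
    {
        struct rte_ether_addr extra = {
            .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        };

        return rte_eth_dev_mac_addr_add(port_id, &extra, 0 /* pool */);
    }
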
5306 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5314 if (!dev->data->hash_mac_addrs)
5318 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
5331 struct rte_eth_dev *dev;
5334 dev = &rte_eth_devices[port_id];
5370 if (*dev->dev_ops->uc_hash_table_set == NULL)
5372 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
5377 &dev->data->hash_mac_addrs[index]);
5380 &dev->data->hash_mac_addrs[index]);
5393 struct rte_eth_dev *dev;
5397 dev = &rte_eth_devices[port_id];
5399 if (*dev->dev_ops->uc_all_hash_table_set == NULL)
5401 ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on));
5411 struct rte_eth_dev *dev;
5417 dev = &rte_eth_devices[port_id];
5423 link = dev->data->dev_link;
5439 if (*dev->dev_ops->set_queue_rate_limit == NULL)
5441 ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
5452 struct rte_eth_dev *dev;
5456 dev = &rte_eth_devices[port_id];
5458 if (queue_id > dev->data->nb_rx_queues) {
5471 if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
5473 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
5484 struct rte_eth_dev *dev;
5488 dev = &rte_eth_devices[port_id];
5492 if (*queue_id >= dev->data->nb_rx_queues)
5495 if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
5497 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
5526 struct rte_eth_dev *dev;
5553 dev = &rte_eth_devices[next_port];
5555 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
5571 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
5596 struct rte_eth_dev *dev;
5623 dev = &rte_eth_devices[next_port];
5625 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
5639 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
5659 struct rte_eth_dev *dev;
5665 dev = &rte_eth_devices[port_id];
5667 if (!dev->intr_handle) {
5672 intr_handle = dev->intr_handle;
5678 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
5698 struct rte_eth_dev *dev;
5704 dev = &rte_eth_devices[port_id];
5706 if (queue_id >= dev->data->nb_rx_queues) {
5711 if (!dev->intr_handle) {
5716 intr_handle = dev->intr_handle;
5737 struct rte_eth_dev *dev;
5742 dev = &rte_eth_devices[port_id];
5744 if (queue_id >= dev->data->nb_rx_queues) {
5749 if (!dev->intr_handle) {
5754 intr_handle = dev->intr_handle;
5779 struct rte_eth_dev *dev;
5783 dev = &rte_eth_devices[port_id];
5785 ret = eth_dev_validate_rx_queue(dev, queue_id);
5789 if (*dev->dev_ops->rx_queue_intr_enable == NULL)
5791 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5802 struct rte_eth_dev *dev;
5806 dev = &rte_eth_devices[port_id];
5808 ret = eth_dev_validate_rx_queue(dev, queue_id);
5812 if (*dev->dev_ops->rx_queue_intr_disable == NULL)
5814 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5830 struct rte_eth_dev *dev;
5838 dev = &rte_eth_devices[port_id];
5839 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5931 struct rte_eth_dev *dev;
5940 dev = &rte_eth_devices[port_id];
5941 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5997 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
6003 prev_cb = &dev->post_rx_burst_cbs[queue_id];
6033 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
6039 prev_cb = &dev->pre_tx_burst_cbs[queue_id];
6060 struct rte_eth_dev *dev;
6063 dev = &rte_eth_devices[port_id];
6065 if (queue_id >= dev->data->nb_rx_queues) {
6076 if (dev->data->rx_queues == NULL ||
6077 dev->data->rx_queues[queue_id] == NULL) {
6085 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
6092 if (*dev->dev_ops->rxq_info_get == NULL)
6096 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
6097 qinfo->queue_state = dev->data->rx_queue_state[queue_id];
6108 struct rte_eth_dev *dev;
6111 dev = &rte_eth_devices[port_id];
6113 if (queue_id >= dev->data->nb_tx_queues) {
6124 if (dev->data->tx_queues == NULL ||
6125 dev->data->tx_queues[queue_id] == NULL) {
6133 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
6140 if (*dev->dev_ops->txq_info_get == NULL)
6144 dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
6145 qinfo->queue_state = dev->data->tx_queue_state[queue_id];
6156 struct rte_eth_dev *dev;
6160 dev = &rte_eth_devices[port_id];
6162 ret = eth_dev_validate_rx_queue(dev, queue_id);
6166 if (*dev->dev_ops->recycle_rxq_info_get == NULL)
6169 dev->dev_ops->recycle_rxq_info_get(dev, queue_id, recycle_rxq_info);
6178 struct rte_eth_dev *dev;
6182 dev = &rte_eth_devices[port_id];
6184 if (queue_id >= dev->data->nb_rx_queues) {
6196 if (*dev->dev_ops->rx_burst_mode_get == NULL)
6200 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
6211 struct rte_eth_dev *dev;
6215 dev = &rte_eth_devices[port_id];
6217 if (queue_id >= dev->data->nb_tx_queues) {
6229 if (*dev->dev_ops->tx_burst_mode_get == NULL)
6233 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
6244 struct rte_eth_dev *dev;
6248 dev = &rte_eth_devices[port_id];
6250 if (queue_id >= dev->data->nb_rx_queues) {
6262 if (*dev->dev_ops->get_monitor_addr == NULL)
6265 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
6277 struct rte_eth_dev *dev;
6281 dev = &rte_eth_devices[port_id];
6283 if (*dev->dev_ops->set_mc_addr_list == NULL)
6285 ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
6297 struct rte_eth_dev *dev;
6301 dev = &rte_eth_devices[port_id];
6303 if (*dev->dev_ops->timesync_enable == NULL)
6305 ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
6315 struct rte_eth_dev *dev;
6319 dev = &rte_eth_devices[port_id];
6321 if (*dev->dev_ops->timesync_disable == NULL)
6323 ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
6334 struct rte_eth_dev *dev;
6338 dev = &rte_eth_devices[port_id];
6347 if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
6350 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
6351 (dev, timestamp, flags));
6363 struct rte_eth_dev *dev;
6367 dev = &rte_eth_devices[port_id];
6376 if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
6379 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
6380 (dev, timestamp));
6391 struct rte_eth_dev *dev;
6395 dev = &rte_eth_devices[port_id];
6397 if (*dev->dev_ops->timesync_adjust_time == NULL)
6399 ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
6409 struct rte_eth_dev *dev;
6413 dev = &rte_eth_devices[port_id];
6415 if (*dev->dev_ops->timesync_adjust_freq == NULL)
6417 ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_freq)(dev, ppm));
6427 struct rte_eth_dev *dev;
6431 dev = &rte_eth_devices[port_id];
6440 if (*dev->dev_ops->timesync_read_time == NULL)
6442 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
6453 struct rte_eth_dev *dev;
6457 dev = &rte_eth_devices[port_id];
6466 if (*dev->dev_ops->timesync_write_time == NULL)
6468 ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
6479 struct rte_eth_dev *dev;
6483 dev = &rte_eth_devices[port_id];
6491 if (*dev->dev_ops->read_clock == NULL)
6493 ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
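
The timesync matches all reduce to the same dispatch: check the dev_ops callback exists, call it, wrap the result with eth_err(). A minimal caller-side sketch of enabling timestamping and sampling the device clock; the helper name is illustrative:

    #include <time.h>
    #include <rte_ethdev.h>

    /* Enable IEEE 1588/PTP timestamping on the port and read the device
     * clock once. */
    static int
    app_start_timesync(uint16_t port_id)
    {
        struct timespec ts;
        int ret = rte_eth_timesync_enable(port_id);

        if (ret != 0)
            return ret;
        return rte_eth_timesync_read_time(port_id, &ts);
    }
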
6531 struct rte_eth_dev *dev;
6536 dev = &rte_eth_devices[port_id];
6548 if (*dev->dev_ops->get_reg == NULL)
6550 ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
6566 struct rte_eth_dev *dev;
6570 dev = &rte_eth_devices[port_id];
6572 if (*dev->dev_ops->get_eeprom_length == NULL)
6574 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
6584 struct rte_eth_dev *dev;
6588 dev = &rte_eth_devices[port_id];
6597 if (*dev->dev_ops->get_eeprom == NULL)
6599 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
6609 struct rte_eth_dev *dev;
6613 dev = &rte_eth_devices[port_id];
6622 if (*dev->dev_ops->set_eeprom == NULL)
6624 ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
6635 struct rte_eth_dev *dev;
6639 dev = &rte_eth_devices[port_id];
6648 if (*dev->dev_ops->get_module_info == NULL)
6650 ret = (*dev->dev_ops->get_module_info)(dev, modinfo);
6661 struct rte_eth_dev *dev;
6665 dev = &rte_eth_devices[port_id];
6688 if (*dev->dev_ops->get_module_eeprom == NULL)
6690 ret = (*dev->dev_ops->get_module_eeprom)(dev, info);
6701 struct rte_eth_dev *dev;
6705 dev = &rte_eth_devices[port_id];
6716 if (*dev->dev_ops->get_dcb_info == NULL)
6718 ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
6773 struct rte_eth_dev *dev;
6777 dev = &rte_eth_devices[port_id];
6786 if (*dev->dev_ops->hairpin_cap_get == NULL)
6789 ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
6799 struct rte_eth_dev *dev;
6803 dev = &rte_eth_devices[port_id];
6812 if (*dev->dev_ops->pool_ops_supported == NULL)
6815 ret = (*dev->dev_ops->pool_ops_supported)(dev, pool);
6826 struct rte_eth_dev *dev;
6830 dev = &rte_eth_devices[port_id];
6832 if (*dev->dev_ops->representor_info_get == NULL)
6834 ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
6844 struct rte_eth_dev *dev;
6848 dev = &rte_eth_devices[port_id];
6850 if (dev->data->dev_configured != 0) {
6866 if (*dev->dev_ops->rx_metadata_negotiate == NULL)
6869 (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
6880 struct rte_eth_dev *dev;
6884 dev = &rte_eth_devices[port_id];
6886 if (dev->data->dev_configured == 0) {
6898 if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
6902 ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
6903 (dev, reassembly_capa));
6915 struct rte_eth_dev *dev;
6919 dev = &rte_eth_devices[port_id];
6921 if (dev->data->dev_configured == 0) {
6933 if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
6937 (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
6948 struct rte_eth_dev *dev;
6952 dev = &rte_eth_devices[port_id];
6954 if (dev->data->dev_configured == 0) {
6961 if (dev->data->dev_started != 0) {
6974 if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
6977 (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
6987 struct rte_eth_dev *dev;
6990 dev = &rte_eth_devices[port_id];
6997 if (*dev->dev_ops->eth_dev_priv_dump == NULL)
6999 return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
7006 struct rte_eth_dev *dev;
7009 dev = &rte_eth_devices[port_id];
7011 if (queue_id >= dev->data->nb_rx_queues) {
7021 if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
7024 return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
7032 struct rte_eth_dev *dev;
7035 dev = &rte_eth_devices[port_id];
7037 if (queue_id >= dev->data->nb_tx_queues) {
7047 if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
7050 return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
7059 struct rte_eth_dev *dev;
7064 dev = &rte_eth_devices[port_id];
7073 if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
7075 all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev,
7096 struct rte_eth_dev *dev;
7100 dev = &rte_eth_devices[port_id];
7102 if (*dev->dev_ops->count_aggr_ports == NULL)
7104 ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));
7114 struct rte_eth_dev *dev;
7119 dev = &rte_eth_devices[port_id];
7121 if (tx_queue_id >= dev->data->nb_tx_queues) {
7126 if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
7129 if (dev->data->dev_configured == 0) {
7136 if (dev->data->dev_started) {
7158 ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,