/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <rte_atomic.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>

#include "failsafe_private.h"

/*
 * Return nonzero when @sdev cannot safely service an RX burst:
 * no underlying ethdev, no RX burst function installed, sub-device
 * not in DEV_STARTED state, or sub-device flagged for removal.
 *
 * NOTE(review): unlike fs_tx_unsafe(), @sdev itself is not
 * NULL-checked — presumably every caller (FOREACH_SUBDEV iteration,
 * the rxq->sdev ring walk) guarantees a valid pointer; confirm.
 */
static inline int
fs_rx_unsafe(struct sub_device *sdev)
{
	return (ETH(sdev) == NULL) ||
		(ETH(sdev)->rx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED) ||
		(sdev->remove != 0);
}

/*
 * Return nonzero when @sdev cannot safely service a TX burst:
 * NULL sub-device (TX_SUBDEV() may yield none), no underlying
 * ethdev, no TX burst function installed, or not started.
 *
 * NOTE(review): the remove flag is not tested here, unlike
 * fs_rx_unsafe() — looks intentional (matches upstream), but
 * confirm against failsafe_private.h semantics.
 */
static inline int
fs_tx_unsafe(struct sub_device *sdev)
{
	return (sdev == NULL) ||
		(ETH(sdev) == NULL) ||
		(ETH(sdev)->tx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED);
}

/*
 * Select the RX/TX burst implementations for the fail-safe port.
 *
 * The "safe" variants re-check each sub-device's usability on every
 * burst; the "fast" variants skip those checks (asserting them in
 * debug builds). Safe bursts are installed when @force_safe is set
 * or when any sub-device currently fails the corresponding
 * fs_*_unsafe() test; otherwise the fast variants are installed.
 * Pointers are only rewritten on an actual transition (logged via
 * DEBUG) to avoid needless stores on the hot path.
 *
 * @param dev
 *   Fail-safe ethdev whose burst function pointers are updated.
 * @param force_safe
 *   Nonzero to force the safe variants regardless of sub-device state.
 */
void
failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe)
{
	struct sub_device *sdev;
	uint8_t i;
	int need_safe;
	int safe_set;

	/* RX: safe bursts needed if forced or any sub-device is unsafe. */
	need_safe = force_safe;
	FOREACH_SUBDEV(sdev, i, dev)
		need_safe |= fs_rx_unsafe(sdev);
	safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
	if (need_safe && !safe_set) {
		DEBUG("Using safe RX bursts%s",
		      (force_safe ? " (forced)" : ""));
		dev->rx_pkt_burst = &failsafe_rx_burst;
	} else if (!need_safe && safe_set) {
		DEBUG("Using fast RX bursts");
		dev->rx_pkt_burst = &failsafe_rx_burst_fast;
	}
	/* TX: only the single active TX sub-device matters. */
	need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
	safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
	if (need_safe && !safe_set) {
		DEBUG("Using safe TX bursts%s",
		      (force_safe ? " (forced)" : ""));
		dev->tx_pkt_burst = &failsafe_tx_burst;
	} else if (!need_safe && safe_set) {
		DEBUG("Using fast TX bursts");
		dev->tx_pkt_burst = &failsafe_tx_burst_fast;
	}
	/*
	 * Write barrier: presumably publishes the updated burst-function
	 * pointers before the caller proceeds, so other lcores observe
	 * the new functions — confirm ordering contract with callers.
	 */
	rte_wmb();
}

/*
 * Safe RX burst: poll sub-devices round-robin, starting at the one
 * remembered in rxq->sdev, skipping any that fail fs_rx_unsafe().
 * Stops at the first sub-device returning packets, or after one full
 * cycle through the ring (sdev->next links wrap back to the start).
 * The advanced position is stored back in rxq->sdev so the next call
 * resumes after the sub-device that last delivered packets.
 *
 * @return number of packets received (0 if none, or all unsafe).
 */
uint16_t
failsafe_rx_burst(void *queue,
		  struct rte_mbuf **rx_pkts,
		  uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;

	rxq = queue;
	sdev = rxq->sdev;
	do {
		if (fs_rx_unsafe(sdev)) {
			/* Skip unusable sub-device; keep cycling. */
			nb_rx = 0;
			sdev = sdev->next;
			continue;
		}
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		/*
		 * FS_ATOMIC_P/FS_ATOMIC_V bracket the burst call —
		 * presumably take/release a per-queue reference on the
		 * sub-device to guard against concurrent removal; see
		 * failsafe_private.h for the exact protocol.
		 */
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->
			rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		sdev = sdev->next;
	} while (nb_rx == 0 && sdev != rxq->sdev);
	rxq->sdev = sdev;
	return nb_rx;
}

/*
 * Fast RX burst: identical round-robin to failsafe_rx_burst(), but
 * the per-sub-device safety check is only asserted (debug builds) —
 * failsafe_set_burst_fn() installs this variant only when every
 * sub-device is currently safe.
 */
uint16_t
failsafe_rx_burst_fast(void *queue,
		       struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;

	rxq = queue;
	sdev = rxq->sdev;
	do {
		RTE_ASSERT(!fs_rx_unsafe(sdev));
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		/* Reference the sub-device for the duration of the burst. */
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->
			rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		sdev = sdev->next;
	} while (nb_rx == 0 && sdev != rxq->sdev);
	rxq->sdev = sdev;
	return nb_rx;
}

/*
 * Safe TX burst: forward the whole burst to the single active TX
 * sub-device (resolved through the fail-safe port found via
 * txq->priv->data->port_id). Returns 0 without transmitting when
 * that sub-device is currently unsafe.
 *
 * @return number of packets actually transmitted.
 */
uint16_t
failsafe_tx_burst(void *queue,
		  struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]);
	if (unlikely(fs_tx_unsafe(sdev)))
		return 0;
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	/* Reference the sub-device across the burst (see RX path note). */
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}

/*
 * Fast TX burst: same as failsafe_tx_burst() but the safety check is
 * only asserted (debug builds); installed by failsafe_set_burst_fn()
 * only while the TX sub-device is known safe.
 */
uint16_t
failsafe_tx_burst_fast(void *queue,
		       struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]);
	RTE_ASSERT(!fs_tx_unsafe(sdev));
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	/* Reference the sub-device across the burst (see RX path note). */
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}