/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_atomic.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

/*
 * A sub-device cannot service an RX burst if it has no underlying ethdev,
 * no RX burst function, is not started, or is flagged for removal.
 */
static inline int
fs_rx_unsafe(struct sub_device *sdev)
{
	return (ETH(sdev) == NULL) ||
		(ETH(sdev)->rx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED) ||
		(sdev->remove != 0);
}

/*
 * A sub-device cannot service a TX burst if it is missing, has no
 * underlying ethdev, no TX burst function, or is not started.
 */
static inline int
fs_tx_unsafe(struct sub_device *sdev)
{
	return (sdev == NULL) ||
		(ETH(sdev) == NULL) ||
		(ETH(sdev)->tx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED);
}

/*
 * Select the burst functions of the fail-safe port: use the safe variants
 * when any sub-device is currently unsafe (or when forced), and the fast
 * variants otherwise. The write barrier ensures the new function pointers
 * are visible before the caller proceeds.
 */
void
set_burst_fn(struct rte_eth_dev *dev, int force_safe)
{
	struct sub_device *sdev;
	uint8_t i;
	int need_safe;
	int safe_set;

	need_safe = force_safe;
	FOREACH_SUBDEV(sdev, i, dev)
		need_safe |= fs_rx_unsafe(sdev);
	safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
	if (need_safe && !safe_set) {
		DEBUG("Using safe RX bursts%s",
		      (force_safe ? " (forced)" : ""));
		dev->rx_pkt_burst = &failsafe_rx_burst;
	} else if (!need_safe && safe_set) {
		DEBUG("Using fast RX bursts");
		dev->rx_pkt_burst = &failsafe_rx_burst_fast;
	}
	need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
	safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
	if (need_safe && !safe_set) {
		DEBUG("Using safe TX bursts%s",
		      (force_safe ? " (forced)" : ""));
		dev->tx_pkt_burst = &failsafe_tx_burst;
	} else if (!need_safe && safe_set) {
		DEBUG("Using fast TX bursts");
		dev->tx_pkt_burst = &failsafe_tx_burst_fast;
	}
	rte_wmb();
}

/*
 * Safe RX burst: poll sub-devices in round-robin order, starting after the
 * last one that returned packets, skipping any sub-device that is currently
 * unsafe to use. Return at the first sub-device that yields packets.
 */
uint16_t
failsafe_rx_burst(void *queue,
		  struct rte_mbuf **rx_pkts,
		  uint16_t nb_pkts)
{
	struct fs_priv *priv;
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;
	uint8_t nb_polled, nb_subs;
	uint8_t i;

	rxq = queue;
	priv = rxq->priv;
	nb_subs = priv->subs_tail - priv->subs_head;
	nb_polled = 0;
	for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
		i++;
		if (i == priv->subs_tail)
			i = priv->subs_head;
		sdev = &priv->subs[i];
		if (unlikely(fs_rx_unsafe(sdev)))
			continue;
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->
			rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		if (nb_rx) {
			rxq->last_polled = i;
			return nb_rx;
		}
	}
	return 0;
}

/*
 * Fast RX burst: same round-robin polling, but every sub-device is assumed
 * safe; this is only checked by an assertion.
 */
uint16_t
failsafe_rx_burst_fast(void *queue,
		       struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts)
{
	struct fs_priv *priv;
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;
	uint8_t nb_polled, nb_subs;
	uint8_t i;

	rxq = queue;
	priv = rxq->priv;
	nb_subs = priv->subs_tail - priv->subs_head;
	nb_polled = 0;
	for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
		i++;
		if (i == priv->subs_tail)
			i = priv->subs_head;
		sdev = &priv->subs[i];
		RTE_ASSERT(!fs_rx_unsafe(sdev));
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->
			rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		if (nb_rx) {
			rxq->last_polled = i;
			return nb_rx;
		}
	}
	return 0;
}

/*
 * Safe TX burst: forward the burst to the preferred TX sub-device,
 * returning 0 if it is currently unsafe to use.
 */
uint16_t
failsafe_tx_burst(void *queue,
		  struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(txq->priv->dev);
	if (unlikely(fs_tx_unsafe(sdev)))
		return 0;
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}

/*
 * Fast TX burst: the preferred TX sub-device is assumed safe; this is only
 * checked by an assertion.
 */
uint16_t
failsafe_tx_burst_fast(void *queue,
		       struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(txq->priv->dev);
	RTE_ASSERT(!fs_tx_unsafe(sdev));
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}