/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_atomic.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

/* A sub-device is unsafe for RX if it has no ethdev, no RX burst
 * function, or is not started.
 */
static inline int
fs_rx_unsafe(struct sub_device *sdev)
{
        return (ETH(sdev) == NULL) ||
                (ETH(sdev)->rx_pkt_burst == NULL) ||
                (sdev->state != DEV_STARTED);
}

/* A sub-device is unsafe for TX if it is missing, has no ethdev,
 * no TX burst function, or is not started.
 */
static inline int
fs_tx_unsafe(struct sub_device *sdev)
{
        return (sdev == NULL) ||
                (ETH(sdev) == NULL) ||
                (ETH(sdev)->tx_pkt_burst == NULL) ||
                (sdev->state != DEV_STARTED);
}

/*
 * Select the RX and TX burst functions of the fail-safe device.
 *
 * The safe RX variant is installed when any sub-device is RX-unsafe or
 * when force_safe is set; the safe TX variant when the current TX
 * sub-device is TX-unsafe or when force_safe is set. Otherwise the fast
 * variants are used. A write barrier makes the new function pointers
 * visible before returning.
 */
void
set_burst_fn(struct rte_eth_dev *dev, int force_safe)
{
        struct sub_device *sdev;
        uint8_t i;
        int need_safe;
        int safe_set;

        need_safe = force_safe;
        FOREACH_SUBDEV(sdev, i, dev)
                need_safe |= fs_rx_unsafe(sdev);
        safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
        if (need_safe && !safe_set) {
                DEBUG("Using safe RX bursts%s",
                      (force_safe ? " (forced)" : ""));
                dev->rx_pkt_burst = &failsafe_rx_burst;
        } else if (!need_safe && safe_set) {
                DEBUG("Using fast RX bursts");
                dev->rx_pkt_burst = &failsafe_rx_burst_fast;
        }
        need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
        safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
        if (need_safe && !safe_set) {
                DEBUG("Using safe TX bursts%s",
                      (force_safe ? " (forced)" : ""));
                dev->tx_pkt_burst = &failsafe_tx_burst;
        } else if (!need_safe && safe_set) {
                DEBUG("Using fast TX bursts");
                dev->tx_pkt_burst = &failsafe_tx_burst_fast;
        }
        rte_wmb();
}

/*
 * Safe RX burst: poll the sub-devices round-robin, starting after the
 * one polled last, skip any sub-device that is currently unsafe, and
 * return the first non-empty burst.
 */
uint16_t
failsafe_rx_burst(void *queue,
                  struct rte_mbuf **rx_pkts,
                  uint16_t nb_pkts)
{
        struct fs_priv *priv;
        struct sub_device *sdev;
        struct rxq *rxq;
        void *sub_rxq;
        uint16_t nb_rx;
        uint8_t nb_polled, nb_subs;
        uint8_t i;

        rxq = queue;
        priv = rxq->priv;
        nb_subs = priv->subs_tail - priv->subs_head;
        nb_polled = 0;
        for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
                i++;
                if (i == priv->subs_tail)
                        i = priv->subs_head;
                sdev = &priv->subs[i];
                if (unlikely(fs_rx_unsafe(sdev)))
                        continue;
                sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
                FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
                nb_rx = ETH(sdev)->
                        rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
                FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
                if (nb_rx) {
                        rxq->last_polled = i;
                        return nb_rx;
                }
        }
        return 0;
}

/*
 * Fast RX burst: same round-robin polling as failsafe_rx_burst(), but
 * all sub-devices are assumed safe (checked only by an assertion).
 */
uint16_t
failsafe_rx_burst_fast(void *queue,
                       struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts)
{
        struct fs_priv *priv;
        struct sub_device *sdev;
        struct rxq *rxq;
        void *sub_rxq;
        uint16_t nb_rx;
        uint8_t nb_polled, nb_subs;
        uint8_t i;

        rxq = queue;
        priv = rxq->priv;
        nb_subs = priv->subs_tail - priv->subs_head;
        nb_polled = 0;
        for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
                i++;
                if (i == priv->subs_tail)
                        i = priv->subs_head;
                sdev = &priv->subs[i];
                RTE_ASSERT(!fs_rx_unsafe(sdev));
                sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
                FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
                nb_rx = ETH(sdev)->
                        rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
                FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
                if (nb_rx) {
                        rxq->last_polled = i;
                        return nb_rx;
                }
        }
        return 0;
}

/*
 * Safe TX burst: forward the burst to the active TX sub-device, or
 * return 0 (no packets sent) if that sub-device is currently unsafe.
 */
uint16_t
failsafe_tx_burst(void *queue,
                  struct rte_mbuf **tx_pkts,
                  uint16_t nb_pkts)
{
        struct sub_device *sdev;
        struct txq *txq;
        void *sub_txq;
        uint16_t nb_tx;

        txq = queue;
        sdev = TX_SUBDEV(txq->priv->dev);
        if (unlikely(fs_tx_unsafe(sdev)))
                return 0;
        sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
        FS_ATOMIC_P(txq->refcnt[sdev->sid]);
        nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
        FS_ATOMIC_V(txq->refcnt[sdev->sid]);
        return nb_tx;
}

/*
 * Fast TX burst: same as failsafe_tx_burst(), but the TX sub-device is
 * assumed safe (checked only by an assertion).
 */
uint16_t
failsafe_tx_burst_fast(void *queue,
                       struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts)
{
        struct sub_device *sdev;
        struct txq *txq;
        void *sub_txq;
        uint16_t nb_tx;

        txq = queue;
        sdev = TX_SUBDEV(txq->priv->dev);
        RTE_ASSERT(!fs_tx_unsafe(sdev));
        sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
        FS_ATOMIC_P(txq->refcnt[sdev->sid]);
        nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
        FS_ATOMIC_V(txq->refcnt[sdev->sid]);
        return nb_tx;
}