/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_atomic.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

/*
 * A sub-device is unsafe to poll for RX when it has no underlying ethdev,
 * no RX burst function, is not started, or is flagged for removal.
 */
static inline int
fs_rx_unsafe(struct sub_device *sdev)
{
	return (ETH(sdev) == NULL) ||
		(ETH(sdev)->rx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED) ||
		(sdev->remove != 0);
}

/*
 * A sub-device is unsafe to use for TX when there is no TX sub-device at
 * all, it has no underlying ethdev, no TX burst function, or is not started.
 */
static inline int
fs_tx_unsafe(struct sub_device *sdev)
{
	return (sdev == NULL) ||
		(ETH(sdev) == NULL) ||
		(ETH(sdev)->tx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED);
}

/*
 * Select the burst functions of the fail-safe device: use the checked
 * ("safe") variants whenever any RX sub-device or the TX sub-device is not
 * usable, or when explicitly forced, and the unchecked ("fast") variants
 * otherwise.
 */
void
set_burst_fn(struct rte_eth_dev *dev, int force_safe)
{
	struct sub_device *sdev;
	uint8_t i;
	int need_safe;
	int safe_set;

	need_safe = force_safe;
	FOREACH_SUBDEV(sdev, i, dev)
		need_safe |= fs_rx_unsafe(sdev);
	safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
	if (need_safe && !safe_set) {
		DEBUG("Using safe RX bursts%s",
		      (force_safe ? " (forced)" : ""));
		dev->rx_pkt_burst = &failsafe_rx_burst;
	} else if (!need_safe && safe_set) {
		DEBUG("Using fast RX bursts");
		dev->rx_pkt_burst = &failsafe_rx_burst_fast;
	}
	need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
	safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
	if (need_safe && !safe_set) {
		DEBUG("Using safe TX bursts%s",
		      (force_safe ? " (forced)" : ""));
		dev->tx_pkt_burst = &failsafe_tx_burst;
	} else if (!need_safe && safe_set) {
		DEBUG("Using fast TX bursts");
		dev->tx_pkt_burst = &failsafe_tx_burst_fast;
	}
	/* Make the burst function updates visible to other cores. */
	rte_wmb();
}

/*
 * Checked RX burst: poll the sub-devices round-robin, starting from the one
 * serviced last, skipping those that cannot be polled, until packets are
 * received or every sub-device has been tried once.
 */
uint16_t
failsafe_rx_burst(void *queue,
		  struct rte_mbuf **rx_pkts,
		  uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;

	rxq = queue;
	sdev = rxq->sdev;
	do {
		if (fs_rx_unsafe(sdev)) {
			/* Skip this sub-device and try the next one. */
			nb_rx = 0;
			sdev = sdev->next;
			continue;
		}
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		/* Hold a reference on the sub-device queue during the burst. */
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->
			rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		sdev = sdev->next;
	} while (nb_rx == 0 && sdev != rxq->sdev);
	rxq->sdev = sdev;
	return nb_rx;
}

/*
 * Unchecked RX burst: same round-robin scheme, but the sub-devices are
 * assumed to be pollable; this is only verified by RTE_ASSERT().
 */
uint16_t
failsafe_rx_burst_fast(void *queue,
		       struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;

	rxq = queue;
	sdev = rxq->sdev;
	do {
		RTE_ASSERT(!fs_rx_unsafe(sdev));
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->
			rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		sdev = sdev->next;
	} while (nb_rx == 0 && sdev != rxq->sdev);
	rxq->sdev = sdev;
	return nb_rx;
}

/*
 * Checked TX burst: forward the packets to the active TX sub-device, or
 * return 0 (leaving the packets to the caller) if it cannot be used.
 */
uint16_t
failsafe_tx_burst(void *queue,
		  struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(txq->priv->dev);
	if (unlikely(fs_tx_unsafe(sdev)))
		return 0;
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	/* Hold a reference on the sub-device queue during the burst. */
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}

/*
 * Unchecked TX burst: the TX sub-device is assumed usable; this is only
 * verified by RTE_ASSERT().
 */
uint16_t
failsafe_tx_burst_fast(void *queue,
		       struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(txq->priv->dev);
	RTE_ASSERT(!fs_tx_unsafe(sdev));
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}