1 /*- 2 * BSD LICENSE 3 * 4 * Copyright 2017 6WIND S.A. 5 * Copyright 2017 Mellanox. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of 6WIND S.A. nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <rte_atomic.h> 35 #include <rte_debug.h> 36 #include <rte_mbuf.h> 37 #include <rte_ethdev_driver.h> 38 39 #include "failsafe_private.h" 40 41 static inline int 42 fs_rx_unsafe(struct sub_device *sdev) 43 { 44 return (ETH(sdev) == NULL) || 45 (ETH(sdev)->rx_pkt_burst == NULL) || 46 (sdev->state != DEV_STARTED) || 47 (sdev->remove != 0); 48 } 49 50 static inline int 51 fs_tx_unsafe(struct sub_device *sdev) 52 { 53 return (sdev == NULL) || 54 (ETH(sdev) == NULL) || 55 (ETH(sdev)->tx_pkt_burst == NULL) || 56 (sdev->state != DEV_STARTED); 57 } 58 59 void 60 set_burst_fn(struct rte_eth_dev *dev, int force_safe) 61 { 62 struct sub_device *sdev; 63 uint8_t i; 64 int need_safe; 65 int safe_set; 66 67 need_safe = force_safe; 68 FOREACH_SUBDEV(sdev, i, dev) 69 need_safe |= fs_rx_unsafe(sdev); 70 safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst); 71 if (need_safe && !safe_set) { 72 DEBUG("Using safe RX bursts%s", 73 (force_safe ? " (forced)" : "")); 74 dev->rx_pkt_burst = &failsafe_rx_burst; 75 } else if (!need_safe && safe_set) { 76 DEBUG("Using fast RX bursts"); 77 dev->rx_pkt_burst = &failsafe_rx_burst_fast; 78 } 79 need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev)); 80 safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst); 81 if (need_safe && !safe_set) { 82 DEBUG("Using safe TX bursts%s", 83 (force_safe ? " (forced)" : "")); 84 dev->tx_pkt_burst = &failsafe_tx_burst; 85 } else if (!need_safe && safe_set) { 86 DEBUG("Using fast TX bursts"); 87 dev->tx_pkt_burst = &failsafe_tx_burst_fast; 88 } 89 rte_wmb(); 90 } 91 92 uint16_t 93 failsafe_rx_burst(void *queue, 94 struct rte_mbuf **rx_pkts, 95 uint16_t nb_pkts) 96 { 97 struct sub_device *sdev; 98 struct rxq *rxq; 99 void *sub_rxq; 100 uint16_t nb_rx; 101 102 rxq = queue; 103 sdev = rxq->sdev; 104 do { 105 if (fs_rx_unsafe(sdev)) { 106 nb_rx = 0; 107 sdev = sdev->next; 108 continue; 109 } 110 sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid]; 111 FS_ATOMIC_P(rxq->refcnt[sdev->sid]); 112 nb_rx = ETH(sdev)-> 113 rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts); 114 FS_ATOMIC_V(rxq->refcnt[sdev->sid]); 115 sdev = sdev->next; 116 } while (nb_rx == 0 && sdev != rxq->sdev); 117 rxq->sdev = sdev; 118 return nb_rx; 119 } 120 121 uint16_t 122 failsafe_rx_burst_fast(void *queue, 123 struct rte_mbuf **rx_pkts, 124 uint16_t nb_pkts) 125 { 126 struct sub_device *sdev; 127 struct rxq *rxq; 128 void *sub_rxq; 129 uint16_t nb_rx; 130 131 rxq = queue; 132 sdev = rxq->sdev; 133 do { 134 RTE_ASSERT(!fs_rx_unsafe(sdev)); 135 sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid]; 136 FS_ATOMIC_P(rxq->refcnt[sdev->sid]); 137 nb_rx = ETH(sdev)-> 138 rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts); 139 FS_ATOMIC_V(rxq->refcnt[sdev->sid]); 140 sdev = sdev->next; 141 } while (nb_rx == 0 && sdev != rxq->sdev); 142 rxq->sdev = sdev; 143 return nb_rx; 144 } 145 146 uint16_t 147 failsafe_tx_burst(void *queue, 148 struct rte_mbuf **tx_pkts, 149 uint16_t nb_pkts) 150 { 151 struct sub_device *sdev; 152 struct txq *txq; 153 void *sub_txq; 154 uint16_t nb_tx; 155 156 txq = queue; 157 sdev = TX_SUBDEV(txq->priv->dev); 158 if (unlikely(fs_tx_unsafe(sdev))) 159 return 0; 160 sub_txq = ETH(sdev)->data->tx_queues[txq->qid]; 161 FS_ATOMIC_P(txq->refcnt[sdev->sid]); 162 nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts); 163 FS_ATOMIC_V(txq->refcnt[sdev->sid]); 164 return nb_tx; 165 } 166 167 uint16_t 168 failsafe_tx_burst_fast(void *queue, 169 struct rte_mbuf **tx_pkts, 170 uint16_t nb_pkts) 171 { 172 struct sub_device *sdev; 173 struct txq *txq; 174 void *sub_txq; 175 uint16_t nb_tx; 176 177 txq = queue; 178 sdev = TX_SUBDEV(txq->priv->dev); 179 RTE_ASSERT(!fs_tx_unsafe(sdev)); 180 sub_txq = ETH(sdev)->data->tx_queues[txq->qid]; 181 FS_ATOMIC_P(txq->refcnt[sdev->sid]); 182 nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts); 183 FS_ATOMIC_V(txq->refcnt[sdev->sid]); 184 return nb_tx; 185 } 186