/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _I40E_RXTX_VEC_COMMON_H_
#define _I40E_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "../common/rx.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"

/* Reassemble segments returned by the vector Rx path into complete
 * multi-segment packets; any trailing partial packet is carried over
 * in the queue state for the next burst.
 */
static inline uint16_t
reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len)
					end->data_len -= rxq->crc_len;
				else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

/* Free the mbufs of the tx_rs_thresh descriptors the hardware has
 * completed, as indicated by the DD bit on the threshold descriptor.
 */
static __rte_always_inline int
i40e_tx_free_bufs(struct ci_tx_queue *txq)
{
	struct ci_tx_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];

	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
		for (i = 0; i < n; i++) {
			free[i] = txep[i].mbuf;
			/* no need to reset txep[i].mbuf in vector path */
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, n);
		goto done;
	}

	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							     (void **)free,
							     nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

done:
	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}

/* Release all mbufs still held by the Rx software ring. */
static inline void
_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	const unsigned mask = rxq->nb_rx_desc - 1;
	unsigned i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

/* Build the 64-bit mbuf_initializer template used to rearm Rx descriptors. */
static inline int
i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	rxq->rx_using_sse = 1;
	return 0;
}

/* Check whether the device configuration allows the vector Rx path. */
static inline int
i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct i40e_rx_queue *rxq;
	uint16_t desc, i;
	bool first_queue;

	/* no QinQ support */
	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
		return -1;

	/**
	 * Vector mode is allowed only when number of Rx queue
	 * descriptors is a power of 2.
	 */
	if (!dev->data->dev_started) {
		first_queue = true;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			if (!rxq)
				continue;
			desc = rxq->nb_rx_desc;
			if (first_queue)
				ad->rx_vec_allowed =
					rte_is_power_of_2(desc);
			else
				ad->rx_vec_allowed =
					ad->rx_vec_allowed ?
					rte_is_power_of_2(desc) :
					ad->rx_vec_allowed;
			first_queue = false;
		}
	} else {
		/* Only check the first queue's descriptor number */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			if (!rxq)
				continue;
			desc = rxq->nb_rx_desc;
			ad->rx_vec_allowed = rte_is_power_of_2(desc);
			break;
		}
	}

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}

#endif /* _I40E_RXTX_VEC_COMMON_H_ */