/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _I40E_RXTX_VEC_COMMON_H_
#define _I40E_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "../common/rx.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"

/*
 * Stitch the mbufs produced by the vector Rx path back into multi-segment
 * packets, using split_flags to find segment boundaries. Complete packets
 * are compacted into rx_bufs and their count is returned; a trailing
 * partial packet is carried over in rxq->pkt_first_seg/pkt_last_seg.
 */
static inline uint16_t
reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len)
					end->data_len -= rxq->crc_len;
				else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for the next call */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

/* Return non-zero when hardware has written back the Tx descriptor at idx
 * (descriptor type field equals DESC_DONE).
 */
static inline int
i40e_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
{
	return (txq->i40e_tx_ring[idx].cmd_type_offset_bsz &
			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
}

static __rte_always_inline int
i40e_tx_free_bufs(struct ci_tx_queue *txq)
{
	return ci_tx_free_bufs(txq, i40e_tx_desc_done);
}

/* Free all mbufs still held in the Rx software ring and clear its entries. */
static inline void
_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	const unsigned mask = rxq->nb_rx_desc - 1;
	unsigned i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

/* Build the 64-bit mbuf template used to re-arm Rx descriptors in the
 * vector path: the rearm_data region of a default-initialized mbuf.
 */
static inline int
i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	rxq->rx_using_sse = 1;
	return 0;
}

static inline int
i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct i40e_rx_queue *rxq;
	uint16_t desc, i;
	bool first_queue;

	/* no QinQ support */
	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
		return -1;

	/**
	 * Vector mode is allowed only when the number of Rx queue
	 * descriptors is a power of 2.
	 */
	if (!dev->data->dev_started) {
		first_queue = true;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			if (!rxq)
				continue;
			desc = rxq->nb_rx_desc;
			if (first_queue)
				ad->rx_vec_allowed =
					rte_is_power_of_2(desc);
			else
				ad->rx_vec_allowed =
					ad->rx_vec_allowed ?
					rte_is_power_of_2(desc) :
					ad->rx_vec_allowed;
			first_queue = false;
		}
	} else {
		/* Only check the first queue's descriptor number */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			if (!rxq)
				continue;
			desc = rxq->nb_rx_desc;
			ad->rx_vec_allowed = rte_is_power_of_2(desc);
			break;
		}
	}

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}

#endif