/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <ethdev_driver.h>
#include <rte_net.h>
#include <rte_vect.h>

#include "ice_rxtx.h"
#include "ice_rxtx_vec_common.h"

#define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
		RTE_MBUF_F_TX_L4_MASK | \
		RTE_MBUF_F_TX_TCP_SEG | \
		RTE_MBUF_F_TX_UDP_SEG | \
		RTE_MBUF_F_TX_OUTER_IP_CKSUM)

/**
 * The mbuf dynamic field pointer for protocol extraction metadata.
 */
#define ICE_DYNF_PROTO_XTR_METADATA(m, n) \
	RTE_MBUF_DYNFIELD((m), (n), uint32_t *)

static int
ice_monitor_callback(const uint64_t value,
		const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
{
	const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
	/*
	 * we expect the DD bit to be set to 1 if this descriptor was already
	 * written to.
	 */
	return (value & m) == m ? -1 : 0;
}

int
ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
	volatile union ice_rx_flex_desc *rxdp;
	struct ice_rx_queue *rxq = rx_queue;
	uint16_t desc;

	desc = rxq->rx_tail;
	rxdp = &rxq->rx_ring[desc];
	/* watch for changes in status bit */
	pmc->addr = &rxdp->wb.status_error0;

	/* comparison callback */
	pmc->fn = ice_monitor_callback;

	/* register is 16-bit */
	pmc->size = sizeof(uint16_t);

	return 0;
}

static inline uint8_t
ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
{
	static uint8_t rxdid_map[] = {
		[PROTO_XTR_NONE]      = ICE_RXDID_COMMS_OVS,
		[PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
		[PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
		[PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
		[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
		[PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
		[PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
	};

	return xtr_type < RTE_DIM(rxdid_map) ?
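	/* Out-of-range extraction types fall back to the OVS descriptor ID. */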
72 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS; 73 } 74 75 static inline void 76 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq, 77 struct rte_mbuf *mb, 78 volatile union ice_rx_flex_desc *rxdp) 79 { 80 volatile struct ice_32b_rx_flex_desc_comms *desc = 81 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp; 82 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0); 83 84 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 85 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 86 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 87 } 88 89 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 90 if (desc->flow_id != 0xFFFFFFFF) { 91 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 92 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 93 } 94 #endif 95 } 96 97 static inline void 98 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq, 99 struct rte_mbuf *mb, 100 volatile union ice_rx_flex_desc *rxdp) 101 { 102 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc = 103 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp; 104 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 105 uint16_t stat_err; 106 #endif 107 108 if (desc->flow_id != 0xFFFFFFFF) { 109 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 110 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 111 } 112 113 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 114 stat_err = rte_le_to_cpu_16(desc->status_error0); 115 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 116 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 117 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 118 } 119 #endif 120 } 121 122 static inline void 123 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq, 124 struct rte_mbuf *mb, 125 volatile union ice_rx_flex_desc *rxdp) 126 { 127 volatile struct ice_32b_rx_flex_desc_comms *desc = 128 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp; 129 uint16_t stat_err; 130 131 stat_err = rte_le_to_cpu_16(desc->status_error0); 132 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 133 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 134 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 135 } 136 137 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 138 if (desc->flow_id != 0xFFFFFFFF) { 139 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 140 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 141 } 142 143 if (rxq->xtr_ol_flag) { 144 uint32_t metadata = 0; 145 146 stat_err = rte_le_to_cpu_16(desc->status_error1); 147 148 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S)) 149 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0); 150 151 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)) 152 metadata |= 153 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16; 154 155 if (metadata) { 156 mb->ol_flags |= rxq->xtr_ol_flag; 157 158 *ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata; 159 } 160 } 161 #else 162 RTE_SET_USED(rxq); 163 #endif 164 } 165 166 static inline void 167 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq, 168 struct rte_mbuf *mb, 169 volatile union ice_rx_flex_desc *rxdp) 170 { 171 volatile struct ice_32b_rx_flex_desc_comms *desc = 172 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp; 173 uint16_t stat_err; 174 175 stat_err = rte_le_to_cpu_16(desc->status_error0); 176 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 177 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 178 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 179 } 180 181 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 182 if 
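	/* A flow_id other than the 0xFFFFFFFF sentinel is treated as a flow
	 * director match, so the reported rule ID is stored in the mbuf.
	 */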
(desc->flow_id != 0xFFFFFFFF) { 183 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 184 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 185 } 186 187 if (rxq->xtr_ol_flag) { 188 uint32_t metadata = 0; 189 190 if (desc->flex_ts.flex.aux0 != 0xFFFF) 191 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0); 192 else if (desc->flex_ts.flex.aux1 != 0xFFFF) 193 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1); 194 195 if (metadata) { 196 mb->ol_flags |= rxq->xtr_ol_flag; 197 198 *ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata; 199 } 200 } 201 #else 202 RTE_SET_USED(rxq); 203 #endif 204 } 205 206 static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = { 207 [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 208 [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 209 [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 210 [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 211 [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 212 [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2, 213 [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic, 214 [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs, 215 }; 216 217 void 218 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid) 219 { 220 rxq->rxdid = rxdid; 221 222 switch (rxdid) { 223 case ICE_RXDID_COMMS_AUX_VLAN: 224 case ICE_RXDID_COMMS_AUX_IPV4: 225 case ICE_RXDID_COMMS_AUX_IPV6: 226 case ICE_RXDID_COMMS_AUX_IPV6_FLOW: 227 case ICE_RXDID_COMMS_AUX_TCP: 228 case ICE_RXDID_COMMS_AUX_IP_OFFSET: 229 break; 230 case ICE_RXDID_COMMS_GENERIC: 231 /* fallthrough */ 232 case ICE_RXDID_COMMS_OVS: 233 break; 234 235 default: 236 /* update this according to the RXDID for PROTO_XTR_NONE */ 237 rxq->rxdid = ICE_RXDID_COMMS_OVS; 238 break; 239 } 240 241 if (rxq->xtr_field_offs == -1) 242 rxq->xtr_ol_flag = 0; 243 } 244 245 static int 246 ice_program_hw_rx_queue(struct ice_rx_queue *rxq) 247 { 248 struct ice_vsi *vsi = rxq->vsi; 249 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 250 struct ice_pf *pf = ICE_VSI_TO_PF(vsi); 251 struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data; 252 struct ice_rlan_ctx rx_ctx; 253 uint16_t buf_size; 254 uint32_t rxdid = ICE_RXDID_COMMS_OVS; 255 uint32_t regval; 256 struct ice_adapter *ad = rxq->vsi->adapter; 257 uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD; 258 int err; 259 260 /* Set buffer size as the head split is disabled. 
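	 * The data buffer length is derived from the mempool data room minus
	 * RTE_PKTMBUF_HEADROOM, rounded down to the granularity implied by
	 * ICE_RLAN_CTX_DBUF_S and capped at ICE_RX_MAX_DATA_BUF_SIZE.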
*/ 261 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - 262 RTE_PKTMBUF_HEADROOM); 263 rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); 264 rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE); 265 rxq->max_pkt_len = 266 RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len, 267 frame_size); 268 269 if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN || 270 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) { 271 PMD_DRV_LOG(ERR, "maximum packet length must " 272 "be larger than %u and smaller than %u", 273 (uint32_t)RTE_ETHER_MIN_LEN, 274 (uint32_t)ICE_FRAME_SIZE_MAX); 275 return -EINVAL; 276 } 277 278 if (!rxq->ts_enable && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 279 /* Register mbuf field and flag for Rx timestamp */ 280 err = rte_mbuf_dyn_rx_timestamp_register( 281 &ice_timestamp_dynfield_offset, 282 &ice_timestamp_dynflag); 283 if (err) { 284 PMD_DRV_LOG(ERR, 285 "Cannot register mbuf field/flag for timestamp"); 286 return -EINVAL; 287 } 288 rxq->ts_enable = true; 289 } 290 291 memset(&rx_ctx, 0, sizeof(rx_ctx)); 292 293 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 294 uint32_t proto_hdr; 295 proto_hdr = rxq->rxseg[0].proto_hdr; 296 297 if (proto_hdr == RTE_PTYPE_UNKNOWN) { 298 PMD_DRV_LOG(ERR, "Buffer split protocol must be configured"); 299 return -EINVAL; 300 } 301 302 switch (proto_hdr & RTE_PTYPE_L4_MASK) { 303 case RTE_PTYPE_L4_TCP: 304 case RTE_PTYPE_L4_UDP: 305 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 306 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP; 307 goto set_hsplit_finish; 308 case RTE_PTYPE_L4_SCTP: 309 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 310 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP; 311 goto set_hsplit_finish; 312 } 313 314 switch (proto_hdr & RTE_PTYPE_L3_MASK) { 315 case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: 316 case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: 317 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 318 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP; 319 goto set_hsplit_finish; 320 } 321 322 switch (proto_hdr & RTE_PTYPE_L2_MASK) { 323 case RTE_PTYPE_L2_ETHER: 324 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 325 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2; 326 rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_L2; 327 goto set_hsplit_finish; 328 } 329 330 switch (proto_hdr & RTE_PTYPE_INNER_L4_MASK) { 331 case RTE_PTYPE_INNER_L4_TCP: 332 case RTE_PTYPE_INNER_L4_UDP: 333 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 334 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP; 335 goto set_hsplit_finish; 336 case RTE_PTYPE_INNER_L4_SCTP: 337 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 338 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP; 339 goto set_hsplit_finish; 340 } 341 342 switch (proto_hdr & RTE_PTYPE_INNER_L3_MASK) { 343 case RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: 344 case RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: 345 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 346 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP; 347 goto set_hsplit_finish; 348 } 349 350 switch (proto_hdr & RTE_PTYPE_INNER_L2_MASK) { 351 case RTE_PTYPE_INNER_L2_ETHER: 352 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 353 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2; 354 goto set_hsplit_finish; 355 } 356 357 switch (proto_hdr & RTE_PTYPE_TUNNEL_MASK) { 358 case RTE_PTYPE_TUNNEL_GRENAT: 359 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 360 rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS; 361 goto set_hsplit_finish; 362 } 363 364 PMD_DRV_LOG(ERR, "Buffer split protocol is not supported"); 365 return -EINVAL; 366 367 set_hsplit_finish: 368 rxq->rx_hdr_len = 
ICE_RX_HDR_BUF_SIZE; 369 } else { 370 rxq->rx_hdr_len = 0; 371 rx_ctx.dtype = 0; /* No Protocol Based Buffer Split mode */ 372 } 373 374 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 375 rx_ctx.qlen = rxq->nb_rx_desc; 376 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; 377 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; 378 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 379 rx_ctx.dsize = 1; /* 32B descriptors */ 380 #endif 381 rx_ctx.rxmax = rxq->max_pkt_len; 382 /* TPH: Transaction Layer Packet (TLP) processing hints */ 383 rx_ctx.tphrdesc_ena = 1; 384 rx_ctx.tphwdesc_ena = 1; 385 rx_ctx.tphdata_ena = 1; 386 rx_ctx.tphhead_ena = 1; 387 /* Low Receive Queue Threshold defined in 64 descriptors units. 388 * When the number of free descriptors goes below the lrxqthresh, 389 * an immediate interrupt is triggered. 390 */ 391 rx_ctx.lrxqthresh = 2; 392 /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/ 393 rx_ctx.l2tsel = 1; 394 rx_ctx.showiv = 0; 395 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; 396 397 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr); 398 399 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u", 400 rxq->port_id, rxq->queue_id, rxdid); 401 402 if (!(pf->supported_rxdid & BIT(rxdid))) { 403 PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)", 404 rxdid); 405 return -EINVAL; 406 } 407 408 rxq->rxdid = rxdid; 409 410 /* Enable Flexible Descriptors in the queue context which 411 * allows this driver to select a specific receive descriptor format 412 */ 413 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 414 QRXFLXP_CNTXT_RXDID_IDX_M; 415 416 /* increasing context priority to pick up profile ID; 417 * default is 0x01; setting to 0x03 to ensure profile 418 * is programming if prev context is of same priority 419 */ 420 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 421 QRXFLXP_CNTXT_RXDID_PRIO_M; 422 423 if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) 424 regval |= QRXFLXP_CNTXT_TS_M; 425 426 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); 427 428 err = ice_clear_rxq_ctx(hw, rxq->reg_idx); 429 if (err) { 430 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context", 431 rxq->queue_id); 432 return -EINVAL; 433 } 434 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx); 435 if (err) { 436 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context", 437 rxq->queue_id); 438 return -EINVAL; 439 } 440 441 /* Check if scattered RX needs to be used. 
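	 * If the maximum frame size does not fit into one data buffer, packets
	 * will span several descriptors and the scattered Rx path is required.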
 */
	if (frame_size > buf_size)
		dev_data->scattered_rx = 1;

	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);

	/* Init the Rx tail register */
	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return 0;
}

/* Allocate mbufs for all descriptors in rx queue */
static int
ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
{
	struct ice_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		volatile union ice_rx_flex_desc *rxd;
		rxd = &rxq->rx_ring[i];
		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);

		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
			rte_mbuf_refcnt_set(mbuf, 1);
			mbuf->next = NULL;
			rxd->read.hdr_addr = 0;
			rxd->read.pkt_addr = dma_addr;
		} else {
			struct rte_mbuf *mbuf_pay;
			mbuf_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
			if (unlikely(!mbuf_pay)) {
				rte_pktmbuf_free(mbuf);
				PMD_DRV_LOG(ERR, "Failed to allocate payload mbuf for RX");
				return -ENOMEM;
			}

			mbuf_pay->next = NULL;
			mbuf_pay->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf_pay->nb_segs = 1;
			mbuf_pay->port = rxq->port_id;
			mbuf->next = mbuf_pay;

			rxd->read.hdr_addr = dma_addr;
			/* The LS bit should be set to zero regardless of
			 * buffer split enablement.
			 */
			rxd->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf_pay));
		}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif
		rxe[i].mbuf = mbuf;
	}

	return 0;
}

/* Free all mbufs for descriptors in rx queue */
static void
_ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
{
	uint16_t i;

	if (!rxq || !rxq->sw_ring) {
		PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
		return;
	}

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i].mbuf) {
			rte_pktmbuf_free(rxq->sw_ring[i].mbuf);
			rxq->sw_ring[i].mbuf = NULL;
		}
	}
	if (rxq->rx_nb_avail == 0)
		return;
	for (i = 0; i < rxq->rx_nb_avail; i++)
		rte_pktmbuf_free(rxq->rx_stage[rxq->rx_next_avail + i]);

	rxq->rx_nb_avail = 0;
}

/* Turn a Rx queue on or off.
 * @q_idx: queue index in pf scope
 * @on: turn on or off the queue
 */
static int
ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
{
	uint32_t reg;
	uint16_t j;

	/* QRX_CTRL = QRX_ENA */
	reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));

	if (on) {
		if (reg & QRX_CTRL_QENA_STAT_M)
			return 0; /* Already on, skip */
		reg |= QRX_CTRL_QENA_REQ_M;
	} else {
		if (!(reg & QRX_CTRL_QENA_STAT_M))
			return 0; /* Already off, skip */
		reg &= ~QRX_CTRL_QENA_REQ_M;
	}

	/* Write the register */
	ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
	/* Check the result. QENA_STAT is expected to follow QENA_REQ
	 * within no more than 10 us.
567 * TODO: need to change the wait counter later 568 */ 569 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) { 570 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US); 571 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx)); 572 if (on) { 573 if ((reg & QRX_CTRL_QENA_REQ_M) && 574 (reg & QRX_CTRL_QENA_STAT_M)) 575 break; 576 } else { 577 if (!(reg & QRX_CTRL_QENA_REQ_M) && 578 !(reg & QRX_CTRL_QENA_STAT_M)) 579 break; 580 } 581 } 582 583 /* Check if it is timeout */ 584 if (j >= ICE_CHK_Q_ENA_COUNT) { 585 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]", 586 (on ? "enable" : "disable"), q_idx); 587 return -ETIMEDOUT; 588 } 589 590 return 0; 591 } 592 593 static inline int 594 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq) 595 { 596 int ret = 0; 597 598 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) { 599 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " 600 "rxq->rx_free_thresh=%d, " 601 "ICE_RX_MAX_BURST=%d", 602 rxq->rx_free_thresh, ICE_RX_MAX_BURST); 603 ret = -EINVAL; 604 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) { 605 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " 606 "rxq->rx_free_thresh=%d, " 607 "rxq->nb_rx_desc=%d", 608 rxq->rx_free_thresh, rxq->nb_rx_desc); 609 ret = -EINVAL; 610 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) { 611 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " 612 "rxq->nb_rx_desc=%d, " 613 "rxq->rx_free_thresh=%d", 614 rxq->nb_rx_desc, rxq->rx_free_thresh); 615 ret = -EINVAL; 616 } 617 618 return ret; 619 } 620 621 /* reset fields in ice_rx_queue back to default */ 622 static void 623 ice_reset_rx_queue(struct ice_rx_queue *rxq) 624 { 625 unsigned int i; 626 uint16_t len; 627 628 if (!rxq) { 629 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); 630 return; 631 } 632 633 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST); 634 635 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++) 636 ((volatile char *)rxq->rx_ring)[i] = 0; 637 638 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); 639 for (i = 0; i < ICE_RX_MAX_BURST; ++i) 640 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf; 641 642 rxq->rx_nb_avail = 0; 643 rxq->rx_next_avail = 0; 644 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); 645 646 rxq->rx_tail = 0; 647 rxq->nb_rx_hold = 0; 648 rxq->pkt_first_seg = NULL; 649 rxq->pkt_last_seg = NULL; 650 651 rxq->rxrearm_start = 0; 652 rxq->rxrearm_nb = 0; 653 } 654 655 int 656 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 657 { 658 struct ice_rx_queue *rxq; 659 int err; 660 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 661 662 PMD_INIT_FUNC_TRACE(); 663 664 if (rx_queue_id >= dev->data->nb_rx_queues) { 665 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u", 666 rx_queue_id, dev->data->nb_rx_queues); 667 return -EINVAL; 668 } 669 670 rxq = dev->data->rx_queues[rx_queue_id]; 671 if (!rxq || !rxq->q_set) { 672 PMD_DRV_LOG(ERR, "RX queue %u not available or setup", 673 rx_queue_id); 674 return -EINVAL; 675 } 676 677 if (dev->data->rx_queue_state[rx_queue_id] == 678 RTE_ETH_QUEUE_STATE_STARTED) 679 return 0; 680 681 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) 682 rxq->ts_enable = true; 683 err = ice_program_hw_rx_queue(rxq); 684 if (err) { 685 PMD_DRV_LOG(ERR, "fail to program RX queue %u", 686 rx_queue_id); 687 return -EIO; 688 } 689 690 err = ice_alloc_rx_queue_mbufs(rxq); 691 if (err) { 692 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); 693 return -ENOMEM; 694 } 695 696 /* Init the RX tail register. 
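	 * Writing the last descriptor index makes the freshly programmed ring
	 * available to hardware.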
*/ 697 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 698 699 err = ice_switch_rx_queue(hw, rxq->reg_idx, true); 700 if (err) { 701 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", 702 rx_queue_id); 703 704 rxq->rx_rel_mbufs(rxq); 705 ice_reset_rx_queue(rxq); 706 return -EINVAL; 707 } 708 709 dev->data->rx_queue_state[rx_queue_id] = 710 RTE_ETH_QUEUE_STATE_STARTED; 711 712 return 0; 713 } 714 715 int 716 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 717 { 718 struct ice_rx_queue *rxq; 719 int err; 720 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 721 722 if (rx_queue_id < dev->data->nb_rx_queues) { 723 rxq = dev->data->rx_queues[rx_queue_id]; 724 725 if (dev->data->rx_queue_state[rx_queue_id] == 726 RTE_ETH_QUEUE_STATE_STOPPED) 727 return 0; 728 729 err = ice_switch_rx_queue(hw, rxq->reg_idx, false); 730 if (err) { 731 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", 732 rx_queue_id); 733 return -EINVAL; 734 } 735 rxq->rx_rel_mbufs(rxq); 736 ice_reset_rx_queue(rxq); 737 dev->data->rx_queue_state[rx_queue_id] = 738 RTE_ETH_QUEUE_STATE_STOPPED; 739 } 740 741 return 0; 742 } 743 744 int 745 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 746 { 747 struct ci_tx_queue *txq; 748 int err; 749 struct ice_vsi *vsi; 750 struct ice_hw *hw; 751 struct ice_pf *pf; 752 struct ice_aqc_add_tx_qgrp *txq_elem; 753 struct ice_tlan_ctx tx_ctx; 754 int buf_len; 755 struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 756 757 PMD_INIT_FUNC_TRACE(); 758 759 if (tx_queue_id >= dev->data->nb_tx_queues) { 760 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u", 761 tx_queue_id, dev->data->nb_tx_queues); 762 return -EINVAL; 763 } 764 765 txq = dev->data->tx_queues[tx_queue_id]; 766 if (!txq || !txq->q_set) { 767 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup", 768 tx_queue_id); 769 return -EINVAL; 770 } 771 772 if (dev->data->tx_queue_state[tx_queue_id] == 773 RTE_ETH_QUEUE_STATE_STARTED) 774 return 0; 775 776 buf_len = ice_struct_size(txq_elem, txqs, 1); 777 txq_elem = ice_malloc(hw, buf_len); 778 if (!txq_elem) 779 return -ENOMEM; 780 781 vsi = txq->ice_vsi; 782 hw = ICE_VSI_TO_HW(vsi); 783 pf = ICE_VSI_TO_PF(vsi); 784 785 memset(&tx_ctx, 0, sizeof(tx_ctx)); 786 txq_elem->num_txqs = 1; 787 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx); 788 789 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 790 tx_ctx.qlen = txq->nb_tx_desc; 791 tx_ctx.pf_num = hw->pf_id; 792 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 793 tx_ctx.src_vsi = vsi->vsi_id; 794 tx_ctx.port_num = hw->port_info->lport; 795 tx_ctx.tso_ena = 1; /* tso enable */ 796 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */ 797 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */ 798 tx_ctx.tsyn_ena = 1; 799 800 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx, 801 ice_tlan_ctx_info); 802 803 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx); 804 805 /* Init the Tx tail register*/ 806 ICE_PCI_REG_WRITE(txq->qtx_tail, 0); 807 808 /* Fix me, we assume TC always 0 here */ 809 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1, 810 txq_elem, buf_len, NULL); 811 if (err) { 812 PMD_DRV_LOG(ERR, "Failed to add lan txq"); 813 rte_free(txq_elem); 814 return -EIO; 815 } 816 /* store the schedule node id */ 817 txq->q_teid = txq_elem->txqs[0].q_teid; 818 819 /* move the queue to correct position in hierarchy, if explicit hierarchy configured */ 820 if (pf->tm_conf.committed) 821 if 
(ice_tm_setup_txq_node(pf, hw, tx_queue_id, txq->q_teid) != 0) { 822 PMD_DRV_LOG(ERR, "Failed to set up txq traffic management node"); 823 rte_free(txq_elem); 824 return -EIO; 825 } 826 827 /* record what kind of descriptor cleanup we need on teardown */ 828 txq->vector_tx = ad->tx_vec_allowed; 829 txq->vector_sw_ring = ad->tx_use_avx512; 830 831 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 832 833 rte_free(txq_elem); 834 return 0; 835 } 836 837 static int 838 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq) 839 { 840 struct ice_vsi *vsi = rxq->vsi; 841 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 842 uint32_t rxdid = ICE_RXDID_LEGACY_1; 843 struct ice_rlan_ctx rx_ctx; 844 uint32_t regval; 845 int err; 846 847 rxq->rx_hdr_len = 0; 848 rxq->rx_buf_len = 1024; 849 850 memset(&rx_ctx, 0, sizeof(rx_ctx)); 851 852 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 853 rx_ctx.qlen = rxq->nb_rx_desc; 854 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; 855 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; 856 rx_ctx.dtype = 0; /* No Buffer Split mode */ 857 rx_ctx.dsize = 1; /* 32B descriptors */ 858 rx_ctx.rxmax = ICE_ETH_MAX_LEN; 859 /* TPH: Transaction Layer Packet (TLP) processing hints */ 860 rx_ctx.tphrdesc_ena = 1; 861 rx_ctx.tphwdesc_ena = 1; 862 rx_ctx.tphdata_ena = 1; 863 rx_ctx.tphhead_ena = 1; 864 /* Low Receive Queue Threshold defined in 64 descriptors units. 865 * When the number of free descriptors goes below the lrxqthresh, 866 * an immediate interrupt is triggered. 867 */ 868 rx_ctx.lrxqthresh = 2; 869 /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/ 870 rx_ctx.l2tsel = 1; 871 rx_ctx.showiv = 0; 872 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; 873 874 /* Enable Flexible Descriptors in the queue context which 875 * allows this driver to select a specific receive descriptor format 876 */ 877 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 878 QRXFLXP_CNTXT_RXDID_IDX_M; 879 880 /* increasing context priority to pick up profile ID; 881 * default is 0x01; setting to 0x03 to ensure profile 882 * is programming if prev context is of same priority 883 */ 884 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 885 QRXFLXP_CNTXT_RXDID_PRIO_M; 886 887 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); 888 889 err = ice_clear_rxq_ctx(hw, rxq->reg_idx); 890 if (err) { 891 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context", 892 rxq->queue_id); 893 return -EINVAL; 894 } 895 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx); 896 if (err) { 897 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context", 898 rxq->queue_id); 899 return -EINVAL; 900 } 901 902 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx); 903 904 /* Init the Rx tail register*/ 905 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 906 907 return 0; 908 } 909 910 int 911 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 912 { 913 struct ice_rx_queue *rxq; 914 int err; 915 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 916 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 917 918 PMD_INIT_FUNC_TRACE(); 919 920 rxq = pf->fdir.rxq; 921 if (!rxq || !rxq->q_set) { 922 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup", 923 rx_queue_id); 924 return -EINVAL; 925 } 926 927 err = ice_fdir_program_hw_rx_queue(rxq); 928 if (err) { 929 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u", 930 rx_queue_id); 931 return -EIO; 932 } 933 934 /* Init the RX tail register. 
*/ 935 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 936 937 err = ice_switch_rx_queue(hw, rxq->reg_idx, true); 938 if (err) { 939 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on", 940 rx_queue_id); 941 942 ice_reset_rx_queue(rxq); 943 return -EINVAL; 944 } 945 946 return 0; 947 } 948 949 int 950 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 951 { 952 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 953 struct ci_tx_queue *txq; 954 int err; 955 struct ice_vsi *vsi; 956 struct ice_hw *hw; 957 struct ice_aqc_add_tx_qgrp *txq_elem; 958 struct ice_tlan_ctx tx_ctx; 959 int buf_len; 960 961 PMD_INIT_FUNC_TRACE(); 962 963 txq = pf->fdir.txq; 964 if (!txq || !txq->q_set) { 965 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup", 966 tx_queue_id); 967 return -EINVAL; 968 } 969 970 buf_len = ice_struct_size(txq_elem, txqs, 1); 971 txq_elem = ice_malloc(hw, buf_len); 972 if (!txq_elem) 973 return -ENOMEM; 974 975 vsi = txq->ice_vsi; 976 hw = ICE_VSI_TO_HW(vsi); 977 978 memset(&tx_ctx, 0, sizeof(tx_ctx)); 979 txq_elem->num_txqs = 1; 980 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx); 981 982 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 983 tx_ctx.qlen = txq->nb_tx_desc; 984 tx_ctx.pf_num = hw->pf_id; 985 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 986 tx_ctx.src_vsi = vsi->vsi_id; 987 tx_ctx.port_num = hw->port_info->lport; 988 tx_ctx.tso_ena = 1; /* tso enable */ 989 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */ 990 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */ 991 992 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx, 993 ice_tlan_ctx_info); 994 995 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx); 996 997 /* Init the Tx tail register*/ 998 ICE_PCI_REG_WRITE(txq->qtx_tail, 0); 999 1000 /* Fix me, we assume TC always 0 here */ 1001 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1, 1002 txq_elem, buf_len, NULL); 1003 if (err) { 1004 PMD_DRV_LOG(ERR, "Failed to add FDIR txq"); 1005 rte_free(txq_elem); 1006 return -EIO; 1007 } 1008 /* store the schedule node id */ 1009 txq->q_teid = txq_elem->txqs[0].q_teid; 1010 1011 rte_free(txq_elem); 1012 return 0; 1013 } 1014 1015 static void 1016 ice_reset_tx_queue(struct ci_tx_queue *txq) 1017 { 1018 struct ci_tx_entry *txe; 1019 uint16_t i, prev, size; 1020 1021 if (!txq) { 1022 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); 1023 return; 1024 } 1025 1026 txe = txq->sw_ring; 1027 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc; 1028 for (i = 0; i < size; i++) 1029 ((volatile char *)txq->ice_tx_ring)[i] = 0; 1030 1031 prev = (uint16_t)(txq->nb_tx_desc - 1); 1032 for (i = 0; i < txq->nb_tx_desc; i++) { 1033 volatile struct ice_tx_desc *txd = &txq->ice_tx_ring[i]; 1034 1035 txd->cmd_type_offset_bsz = 1036 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE); 1037 txe[i].mbuf = NULL; 1038 txe[i].last_id = i; 1039 txe[prev].next_id = i; 1040 prev = i; 1041 } 1042 1043 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); 1044 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); 1045 1046 txq->tx_tail = 0; 1047 txq->nb_tx_used = 0; 1048 1049 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); 1050 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); 1051 } 1052 1053 int 1054 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 1055 { 1056 struct ci_tx_queue *txq; 1057 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1058 struct ice_pf *pf = 
ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1059 struct ice_vsi *vsi = pf->main_vsi; 1060 uint16_t q_ids[1]; 1061 uint32_t q_teids[1]; 1062 uint16_t q_handle = tx_queue_id; 1063 int status; 1064 1065 if (tx_queue_id >= dev->data->nb_tx_queues) { 1066 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u", 1067 tx_queue_id, dev->data->nb_tx_queues); 1068 return -EINVAL; 1069 } 1070 1071 txq = dev->data->tx_queues[tx_queue_id]; 1072 if (!txq) { 1073 PMD_DRV_LOG(ERR, "TX queue %u is not available", 1074 tx_queue_id); 1075 return -EINVAL; 1076 } 1077 1078 if (dev->data->tx_queue_state[tx_queue_id] == 1079 RTE_ETH_QUEUE_STATE_STOPPED) 1080 return 0; 1081 1082 q_ids[0] = txq->reg_idx; 1083 q_teids[0] = txq->q_teid; 1084 1085 /* Fix me, we assume TC always 0 here */ 1086 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle, 1087 q_ids, q_teids, ICE_NO_RESET, 0, NULL); 1088 if (status != ICE_SUCCESS) { 1089 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue"); 1090 return -EINVAL; 1091 } 1092 1093 ci_txq_release_all_mbufs(txq, false); 1094 ice_reset_tx_queue(txq); 1095 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 1096 1097 return 0; 1098 } 1099 1100 int 1101 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1102 { 1103 struct ice_rx_queue *rxq; 1104 int err; 1105 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1106 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1107 1108 rxq = pf->fdir.rxq; 1109 1110 err = ice_switch_rx_queue(hw, rxq->reg_idx, false); 1111 if (err) { 1112 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off", 1113 rx_queue_id); 1114 return -EINVAL; 1115 } 1116 rxq->rx_rel_mbufs(rxq); 1117 1118 return 0; 1119 } 1120 1121 int 1122 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 1123 { 1124 struct ci_tx_queue *txq; 1125 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1126 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1127 struct ice_vsi *vsi = pf->main_vsi; 1128 uint16_t q_ids[1]; 1129 uint32_t q_teids[1]; 1130 uint16_t q_handle = tx_queue_id; 1131 int status; 1132 1133 txq = pf->fdir.txq; 1134 if (!txq) { 1135 PMD_DRV_LOG(ERR, "TX queue %u is not available", 1136 tx_queue_id); 1137 return -EINVAL; 1138 } 1139 if (txq->qtx_tail == NULL) { 1140 PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id); 1141 return 0; 1142 } 1143 vsi = txq->ice_vsi; 1144 1145 q_ids[0] = txq->reg_idx; 1146 q_teids[0] = txq->q_teid; 1147 1148 /* Fix me, we assume TC always 0 here */ 1149 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle, 1150 q_ids, q_teids, ICE_NO_RESET, 0, NULL); 1151 if (status != ICE_SUCCESS) { 1152 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue"); 1153 return -EINVAL; 1154 } 1155 1156 ci_txq_release_all_mbufs(txq, false); 1157 txq->qtx_tail = NULL; 1158 1159 return 0; 1160 } 1161 1162 int 1163 ice_rx_queue_setup(struct rte_eth_dev *dev, 1164 uint16_t queue_idx, 1165 uint16_t nb_desc, 1166 unsigned int socket_id, 1167 const struct rte_eth_rxconf *rx_conf, 1168 struct rte_mempool *mp) 1169 { 1170 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1171 struct ice_adapter *ad = 1172 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 1173 struct ice_vsi *vsi = pf->main_vsi; 1174 struct ice_rx_queue *rxq; 1175 const struct rte_memzone *rz; 1176 uint32_t ring_size; 1177 uint16_t len; 1178 int use_def_burst_func = 1; 1179 uint64_t offloads; 1180 uint16_t n_seg = rx_conf->rx_nseg; 1181 uint16_t i; 
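	/* Validate the requested ring size first: nb_desc must be a multiple
	 * of ICE_ALIGN_RING_DESC and lie within
	 * [ICE_MIN_RING_DESC, ICE_MAX_RING_DESC].
	 *
	 * Illustrative caller-side sketch (hypothetical values) through the
	 * generic ethdev API:
	 *
	 *   struct rte_eth_rxconf rxconf = { .rx_free_thresh = 32 };
	 *   ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
	 *                                &rxconf, mbuf_pool);
	 */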
	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
	    nb_desc > ICE_MAX_RING_DESC ||
	    nb_desc < ICE_MIN_RING_DESC) {
		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
			     "invalid", nb_desc);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;

	if (mp)
		n_seg = 1;

	if (n_seg > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
		PMD_INIT_LOG(ERR, "port %u queue index %u split offload not configured",
			     dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	/* Free memory if needed */
	if (dev->data->rx_queues[queue_idx]) {
		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* Allocate the rx queue data structure */
	rxq = rte_zmalloc_socket(NULL,
				 sizeof(struct ice_rx_queue),
				 RTE_CACHE_LINE_SIZE,
				 socket_id);

	if (!rxq) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
			     "rx queue data structure");
		return -ENOMEM;
	}

	rxq->rxseg_nb = n_seg;
	if (n_seg > 1) {
		for (i = 0; i < n_seg; i++)
			memcpy(&rxq->rxseg[i], &rx_conf->rx_seg[i].split,
			       sizeof(struct rte_eth_rxseg_split));

		rxq->mp = rxq->rxseg[0].mp;
	} else {
		rxq->mp = mp;
	}

	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->offloads = offloads;

	rxq->reg_idx = vsi->base_queue + queue_idx;
	rxq->port_id = dev->data->port_id;
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	rxq->drop_en = rx_conf->rx_drop_en;
	rxq->vsi = vsi;
	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
	rxq->proto_xtr = pf->proto_xtr != NULL ?
			 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
	if (rxq->proto_xtr != PROTO_XTR_NONE &&
	    ad->devargs.xtr_flag_offs[rxq->proto_xtr] != 0xff)
		rxq->xtr_ol_flag = 1ULL << ad->devargs.xtr_flag_offs[rxq->proto_xtr];
	rxq->xtr_field_offs = ad->devargs.xtr_field_offs;

	/* Allocate the maximum number of RX ring hardware descriptors. */
	len = ICE_MAX_RING_DESC;

	/**
	 * Allocating a little more memory because vectorized/bulk_alloc Rx
	 * functions don't check boundaries each time.
	 */
	len += ICE_RX_MAX_BURST;

	ring_size = sizeof(union ice_rx_flex_desc) * len;
	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      ring_size, ICE_RING_BASE_ALIGN,
				      socket_id);
	if (!rz) {
		ice_rx_queue_release(rxq);
		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
		return -ENOMEM;
	}

	rxq->mz = rz;
	/* Zero all the descriptors in the ring. */
	memset(rz->addr, 0, ring_size);

	rxq->rx_ring_dma = rz->iova;
	rxq->rx_ring = rz->addr;

	/* always reserve more for bulk alloc */
	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);

	/* Allocate the software ring.
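	 * Like the descriptor ring, it is over-allocated by ICE_RX_MAX_BURST
	 * entries so that the bulk-allocation path can read past the nominal
	 * ring end without additional bounds checks.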
*/ 1285 rxq->sw_ring = rte_zmalloc_socket(NULL, 1286 sizeof(struct ice_rx_entry) * len, 1287 RTE_CACHE_LINE_SIZE, 1288 socket_id); 1289 if (!rxq->sw_ring) { 1290 ice_rx_queue_release(rxq); 1291 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring"); 1292 return -ENOMEM; 1293 } 1294 1295 ice_reset_rx_queue(rxq); 1296 rxq->q_set = true; 1297 dev->data->rx_queues[queue_idx] = rxq; 1298 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs; 1299 1300 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq); 1301 1302 if (!use_def_burst_func) { 1303 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " 1304 "satisfied. Rx Burst Bulk Alloc function will be " 1305 "used on port=%d, queue=%d.", 1306 rxq->port_id, rxq->queue_id); 1307 } else { 1308 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " 1309 "not satisfied, Scattered Rx is requested. " 1310 "on port=%d, queue=%d.", 1311 rxq->port_id, rxq->queue_id); 1312 ad->rx_bulk_alloc_allowed = false; 1313 } 1314 1315 return 0; 1316 } 1317 1318 void 1319 ice_rx_queue_release(void *rxq) 1320 { 1321 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq; 1322 1323 if (!q) { 1324 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); 1325 return; 1326 } 1327 1328 if (q->rx_rel_mbufs != NULL) 1329 q->rx_rel_mbufs(q); 1330 rte_free(q->sw_ring); 1331 rte_memzone_free(q->mz); 1332 rte_free(q); 1333 } 1334 1335 int 1336 ice_tx_queue_setup(struct rte_eth_dev *dev, 1337 uint16_t queue_idx, 1338 uint16_t nb_desc, 1339 unsigned int socket_id, 1340 const struct rte_eth_txconf *tx_conf) 1341 { 1342 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1343 struct ice_vsi *vsi = pf->main_vsi; 1344 struct ci_tx_queue *txq; 1345 const struct rte_memzone *tz; 1346 uint32_t ring_size; 1347 uint16_t tx_rs_thresh, tx_free_thresh; 1348 uint64_t offloads; 1349 1350 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1351 1352 if (nb_desc % ICE_ALIGN_RING_DESC != 0 || 1353 nb_desc > ICE_MAX_RING_DESC || 1354 nb_desc < ICE_MIN_RING_DESC) { 1355 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is " 1356 "invalid", nb_desc); 1357 return -EINVAL; 1358 } 1359 1360 /** 1361 * The following two parameters control the setting of the RS bit on 1362 * transmit descriptors. TX descriptors will have their RS bit set 1363 * after txq->tx_rs_thresh descriptors have been used. The TX 1364 * descriptor ring will be cleaned after txq->tx_free_thresh 1365 * descriptors are used or if the number of descriptors required to 1366 * transmit a packet is greater than the number of free TX descriptors. 1367 * 1368 * The following constraints must be satisfied: 1369 * - tx_rs_thresh must be greater than 0. 1370 * - tx_rs_thresh must be less than the size of the ring minus 2. 1371 * - tx_rs_thresh must be less than or equal to tx_free_thresh. 1372 * - tx_rs_thresh must be a divisor of the ring size. 1373 * - tx_free_thresh must be greater than 0. 1374 * - tx_free_thresh must be less than the size of the ring minus 3. 1375 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc. 1376 * 1377 * One descriptor in the TX ring is used as a sentinel to avoid a H/W 1378 * race condition, hence the maximum threshold constraints. When set 1379 * to zero use default values. 1380 */ 1381 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ? 1382 tx_conf->tx_free_thresh : 1383 ICE_DEFAULT_TX_FREE_THRESH); 1384 /* force tx_rs_thresh to adapt an aggressive tx_free_thresh */ 1385 tx_rs_thresh = 1386 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ? 
		nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
	if (tx_conf->tx_rs_thresh)
		tx_rs_thresh = tx_conf->tx_rs_thresh;
	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
			     "exceed nb_desc. (tx_rs_thresh=%u "
			     "tx_free_thresh=%u nb_desc=%u port=%d queue=%d)",
			     (unsigned int)tx_rs_thresh,
			     (unsigned int)tx_free_thresh,
			     (unsigned int)nb_desc,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}
	if (tx_rs_thresh >= (nb_desc - 2)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
			     "number of TX descriptors minus 2. "
			     "(tx_rs_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}
	if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
			     "number of TX descriptors minus 3. "
			     "(tx_free_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}
	if (tx_rs_thresh > tx_free_thresh) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
			     "equal to tx_free_thresh. (tx_free_thresh=%u"
			     " tx_rs_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_free_thresh,
			     (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}
	if ((nb_desc % tx_rs_thresh) != 0) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
			     "number of TX descriptors. (tx_rs_thresh=%u"
			     " port=%d queue=%d)",
			     (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}
	if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
			     "tx_rs_thresh is greater than 1. "
			     "(tx_rs_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}

	/* Free memory if needed. */
	if (dev->data->tx_queues[queue_idx]) {
		ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* Allocate the TX queue data structure. */
	txq = rte_zmalloc_socket(NULL,
				 sizeof(struct ci_tx_queue),
				 RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (!txq) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
			     "tx queue structure");
		return -ENOMEM;
	}

	/* Allocate TX hardware ring descriptors.
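	 * As on the Rx side, the DMA zone is sized for ICE_MAX_RING_DESC
	 * entries rather than for nb_desc.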
*/ 1467 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC; 1468 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 1469 tz = rte_eth_dma_zone_reserve(dev, "ice_tx_ring", queue_idx, 1470 ring_size, ICE_RING_BASE_ALIGN, 1471 socket_id); 1472 if (!tz) { 1473 ice_tx_queue_release(txq); 1474 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX"); 1475 return -ENOMEM; 1476 } 1477 1478 txq->mz = tz; 1479 txq->nb_tx_desc = nb_desc; 1480 txq->tx_rs_thresh = tx_rs_thresh; 1481 txq->tx_free_thresh = tx_free_thresh; 1482 txq->queue_id = queue_idx; 1483 1484 txq->reg_idx = vsi->base_queue + queue_idx; 1485 txq->port_id = dev->data->port_id; 1486 txq->offloads = offloads; 1487 txq->ice_vsi = vsi; 1488 txq->tx_deferred_start = tx_conf->tx_deferred_start; 1489 1490 txq->tx_ring_dma = tz->iova; 1491 txq->ice_tx_ring = tz->addr; 1492 1493 /* Allocate software ring */ 1494 txq->sw_ring = 1495 rte_zmalloc_socket(NULL, 1496 sizeof(struct ci_tx_entry) * nb_desc, 1497 RTE_CACHE_LINE_SIZE, 1498 socket_id); 1499 if (!txq->sw_ring) { 1500 ice_tx_queue_release(txq); 1501 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring"); 1502 return -ENOMEM; 1503 } 1504 1505 ice_reset_tx_queue(txq); 1506 txq->q_set = true; 1507 dev->data->tx_queues[queue_idx] = txq; 1508 ice_set_tx_function_flag(dev, txq); 1509 1510 return 0; 1511 } 1512 1513 void 1514 ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 1515 { 1516 ice_rx_queue_release(dev->data->rx_queues[qid]); 1517 } 1518 1519 void 1520 ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 1521 { 1522 ice_tx_queue_release(dev->data->tx_queues[qid]); 1523 } 1524 1525 void 1526 ice_tx_queue_release(void *txq) 1527 { 1528 struct ci_tx_queue *q = (struct ci_tx_queue *)txq; 1529 1530 if (!q) { 1531 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL"); 1532 return; 1533 } 1534 1535 ci_txq_release_all_mbufs(q, false); 1536 rte_free(q->sw_ring); 1537 rte_memzone_free(q->mz); 1538 rte_free(q); 1539 } 1540 1541 void 1542 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 1543 struct rte_eth_rxq_info *qinfo) 1544 { 1545 struct ice_rx_queue *rxq; 1546 1547 rxq = dev->data->rx_queues[queue_id]; 1548 1549 qinfo->mp = rxq->mp; 1550 qinfo->scattered_rx = dev->data->scattered_rx; 1551 qinfo->nb_desc = rxq->nb_rx_desc; 1552 1553 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 1554 qinfo->conf.rx_drop_en = rxq->drop_en; 1555 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 1556 } 1557 1558 void 1559 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 1560 struct rte_eth_txq_info *qinfo) 1561 { 1562 struct ci_tx_queue *txq; 1563 1564 txq = dev->data->tx_queues[queue_id]; 1565 1566 qinfo->nb_desc = txq->nb_tx_desc; 1567 1568 qinfo->conf.tx_thresh.pthresh = ICE_DEFAULT_TX_PTHRESH; 1569 qinfo->conf.tx_thresh.hthresh = ICE_DEFAULT_TX_HTHRESH; 1570 qinfo->conf.tx_thresh.wthresh = ICE_DEFAULT_TX_WTHRESH; 1571 1572 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 1573 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; 1574 qinfo->conf.offloads = txq->offloads; 1575 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1576 } 1577 1578 uint32_t 1579 ice_rx_queue_count(void *rx_queue) 1580 { 1581 #define ICE_RXQ_SCAN_INTERVAL 4 1582 volatile union ice_rx_flex_desc *rxdp; 1583 struct ice_rx_queue *rxq; 1584 uint16_t desc = 0; 1585 1586 rxq = rx_queue; 1587 rxdp = &rxq->rx_ring[rxq->rx_tail]; 1588 while ((desc < rxq->nb_rx_desc) && 1589 rte_le_to_cpu_16(rxdp->wb.status_error0) & 1590 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) { 1591 /** 1592 * 
Check the DD bit of a rx descriptor of each 4 in a group, 1593 * to avoid checking too frequently and downgrading performance 1594 * too much. 1595 */ 1596 desc += ICE_RXQ_SCAN_INTERVAL; 1597 rxdp += ICE_RXQ_SCAN_INTERVAL; 1598 if (rxq->rx_tail + desc >= rxq->nb_rx_desc) 1599 rxdp = &(rxq->rx_ring[rxq->rx_tail + 1600 desc - rxq->nb_rx_desc]); 1601 } 1602 1603 return desc; 1604 } 1605 1606 #define ICE_RX_FLEX_ERR0_BITS \ 1607 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \ 1608 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \ 1609 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \ 1610 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \ 1611 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \ 1612 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S)) 1613 1614 /* Rx L3/L4 checksum */ 1615 static inline uint64_t 1616 ice_rxd_error_to_pkt_flags(uint16_t stat_err0) 1617 { 1618 uint64_t flags = 0; 1619 1620 /* check if HW has decoded the packet and checksum */ 1621 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))) 1622 return 0; 1623 1624 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) { 1625 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | 1626 RTE_MBUF_F_RX_L4_CKSUM_GOOD | 1627 RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD); 1628 return flags; 1629 } 1630 1631 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))) 1632 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; 1633 else 1634 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; 1635 1636 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))) 1637 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; 1638 else 1639 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; 1640 1641 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))) 1642 flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD; 1643 1644 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))) 1645 flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD; 1646 else 1647 flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD; 1648 1649 return flags; 1650 } 1651 1652 static inline void 1653 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp) 1654 { 1655 if (rte_le_to_cpu_16(rxdp->wb.status_error0) & 1656 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) { 1657 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 1658 mb->vlan_tci = 1659 rte_le_to_cpu_16(rxdp->wb.l2tag1); 1660 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u", 1661 rte_le_to_cpu_16(rxdp->wb.l2tag1)); 1662 } else { 1663 mb->vlan_tci = 0; 1664 } 1665 1666 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1667 if (rte_le_to_cpu_16(rxdp->wb.status_error1) & 1668 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) { 1669 mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ | 1670 RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN; 1671 mb->vlan_tci_outer = mb->vlan_tci; 1672 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd); 1673 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", 1674 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st), 1675 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd)); 1676 } else { 1677 mb->vlan_tci_outer = 0; 1678 } 1679 #endif 1680 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u", 1681 mb->vlan_tci, mb->vlan_tci_outer); 1682 } 1683 1684 #define ICE_LOOK_AHEAD 8 1685 #if (ICE_LOOK_AHEAD != 8) 1686 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n" 1687 #endif 1688 1689 #define ICE_PTP_TS_VALID 0x1 1690 1691 static inline int 1692 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) 1693 { 1694 volatile union ice_rx_flex_desc *rxdp; 1695 struct ice_rx_entry *rxep; 1696 struct rte_mbuf *mb; 1697 uint16_t stat_err0; 1698 uint16_t pkt_len, hdr_len; 1699 int32_t 
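	/* status_error0 snapshot for the ICE_LOOK_AHEAD descriptors scanned per iteration */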
s[ICE_LOOK_AHEAD], nb_dd; 1700 int32_t i, j, nb_rx = 0; 1701 uint64_t pkt_flags = 0; 1702 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; 1703 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1704 bool is_tsinit = false; 1705 uint64_t ts_ns; 1706 struct ice_vsi *vsi = rxq->vsi; 1707 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 1708 struct ice_adapter *ad = rxq->vsi->adapter; 1709 #endif 1710 rxdp = &rxq->rx_ring[rxq->rx_tail]; 1711 rxep = &rxq->sw_ring[rxq->rx_tail]; 1712 1713 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); 1714 1715 /* Make sure there is at least 1 packet to receive */ 1716 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) 1717 return 0; 1718 1719 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1720 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { 1721 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); 1722 1723 if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) 1724 is_tsinit = 1; 1725 } 1726 #endif 1727 1728 /** 1729 * Scan LOOK_AHEAD descriptors at a time to determine which 1730 * descriptors reference packets that are ready to be received. 1731 */ 1732 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD, 1733 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) { 1734 /* Read desc statuses backwards to avoid race condition */ 1735 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--) 1736 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0); 1737 1738 rte_smp_rmb(); 1739 1740 /* Compute how many status bits were set */ 1741 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++) 1742 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S); 1743 1744 nb_rx += nb_dd; 1745 1746 /* Translate descriptor info to mbuf parameters */ 1747 for (j = 0; j < nb_dd; j++) { 1748 mb = rxep[j].mbuf; 1749 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & 1750 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 1751 mb->data_len = pkt_len; 1752 mb->pkt_len = pkt_len; 1753 1754 if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 1755 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & 1756 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 1757 mb->data_len = pkt_len; 1758 mb->pkt_len = pkt_len; 1759 } else { 1760 mb->nb_segs = (uint16_t)(mb->nb_segs + mb->next->nb_segs); 1761 mb->next->next = NULL; 1762 hdr_len = rte_le_to_cpu_16(rxdp[j].wb.hdr_len_sph_flex_flags1) & 1763 ICE_RX_FLEX_DESC_HEADER_LEN_M; 1764 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & 1765 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 1766 mb->data_len = hdr_len; 1767 mb->pkt_len = hdr_len + pkt_len; 1768 mb->next->data_len = pkt_len; 1769 #ifdef RTE_ETHDEV_DEBUG_RX 1770 rte_pktmbuf_dump(stdout, mb, rte_pktmbuf_pkt_len(mb)); 1771 #endif 1772 } 1773 1774 mb->ol_flags = 0; 1775 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0); 1776 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0); 1777 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & 1778 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)]; 1779 ice_rxd_to_vlan_tci(mb, &rxdp[j]); 1780 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]); 1781 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1782 if (ice_timestamp_dynflag > 0 && 1783 (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 1784 rxq->time_high = 1785 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); 1786 if (unlikely(is_tsinit)) { 1787 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, 1788 rxq->time_high); 1789 rxq->hw_time_low = (uint32_t)ts_ns; 1790 rxq->hw_time_high = (uint32_t)(ts_ns >> 32); 1791 is_tsinit = false; 1792 } else { 1793 if (rxq->time_high < rxq->hw_time_low) 1794 rxq->hw_time_high += 1; 1795 ts_ns = 
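					/* Rebuild the 64-bit timestamp from the
					 * tracked upper word and the 32-bit value
					 * read from the descriptor; the wrap check
					 * above has already advanced hw_time_high
					 * on rollover.
					 */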
(uint64_t)rxq->hw_time_high << 32 | rxq->time_high; 1796 rxq->hw_time_low = rxq->time_high; 1797 } 1798 rxq->hw_time_update = rte_get_timer_cycles() / 1799 (rte_get_timer_hz() / 1000); 1800 *RTE_MBUF_DYNFIELD(mb, 1801 ice_timestamp_dynfield_offset, 1802 rte_mbuf_timestamp_t *) = ts_ns; 1803 pkt_flags |= ice_timestamp_dynflag; 1804 } 1805 1806 if (ad->ptp_ena && ((mb->packet_type & 1807 RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) { 1808 rxq->time_high = 1809 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); 1810 mb->timesync = rxq->queue_id; 1811 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 1812 if (rxdp[j].wb.time_stamp_low & 1813 ICE_PTP_TS_VALID) 1814 pkt_flags |= 1815 RTE_MBUF_F_RX_IEEE1588_TMST; 1816 } 1817 #endif 1818 mb->ol_flags |= pkt_flags; 1819 } 1820 1821 for (j = 0; j < ICE_LOOK_AHEAD; j++) 1822 rxq->rx_stage[i + j] = rxep[j].mbuf; 1823 1824 if (nb_dd != ICE_LOOK_AHEAD) 1825 break; 1826 } 1827 1828 /* Clear software ring entries */ 1829 for (i = 0; i < nb_rx; i++) 1830 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL; 1831 1832 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: " 1833 "port_id=%u, queue_id=%u, nb_rx=%d", 1834 rxq->port_id, rxq->queue_id, nb_rx); 1835 1836 return nb_rx; 1837 } 1838 1839 static inline uint16_t 1840 ice_rx_fill_from_stage(struct ice_rx_queue *rxq, 1841 struct rte_mbuf **rx_pkts, 1842 uint16_t nb_pkts) 1843 { 1844 uint16_t i; 1845 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail]; 1846 1847 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); 1848 1849 for (i = 0; i < nb_pkts; i++) 1850 rx_pkts[i] = stage[i]; 1851 1852 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); 1853 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); 1854 1855 return nb_pkts; 1856 } 1857 1858 static inline int 1859 ice_rx_alloc_bufs(struct ice_rx_queue *rxq) 1860 { 1861 volatile union ice_rx_flex_desc *rxdp; 1862 struct ice_rx_entry *rxep; 1863 struct rte_mbuf *mb; 1864 uint16_t alloc_idx, i; 1865 uint64_t dma_addr; 1866 int diag, diag_pay; 1867 uint64_t pay_addr; 1868 struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh]; 1869 1870 /* Allocate buffers in bulk */ 1871 alloc_idx = (uint16_t)(rxq->rx_free_trigger - 1872 (rxq->rx_free_thresh - 1)); 1873 rxep = &rxq->sw_ring[alloc_idx]; 1874 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep, 1875 rxq->rx_free_thresh); 1876 if (unlikely(diag != 0)) { 1877 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk"); 1878 return -ENOMEM; 1879 } 1880 1881 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 1882 diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp, 1883 (void *)mbufs_pay, rxq->rx_free_thresh); 1884 if (unlikely(diag_pay != 0)) { 1885 rte_mempool_put_bulk(rxq->mp, (void *)rxep, 1886 rxq->rx_free_thresh); 1887 PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk"); 1888 return -ENOMEM; 1889 } 1890 } 1891 1892 rxdp = &rxq->rx_ring[alloc_idx]; 1893 for (i = 0; i < rxq->rx_free_thresh; i++) { 1894 if (likely(i < (rxq->rx_free_thresh - 1))) 1895 /* Prefetch next mbuf */ 1896 rte_prefetch0(rxep[i + 1].mbuf); 1897 1898 mb = rxep[i].mbuf; 1899 rte_mbuf_refcnt_set(mb, 1); 1900 mb->data_off = RTE_PKTMBUF_HEADROOM; 1901 mb->nb_segs = 1; 1902 mb->port = rxq->port_id; 1903 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb)); 1904 1905 if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 1906 mb->next = NULL; 1907 rxdp[i].read.hdr_addr = 0; 1908 rxdp[i].read.pkt_addr = dma_addr; 1909 } else { 1910 mb->next = mbufs_pay[i]; 1911 pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i])); 1912 
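			/* Buffer split refill: the header buffer address goes into
			 * hdr_addr and the payload buffer address into pkt_addr.
			 */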
rxdp[i].read.hdr_addr = dma_addr; 1913 rxdp[i].read.pkt_addr = pay_addr; 1914 } 1915 } 1916 1917 /* Update Rx tail register */ 1918 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger); 1919 1920 rxq->rx_free_trigger = 1921 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh); 1922 if (rxq->rx_free_trigger >= rxq->nb_rx_desc) 1923 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); 1924 1925 return 0; 1926 } 1927 1928 static inline uint16_t 1929 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 1930 { 1931 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue; 1932 uint16_t nb_rx = 0; 1933 1934 if (!nb_pkts) 1935 return 0; 1936 1937 if (rxq->rx_nb_avail) 1938 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); 1939 1940 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq); 1941 rxq->rx_next_avail = 0; 1942 rxq->rx_nb_avail = nb_rx; 1943 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); 1944 1945 if (rxq->rx_tail > rxq->rx_free_trigger) { 1946 if (ice_rx_alloc_bufs(rxq) != 0) { 1947 uint16_t i, j; 1948 1949 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed += 1950 rxq->rx_free_thresh; 1951 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for " 1952 "port_id=%u, queue_id=%u", 1953 rxq->port_id, rxq->queue_id); 1954 rxq->rx_nb_avail = 0; 1955 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); 1956 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) 1957 rxq->sw_ring[j].mbuf = rxq->rx_stage[i]; 1958 1959 return 0; 1960 } 1961 } 1962 1963 if (rxq->rx_tail >= rxq->nb_rx_desc) 1964 rxq->rx_tail = 0; 1965 1966 if (rxq->rx_nb_avail) 1967 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); 1968 1969 return 0; 1970 } 1971 1972 static uint16_t 1973 ice_recv_pkts_bulk_alloc(void *rx_queue, 1974 struct rte_mbuf **rx_pkts, 1975 uint16_t nb_pkts) 1976 { 1977 uint16_t nb_rx = 0; 1978 uint16_t n; 1979 uint16_t count; 1980 1981 if (unlikely(nb_pkts == 0)) 1982 return nb_rx; 1983 1984 if (likely(nb_pkts <= ICE_RX_MAX_BURST)) 1985 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts); 1986 1987 while (nb_pkts) { 1988 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST); 1989 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); 1990 nb_rx = (uint16_t)(nb_rx + count); 1991 nb_pkts = (uint16_t)(nb_pkts - count); 1992 if (count < n) 1993 break; 1994 } 1995 1996 return nb_rx; 1997 } 1998 1999 static uint16_t 2000 ice_recv_scattered_pkts(void *rx_queue, 2001 struct rte_mbuf **rx_pkts, 2002 uint16_t nb_pkts) 2003 { 2004 struct ice_rx_queue *rxq = rx_queue; 2005 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring; 2006 volatile union ice_rx_flex_desc *rxdp; 2007 union ice_rx_flex_desc rxd; 2008 struct ice_rx_entry *sw_ring = rxq->sw_ring; 2009 struct ice_rx_entry *rxe; 2010 struct rte_mbuf *first_seg = rxq->pkt_first_seg; 2011 struct rte_mbuf *last_seg = rxq->pkt_last_seg; 2012 struct rte_mbuf *nmb; /* new allocated mbuf */ 2013 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */ 2014 uint16_t rx_id = rxq->rx_tail; 2015 uint16_t nb_rx = 0; 2016 uint16_t nb_hold = 0; 2017 uint16_t rx_packet_len; 2018 uint16_t rx_stat_err0; 2019 uint64_t dma_addr; 2020 uint64_t pkt_flags; 2021 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; 2022 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2023 bool is_tsinit = false; 2024 uint64_t ts_ns; 2025 struct ice_vsi *vsi = rxq->vsi; 2026 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 2027 struct ice_adapter *ad = rxq->vsi->adapter; 2028 2029 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { 2030 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); 2031 2032 
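/* Re-derive the full 64-bit timestamp from hardware if more than 4 ms have elapsed since the last update; otherwise the cached upper 32 bits are extended as the 32-bit HW value wraps. */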
if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) 2033 is_tsinit = true; 2034 } 2035 #endif 2036 2037 while (nb_rx < nb_pkts) { 2038 rxdp = &rx_ring[rx_id]; 2039 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); 2040 2041 /* Check the DD bit first */ 2042 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) 2043 break; 2044 2045 /* allocate mbuf */ 2046 nmb = rte_mbuf_raw_alloc(rxq->mp); 2047 if (unlikely(!nmb)) { 2048 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++; 2049 break; 2050 } 2051 rxd = *rxdp; /* copy descriptor in ring to temp variable*/ 2052 2053 nb_hold++; 2054 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */ 2055 rx_id++; 2056 if (unlikely(rx_id == rxq->nb_rx_desc)) 2057 rx_id = 0; 2058 2059 /* Prefetch next mbuf */ 2060 rte_prefetch0(sw_ring[rx_id].mbuf); 2061 2062 /** 2063 * When next RX descriptor is on a cache line boundary, 2064 * prefetch the next 4 RX descriptors and next 8 pointers 2065 * to mbufs. 2066 */ 2067 if ((rx_id & 0x3) == 0) { 2068 rte_prefetch0(&rx_ring[rx_id]); 2069 rte_prefetch0(&sw_ring[rx_id]); 2070 } 2071 2072 rxm = rxe->mbuf; 2073 rxe->mbuf = nmb; 2074 dma_addr = 2075 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); 2076 2077 /* Set data buffer address and data length of the mbuf */ 2078 rxdp->read.hdr_addr = 0; 2079 rxdp->read.pkt_addr = dma_addr; 2080 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) & 2081 ICE_RX_FLX_DESC_PKT_LEN_M; 2082 rxm->data_len = rx_packet_len; 2083 rxm->data_off = RTE_PKTMBUF_HEADROOM; 2084 2085 /** 2086 * If this is the first buffer of the received packet, set the 2087 * pointer to the first mbuf of the packet and initialize its 2088 * context. Otherwise, update the total length and the number 2089 * of segments of the current scattered packet, and update the 2090 * pointer to the last mbuf of the current packet. 2091 */ 2092 if (!first_seg) { 2093 first_seg = rxm; 2094 first_seg->nb_segs = 1; 2095 first_seg->pkt_len = rx_packet_len; 2096 } else { 2097 first_seg->pkt_len = 2098 (uint16_t)(first_seg->pkt_len + 2099 rx_packet_len); 2100 first_seg->nb_segs++; 2101 last_seg->next = rxm; 2102 } 2103 2104 /** 2105 * If this is not the last buffer of the received packet, 2106 * update the pointer to the last mbuf of the current scattered 2107 * packet and continue to parse the RX ring. 2108 */ 2109 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) { 2110 last_seg = rxm; 2111 continue; 2112 } 2113 2114 /** 2115 * This is the last buffer of the received packet. If the CRC 2116 * is not stripped by the hardware: 2117 * - Subtract the CRC length from the total packet length. 2118 * - If the last buffer only contains the whole CRC or a part 2119 * of it, free the mbuf associated to the last buffer. If part 2120 * of the CRC is also contained in the previous mbuf, subtract 2121 * the length of that CRC part from the data length of the 2122 * previous mbuf. 
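* For example, if the last buffer holds only 3 bytes and the CRC is 4 bytes, that buffer is freed and the previous mbuf's data_len is reduced by the remaining 1 byte of CRC.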
2123 */ 2124 rxm->next = NULL; 2125 if (unlikely(rxq->crc_len > 0)) { 2126 first_seg->pkt_len -= RTE_ETHER_CRC_LEN; 2127 if (rx_packet_len <= RTE_ETHER_CRC_LEN) { 2128 rte_pktmbuf_free_seg(rxm); 2129 first_seg->nb_segs--; 2130 last_seg->data_len = 2131 (uint16_t)(last_seg->data_len - 2132 (RTE_ETHER_CRC_LEN - rx_packet_len)); 2133 last_seg->next = NULL; 2134 } else 2135 rxm->data_len = (uint16_t)(rx_packet_len - 2136 RTE_ETHER_CRC_LEN); 2137 } else if (rx_packet_len == 0) { 2138 rte_pktmbuf_free_seg(rxm); 2139 first_seg->nb_segs--; 2140 last_seg->next = NULL; 2141 } 2142 2143 first_seg->port = rxq->port_id; 2144 first_seg->ol_flags = 0; 2145 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & 2146 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; 2147 ice_rxd_to_vlan_tci(first_seg, &rxd); 2148 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd); 2149 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); 2150 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2151 if (ice_timestamp_dynflag > 0 && 2152 (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 2153 rxq->time_high = 2154 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2155 if (unlikely(is_tsinit)) { 2156 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high); 2157 rxq->hw_time_low = (uint32_t)ts_ns; 2158 rxq->hw_time_high = (uint32_t)(ts_ns >> 32); 2159 is_tsinit = false; 2160 } else { 2161 if (rxq->time_high < rxq->hw_time_low) 2162 rxq->hw_time_high += 1; 2163 ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; 2164 rxq->hw_time_low = rxq->time_high; 2165 } 2166 rxq->hw_time_update = rte_get_timer_cycles() / 2167 (rte_get_timer_hz() / 1000); 2168 *RTE_MBUF_DYNFIELD(first_seg, 2169 (ice_timestamp_dynfield_offset), 2170 rte_mbuf_timestamp_t *) = ts_ns; 2171 pkt_flags |= ice_timestamp_dynflag; 2172 } 2173 2174 if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK) 2175 == RTE_PTYPE_L2_ETHER_TIMESYNC)) { 2176 rxq->time_high = 2177 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2178 first_seg->timesync = rxq->queue_id; 2179 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 2180 } 2181 #endif 2182 first_seg->ol_flags |= pkt_flags; 2183 /* Prefetch data of first segment, if configured to do so. */ 2184 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr, 2185 first_seg->data_off)); 2186 rx_pkts[nb_rx++] = first_seg; 2187 first_seg = NULL; 2188 } 2189 2190 /* Record index of the next RX descriptor to probe. */ 2191 rxq->rx_tail = rx_id; 2192 rxq->pkt_first_seg = first_seg; 2193 rxq->pkt_last_seg = last_seg; 2194 2195 /** 2196 * If the number of free RX descriptors is greater than the RX free 2197 * threshold of the queue, advance the Receive Descriptor Tail (RDT) 2198 * register. Update the RDT with the value of the last processed RX 2199 * descriptor minus 1, to guarantee that the RDT register is never 2200 * equal to the RDH register, which creates a "full" ring situation 2201 * from the hardware point of view. 2202 */ 2203 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); 2204 if (nb_hold > rxq->rx_free_thresh) { 2205 rx_id = (uint16_t)(rx_id == 0 ? 
2206 (rxq->nb_rx_desc - 1) : (rx_id - 1)); 2207 /* write TAIL register */ 2208 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id); 2209 nb_hold = 0; 2210 } 2211 rxq->nb_rx_hold = nb_hold; 2212 2213 /* return received packet in the burst */ 2214 return nb_rx; 2215 } 2216 2217 const uint32_t * 2218 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements) 2219 { 2220 struct ice_adapter *ad = 2221 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 2222 const uint32_t *ptypes; 2223 2224 static const uint32_t ptypes_os[] = { 2225 /* refers to ice_get_default_pkt_type() */ 2226 RTE_PTYPE_L2_ETHER, 2227 RTE_PTYPE_L2_ETHER_TIMESYNC, 2228 RTE_PTYPE_L2_ETHER_LLDP, 2229 RTE_PTYPE_L2_ETHER_ARP, 2230 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2231 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2232 RTE_PTYPE_L4_FRAG, 2233 RTE_PTYPE_L4_ICMP, 2234 RTE_PTYPE_L4_NONFRAG, 2235 RTE_PTYPE_L4_SCTP, 2236 RTE_PTYPE_L4_TCP, 2237 RTE_PTYPE_L4_UDP, 2238 RTE_PTYPE_TUNNEL_GRENAT, 2239 RTE_PTYPE_TUNNEL_IP, 2240 RTE_PTYPE_INNER_L2_ETHER, 2241 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2242 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2243 RTE_PTYPE_INNER_L4_FRAG, 2244 RTE_PTYPE_INNER_L4_ICMP, 2245 RTE_PTYPE_INNER_L4_NONFRAG, 2246 RTE_PTYPE_INNER_L4_SCTP, 2247 RTE_PTYPE_INNER_L4_TCP, 2248 RTE_PTYPE_INNER_L4_UDP, 2249 }; 2250 2251 static const uint32_t ptypes_comms[] = { 2252 /* refers to ice_get_default_pkt_type() */ 2253 RTE_PTYPE_L2_ETHER, 2254 RTE_PTYPE_L2_ETHER_TIMESYNC, 2255 RTE_PTYPE_L2_ETHER_LLDP, 2256 RTE_PTYPE_L2_ETHER_ARP, 2257 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2258 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2259 RTE_PTYPE_L4_FRAG, 2260 RTE_PTYPE_L4_ICMP, 2261 RTE_PTYPE_L4_NONFRAG, 2262 RTE_PTYPE_L4_SCTP, 2263 RTE_PTYPE_L4_TCP, 2264 RTE_PTYPE_L4_UDP, 2265 RTE_PTYPE_TUNNEL_GRENAT, 2266 RTE_PTYPE_TUNNEL_IP, 2267 RTE_PTYPE_INNER_L2_ETHER, 2268 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2269 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2270 RTE_PTYPE_INNER_L4_FRAG, 2271 RTE_PTYPE_INNER_L4_ICMP, 2272 RTE_PTYPE_INNER_L4_NONFRAG, 2273 RTE_PTYPE_INNER_L4_SCTP, 2274 RTE_PTYPE_INNER_L4_TCP, 2275 RTE_PTYPE_INNER_L4_UDP, 2276 RTE_PTYPE_TUNNEL_GTPC, 2277 RTE_PTYPE_TUNNEL_GTPU, 2278 RTE_PTYPE_L2_ETHER_PPPOE, 2279 }; 2280 2281 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) { 2282 *no_of_elements = RTE_DIM(ptypes_comms); 2283 ptypes = ptypes_comms; 2284 } else { 2285 *no_of_elements = RTE_DIM(ptypes_os); 2286 ptypes = ptypes_os; 2287 } 2288 2289 if (dev->rx_pkt_burst == ice_recv_pkts || 2290 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc || 2291 dev->rx_pkt_burst == ice_recv_scattered_pkts) 2292 return ptypes; 2293 2294 #ifdef RTE_ARCH_X86 2295 if (dev->rx_pkt_burst == ice_recv_pkts_vec || 2296 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec || 2297 #ifdef CC_AVX512_SUPPORT 2298 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 || 2299 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload || 2300 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 || 2301 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload || 2302 #endif 2303 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 || 2304 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload || 2305 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 || 2306 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload) 2307 return ptypes; 2308 #endif 2309 2310 return NULL; 2311 } 2312 2313 int 2314 ice_rx_descriptor_status(void *rx_queue, uint16_t offset) 2315 { 2316 volatile union ice_rx_flex_desc *rxdp; 2317 struct ice_rx_queue *rxq = rx_queue; 2318 uint32_t desc; 2319 2320 if (unlikely(offset >= rxq->nb_rx_desc)) 2321 return 
-EINVAL; 2322 2323 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) 2324 return RTE_ETH_RX_DESC_UNAVAIL; 2325 2326 desc = rxq->rx_tail + offset; 2327 if (desc >= rxq->nb_rx_desc) 2328 desc -= rxq->nb_rx_desc; 2329 2330 rxdp = &rxq->rx_ring[desc]; 2331 if (rte_le_to_cpu_16(rxdp->wb.status_error0) & 2332 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) 2333 return RTE_ETH_RX_DESC_DONE; 2334 2335 return RTE_ETH_RX_DESC_AVAIL; 2336 } 2337 2338 int 2339 ice_tx_descriptor_status(void *tx_queue, uint16_t offset) 2340 { 2341 struct ci_tx_queue *txq = tx_queue; 2342 volatile uint64_t *status; 2343 uint64_t mask, expect; 2344 uint32_t desc; 2345 2346 if (unlikely(offset >= txq->nb_tx_desc)) 2347 return -EINVAL; 2348 2349 desc = txq->tx_tail + offset; 2350 /* go to next desc that has the RS bit */ 2351 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * 2352 txq->tx_rs_thresh; 2353 if (desc >= txq->nb_tx_desc) { 2354 desc -= txq->nb_tx_desc; 2355 if (desc >= txq->nb_tx_desc) 2356 desc -= txq->nb_tx_desc; 2357 } 2358 2359 status = &txq->ice_tx_ring[desc].cmd_type_offset_bsz; 2360 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M); 2361 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE << 2362 ICE_TXD_QW1_DTYPE_S); 2363 if ((*status & mask) == expect) 2364 return RTE_ETH_TX_DESC_DONE; 2365 2366 return RTE_ETH_TX_DESC_FULL; 2367 } 2368 2369 void 2370 ice_free_queues(struct rte_eth_dev *dev) 2371 { 2372 uint16_t i; 2373 2374 PMD_INIT_FUNC_TRACE(); 2375 2376 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2377 if (!dev->data->rx_queues[i]) 2378 continue; 2379 ice_rx_queue_release(dev->data->rx_queues[i]); 2380 dev->data->rx_queues[i] = NULL; 2381 } 2382 dev->data->nb_rx_queues = 0; 2383 2384 for (i = 0; i < dev->data->nb_tx_queues; i++) { 2385 if (!dev->data->tx_queues[i]) 2386 continue; 2387 ice_tx_queue_release(dev->data->tx_queues[i]); 2388 dev->data->tx_queues[i] = NULL; 2389 } 2390 dev->data->nb_tx_queues = 0; 2391 } 2392 2393 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC 2394 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC 2395 2396 int 2397 ice_fdir_setup_tx_resources(struct ice_pf *pf) 2398 { 2399 struct ci_tx_queue *txq; 2400 const struct rte_memzone *tz = NULL; 2401 uint32_t ring_size; 2402 struct rte_eth_dev *dev; 2403 2404 if (!pf) { 2405 PMD_DRV_LOG(ERR, "PF is not available"); 2406 return -EINVAL; 2407 } 2408 2409 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id]; 2410 2411 /* Allocate the TX queue data structure. */ 2412 txq = rte_zmalloc_socket("ice fdir tx queue", 2413 sizeof(struct ci_tx_queue), 2414 RTE_CACHE_LINE_SIZE, 2415 SOCKET_ID_ANY); 2416 if (!txq) { 2417 PMD_DRV_LOG(ERR, "Failed to allocate memory for " 2418 "tx queue structure."); 2419 return -ENOMEM; 2420 } 2421 2422 /* Allocate TX hardware ring descriptors. 
*/ 2423 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC; 2424 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 2425 2426 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring", 2427 ICE_FDIR_QUEUE_ID, ring_size, 2428 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); 2429 if (!tz) { 2430 ice_tx_queue_release(txq); 2431 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX."); 2432 return -ENOMEM; 2433 } 2434 2435 txq->mz = tz; 2436 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC; 2437 txq->queue_id = ICE_FDIR_QUEUE_ID; 2438 txq->reg_idx = pf->fdir.fdir_vsi->base_queue; 2439 txq->ice_vsi = pf->fdir.fdir_vsi; 2440 2441 txq->tx_ring_dma = tz->iova; 2442 txq->ice_tx_ring = (struct ice_tx_desc *)tz->addr; 2443 /* 2444 * don't need to allocate software ring and reset for the fdir 2445 * program queue just set the queue has been configured. 2446 */ 2447 txq->q_set = true; 2448 pf->fdir.txq = txq; 2449 2450 2451 return ICE_SUCCESS; 2452 } 2453 2454 int 2455 ice_fdir_setup_rx_resources(struct ice_pf *pf) 2456 { 2457 struct ice_rx_queue *rxq; 2458 const struct rte_memzone *rz = NULL; 2459 uint32_t ring_size; 2460 struct rte_eth_dev *dev; 2461 2462 if (!pf) { 2463 PMD_DRV_LOG(ERR, "PF is not available"); 2464 return -EINVAL; 2465 } 2466 2467 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id]; 2468 2469 /* Allocate the RX queue data structure. */ 2470 rxq = rte_zmalloc_socket("ice fdir rx queue", 2471 sizeof(struct ice_rx_queue), 2472 RTE_CACHE_LINE_SIZE, 2473 SOCKET_ID_ANY); 2474 if (!rxq) { 2475 PMD_DRV_LOG(ERR, "Failed to allocate memory for " 2476 "rx queue structure."); 2477 return -ENOMEM; 2478 } 2479 2480 /* Allocate RX hardware ring descriptors. */ 2481 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC; 2482 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 2483 2484 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring", 2485 ICE_FDIR_QUEUE_ID, ring_size, 2486 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); 2487 if (!rz) { 2488 ice_rx_queue_release(rxq); 2489 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX."); 2490 return -ENOMEM; 2491 } 2492 2493 rxq->mz = rz; 2494 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC; 2495 rxq->queue_id = ICE_FDIR_QUEUE_ID; 2496 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue; 2497 rxq->vsi = pf->fdir.fdir_vsi; 2498 2499 rxq->rx_ring_dma = rz->iova; 2500 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC * 2501 sizeof(union ice_32byte_rx_desc)); 2502 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr; 2503 2504 /* 2505 * Don't need to allocate software ring and reset for the fdir 2506 * rx queue, just set the queue has been configured. 
2507 */ 2508 rxq->q_set = true; 2509 pf->fdir.rxq = rxq; 2510 2511 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs; 2512 2513 return ICE_SUCCESS; 2514 } 2515 2516 uint16_t 2517 ice_recv_pkts(void *rx_queue, 2518 struct rte_mbuf **rx_pkts, 2519 uint16_t nb_pkts) 2520 { 2521 struct ice_rx_queue *rxq = rx_queue; 2522 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring; 2523 volatile union ice_rx_flex_desc *rxdp; 2524 union ice_rx_flex_desc rxd; 2525 struct ice_rx_entry *sw_ring = rxq->sw_ring; 2526 struct ice_rx_entry *rxe; 2527 struct rte_mbuf *nmb; /* new allocated mbuf */ 2528 struct rte_mbuf *nmb_pay; /* new allocated payload mbuf */ 2529 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */ 2530 uint16_t rx_id = rxq->rx_tail; 2531 uint16_t nb_rx = 0; 2532 uint16_t nb_hold = 0; 2533 uint16_t rx_packet_len; 2534 uint16_t rx_header_len; 2535 uint16_t rx_stat_err0; 2536 uint64_t dma_addr; 2537 uint64_t pkt_flags; 2538 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; 2539 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2540 bool is_tsinit = false; 2541 uint64_t ts_ns; 2542 struct ice_vsi *vsi = rxq->vsi; 2543 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 2544 struct ice_adapter *ad = rxq->vsi->adapter; 2545 2546 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { 2547 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); 2548 2549 if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) 2550 is_tsinit = 1; 2551 } 2552 #endif 2553 2554 while (nb_rx < nb_pkts) { 2555 rxdp = &rx_ring[rx_id]; 2556 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); 2557 2558 /* Check the DD bit first */ 2559 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) 2560 break; 2561 2562 /* allocate header mbuf */ 2563 nmb = rte_mbuf_raw_alloc(rxq->mp); 2564 if (unlikely(!nmb)) { 2565 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++; 2566 break; 2567 } 2568 2569 rxd = *rxdp; /* copy descriptor in ring to temp variable*/ 2570 2571 nb_hold++; 2572 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */ 2573 rx_id++; 2574 if (unlikely(rx_id == rxq->nb_rx_desc)) 2575 rx_id = 0; 2576 rxm = rxe->mbuf; 2577 rxe->mbuf = nmb; 2578 dma_addr = 2579 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); 2580 2581 if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 2582 /** 2583 * fill the read format of descriptor with physic address in 2584 * new allocated mbuf: nmb 2585 */ 2586 rxdp->read.hdr_addr = 0; 2587 rxdp->read.pkt_addr = dma_addr; 2588 } else { 2589 /* allocate payload mbuf */ 2590 nmb_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp); 2591 if (unlikely(!nmb_pay)) { 2592 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++; 2593 rxe->mbuf = NULL; 2594 nb_hold--; 2595 if (unlikely(rx_id == 0)) 2596 rx_id = rxq->nb_rx_desc; 2597 2598 rx_id--; 2599 rte_pktmbuf_free(nmb); 2600 break; 2601 } 2602 2603 nmb->next = nmb_pay; 2604 nmb_pay->next = NULL; 2605 2606 /** 2607 * fill the read format of descriptor with physic address in 2608 * new allocated mbuf: nmb 2609 */ 2610 rxdp->read.hdr_addr = dma_addr; 2611 rxdp->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb_pay)); 2612 } 2613 2614 /* fill old mbuf with received descriptor: rxd */ 2615 rxm->data_off = RTE_PKTMBUF_HEADROOM; 2616 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM)); 2617 if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 2618 rxm->nb_segs = 1; 2619 rxm->next = NULL; 2620 /* calculate rx_packet_len of the received pkt */ 2621 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) & 2622 
ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 2623 rxm->data_len = rx_packet_len; 2624 rxm->pkt_len = rx_packet_len; 2625 } else { 2626 rxm->nb_segs = (uint16_t)(rxm->nb_segs + rxm->next->nb_segs); 2627 rxm->next->next = NULL; 2628 /* calculate rx_packet_len of the received pkt */ 2629 rx_header_len = rte_le_to_cpu_16(rxd.wb.hdr_len_sph_flex_flags1) & 2630 ICE_RX_FLEX_DESC_HEADER_LEN_M; 2631 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) & 2632 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 2633 rxm->data_len = rx_header_len; 2634 rxm->pkt_len = rx_header_len + rx_packet_len; 2635 rxm->next->data_len = rx_packet_len; 2636 2637 #ifdef RTE_ETHDEV_DEBUG_RX 2638 rte_pktmbuf_dump(stdout, rxm, rte_pktmbuf_pkt_len(rxm)); 2639 #endif 2640 } 2641 2642 rxm->port = rxq->port_id; 2643 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & 2644 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; 2645 ice_rxd_to_vlan_tci(rxm, &rxd); 2646 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd); 2647 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); 2648 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2649 if (ice_timestamp_dynflag > 0 && 2650 (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 2651 rxq->time_high = 2652 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2653 if (unlikely(is_tsinit)) { 2654 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high); 2655 rxq->hw_time_low = (uint32_t)ts_ns; 2656 rxq->hw_time_high = (uint32_t)(ts_ns >> 32); 2657 is_tsinit = false; 2658 } else { 2659 if (rxq->time_high < rxq->hw_time_low) 2660 rxq->hw_time_high += 1; 2661 ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; 2662 rxq->hw_time_low = rxq->time_high; 2663 } 2664 rxq->hw_time_update = rte_get_timer_cycles() / 2665 (rte_get_timer_hz() / 1000); 2666 *RTE_MBUF_DYNFIELD(rxm, 2667 (ice_timestamp_dynfield_offset), 2668 rte_mbuf_timestamp_t *) = ts_ns; 2669 pkt_flags |= ice_timestamp_dynflag; 2670 } 2671 2672 if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) == 2673 RTE_PTYPE_L2_ETHER_TIMESYNC)) { 2674 rxq->time_high = 2675 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2676 rxm->timesync = rxq->queue_id; 2677 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 2678 } 2679 #endif 2680 rxm->ol_flags |= pkt_flags; 2681 /* copy old mbuf to rx_pkts */ 2682 rx_pkts[nb_rx++] = rxm; 2683 } 2684 2685 rxq->rx_tail = rx_id; 2686 /** 2687 * If the number of free RX descriptors is greater than the RX free 2688 * threshold of the queue, advance the receive tail register of queue. 2689 * Update that register with the value of the last processed RX 2690 * descriptor minus 1. 2691 */ 2692 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); 2693 if (nb_hold > rxq->rx_free_thresh) { 2694 rx_id = (uint16_t)(rx_id == 0 ? 
2695 (rxq->nb_rx_desc - 1) : (rx_id - 1)); 2696 /* write TAIL register */ 2697 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id); 2698 nb_hold = 0; 2699 } 2700 rxq->nb_rx_hold = nb_hold; 2701 2702 /* return received packet in the burst */ 2703 return nb_rx; 2704 } 2705 2706 static inline void 2707 ice_parse_tunneling_params(uint64_t ol_flags, 2708 union ice_tx_offload tx_offload, 2709 uint32_t *cd_tunneling) 2710 { 2711 /* EIPT: External (outer) IP header type */ 2712 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) 2713 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4; 2714 else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) 2715 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM; 2716 else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) 2717 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6; 2718 2719 /* EIPLEN: External (outer) IP header length, in DWords */ 2720 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) << 2721 ICE_TXD_CTX_QW0_EIPLEN_S; 2722 2723 /* L4TUNT: L4 Tunneling Type */ 2724 switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { 2725 case RTE_MBUF_F_TX_TUNNEL_IPIP: 2726 /* for non UDP / GRE tunneling, set to 00b */ 2727 break; 2728 case RTE_MBUF_F_TX_TUNNEL_VXLAN: 2729 case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE: 2730 case RTE_MBUF_F_TX_TUNNEL_GTP: 2731 case RTE_MBUF_F_TX_TUNNEL_GENEVE: 2732 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING; 2733 break; 2734 case RTE_MBUF_F_TX_TUNNEL_GRE: 2735 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING; 2736 break; 2737 default: 2738 PMD_TX_LOG(ERR, "Tunnel type not supported"); 2739 return; 2740 } 2741 2742 /* L4TUNLEN: L4 Tunneling Length, in Words 2743 * 2744 * We depend on app to set rte_mbuf.l2_len correctly. 2745 * For IP in GRE it should be set to the length of the GRE 2746 * header; 2747 * For MAC in GRE or MAC in UDP it should be set to the length 2748 * of the GRE or UDP headers plus the inner MAC up to including 2749 * its last Ethertype. 2750 * If MPLS labels exists, it should include them as well. 2751 */ 2752 *cd_tunneling |= (tx_offload.l2_len >> 1) << 2753 ICE_TXD_CTX_QW0_NATLEN_S; 2754 2755 /** 2756 * Calculate the tunneling UDP checksum. 
2757 * Shall be set only if L4TUNT = 01b and EIPT is not zero 2758 */ 2759 if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) && 2760 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && 2761 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) 2762 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; 2763 } 2764 2765 static inline void 2766 ice_txd_enable_checksum(uint64_t ol_flags, 2767 uint32_t *td_cmd, 2768 uint32_t *td_offset, 2769 union ice_tx_offload tx_offload) 2770 { 2771 /* Set MACLEN */ 2772 if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) 2773 *td_offset |= (tx_offload.l2_len >> 1) 2774 << ICE_TX_DESC_LEN_MACLEN_S; 2775 2776 /* Enable L3 checksum offloads */ 2777 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { 2778 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 2779 *td_offset |= (tx_offload.l3_len >> 2) << 2780 ICE_TX_DESC_LEN_IPLEN_S; 2781 } else if (ol_flags & RTE_MBUF_F_TX_IPV4) { 2782 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 2783 *td_offset |= (tx_offload.l3_len >> 2) << 2784 ICE_TX_DESC_LEN_IPLEN_S; 2785 } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { 2786 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 2787 *td_offset |= (tx_offload.l3_len >> 2) << 2788 ICE_TX_DESC_LEN_IPLEN_S; 2789 } 2790 2791 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2792 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 2793 *td_offset |= (tx_offload.l4_len >> 2) << 2794 ICE_TX_DESC_LEN_L4_LEN_S; 2795 return; 2796 } 2797 2798 if (ol_flags & RTE_MBUF_F_TX_UDP_SEG) { 2799 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 2800 *td_offset |= (tx_offload.l4_len >> 2) << 2801 ICE_TX_DESC_LEN_L4_LEN_S; 2802 return; 2803 } 2804 2805 /* Enable L4 checksum offloads */ 2806 switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) { 2807 case RTE_MBUF_F_TX_TCP_CKSUM: 2808 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 2809 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << 2810 ICE_TX_DESC_LEN_L4_LEN_S; 2811 break; 2812 case RTE_MBUF_F_TX_SCTP_CKSUM: 2813 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 2814 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << 2815 ICE_TX_DESC_LEN_L4_LEN_S; 2816 break; 2817 case RTE_MBUF_F_TX_UDP_CKSUM: 2818 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 2819 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << 2820 ICE_TX_DESC_LEN_L4_LEN_S; 2821 break; 2822 default: 2823 break; 2824 } 2825 } 2826 2827 static inline int 2828 ice_xmit_cleanup(struct ci_tx_queue *txq) 2829 { 2830 struct ci_tx_entry *sw_ring = txq->sw_ring; 2831 volatile struct ice_tx_desc *txd = txq->ice_tx_ring; 2832 uint16_t last_desc_cleaned = txq->last_desc_cleaned; 2833 uint16_t nb_tx_desc = txq->nb_tx_desc; 2834 uint16_t desc_to_clean_to; 2835 uint16_t nb_tx_to_clean; 2836 2837 /* Determine the last descriptor needing to be cleaned */ 2838 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); 2839 if (desc_to_clean_to >= nb_tx_desc) 2840 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); 2841 2842 /* Check to make sure the last descriptor to clean is done */ 2843 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; 2844 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz & 2845 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) { 2846 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done " 2847 "(port=%d queue=%d) value=0x%"PRIx64, 2848 desc_to_clean_to, 2849 txq->port_id, txq->queue_id, 2850 txd[desc_to_clean_to].cmd_type_offset_bsz); 2851 /* Failed to clean any descriptors */ 2852 return -1; 2853 } 2854 2855 /* Figure out how many descriptors will be cleaned */ 2856 if (last_desc_cleaned > desc_to_clean_to) 2857 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + 2858 desc_to_clean_to); 2859 else 
2860 nb_tx_to_clean = (uint16_t)(desc_to_clean_to - 2861 last_desc_cleaned); 2862 2863 /* The last descriptor to clean is done, so that means all the 2864 * descriptors from the last descriptor that was cleaned 2865 * up to the last descriptor with the RS bit set 2866 * are done. Only reset the threshold descriptor. 2867 */ 2868 txd[desc_to_clean_to].cmd_type_offset_bsz = 0; 2869 2870 /* Update the txq to reflect the last descriptor that was cleaned */ 2871 txq->last_desc_cleaned = desc_to_clean_to; 2872 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); 2873 2874 return 0; 2875 } 2876 2877 /* Construct the tx flags */ 2878 static inline uint64_t 2879 ice_build_ctob(uint32_t td_cmd, 2880 uint32_t td_offset, 2881 uint16_t size, 2882 uint32_t td_tag) 2883 { 2884 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | 2885 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | 2886 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | 2887 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) | 2888 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); 2889 } 2890 2891 /* Check if the context descriptor is needed for TX offloading */ 2892 static inline uint16_t 2893 ice_calc_context_desc(uint64_t flags) 2894 { 2895 static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG | 2896 RTE_MBUF_F_TX_UDP_SEG | 2897 RTE_MBUF_F_TX_QINQ | 2898 RTE_MBUF_F_TX_OUTER_IP_CKSUM | 2899 RTE_MBUF_F_TX_TUNNEL_MASK | 2900 RTE_MBUF_F_TX_IEEE1588_TMST; 2901 2902 return (flags & mask) ? 1 : 0; 2903 } 2904 2905 /* set ice TSO context descriptor */ 2906 static inline uint64_t 2907 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload) 2908 { 2909 uint64_t ctx_desc = 0; 2910 uint32_t cd_cmd, hdr_len, cd_tso_len; 2911 2912 if (!tx_offload.l4_len) { 2913 PMD_TX_LOG(DEBUG, "L4 length set to 0"); 2914 return ctx_desc; 2915 } 2916 2917 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len; 2918 hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? 2919 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0; 2920 2921 cd_cmd = ICE_TX_CTX_DESC_TSO; 2922 cd_tso_len = mbuf->pkt_len - hdr_len; 2923 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) | 2924 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | 2925 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S); 2926 2927 return ctx_desc; 2928 } 2929 2930 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. 
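* ICE_MAX_DATA_PER_TXD below extracts that limit from the 14-bit buffer size field, i.e. 16383 bytes.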
*/
2931 #define ICE_MAX_DATA_PER_TXD \
2932 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2933 /* Calculate the number of TX descriptors needed for each pkt */
2934 static inline uint16_t
2935 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2936 {
2937 struct rte_mbuf *txd = tx_pkt;
2938 uint16_t count = 0;
2939 
2940 while (txd != NULL) {
2941 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2942 txd = txd->next;
2943 }
2944 
2945 return count;
2946 }
2947 
2948 uint16_t
2949 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2950 {
2951 struct ci_tx_queue *txq;
2952 volatile struct ice_tx_desc *ice_tx_ring;
2953 volatile struct ice_tx_desc *txd;
2954 struct ci_tx_entry *sw_ring;
2955 struct ci_tx_entry *txe, *txn;
2956 struct rte_mbuf *tx_pkt;
2957 struct rte_mbuf *m_seg;
2958 uint32_t cd_tunneling_params;
2959 uint16_t tx_id;
2960 uint16_t nb_tx;
2961 uint16_t nb_used;
2962 uint16_t nb_ctx;
2963 uint32_t td_cmd = 0;
2964 uint32_t td_offset = 0;
2965 uint32_t td_tag = 0;
2966 uint16_t tx_last;
2967 uint16_t slen;
2968 uint64_t buf_dma_addr;
2969 uint64_t ol_flags;
2970 union ice_tx_offload tx_offload = {0};
2971 
2972 txq = tx_queue;
2973 sw_ring = txq->sw_ring;
2974 ice_tx_ring = txq->ice_tx_ring;
2975 tx_id = txq->tx_tail;
2976 txe = &sw_ring[tx_id];
2977 
2978 /* Check if the descriptor ring needs to be cleaned. */
2979 if (txq->nb_tx_free < txq->tx_free_thresh)
2980 (void)ice_xmit_cleanup(txq);
2981 
2982 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2983 tx_pkt = *tx_pkts++;
2984 
2985 td_cmd = 0;
2986 td_tag = 0;
2987 td_offset = 0;
2988 ol_flags = tx_pkt->ol_flags;
2989 tx_offload.l2_len = tx_pkt->l2_len;
2990 tx_offload.l3_len = tx_pkt->l3_len;
2991 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2992 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2993 tx_offload.l4_len = tx_pkt->l4_len;
2994 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2995 /* Calculate the number of context descriptors needed. */
2996 nb_ctx = ice_calc_context_desc(ol_flags);
2997 
2998 /* The number of descriptors that must be allocated for
2999 * a packet equals the number of segments of that packet
3000 * plus one context descriptor, if needed.
3001 * Recalculate the needed Tx descriptors when TSO is enabled,
3002 * in case the mbuf data size exceeds the max data size that
3003 * hw allows per Tx descriptor.
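* For example, a single mbuf segment carrying 40000 bytes of TSO payload needs DIV_ROUND_UP(40000, 16383) = 3 data descriptors.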
3004 */ 3005 if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) 3006 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) + 3007 nb_ctx); 3008 else 3009 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx); 3010 tx_last = (uint16_t)(tx_id + nb_used - 1); 3011 3012 /* Circular ring */ 3013 if (tx_last >= txq->nb_tx_desc) 3014 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); 3015 3016 if (nb_used > txq->nb_tx_free) { 3017 if (ice_xmit_cleanup(txq) != 0) { 3018 if (nb_tx == 0) 3019 return 0; 3020 goto end_of_tx; 3021 } 3022 if (unlikely(nb_used > txq->tx_rs_thresh)) { 3023 while (nb_used > txq->nb_tx_free) { 3024 if (ice_xmit_cleanup(txq) != 0) { 3025 if (nb_tx == 0) 3026 return 0; 3027 goto end_of_tx; 3028 } 3029 } 3030 } 3031 } 3032 3033 /* Descriptor based VLAN insertion */ 3034 if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) { 3035 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1; 3036 td_tag = tx_pkt->vlan_tci; 3037 } 3038 3039 /* Fill in tunneling parameters if necessary */ 3040 cd_tunneling_params = 0; 3041 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { 3042 td_offset |= (tx_offload.outer_l2_len >> 1) 3043 << ICE_TX_DESC_LEN_MACLEN_S; 3044 ice_parse_tunneling_params(ol_flags, tx_offload, 3045 &cd_tunneling_params); 3046 } 3047 3048 /* Enable checksum offloading */ 3049 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) 3050 ice_txd_enable_checksum(ol_flags, &td_cmd, 3051 &td_offset, tx_offload); 3052 3053 if (nb_ctx) { 3054 /* Setup TX context descriptor if required */ 3055 volatile struct ice_tx_ctx_desc *ctx_txd = 3056 (volatile struct ice_tx_ctx_desc *) 3057 &ice_tx_ring[tx_id]; 3058 uint16_t cd_l2tag2 = 0; 3059 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX; 3060 3061 txn = &sw_ring[txe->next_id]; 3062 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); 3063 if (txe->mbuf) { 3064 rte_pktmbuf_free_seg(txe->mbuf); 3065 txe->mbuf = NULL; 3066 } 3067 3068 if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) 3069 cd_type_cmd_tso_mss |= 3070 ice_set_tso_ctx(tx_pkt, tx_offload); 3071 else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) 3072 cd_type_cmd_tso_mss |= 3073 ((uint64_t)ICE_TX_CTX_DESC_TSYN << 3074 ICE_TXD_CTX_QW1_CMD_S) | 3075 (((uint64_t)txq->ice_vsi->adapter->ptp_tx_index << 3076 ICE_TXD_CTX_QW1_TSYN_S) & ICE_TXD_CTX_QW1_TSYN_M); 3077 3078 ctx_txd->tunneling_params = 3079 rte_cpu_to_le_32(cd_tunneling_params); 3080 3081 /* TX context descriptor based double VLAN insert */ 3082 if (ol_flags & RTE_MBUF_F_TX_QINQ) { 3083 cd_l2tag2 = tx_pkt->vlan_tci_outer; 3084 cd_type_cmd_tso_mss |= 3085 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 << 3086 ICE_TXD_CTX_QW1_CMD_S); 3087 } 3088 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2); 3089 ctx_txd->qw1 = 3090 rte_cpu_to_le_64(cd_type_cmd_tso_mss); 3091 3092 txe->last_id = tx_last; 3093 tx_id = txe->next_id; 3094 txe = txn; 3095 } 3096 m_seg = tx_pkt; 3097 3098 do { 3099 txd = &ice_tx_ring[tx_id]; 3100 txn = &sw_ring[txe->next_id]; 3101 3102 if (txe->mbuf) 3103 rte_pktmbuf_free_seg(txe->mbuf); 3104 txe->mbuf = m_seg; 3105 3106 /* Setup TX Descriptor */ 3107 slen = m_seg->data_len; 3108 buf_dma_addr = rte_mbuf_data_iova(m_seg); 3109 3110 while ((ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) && 3111 unlikely(slen > ICE_MAX_DATA_PER_TXD)) { 3112 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); 3113 txd->cmd_type_offset_bsz = 3114 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | 3115 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | 3116 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | 3117 ((uint64_t)ICE_MAX_DATA_PER_TXD << 3118 ICE_TXD_QW1_TX_BUF_SZ_S) | 3119 
((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); 3120 3121 buf_dma_addr += ICE_MAX_DATA_PER_TXD; 3122 slen -= ICE_MAX_DATA_PER_TXD; 3123 3124 txe->last_id = tx_last; 3125 tx_id = txe->next_id; 3126 txe = txn; 3127 txd = &ice_tx_ring[tx_id]; 3128 txn = &sw_ring[txe->next_id]; 3129 } 3130 3131 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); 3132 txd->cmd_type_offset_bsz = 3133 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | 3134 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | 3135 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | 3136 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) | 3137 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); 3138 3139 txe->last_id = tx_last; 3140 tx_id = txe->next_id; 3141 txe = txn; 3142 m_seg = m_seg->next; 3143 } while (m_seg); 3144 3145 /* fill the last descriptor with End of Packet (EOP) bit */ 3146 td_cmd |= ICE_TX_DESC_CMD_EOP; 3147 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); 3148 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); 3149 3150 /* set RS bit on the last descriptor of one packet */ 3151 if (txq->nb_tx_used >= txq->tx_rs_thresh) { 3152 PMD_TX_LOG(DEBUG, 3153 "Setting RS bit on TXD id=" 3154 "%4u (port=%d queue=%d)", 3155 tx_last, txq->port_id, txq->queue_id); 3156 3157 td_cmd |= ICE_TX_DESC_CMD_RS; 3158 3159 /* Update txq RS bit counters */ 3160 txq->nb_tx_used = 0; 3161 } 3162 txd->cmd_type_offset_bsz |= 3163 rte_cpu_to_le_64(((uint64_t)td_cmd) << 3164 ICE_TXD_QW1_CMD_S); 3165 } 3166 end_of_tx: 3167 /* update Tail register */ 3168 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id); 3169 txq->tx_tail = tx_id; 3170 3171 return nb_tx; 3172 } 3173 3174 static __rte_always_inline int 3175 ice_tx_free_bufs(struct ci_tx_queue *txq) 3176 { 3177 struct ci_tx_entry *txep; 3178 uint16_t i; 3179 3180 if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & 3181 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) != 3182 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) 3183 return 0; 3184 3185 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]; 3186 3187 for (i = 0; i < txq->tx_rs_thresh; i++) 3188 rte_prefetch0((txep + i)->mbuf); 3189 3190 if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) { 3191 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { 3192 rte_mempool_put(txep->mbuf->pool, txep->mbuf); 3193 txep->mbuf = NULL; 3194 } 3195 } else { 3196 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { 3197 rte_pktmbuf_free_seg(txep->mbuf); 3198 txep->mbuf = NULL; 3199 } 3200 } 3201 3202 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); 3203 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); 3204 if (txq->tx_next_dd >= txq->nb_tx_desc) 3205 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); 3206 3207 return txq->tx_rs_thresh; 3208 } 3209 3210 static int 3211 ice_tx_done_cleanup_full(struct ci_tx_queue *txq, 3212 uint32_t free_cnt) 3213 { 3214 struct ci_tx_entry *swr_ring = txq->sw_ring; 3215 uint16_t i, tx_last, tx_id; 3216 uint16_t nb_tx_free_last; 3217 uint16_t nb_tx_to_clean; 3218 uint32_t pkt_cnt; 3219 3220 /* Start free mbuf from the next of tx_tail */ 3221 tx_last = txq->tx_tail; 3222 tx_id = swr_ring[tx_last].next_id; 3223 3224 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq)) 3225 return 0; 3226 3227 nb_tx_to_clean = txq->nb_tx_free; 3228 nb_tx_free_last = txq->nb_tx_free; 3229 if (!free_cnt) 3230 free_cnt = txq->nb_tx_desc; 3231 3232 /* Loop through swr_ring to count the amount of 3233 * freeable mubfs and packets. 
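* A packet is counted once its last segment's entry is freed, i.e. when swr_ring[tx_id].last_id == tx_id.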
3234 */ 3235 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { 3236 for (i = 0; i < nb_tx_to_clean && 3237 pkt_cnt < free_cnt && 3238 tx_id != tx_last; i++) { 3239 if (swr_ring[tx_id].mbuf != NULL) { 3240 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); 3241 swr_ring[tx_id].mbuf = NULL; 3242 3243 /* 3244 * last segment in the packet, 3245 * increment packet count 3246 */ 3247 pkt_cnt += (swr_ring[tx_id].last_id == tx_id); 3248 } 3249 3250 tx_id = swr_ring[tx_id].next_id; 3251 } 3252 3253 if (txq->tx_rs_thresh > txq->nb_tx_desc - 3254 txq->nb_tx_free || tx_id == tx_last) 3255 break; 3256 3257 if (pkt_cnt < free_cnt) { 3258 if (ice_xmit_cleanup(txq)) 3259 break; 3260 3261 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last; 3262 nb_tx_free_last = txq->nb_tx_free; 3263 } 3264 } 3265 3266 return (int)pkt_cnt; 3267 } 3268 3269 #ifdef RTE_ARCH_X86 3270 static int 3271 ice_tx_done_cleanup_vec(struct ci_tx_queue *txq __rte_unused, 3272 uint32_t free_cnt __rte_unused) 3273 { 3274 return -ENOTSUP; 3275 } 3276 #endif 3277 3278 static int 3279 ice_tx_done_cleanup_simple(struct ci_tx_queue *txq, 3280 uint32_t free_cnt) 3281 { 3282 int i, n, cnt; 3283 3284 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) 3285 free_cnt = txq->nb_tx_desc; 3286 3287 cnt = free_cnt - free_cnt % txq->tx_rs_thresh; 3288 3289 for (i = 0; i < cnt; i += n) { 3290 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh) 3291 break; 3292 3293 n = ice_tx_free_bufs(txq); 3294 3295 if (n == 0) 3296 break; 3297 } 3298 3299 return i; 3300 } 3301 3302 int 3303 ice_tx_done_cleanup(void *txq, uint32_t free_cnt) 3304 { 3305 struct ci_tx_queue *q = (struct ci_tx_queue *)txq; 3306 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id]; 3307 struct ice_adapter *ad = 3308 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3309 3310 #ifdef RTE_ARCH_X86 3311 if (ad->tx_vec_allowed) 3312 return ice_tx_done_cleanup_vec(q, free_cnt); 3313 #endif 3314 if (ad->tx_simple_allowed) 3315 return ice_tx_done_cleanup_simple(q, free_cnt); 3316 else 3317 return ice_tx_done_cleanup_full(q, free_cnt); 3318 } 3319 3320 /* Populate 4 descriptors with data from 4 mbufs */ 3321 static inline void 3322 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts) 3323 { 3324 uint64_t dma_addr; 3325 uint32_t i; 3326 3327 for (i = 0; i < 4; i++, txdp++, pkts++) { 3328 dma_addr = rte_mbuf_data_iova(*pkts); 3329 txdp->buf_addr = rte_cpu_to_le_64(dma_addr); 3330 txdp->cmd_type_offset_bsz = 3331 ice_build_ctob((uint32_t)ICE_TD_CMD, 0, 3332 (*pkts)->data_len, 0); 3333 } 3334 } 3335 3336 /* Populate 1 descriptor with data from 1 mbuf */ 3337 static inline void 3338 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts) 3339 { 3340 uint64_t dma_addr; 3341 3342 dma_addr = rte_mbuf_data_iova(*pkts); 3343 txdp->buf_addr = rte_cpu_to_le_64(dma_addr); 3344 txdp->cmd_type_offset_bsz = 3345 ice_build_ctob((uint32_t)ICE_TD_CMD, 0, 3346 (*pkts)->data_len, 0); 3347 } 3348 3349 static inline void 3350 ice_tx_fill_hw_ring(struct ci_tx_queue *txq, struct rte_mbuf **pkts, 3351 uint16_t nb_pkts) 3352 { 3353 volatile struct ice_tx_desc *txdp = &txq->ice_tx_ring[txq->tx_tail]; 3354 struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail]; 3355 const int N_PER_LOOP = 4; 3356 const int N_PER_LOOP_MASK = N_PER_LOOP - 1; 3357 int mainpart, leftover; 3358 int i, j; 3359 3360 /** 3361 * Process most of the packets in chunks of N pkts. Any 3362 * leftover packets will get processed one at a time. 
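* Here N_PER_LOOP is 4, so mainpart is nb_pkts rounded down to a multiple of 4 and leftover is nb_pkts % 4.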
3363 */ 3364 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK); 3365 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK); 3366 for (i = 0; i < mainpart; i += N_PER_LOOP) { 3367 /* Copy N mbuf pointers to the S/W ring */ 3368 for (j = 0; j < N_PER_LOOP; ++j) 3369 (txep + i + j)->mbuf = *(pkts + i + j); 3370 tx4(txdp + i, pkts + i); 3371 } 3372 3373 if (unlikely(leftover > 0)) { 3374 for (i = 0; i < leftover; ++i) { 3375 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i); 3376 tx1(txdp + mainpart + i, pkts + mainpart + i); 3377 } 3378 } 3379 } 3380 3381 static inline uint16_t 3382 tx_xmit_pkts(struct ci_tx_queue *txq, 3383 struct rte_mbuf **tx_pkts, 3384 uint16_t nb_pkts) 3385 { 3386 volatile struct ice_tx_desc *txr = txq->ice_tx_ring; 3387 uint16_t n = 0; 3388 3389 /** 3390 * Begin scanning the H/W ring for done descriptors when the number 3391 * of available descriptors drops below tx_free_thresh. For each done 3392 * descriptor, free the associated buffer. 3393 */ 3394 if (txq->nb_tx_free < txq->tx_free_thresh) 3395 ice_tx_free_bufs(txq); 3396 3397 /* Use available descriptor only */ 3398 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); 3399 if (unlikely(!nb_pkts)) 3400 return 0; 3401 3402 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); 3403 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) { 3404 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail); 3405 ice_tx_fill_hw_ring(txq, tx_pkts, n); 3406 txr[txq->tx_next_rs].cmd_type_offset_bsz |= 3407 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << 3408 ICE_TXD_QW1_CMD_S); 3409 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); 3410 txq->tx_tail = 0; 3411 } 3412 3413 /* Fill hardware descriptor ring with mbuf data */ 3414 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); 3415 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); 3416 3417 /* Determine if RS bit needs to be set */ 3418 if (txq->tx_tail > txq->tx_next_rs) { 3419 txr[txq->tx_next_rs].cmd_type_offset_bsz |= 3420 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << 3421 ICE_TXD_QW1_CMD_S); 3422 txq->tx_next_rs = 3423 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); 3424 if (txq->tx_next_rs >= txq->nb_tx_desc) 3425 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); 3426 } 3427 3428 if (txq->tx_tail >= txq->nb_tx_desc) 3429 txq->tx_tail = 0; 3430 3431 /* Update the tx tail register */ 3432 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail); 3433 3434 return nb_pkts; 3435 } 3436 3437 static uint16_t 3438 ice_xmit_pkts_simple(void *tx_queue, 3439 struct rte_mbuf **tx_pkts, 3440 uint16_t nb_pkts) 3441 { 3442 uint16_t nb_tx = 0; 3443 3444 if (likely(nb_pkts <= ICE_TX_MAX_BURST)) 3445 return tx_xmit_pkts((struct ci_tx_queue *)tx_queue, 3446 tx_pkts, nb_pkts); 3447 3448 while (nb_pkts) { 3449 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts, 3450 ICE_TX_MAX_BURST); 3451 3452 ret = tx_xmit_pkts((struct ci_tx_queue *)tx_queue, 3453 &tx_pkts[nb_tx], num); 3454 nb_tx = (uint16_t)(nb_tx + ret); 3455 nb_pkts = (uint16_t)(nb_pkts - ret); 3456 if (ret < num) 3457 break; 3458 } 3459 3460 return nb_tx; 3461 } 3462 3463 void __rte_cold 3464 ice_set_rx_function(struct rte_eth_dev *dev) 3465 { 3466 PMD_INIT_FUNC_TRACE(); 3467 struct ice_adapter *ad = 3468 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3469 #ifdef RTE_ARCH_X86 3470 struct ice_rx_queue *rxq; 3471 int i; 3472 int rx_check_ret = -1; 3473 3474 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3475 ad->rx_use_avx512 = false; 3476 ad->rx_use_avx2 = false; 3477 rx_check_ret = ice_rx_vec_dev_check(dev); 3478 if 
(ad->ptp_ena) 3479 rx_check_ret = -1; 3480 ad->rx_vec_offload_support = 3481 (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH); 3482 if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed && 3483 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 3484 ad->rx_vec_allowed = true; 3485 for (i = 0; i < dev->data->nb_rx_queues; i++) { 3486 rxq = dev->data->rx_queues[i]; 3487 if (rxq && ice_rxq_vec_setup(rxq)) { 3488 ad->rx_vec_allowed = false; 3489 break; 3490 } 3491 } 3492 3493 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 && 3494 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 && 3495 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1) 3496 #ifdef CC_AVX512_SUPPORT 3497 ad->rx_use_avx512 = true; 3498 #else 3499 PMD_DRV_LOG(NOTICE, 3500 "AVX512 is not supported in build env"); 3501 #endif 3502 if (!ad->rx_use_avx512 && 3503 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || 3504 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && 3505 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) 3506 ad->rx_use_avx2 = true; 3507 3508 } else { 3509 ad->rx_vec_allowed = false; 3510 } 3511 } 3512 3513 if (ad->rx_vec_allowed) { 3514 if (dev->data->scattered_rx) { 3515 if (ad->rx_use_avx512) { 3516 #ifdef CC_AVX512_SUPPORT 3517 if (ad->rx_vec_offload_support) { 3518 PMD_DRV_LOG(NOTICE, 3519 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).", 3520 dev->data->port_id); 3521 dev->rx_pkt_burst = 3522 ice_recv_scattered_pkts_vec_avx512_offload; 3523 } else { 3524 PMD_DRV_LOG(NOTICE, 3525 "Using AVX512 Vector Scattered Rx (port %d).", 3526 dev->data->port_id); 3527 dev->rx_pkt_burst = 3528 ice_recv_scattered_pkts_vec_avx512; 3529 } 3530 #endif 3531 } else if (ad->rx_use_avx2) { 3532 if (ad->rx_vec_offload_support) { 3533 PMD_DRV_LOG(NOTICE, 3534 "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).", 3535 dev->data->port_id); 3536 dev->rx_pkt_burst = 3537 ice_recv_scattered_pkts_vec_avx2_offload; 3538 } else { 3539 PMD_DRV_LOG(NOTICE, 3540 "Using AVX2 Vector Scattered Rx (port %d).", 3541 dev->data->port_id); 3542 dev->rx_pkt_burst = 3543 ice_recv_scattered_pkts_vec_avx2; 3544 } 3545 } else { 3546 PMD_DRV_LOG(DEBUG, 3547 "Using Vector Scattered Rx (port %d).", 3548 dev->data->port_id); 3549 dev->rx_pkt_burst = ice_recv_scattered_pkts_vec; 3550 } 3551 } else { 3552 if (ad->rx_use_avx512) { 3553 #ifdef CC_AVX512_SUPPORT 3554 if (ad->rx_vec_offload_support) { 3555 PMD_DRV_LOG(NOTICE, 3556 "Using AVX512 OFFLOAD Vector Rx (port %d).", 3557 dev->data->port_id); 3558 dev->rx_pkt_burst = 3559 ice_recv_pkts_vec_avx512_offload; 3560 } else { 3561 PMD_DRV_LOG(NOTICE, 3562 "Using AVX512 Vector Rx (port %d).", 3563 dev->data->port_id); 3564 dev->rx_pkt_burst = 3565 ice_recv_pkts_vec_avx512; 3566 } 3567 #endif 3568 } else if (ad->rx_use_avx2) { 3569 if (ad->rx_vec_offload_support) { 3570 PMD_DRV_LOG(NOTICE, 3571 "Using AVX2 OFFLOAD Vector Rx (port %d).", 3572 dev->data->port_id); 3573 dev->rx_pkt_burst = 3574 ice_recv_pkts_vec_avx2_offload; 3575 } else { 3576 PMD_DRV_LOG(NOTICE, 3577 "Using AVX2 Vector Rx (port %d).", 3578 dev->data->port_id); 3579 dev->rx_pkt_burst = 3580 ice_recv_pkts_vec_avx2; 3581 } 3582 } else { 3583 PMD_DRV_LOG(DEBUG, 3584 "Using Vector Rx (port %d).", 3585 dev->data->port_id); 3586 dev->rx_pkt_burst = ice_recv_pkts_vec; 3587 } 3588 } 3589 return; 3590 } 3591 3592 #endif 3593 3594 if (dev->data->scattered_rx) { 3595 /* Set the non-LRO scattered function */ 3596 PMD_INIT_LOG(DEBUG, 3597 "Using a Scattered function on port %d.", 3598 dev->data->port_id); 3599 dev->rx_pkt_burst = 
ice_recv_scattered_pkts; 3600 } else if (ad->rx_bulk_alloc_allowed) { 3601 PMD_INIT_LOG(DEBUG, 3602 "Rx Burst Bulk Alloc Preconditions are " 3603 "satisfied. Rx Burst Bulk Alloc function " 3604 "will be used on port %d.", 3605 dev->data->port_id); 3606 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc; 3607 } else { 3608 PMD_INIT_LOG(DEBUG, 3609 "Rx Burst Bulk Alloc Preconditions are not " 3610 "satisfied, Normal Rx will be used on port %d.", 3611 dev->data->port_id); 3612 dev->rx_pkt_burst = ice_recv_pkts; 3613 } 3614 } 3615 3616 static const struct { 3617 eth_rx_burst_t pkt_burst; 3618 const char *info; 3619 } ice_rx_burst_infos[] = { 3620 { ice_recv_scattered_pkts, "Scalar Scattered" }, 3621 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" }, 3622 { ice_recv_pkts, "Scalar" }, 3623 #ifdef RTE_ARCH_X86 3624 #ifdef CC_AVX512_SUPPORT 3625 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" }, 3626 { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" }, 3627 { ice_recv_pkts_vec_avx512, "Vector AVX512" }, 3628 { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" }, 3629 #endif 3630 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" }, 3631 { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" }, 3632 { ice_recv_pkts_vec_avx2, "Vector AVX2" }, 3633 { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" }, 3634 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" }, 3635 { ice_recv_pkts_vec, "Vector SSE" }, 3636 #endif 3637 }; 3638 3639 int 3640 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3641 struct rte_eth_burst_mode *mode) 3642 { 3643 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 3644 int ret = -EINVAL; 3645 unsigned int i; 3646 3647 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) { 3648 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) { 3649 snprintf(mode->info, sizeof(mode->info), "%s", 3650 ice_rx_burst_infos[i].info); 3651 ret = 0; 3652 break; 3653 } 3654 } 3655 3656 return ret; 3657 } 3658 3659 void __rte_cold 3660 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ci_tx_queue *txq) 3661 { 3662 struct ice_adapter *ad = 3663 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3664 3665 /* Use a simple Tx queue if possible (only fast free is allowed) */ 3666 ad->tx_simple_allowed = 3667 (txq->offloads == 3668 (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) && 3669 txq->tx_rs_thresh >= ICE_TX_MAX_BURST); 3670 3671 if (ad->tx_simple_allowed) 3672 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.", 3673 txq->queue_id); 3674 else 3675 PMD_INIT_LOG(DEBUG, 3676 "Simple Tx can NOT be enabled on Tx queue %u.", 3677 txq->queue_id); 3678 } 3679 3680 /********************************************************************* 3681 * 3682 * TX prep functions 3683 * 3684 **********************************************************************/ 3685 /* The default values of TSO MSS */ 3686 #define ICE_MIN_TSO_MSS 64 3687 #define ICE_MAX_TSO_MSS 9728 3688 #define ICE_MAX_TSO_FRAME_SIZE 262144 3689 3690 /*Check for empty mbuf*/ 3691 static inline uint16_t 3692 ice_check_empty_mbuf(struct rte_mbuf *tx_pkt) 3693 { 3694 struct rte_mbuf *txd = tx_pkt; 3695 3696 while (txd != NULL) { 3697 if (txd->data_len == 0) 3698 return -1; 3699 txd = txd->next; 3700 } 3701 3702 return 0; 3703 } 3704 3705 /* Tx mbuf check */ 3706 static uint16_t 3707 ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 3708 { 3709 struct ci_tx_queue *txq = tx_queue; 3710 uint16_t idx; 
3711 struct rte_mbuf *mb;
3712 bool pkt_error = false;
3713 uint16_t good_pkts = nb_pkts;
3714 const char *reason = NULL;
3715 struct ice_adapter *adapter = txq->ice_vsi->adapter;
3716 uint64_t ol_flags;
3717 
3718 for (idx = 0; idx < nb_pkts; idx++) {
3719 mb = tx_pkts[idx];
3720 ol_flags = mb->ol_flags;
3721 
3722 if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_MBUF) &&
3723 (rte_mbuf_check(mb, 1, &reason) != 0)) {
3724 PMD_TX_LOG(ERR, "INVALID mbuf: %s", reason);
3725 pkt_error = true;
3726 break;
3727 }
3728 
3729 if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SIZE) &&
3730 (mb->data_len > mb->pkt_len ||
3731 mb->data_len < ICE_TX_MIN_PKT_LEN ||
3732 mb->data_len > ICE_FRAME_SIZE_MAX)) {
3733 PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out of range, reasonable range (%d - %d)",
3734 mb->data_len, ICE_TX_MIN_PKT_LEN, ICE_FRAME_SIZE_MAX);
3735 pkt_error = true;
3736 break;
3737 }
3738 
3739 if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SEGMENT) {
3740 if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
3741 /**
3742 * No TSO case: nb_segs and pkt_len must not exceed
3743 * the limits.
3744 */
3745 if (mb->nb_segs > ICE_TX_MTU_SEG_MAX) {
3746 PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds HW limit, maximum allowed value is %d",
3747 mb->nb_segs, ICE_TX_MTU_SEG_MAX);
3748 pkt_error = true;
3749 break;
3750 }
3751 if (mb->pkt_len > ICE_FRAME_SIZE_MAX) {
3752 PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%u) exceeds HW limit, maximum allowed value is %d",
3753 mb->pkt_len, ICE_FRAME_SIZE_MAX);
3754 pkt_error = true;
3755 break;
3756 }
3757 } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
3758 /** TSO case: tso_segsz, nb_segs and pkt_len must not exceed
3759 * the limits.
3760 */
3761 if (mb->tso_segsz < ICE_MIN_TSO_MSS ||
3762 mb->tso_segsz > ICE_MAX_TSO_MSS) {
3763 /**
3764 * An MSS outside this range is considered malicious
3765 */
3766 PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out of range, reasonable range (%d - %u)",
3767 mb->tso_segsz, ICE_MIN_TSO_MSS, ICE_MAX_TSO_MSS);
3768 pkt_error = true;
3769 break;
3770 }
3771 if (mb->nb_segs > ((struct ci_tx_queue *)tx_queue)->nb_tx_desc) {
3772 PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out of ring length");
3773 pkt_error = true;
3774 break;
3775 }
3776 }
3777 }
3778 
3779 if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_OFFLOAD) {
3780 if (ol_flags & ICE_TX_OFFLOAD_NOTSUP_MASK) {
3781 PMD_TX_LOG(ERR, "INVALID mbuf: TX offload is not supported");
3782 pkt_error = true;
3783 break;
3784 }
3785 
3786 if (!rte_validate_tx_offload(mb)) {
3787 PMD_TX_LOG(ERR, "INVALID mbuf: TX offload setup error");
3788 pkt_error = true;
3789 break;
3790 }
3791 }
3792 }
3793 
3794 if (pkt_error) {
3795 txq->mbuf_errors++;
3796 good_pkts = idx;
3797 if (good_pkts == 0)
3798 return 0;
3799 }
3800 
3801 return adapter->tx_pkt_burst(tx_queue, tx_pkts, good_pkts);
3802 }
3803 
3804 uint16_t
3805 ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
3806 uint16_t nb_pkts)
3807 {
3808 int i, ret;
3809 uint64_t ol_flags;
3810 struct rte_mbuf *m;
3811 
3812 for (i = 0; i < nb_pkts; i++) {
3813 m = tx_pkts[i];
3814 ol_flags = m->ol_flags;
3815 
3816 if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
3817 /**
3818 * No TSO case: nb_segs and pkt_len must not exceed
3819 * the limits.
3820 */
3821 (m->nb_segs > ICE_TX_MTU_SEG_MAX ||
3822 m->pkt_len > ICE_FRAME_SIZE_MAX)) {
3823 rte_errno = EINVAL;
3824 return i;
3825 } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
3826 /** TSO case: tso_segsz, nb_segs and pkt_len must not exceed
3827 * the limits.
3828 */ 3829 (m->tso_segsz < ICE_MIN_TSO_MSS || 3830 m->tso_segsz > ICE_MAX_TSO_MSS || 3831 m->nb_segs > 3832 ((struct ci_tx_queue *)tx_queue)->nb_tx_desc || 3833 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) { 3834 /** 3835 * MSS outside the range are considered malicious 3836 */ 3837 rte_errno = EINVAL; 3838 return i; 3839 } 3840 3841 if (m->pkt_len < ICE_TX_MIN_PKT_LEN) { 3842 rte_errno = EINVAL; 3843 return i; 3844 } 3845 3846 #ifdef RTE_ETHDEV_DEBUG_TX 3847 ret = rte_validate_tx_offload(m); 3848 if (ret != 0) { 3849 rte_errno = -ret; 3850 return i; 3851 } 3852 #endif 3853 ret = rte_net_intel_cksum_prepare(m); 3854 if (ret != 0) { 3855 rte_errno = -ret; 3856 return i; 3857 } 3858 3859 if (ice_check_empty_mbuf(m) != 0) { 3860 rte_errno = EINVAL; 3861 return i; 3862 } 3863 } 3864 return i; 3865 } 3866 3867 void __rte_cold 3868 ice_set_tx_function(struct rte_eth_dev *dev) 3869 { 3870 struct ice_adapter *ad = 3871 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3872 int mbuf_check = ad->devargs.mbuf_check; 3873 #ifdef RTE_ARCH_X86 3874 struct ci_tx_queue *txq; 3875 int i; 3876 int tx_check_ret = -1; 3877 3878 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3879 ad->tx_use_avx2 = false; 3880 ad->tx_use_avx512 = false; 3881 tx_check_ret = ice_tx_vec_dev_check(dev); 3882 if (tx_check_ret >= 0 && 3883 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 3884 ad->tx_vec_allowed = true; 3885 3886 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 && 3887 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 && 3888 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1) 3889 #ifdef CC_AVX512_SUPPORT 3890 ad->tx_use_avx512 = true; 3891 #else 3892 PMD_DRV_LOG(NOTICE, 3893 "AVX512 is not supported in build env"); 3894 #endif 3895 if (!ad->tx_use_avx512 && 3896 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || 3897 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && 3898 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) 3899 ad->tx_use_avx2 = true; 3900 3901 if (!ad->tx_use_avx2 && !ad->tx_use_avx512 && 3902 tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) 3903 ad->tx_vec_allowed = false; 3904 3905 if (ad->tx_vec_allowed) { 3906 for (i = 0; i < dev->data->nb_tx_queues; i++) { 3907 txq = dev->data->tx_queues[i]; 3908 if (txq && ice_txq_vec_setup(txq)) { 3909 ad->tx_vec_allowed = false; 3910 break; 3911 } 3912 } 3913 } 3914 } else { 3915 ad->tx_vec_allowed = false; 3916 } 3917 } 3918 3919 if (ad->tx_vec_allowed) { 3920 dev->tx_pkt_prepare = NULL; 3921 if (ad->tx_use_avx512) { 3922 #ifdef CC_AVX512_SUPPORT 3923 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) { 3924 PMD_DRV_LOG(NOTICE, 3925 "Using AVX512 OFFLOAD Vector Tx (port %d).", 3926 dev->data->port_id); 3927 dev->tx_pkt_burst = 3928 ice_xmit_pkts_vec_avx512_offload; 3929 dev->tx_pkt_prepare = ice_prep_pkts; 3930 } else { 3931 PMD_DRV_LOG(NOTICE, 3932 "Using AVX512 Vector Tx (port %d).", 3933 dev->data->port_id); 3934 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512; 3935 } 3936 #endif 3937 } else { 3938 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) { 3939 PMD_DRV_LOG(NOTICE, 3940 "Using AVX2 OFFLOAD Vector Tx (port %d).", 3941 dev->data->port_id); 3942 dev->tx_pkt_burst = 3943 ice_xmit_pkts_vec_avx2_offload; 3944 dev->tx_pkt_prepare = ice_prep_pkts; 3945 } else { 3946 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", 3947 ad->tx_use_avx2 ? "avx2 " : "", 3948 dev->data->port_id); 3949 dev->tx_pkt_burst = ad->tx_use_avx2 ? 
3950 ice_xmit_pkts_vec_avx2 :
3951 ice_xmit_pkts_vec;
3952 }
3953 }
3954
3955 if (mbuf_check) {
3956 ad->tx_pkt_burst = dev->tx_pkt_burst;
3957 dev->tx_pkt_burst = ice_xmit_pkts_check;
3958 }
3959 return;
3960 }
3961 #endif
3962
3963 if (ad->tx_simple_allowed) {
3964 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3965 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3966 dev->tx_pkt_prepare = NULL;
3967 } else {
3968 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3969 dev->tx_pkt_burst = ice_xmit_pkts;
3970 dev->tx_pkt_prepare = ice_prep_pkts;
3971 }
3972
3973 if (mbuf_check) {
3974 ad->tx_pkt_burst = dev->tx_pkt_burst;
3975 dev->tx_pkt_burst = ice_xmit_pkts_check;
3976 }
3977 }
3978
3979 static const struct {
3980 eth_tx_burst_t pkt_burst;
3981 const char *info;
3982 } ice_tx_burst_infos[] = {
3983 { ice_xmit_pkts_simple, "Scalar Simple" },
3984 { ice_xmit_pkts, "Scalar" },
3985 #ifdef RTE_ARCH_X86
3986 #ifdef CC_AVX512_SUPPORT
3987 { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3988 { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3989 #endif
3990 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3991 { ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" },
3992 { ice_xmit_pkts_vec, "Vector SSE" },
3993 #endif
3994 };
3995
3996 int
3997 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3998 struct rte_eth_burst_mode *mode)
3999 {
4000 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
4001 int ret = -EINVAL;
4002 unsigned int i;
4003
4004 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
4005 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
4006 snprintf(mode->info, sizeof(mode->info), "%s",
4007 ice_tx_burst_infos[i].info);
4008 ret = 0;
4009 break;
4010 }
4011 }
4012
4013 return ret;
4014 }
4015
4016 /* The hardware datasheet gives more details on the meaning of each value.
4017 *
4018 * @note: fix ice_dev_supported_ptypes_get() if any change here.
4019 */ 4020 static inline uint32_t 4021 ice_get_default_pkt_type(uint16_t ptype) 4022 { 4023 static const alignas(RTE_CACHE_LINE_SIZE) uint32_t type_table[ICE_MAX_PKT_TYPE] = { 4024 /* L2 types */ 4025 /* [0] reserved */ 4026 [1] = RTE_PTYPE_L2_ETHER, 4027 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, 4028 /* [3] - [5] reserved */ 4029 [6] = RTE_PTYPE_L2_ETHER_LLDP, 4030 /* [7] - [10] reserved */ 4031 [11] = RTE_PTYPE_L2_ETHER_ARP, 4032 /* [12] - [21] reserved */ 4033 4034 /* Non tunneled IPv4 */ 4035 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4036 RTE_PTYPE_L4_FRAG, 4037 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4038 RTE_PTYPE_L4_NONFRAG, 4039 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4040 RTE_PTYPE_L4_UDP, 4041 /* [25] reserved */ 4042 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4043 RTE_PTYPE_L4_TCP, 4044 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4045 RTE_PTYPE_L4_SCTP, 4046 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4047 RTE_PTYPE_L4_ICMP, 4048 4049 /* IPv4 --> IPv4 */ 4050 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4051 RTE_PTYPE_TUNNEL_IP | 4052 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4053 RTE_PTYPE_INNER_L4_FRAG, 4054 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4055 RTE_PTYPE_TUNNEL_IP | 4056 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4057 RTE_PTYPE_INNER_L4_NONFRAG, 4058 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4059 RTE_PTYPE_TUNNEL_IP | 4060 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4061 RTE_PTYPE_INNER_L4_UDP, 4062 /* [32] reserved */ 4063 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4064 RTE_PTYPE_TUNNEL_IP | 4065 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4066 RTE_PTYPE_INNER_L4_TCP, 4067 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4068 RTE_PTYPE_TUNNEL_IP | 4069 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4070 RTE_PTYPE_INNER_L4_SCTP, 4071 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4072 RTE_PTYPE_TUNNEL_IP | 4073 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4074 RTE_PTYPE_INNER_L4_ICMP, 4075 4076 /* IPv4 --> IPv6 */ 4077 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4078 RTE_PTYPE_TUNNEL_IP | 4079 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4080 RTE_PTYPE_INNER_L4_FRAG, 4081 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4082 RTE_PTYPE_TUNNEL_IP | 4083 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4084 RTE_PTYPE_INNER_L4_NONFRAG, 4085 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4086 RTE_PTYPE_TUNNEL_IP | 4087 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4088 RTE_PTYPE_INNER_L4_UDP, 4089 /* [39] reserved */ 4090 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4091 RTE_PTYPE_TUNNEL_IP | 4092 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4093 RTE_PTYPE_INNER_L4_TCP, 4094 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4095 RTE_PTYPE_TUNNEL_IP | 4096 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4097 RTE_PTYPE_INNER_L4_SCTP, 4098 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4099 RTE_PTYPE_TUNNEL_IP | 4100 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4101 RTE_PTYPE_INNER_L4_ICMP, 4102 4103 /* IPv4 --> GRE/Teredo/VXLAN */ 4104 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4105 RTE_PTYPE_TUNNEL_GRENAT, 4106 4107 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ 4108 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4109 RTE_PTYPE_TUNNEL_GRENAT | 4110 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4111 RTE_PTYPE_INNER_L4_FRAG, 4112 [45] = RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4113 RTE_PTYPE_TUNNEL_GRENAT | 4114 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4115 RTE_PTYPE_INNER_L4_NONFRAG, 4116 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4117 RTE_PTYPE_TUNNEL_GRENAT | 4118 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4119 RTE_PTYPE_INNER_L4_UDP, 4120 /* [47] reserved */ 4121 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4122 RTE_PTYPE_TUNNEL_GRENAT | 4123 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4124 RTE_PTYPE_INNER_L4_TCP, 4125 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4126 RTE_PTYPE_TUNNEL_GRENAT | 4127 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4128 RTE_PTYPE_INNER_L4_SCTP, 4129 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4130 RTE_PTYPE_TUNNEL_GRENAT | 4131 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4132 RTE_PTYPE_INNER_L4_ICMP, 4133 4134 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ 4135 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4136 RTE_PTYPE_TUNNEL_GRENAT | 4137 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4138 RTE_PTYPE_INNER_L4_FRAG, 4139 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4140 RTE_PTYPE_TUNNEL_GRENAT | 4141 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4142 RTE_PTYPE_INNER_L4_NONFRAG, 4143 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4144 RTE_PTYPE_TUNNEL_GRENAT | 4145 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4146 RTE_PTYPE_INNER_L4_UDP, 4147 /* [54] reserved */ 4148 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4149 RTE_PTYPE_TUNNEL_GRENAT | 4150 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4151 RTE_PTYPE_INNER_L4_TCP, 4152 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4153 RTE_PTYPE_TUNNEL_GRENAT | 4154 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4155 RTE_PTYPE_INNER_L4_SCTP, 4156 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4157 RTE_PTYPE_TUNNEL_GRENAT | 4158 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4159 RTE_PTYPE_INNER_L4_ICMP, 4160 4161 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ 4162 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4163 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, 4164 4165 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ 4166 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4167 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4168 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4169 RTE_PTYPE_INNER_L4_FRAG, 4170 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4171 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4172 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4173 RTE_PTYPE_INNER_L4_NONFRAG, 4174 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4175 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4176 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4177 RTE_PTYPE_INNER_L4_UDP, 4178 /* [62] reserved */ 4179 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4180 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4181 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4182 RTE_PTYPE_INNER_L4_TCP, 4183 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4184 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4185 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4186 RTE_PTYPE_INNER_L4_SCTP, 4187 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4188 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4189 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4190 RTE_PTYPE_INNER_L4_ICMP, 4191 4192 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ 4193 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4194 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4195 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4196 RTE_PTYPE_INNER_L4_FRAG, 4197 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4198 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4199 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4200 RTE_PTYPE_INNER_L4_NONFRAG, 4201 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4202 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4203 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4204 RTE_PTYPE_INNER_L4_UDP, 4205 /* [69] reserved */ 4206 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4207 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4208 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4209 RTE_PTYPE_INNER_L4_TCP, 4210 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4211 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4212 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4213 RTE_PTYPE_INNER_L4_SCTP, 4214 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4215 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4216 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4217 RTE_PTYPE_INNER_L4_ICMP, 4218 /* [73] - [87] reserved */ 4219 4220 /* Non tunneled IPv6 */ 4221 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4222 RTE_PTYPE_L4_FRAG, 4223 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4224 RTE_PTYPE_L4_NONFRAG, 4225 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4226 RTE_PTYPE_L4_UDP, 4227 /* [91] reserved */ 4228 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4229 RTE_PTYPE_L4_TCP, 4230 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4231 RTE_PTYPE_L4_SCTP, 4232 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4233 RTE_PTYPE_L4_ICMP, 4234 4235 /* IPv6 --> IPv4 */ 4236 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4237 RTE_PTYPE_TUNNEL_IP | 4238 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4239 RTE_PTYPE_INNER_L4_FRAG, 4240 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4241 RTE_PTYPE_TUNNEL_IP | 4242 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4243 RTE_PTYPE_INNER_L4_NONFRAG, 4244 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4245 RTE_PTYPE_TUNNEL_IP | 4246 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4247 RTE_PTYPE_INNER_L4_UDP, 4248 /* [98] reserved */ 4249 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4250 RTE_PTYPE_TUNNEL_IP | 4251 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4252 RTE_PTYPE_INNER_L4_TCP, 4253 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4254 RTE_PTYPE_TUNNEL_IP | 4255 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4256 RTE_PTYPE_INNER_L4_SCTP, 4257 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4258 RTE_PTYPE_TUNNEL_IP | 4259 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4260 RTE_PTYPE_INNER_L4_ICMP, 4261 4262 /* IPv6 --> IPv6 */ 4263 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4264 RTE_PTYPE_TUNNEL_IP | 4265 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4266 RTE_PTYPE_INNER_L4_FRAG, 4267 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4268 RTE_PTYPE_TUNNEL_IP | 4269 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4270 RTE_PTYPE_INNER_L4_NONFRAG, 4271 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4272 RTE_PTYPE_TUNNEL_IP | 4273 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4274 RTE_PTYPE_INNER_L4_UDP, 4275 /* [105] reserved */ 4276 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4277 RTE_PTYPE_TUNNEL_IP | 4278 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4279 RTE_PTYPE_INNER_L4_TCP, 4280 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4281 
RTE_PTYPE_TUNNEL_IP | 4282 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4283 RTE_PTYPE_INNER_L4_SCTP, 4284 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4285 RTE_PTYPE_TUNNEL_IP | 4286 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4287 RTE_PTYPE_INNER_L4_ICMP, 4288 4289 /* IPv6 --> GRE/Teredo/VXLAN */ 4290 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4291 RTE_PTYPE_TUNNEL_GRENAT, 4292 4293 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ 4294 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4295 RTE_PTYPE_TUNNEL_GRENAT | 4296 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4297 RTE_PTYPE_INNER_L4_FRAG, 4298 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4299 RTE_PTYPE_TUNNEL_GRENAT | 4300 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4301 RTE_PTYPE_INNER_L4_NONFRAG, 4302 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4303 RTE_PTYPE_TUNNEL_GRENAT | 4304 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4305 RTE_PTYPE_INNER_L4_UDP, 4306 /* [113] reserved */ 4307 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4308 RTE_PTYPE_TUNNEL_GRENAT | 4309 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4310 RTE_PTYPE_INNER_L4_TCP, 4311 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4312 RTE_PTYPE_TUNNEL_GRENAT | 4313 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4314 RTE_PTYPE_INNER_L4_SCTP, 4315 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4316 RTE_PTYPE_TUNNEL_GRENAT | 4317 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4318 RTE_PTYPE_INNER_L4_ICMP, 4319 4320 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ 4321 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4322 RTE_PTYPE_TUNNEL_GRENAT | 4323 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4324 RTE_PTYPE_INNER_L4_FRAG, 4325 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4326 RTE_PTYPE_TUNNEL_GRENAT | 4327 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4328 RTE_PTYPE_INNER_L4_NONFRAG, 4329 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4330 RTE_PTYPE_TUNNEL_GRENAT | 4331 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4332 RTE_PTYPE_INNER_L4_UDP, 4333 /* [120] reserved */ 4334 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4335 RTE_PTYPE_TUNNEL_GRENAT | 4336 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4337 RTE_PTYPE_INNER_L4_TCP, 4338 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4339 RTE_PTYPE_TUNNEL_GRENAT | 4340 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4341 RTE_PTYPE_INNER_L4_SCTP, 4342 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4343 RTE_PTYPE_TUNNEL_GRENAT | 4344 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4345 RTE_PTYPE_INNER_L4_ICMP, 4346 4347 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ 4348 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4349 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, 4350 4351 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ 4352 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4353 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4354 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4355 RTE_PTYPE_INNER_L4_FRAG, 4356 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4357 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4358 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4359 RTE_PTYPE_INNER_L4_NONFRAG, 4360 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4361 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4362 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4363 RTE_PTYPE_INNER_L4_UDP, 4364 /* [128] reserved */ 4365 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4366 RTE_PTYPE_TUNNEL_GRENAT | 
RTE_PTYPE_INNER_L2_ETHER | 4367 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4368 RTE_PTYPE_INNER_L4_TCP, 4369 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4370 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4371 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4372 RTE_PTYPE_INNER_L4_SCTP, 4373 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4374 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4375 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4376 RTE_PTYPE_INNER_L4_ICMP, 4377 4378 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ 4379 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4380 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4381 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4382 RTE_PTYPE_INNER_L4_FRAG, 4383 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4384 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4385 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4386 RTE_PTYPE_INNER_L4_NONFRAG, 4387 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4388 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4389 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4390 RTE_PTYPE_INNER_L4_UDP, 4391 /* [135] reserved */ 4392 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4393 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4394 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4395 RTE_PTYPE_INNER_L4_TCP, 4396 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4397 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4398 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4399 RTE_PTYPE_INNER_L4_SCTP, 4400 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4401 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4402 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4403 RTE_PTYPE_INNER_L4_ICMP, 4404 /* [139] - [299] reserved */ 4405 4406 /* PPPoE */ 4407 [300] = RTE_PTYPE_L2_ETHER_PPPOE, 4408 [301] = RTE_PTYPE_L2_ETHER_PPPOE, 4409 4410 /* PPPoE --> IPv4 */ 4411 [302] = RTE_PTYPE_L2_ETHER_PPPOE | 4412 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4413 RTE_PTYPE_L4_FRAG, 4414 [303] = RTE_PTYPE_L2_ETHER_PPPOE | 4415 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4416 RTE_PTYPE_L4_NONFRAG, 4417 [304] = RTE_PTYPE_L2_ETHER_PPPOE | 4418 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4419 RTE_PTYPE_L4_UDP, 4420 [305] = RTE_PTYPE_L2_ETHER_PPPOE | 4421 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4422 RTE_PTYPE_L4_TCP, 4423 [306] = RTE_PTYPE_L2_ETHER_PPPOE | 4424 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4425 RTE_PTYPE_L4_SCTP, 4426 [307] = RTE_PTYPE_L2_ETHER_PPPOE | 4427 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4428 RTE_PTYPE_L4_ICMP, 4429 4430 /* PPPoE --> IPv6 */ 4431 [308] = RTE_PTYPE_L2_ETHER_PPPOE | 4432 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4433 RTE_PTYPE_L4_FRAG, 4434 [309] = RTE_PTYPE_L2_ETHER_PPPOE | 4435 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4436 RTE_PTYPE_L4_NONFRAG, 4437 [310] = RTE_PTYPE_L2_ETHER_PPPOE | 4438 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4439 RTE_PTYPE_L4_UDP, 4440 [311] = RTE_PTYPE_L2_ETHER_PPPOE | 4441 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4442 RTE_PTYPE_L4_TCP, 4443 [312] = RTE_PTYPE_L2_ETHER_PPPOE | 4444 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4445 RTE_PTYPE_L4_SCTP, 4446 [313] = RTE_PTYPE_L2_ETHER_PPPOE | 4447 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4448 RTE_PTYPE_L4_ICMP, 4449 /* [314] - [324] reserved */ 4450 4451 /* IPv4/IPv6 --> GTPC/GTPU */ 4452 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4453 RTE_PTYPE_TUNNEL_GTPC, 4454 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4455 RTE_PTYPE_TUNNEL_GTPC, 4456 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4457 RTE_PTYPE_TUNNEL_GTPC, 4458 [328] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4459 RTE_PTYPE_TUNNEL_GTPC, 4460 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4461 RTE_PTYPE_TUNNEL_GTPU, 4462 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4463 RTE_PTYPE_TUNNEL_GTPU, 4464 4465 /* IPv4 --> GTPU --> IPv4 */ 4466 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4467 RTE_PTYPE_TUNNEL_GTPU | 4468 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4469 RTE_PTYPE_INNER_L4_FRAG, 4470 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4471 RTE_PTYPE_TUNNEL_GTPU | 4472 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4473 RTE_PTYPE_INNER_L4_NONFRAG, 4474 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4475 RTE_PTYPE_TUNNEL_GTPU | 4476 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4477 RTE_PTYPE_INNER_L4_UDP, 4478 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4479 RTE_PTYPE_TUNNEL_GTPU | 4480 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4481 RTE_PTYPE_INNER_L4_TCP, 4482 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4483 RTE_PTYPE_TUNNEL_GTPU | 4484 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4485 RTE_PTYPE_INNER_L4_ICMP, 4486 4487 /* IPv6 --> GTPU --> IPv4 */ 4488 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4489 RTE_PTYPE_TUNNEL_GTPU | 4490 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4491 RTE_PTYPE_INNER_L4_FRAG, 4492 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4493 RTE_PTYPE_TUNNEL_GTPU | 4494 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4495 RTE_PTYPE_INNER_L4_NONFRAG, 4496 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4497 RTE_PTYPE_TUNNEL_GTPU | 4498 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4499 RTE_PTYPE_INNER_L4_UDP, 4500 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4501 RTE_PTYPE_TUNNEL_GTPU | 4502 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4503 RTE_PTYPE_INNER_L4_TCP, 4504 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4505 RTE_PTYPE_TUNNEL_GTPU | 4506 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4507 RTE_PTYPE_INNER_L4_ICMP, 4508 4509 /* IPv4 --> GTPU --> IPv6 */ 4510 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4511 RTE_PTYPE_TUNNEL_GTPU | 4512 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4513 RTE_PTYPE_INNER_L4_FRAG, 4514 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4515 RTE_PTYPE_TUNNEL_GTPU | 4516 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4517 RTE_PTYPE_INNER_L4_NONFRAG, 4518 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4519 RTE_PTYPE_TUNNEL_GTPU | 4520 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4521 RTE_PTYPE_INNER_L4_UDP, 4522 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4523 RTE_PTYPE_TUNNEL_GTPU | 4524 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4525 RTE_PTYPE_INNER_L4_TCP, 4526 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4527 RTE_PTYPE_TUNNEL_GTPU | 4528 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4529 RTE_PTYPE_INNER_L4_ICMP, 4530 4531 /* IPv6 --> GTPU --> IPv6 */ 4532 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4533 RTE_PTYPE_TUNNEL_GTPU | 4534 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4535 RTE_PTYPE_INNER_L4_FRAG, 4536 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4537 RTE_PTYPE_TUNNEL_GTPU | 4538 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4539 RTE_PTYPE_INNER_L4_NONFRAG, 4540 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4541 RTE_PTYPE_TUNNEL_GTPU | 4542 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4543 RTE_PTYPE_INNER_L4_UDP, 4544 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4545 RTE_PTYPE_TUNNEL_GTPU | 4546 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4547 RTE_PTYPE_INNER_L4_TCP, 4548 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4549 RTE_PTYPE_TUNNEL_GTPU | 4550 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4551 RTE_PTYPE_INNER_L4_ICMP, 4552 4553 /* IPv4 --> UDP ECPRI */ 4554 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4555 RTE_PTYPE_L4_UDP, 4556 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4557 RTE_PTYPE_L4_UDP, 4558 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4559 RTE_PTYPE_L4_UDP, 4560 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4561 RTE_PTYPE_L4_UDP, 4562 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4563 RTE_PTYPE_L4_UDP, 4564 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4565 RTE_PTYPE_L4_UDP, 4566 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4567 RTE_PTYPE_L4_UDP, 4568 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4569 RTE_PTYPE_L4_UDP, 4570 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4571 RTE_PTYPE_L4_UDP, 4572 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4573 RTE_PTYPE_L4_UDP, 4574 4575 /* IPV6 --> UDP ECPRI */ 4576 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4577 RTE_PTYPE_L4_UDP, 4578 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4579 RTE_PTYPE_L4_UDP, 4580 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4581 RTE_PTYPE_L4_UDP, 4582 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4583 RTE_PTYPE_L4_UDP, 4584 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4585 RTE_PTYPE_L4_UDP, 4586 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4587 RTE_PTYPE_L4_UDP, 4588 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4589 RTE_PTYPE_L4_UDP, 4590 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4591 RTE_PTYPE_L4_UDP, 4592 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4593 RTE_PTYPE_L4_UDP, 4594 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4595 RTE_PTYPE_L4_UDP, 4596 /* All others reserved */ 4597 }; 4598 4599 return type_table[ptype]; 4600 } 4601 4602 void __rte_cold 4603 ice_set_default_ptype_table(struct rte_eth_dev *dev) 4604 { 4605 struct ice_adapter *ad = 4606 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 4607 int i; 4608 4609 for (i = 0; i < ICE_MAX_PKT_TYPE; i++) 4610 ad->ptype_tbl[i] = ice_get_default_pkt_type(i); 4611 } 4612 4613 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1 4614 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \ 4615 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S) 4616 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0 4617 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1 4618 4619 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4 4620 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \ 4621 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S) 4622 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5 4623 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \ 4624 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S) 4625 4626 /* 4627 * check the programming status descriptor in rx queue. 
4628 * This is done after a Flow Director filter has been programmed via
4629 * the FDIR Tx queue.
4630 */
4631 static inline int
4632 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4633 {
4634 volatile union ice_32byte_rx_desc *rxdp;
4635 uint64_t qword1;
4636 uint32_t rx_status;
4637 uint32_t error;
4638 uint32_t id;
4639 int ret = -EAGAIN;
4640
4641 rxdp = (volatile union ice_32byte_rx_desc *)
4642 (&rxq->rx_ring[rxq->rx_tail]);
4643 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4644 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4645 >> ICE_RXD_QW1_STATUS_S;
4646
4647 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4648 ret = 0;
4649 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4650 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4651 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4652 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4653 if (error) {
4654 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4655 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4656 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4657 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4658 ret = -EINVAL;
4659 goto err;
4660 }
4661 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4662 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4663 if (error) {
4664 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4665 ret = -EINVAL;
4666 }
4667 err:
4668 rxdp->wb.qword1.status_error_len = 0;
4669 rxq->rx_tail++;
4670 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4671 rxq->rx_tail = 0;
4672 if (rxq->rx_tail == 0)
4673 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4674 else
4675 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4676 }
4677
4678 return ret;
4679 }
4680
4681 #define ICE_FDIR_MAX_WAIT_US 10000
4682
4683 int
4684 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4685 {
4686 struct ci_tx_queue *txq = pf->fdir.txq;
4687 struct ice_rx_queue *rxq = pf->fdir.rxq;
4688 volatile struct ice_fltr_desc *fdirdp;
4689 volatile struct ice_tx_desc *txdp;
4690 uint32_t td_cmd;
4691 uint16_t i;
4692
4693 fdirdp = (volatile struct ice_fltr_desc *)
4694 (&txq->ice_tx_ring[txq->tx_tail]);
4695 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4696 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4697
4698 txdp = &txq->ice_tx_ring[txq->tx_tail + 1];
4699 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4700 td_cmd = ICE_TX_DESC_CMD_EOP |
4701 ICE_TX_DESC_CMD_RS |
4702 ICE_TX_DESC_CMD_DUMMY;
4703
4704 txdp->cmd_type_offset_bsz =
4705 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
4706
4707 txq->tx_tail += 2;
4708 if (txq->tx_tail >= txq->nb_tx_desc)
4709 txq->tx_tail = 0;
4710 /* Update the tx tail register */
4711 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
4712 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4713 if ((txdp->cmd_type_offset_bsz &
4714 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4715 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4716 break;
4717 rte_delay_us(1);
4718 }
4719 if (i >= ICE_FDIR_MAX_WAIT_US) {
4720 PMD_DRV_LOG(ERR,
4721 "Failed to program FDIR filter: timed out waiting for the DD bit on the Tx queue.");
4722 return -ETIMEDOUT;
4723 }
4724
4725 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4726 int ret;
4727
4728 ret = ice_check_fdir_programming_status(rxq);
4729 if (ret == -EAGAIN)
4730 rte_delay_us(1);
4731 else
4732 return ret;
4733 }
4734
4735 PMD_DRV_LOG(ERR,
4736 "Failed to program FDIR filter: timed out waiting for programming status on the Rx queue.");
4737 return -ETIMEDOUT;
4738 }
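
/*
 * Illustrative only: a minimal application-side sketch (not part of this
 * PMD) showing how the burst mode strings filled in by
 * ice_rx_burst_mode_get()/ice_tx_burst_mode_get() above can be retrieved
 * through the generic ethdev API. It assumes an already configured and
 * started port; the guard macro and the helper name print_ice_burst_modes()
 * are hypothetical and exist only for this example.
 */
#ifdef ICE_RXTX_BURST_MODE_EXAMPLE
#include <stdio.h>
#include <rte_ethdev.h>

static void
print_ice_burst_modes(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	/* Resolved by ice_rx_burst_mode_get() against ice_rx_burst_infos[]. */
	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("port %u rxq %u burst mode: %s\n",
		       port_id, queue_id, mode.info);

	/* Resolved by ice_tx_burst_mode_get() against ice_tx_burst_infos[]. */
	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("port %u txq %u burst mode: %s\n",
		       port_id, queue_id, mode.info);
}
#endif /* ICE_RXTX_BURST_MODE_EXAMPLE */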
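
/*
 * Illustrative only: a minimal application-side sketch (not part of this
 * PMD) of consuming the RTE_PTYPE_* values that ice_get_default_pkt_type()
 * publishes through ad->ptype_tbl and that the Rx paths store in
 * mbuf->packet_type. The guard macro and the helper name dump_pkt_type()
 * are hypothetical and exist only for this example.
 */
#ifdef ICE_RXTX_PTYPE_EXAMPLE
#include <stdio.h>
#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

static void
dump_pkt_type(const struct rte_mbuf *m)
{
	char buf[128];

	/* Translate the numeric packet type into a human readable string. */
	if (rte_get_ptype_name(m->packet_type, buf, sizeof(buf)) >= 0)
		printf("packet_type 0x%08x: %s\n", m->packet_type, buf);

	/* Coarse checks can use the RTE_ETH_IS_*_HDR helpers instead. */
	if (RTE_ETH_IS_IPV4_HDR(m->packet_type))
		printf("outer header is IPv4\n");
}
#endif /* ICE_RXTX_PTYPE_EXAMPLE */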