1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018 Intel Corporation 3 */ 4 5 #include <ethdev_driver.h> 6 #include <rte_net.h> 7 #include <rte_vect.h> 8 9 #include "ice_rxtx.h" 10 #include "ice_rxtx_vec_common.h" 11 12 #define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \ 13 RTE_MBUF_F_TX_L4_MASK | \ 14 RTE_MBUF_F_TX_TCP_SEG | \ 15 RTE_MBUF_F_TX_UDP_SEG | \ 16 RTE_MBUF_F_TX_OUTER_IP_CKSUM) 17 18 /** 19 * The mbuf dynamic field pointer for protocol extraction metadata. 20 */ 21 #define ICE_DYNF_PROTO_XTR_METADATA(m, n) \ 22 RTE_MBUF_DYNFIELD((m), (n), uint32_t *) 23 24 static int 25 ice_monitor_callback(const uint64_t value, 26 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused) 27 { 28 const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S); 29 /* 30 * we expect the DD bit to be set to 1 if this descriptor was already 31 * written to. 32 */ 33 return (value & m) == m ? -1 : 0; 34 } 35 36 int 37 ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc) 38 { 39 volatile union ice_rx_flex_desc *rxdp; 40 struct ice_rx_queue *rxq = rx_queue; 41 uint16_t desc; 42 43 desc = rxq->rx_tail; 44 rxdp = &rxq->rx_ring[desc]; 45 /* watch for changes in status bit */ 46 pmc->addr = &rxdp->wb.status_error0; 47 48 /* comparison callback */ 49 pmc->fn = ice_monitor_callback; 50 51 /* register is 16-bit */ 52 pmc->size = sizeof(uint16_t); 53 54 return 0; 55 } 56 57 58 static inline uint8_t 59 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type) 60 { 61 static uint8_t rxdid_map[] = { 62 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS, 63 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN, 64 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4, 65 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6, 66 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW, 67 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP, 68 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET, 69 }; 70 71 return xtr_type < RTE_DIM(rxdid_map) ? 
72 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS; 73 } 74 75 static inline void 76 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq, 77 struct rte_mbuf *mb, 78 volatile union ice_rx_flex_desc *rxdp) 79 { 80 volatile struct ice_32b_rx_flex_desc_comms *desc = 81 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp; 82 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0); 83 84 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 85 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 86 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 87 } 88 89 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 90 if (desc->flow_id != 0xFFFFFFFF) { 91 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 92 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 93 } 94 #endif 95 } 96 97 static inline void 98 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq, 99 struct rte_mbuf *mb, 100 volatile union ice_rx_flex_desc *rxdp) 101 { 102 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc = 103 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp; 104 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 105 uint16_t stat_err; 106 #endif 107 108 if (desc->flow_id != 0xFFFFFFFF) { 109 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 110 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 111 } 112 113 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 114 stat_err = rte_le_to_cpu_16(desc->status_error0); 115 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 116 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 117 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 118 } 119 #endif 120 } 121 122 static inline void 123 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq, 124 struct rte_mbuf *mb, 125 volatile union ice_rx_flex_desc *rxdp) 126 { 127 volatile struct ice_32b_rx_flex_desc_comms *desc = 128 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp; 129 uint16_t stat_err; 130 131 stat_err = rte_le_to_cpu_16(desc->status_error0); 132 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 133 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 134 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 135 } 136 137 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 138 if (desc->flow_id != 0xFFFFFFFF) { 139 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 140 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 141 } 142 143 if (rxq->xtr_ol_flag) { 144 uint32_t metadata = 0; 145 146 stat_err = rte_le_to_cpu_16(desc->status_error1); 147 148 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S)) 149 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0); 150 151 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)) 152 metadata |= 153 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16; 154 155 if (metadata) { 156 mb->ol_flags |= rxq->xtr_ol_flag; 157 158 *ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata; 159 } 160 } 161 #else 162 RTE_SET_USED(rxq); 163 #endif 164 } 165 166 static inline void 167 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq, 168 struct rte_mbuf *mb, 169 volatile union ice_rx_flex_desc *rxdp) 170 { 171 volatile struct ice_32b_rx_flex_desc_comms *desc = 172 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp; 173 uint16_t stat_err; 174 175 stat_err = rte_le_to_cpu_16(desc->status_error0); 176 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 177 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 178 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 179 } 180 181 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 182 if 
(desc->flow_id != 0xFFFFFFFF) { 183 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 184 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 185 } 186 187 if (rxq->xtr_ol_flag) { 188 uint32_t metadata = 0; 189 190 if (desc->flex_ts.flex.aux0 != 0xFFFF) 191 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0); 192 else if (desc->flex_ts.flex.aux1 != 0xFFFF) 193 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1); 194 195 if (metadata) { 196 mb->ol_flags |= rxq->xtr_ol_flag; 197 198 *ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata; 199 } 200 } 201 #else 202 RTE_SET_USED(rxq); 203 #endif 204 } 205 206 static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = { 207 [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 208 [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 209 [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 210 [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 211 [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 212 [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2, 213 [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic, 214 [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs, 215 }; 216 217 void 218 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid) 219 { 220 rxq->rxdid = rxdid; 221 222 switch (rxdid) { 223 case ICE_RXDID_COMMS_AUX_VLAN: 224 case ICE_RXDID_COMMS_AUX_IPV4: 225 case ICE_RXDID_COMMS_AUX_IPV6: 226 case ICE_RXDID_COMMS_AUX_IPV6_FLOW: 227 case ICE_RXDID_COMMS_AUX_TCP: 228 case ICE_RXDID_COMMS_AUX_IP_OFFSET: 229 break; 230 case ICE_RXDID_COMMS_GENERIC: 231 /* fallthrough */ 232 case ICE_RXDID_COMMS_OVS: 233 break; 234 235 default: 236 /* update this according to the RXDID for PROTO_XTR_NONE */ 237 rxq->rxdid = ICE_RXDID_COMMS_OVS; 238 break; 239 } 240 241 if (rxq->xtr_field_offs == -1) 242 rxq->xtr_ol_flag = 0; 243 } 244 245 static int 246 ice_program_hw_rx_queue(struct ice_rx_queue *rxq) 247 { 248 struct ice_vsi *vsi = rxq->vsi; 249 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 250 struct ice_pf *pf = ICE_VSI_TO_PF(vsi); 251 struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data; 252 struct ice_rlan_ctx rx_ctx; 253 uint16_t buf_size; 254 uint32_t rxdid = ICE_RXDID_COMMS_OVS; 255 uint32_t regval; 256 struct ice_adapter *ad = rxq->vsi->adapter; 257 uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD; 258 int err; 259 260 /* Set buffer size as the head split is disabled. 
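	 * The data buffer length is derived from the mempool's data room
	 * minus the mbuf headroom, rounded down to the hardware buffer-size
	 * granularity and capped at the maximum single-buffer size; the
	 * maximum packet length is then bounded by both the MTU-derived
	 * frame size and the longest chain of such buffers supported.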
*/ 261 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - 262 RTE_PKTMBUF_HEADROOM); 263 rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); 264 rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE); 265 rxq->max_pkt_len = 266 RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len, 267 frame_size); 268 269 if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN || 270 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) { 271 PMD_DRV_LOG(ERR, "maximum packet length must " 272 "be larger than %u and smaller than %u", 273 (uint32_t)RTE_ETHER_MIN_LEN, 274 (uint32_t)ICE_FRAME_SIZE_MAX); 275 return -EINVAL; 276 } 277 278 if (!rxq->ts_enable && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 279 /* Register mbuf field and flag for Rx timestamp */ 280 err = rte_mbuf_dyn_rx_timestamp_register( 281 &ice_timestamp_dynfield_offset, 282 &ice_timestamp_dynflag); 283 if (err) { 284 PMD_DRV_LOG(ERR, 285 "Cannot register mbuf field/flag for timestamp"); 286 return -EINVAL; 287 } 288 rxq->ts_enable = true; 289 } 290 291 memset(&rx_ctx, 0, sizeof(rx_ctx)); 292 293 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 294 uint32_t proto_hdr; 295 proto_hdr = rxq->rxseg[0].proto_hdr; 296 297 if (proto_hdr == RTE_PTYPE_UNKNOWN) { 298 PMD_DRV_LOG(ERR, "Buffer split protocol must be configured"); 299 return -EINVAL; 300 } 301 302 switch (proto_hdr & RTE_PTYPE_L4_MASK) { 303 case RTE_PTYPE_L4_TCP: 304 case RTE_PTYPE_L4_UDP: 305 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 306 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP; 307 goto set_hsplit_finish; 308 case RTE_PTYPE_L4_SCTP: 309 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 310 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP; 311 goto set_hsplit_finish; 312 } 313 314 switch (proto_hdr & RTE_PTYPE_L3_MASK) { 315 case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: 316 case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: 317 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 318 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP; 319 goto set_hsplit_finish; 320 } 321 322 switch (proto_hdr & RTE_PTYPE_L2_MASK) { 323 case RTE_PTYPE_L2_ETHER: 324 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 325 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2; 326 rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_L2; 327 goto set_hsplit_finish; 328 } 329 330 switch (proto_hdr & RTE_PTYPE_INNER_L4_MASK) { 331 case RTE_PTYPE_INNER_L4_TCP: 332 case RTE_PTYPE_INNER_L4_UDP: 333 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 334 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP; 335 goto set_hsplit_finish; 336 case RTE_PTYPE_INNER_L4_SCTP: 337 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 338 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP; 339 goto set_hsplit_finish; 340 } 341 342 switch (proto_hdr & RTE_PTYPE_INNER_L3_MASK) { 343 case RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: 344 case RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: 345 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 346 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP; 347 goto set_hsplit_finish; 348 } 349 350 switch (proto_hdr & RTE_PTYPE_INNER_L2_MASK) { 351 case RTE_PTYPE_INNER_L2_ETHER: 352 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 353 rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2; 354 goto set_hsplit_finish; 355 } 356 357 switch (proto_hdr & RTE_PTYPE_TUNNEL_MASK) { 358 case RTE_PTYPE_TUNNEL_GRENAT: 359 rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 360 rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS; 361 goto set_hsplit_finish; 362 } 363 364 PMD_DRV_LOG(ERR, "Buffer split protocol is not supported"); 365 return -EINVAL; 366 367 set_hsplit_finish: 368 rxq->rx_hdr_len = 
ICE_RX_HDR_BUF_SIZE; 369 } else { 370 rxq->rx_hdr_len = 0; 371 rx_ctx.dtype = 0; /* No Protocol Based Buffer Split mode */ 372 } 373 374 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 375 rx_ctx.qlen = rxq->nb_rx_desc; 376 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; 377 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; 378 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 379 rx_ctx.dsize = 1; /* 32B descriptors */ 380 #endif 381 rx_ctx.rxmax = rxq->max_pkt_len; 382 /* TPH: Transaction Layer Packet (TLP) processing hints */ 383 rx_ctx.tphrdesc_ena = 1; 384 rx_ctx.tphwdesc_ena = 1; 385 rx_ctx.tphdata_ena = 1; 386 rx_ctx.tphhead_ena = 1; 387 /* Low Receive Queue Threshold defined in 64 descriptors units. 388 * When the number of free descriptors goes below the lrxqthresh, 389 * an immediate interrupt is triggered. 390 */ 391 rx_ctx.lrxqthresh = 2; 392 /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/ 393 rx_ctx.l2tsel = 1; 394 rx_ctx.showiv = 0; 395 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; 396 397 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr); 398 399 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u", 400 rxq->port_id, rxq->queue_id, rxdid); 401 402 if (!(pf->supported_rxdid & BIT(rxdid))) { 403 PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)", 404 rxdid); 405 return -EINVAL; 406 } 407 408 rxq->rxdid = rxdid; 409 410 /* Enable Flexible Descriptors in the queue context which 411 * allows this driver to select a specific receive descriptor format 412 */ 413 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 414 QRXFLXP_CNTXT_RXDID_IDX_M; 415 416 /* increasing context priority to pick up profile ID; 417 * default is 0x01; setting to 0x03 to ensure profile 418 * is programming if prev context is of same priority 419 */ 420 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 421 QRXFLXP_CNTXT_RXDID_PRIO_M; 422 423 if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) 424 regval |= QRXFLXP_CNTXT_TS_M; 425 426 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); 427 428 err = ice_clear_rxq_ctx(hw, rxq->reg_idx); 429 if (err) { 430 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context", 431 rxq->queue_id); 432 return -EINVAL; 433 } 434 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx); 435 if (err) { 436 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context", 437 rxq->queue_id); 438 return -EINVAL; 439 } 440 441 /* Check if scattered RX needs to be used. 
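	 * If the maximum frame cannot fit into a single data buffer, packets
	 * will span multiple descriptors, so scattered Rx is enabled for the
	 * whole port.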
*/ 442 if (frame_size > buf_size) 443 dev_data->scattered_rx = 1; 444 445 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx); 446 447 /* Init the Rx tail register*/ 448 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 449 450 return 0; 451 } 452 453 /* Allocate mbufs for all descriptors in rx queue */ 454 static int 455 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq) 456 { 457 struct ice_rx_entry *rxe = rxq->sw_ring; 458 uint64_t dma_addr; 459 uint16_t i; 460 461 for (i = 0; i < rxq->nb_rx_desc; i++) { 462 volatile union ice_rx_flex_desc *rxd; 463 rxd = &rxq->rx_ring[i]; 464 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp); 465 466 if (unlikely(!mbuf)) { 467 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX"); 468 return -ENOMEM; 469 } 470 471 mbuf->data_off = RTE_PKTMBUF_HEADROOM; 472 mbuf->nb_segs = 1; 473 mbuf->port = rxq->port_id; 474 475 dma_addr = 476 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); 477 478 if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 479 rte_mbuf_refcnt_set(mbuf, 1); 480 mbuf->next = NULL; 481 rxd->read.hdr_addr = 0; 482 rxd->read.pkt_addr = dma_addr; 483 } else { 484 struct rte_mbuf *mbuf_pay; 485 mbuf_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp); 486 if (unlikely(!mbuf_pay)) { 487 rte_pktmbuf_free(mbuf); 488 PMD_DRV_LOG(ERR, "Failed to allocate payload mbuf for RX"); 489 return -ENOMEM; 490 } 491 492 mbuf_pay->next = NULL; 493 mbuf_pay->data_off = RTE_PKTMBUF_HEADROOM; 494 mbuf_pay->nb_segs = 1; 495 mbuf_pay->port = rxq->port_id; 496 mbuf->next = mbuf_pay; 497 498 rxd->read.hdr_addr = dma_addr; 499 /* The LS bit should be set to zero regardless of 500 * buffer split enablement. 501 */ 502 rxd->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf_pay)); 503 } 504 505 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 506 rxd->read.rsvd1 = 0; 507 rxd->read.rsvd2 = 0; 508 #endif 509 rxe[i].mbuf = mbuf; 510 } 511 512 return 0; 513 } 514 515 /* Free all mbufs for descriptors in rx queue */ 516 static void 517 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq) 518 { 519 uint16_t i; 520 521 if (!rxq || !rxq->sw_ring) { 522 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL"); 523 return; 524 } 525 526 for (i = 0; i < rxq->nb_rx_desc; i++) { 527 if (rxq->sw_ring[i].mbuf) { 528 rte_pktmbuf_free(rxq->sw_ring[i].mbuf); 529 rxq->sw_ring[i].mbuf = NULL; 530 } 531 } 532 if (rxq->rx_nb_avail == 0) 533 return; 534 for (i = 0; i < rxq->rx_nb_avail; i++) 535 rte_pktmbuf_free(rxq->rx_stage[rxq->rx_next_avail + i]); 536 537 rxq->rx_nb_avail = 0; 538 } 539 540 /* turn on or off rx queue 541 * @q_idx: queue index in pf scope 542 * @on: turn on or off the queue 543 */ 544 static int 545 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on) 546 { 547 uint32_t reg; 548 uint16_t j; 549 550 /* QRX_CTRL = QRX_ENA */ 551 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx)); 552 553 if (on) { 554 if (reg & QRX_CTRL_QENA_STAT_M) 555 return 0; /* Already on, skip */ 556 reg |= QRX_CTRL_QENA_REQ_M; 557 } else { 558 if (!(reg & QRX_CTRL_QENA_STAT_M)) 559 return 0; /* Already off, skip */ 560 reg &= ~QRX_CTRL_QENA_REQ_M; 561 } 562 563 /* Write the register */ 564 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg); 565 /* Check the result. It is said that QENA_STAT 566 * follows the QENA_REQ not more than 10 use. 
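	 * The loop below polls QRX_CTRL up to ICE_CHK_Q_ENA_COUNT times,
	 * waiting ICE_CHK_Q_ENA_INTERVAL_US microseconds between reads,
	 * before reporting a timeout.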
567 * TODO: need to change the wait counter later 568 */ 569 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) { 570 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US); 571 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx)); 572 if (on) { 573 if ((reg & QRX_CTRL_QENA_REQ_M) && 574 (reg & QRX_CTRL_QENA_STAT_M)) 575 break; 576 } else { 577 if (!(reg & QRX_CTRL_QENA_REQ_M) && 578 !(reg & QRX_CTRL_QENA_STAT_M)) 579 break; 580 } 581 } 582 583 /* Check if it is timeout */ 584 if (j >= ICE_CHK_Q_ENA_COUNT) { 585 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]", 586 (on ? "enable" : "disable"), q_idx); 587 return -ETIMEDOUT; 588 } 589 590 return 0; 591 } 592 593 static inline int 594 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq) 595 { 596 int ret = 0; 597 598 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) { 599 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " 600 "rxq->rx_free_thresh=%d, " 601 "ICE_RX_MAX_BURST=%d", 602 rxq->rx_free_thresh, ICE_RX_MAX_BURST); 603 ret = -EINVAL; 604 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) { 605 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " 606 "rxq->rx_free_thresh=%d, " 607 "rxq->nb_rx_desc=%d", 608 rxq->rx_free_thresh, rxq->nb_rx_desc); 609 ret = -EINVAL; 610 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) { 611 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " 612 "rxq->nb_rx_desc=%d, " 613 "rxq->rx_free_thresh=%d", 614 rxq->nb_rx_desc, rxq->rx_free_thresh); 615 ret = -EINVAL; 616 } 617 618 return ret; 619 } 620 621 /* reset fields in ice_rx_queue back to default */ 622 static void 623 ice_reset_rx_queue(struct ice_rx_queue *rxq) 624 { 625 unsigned int i; 626 uint16_t len; 627 628 if (!rxq) { 629 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); 630 return; 631 } 632 633 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST); 634 635 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++) 636 ((volatile char *)rxq->rx_ring)[i] = 0; 637 638 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); 639 for (i = 0; i < ICE_RX_MAX_BURST; ++i) 640 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf; 641 642 rxq->rx_nb_avail = 0; 643 rxq->rx_next_avail = 0; 644 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); 645 646 rxq->rx_tail = 0; 647 rxq->nb_rx_hold = 0; 648 rxq->pkt_first_seg = NULL; 649 rxq->pkt_last_seg = NULL; 650 651 rxq->rxrearm_start = 0; 652 rxq->rxrearm_nb = 0; 653 } 654 655 int 656 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 657 { 658 struct ice_rx_queue *rxq; 659 int err; 660 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 661 662 PMD_INIT_FUNC_TRACE(); 663 664 if (rx_queue_id >= dev->data->nb_rx_queues) { 665 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u", 666 rx_queue_id, dev->data->nb_rx_queues); 667 return -EINVAL; 668 } 669 670 rxq = dev->data->rx_queues[rx_queue_id]; 671 if (!rxq || !rxq->q_set) { 672 PMD_DRV_LOG(ERR, "RX queue %u not available or setup", 673 rx_queue_id); 674 return -EINVAL; 675 } 676 677 if (dev->data->rx_queue_state[rx_queue_id] == 678 RTE_ETH_QUEUE_STATE_STARTED) 679 return 0; 680 681 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) 682 rxq->ts_enable = true; 683 err = ice_program_hw_rx_queue(rxq); 684 if (err) { 685 PMD_DRV_LOG(ERR, "fail to program RX queue %u", 686 rx_queue_id); 687 return -EIO; 688 } 689 690 err = ice_alloc_rx_queue_mbufs(rxq); 691 if (err) { 692 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); 693 return -ENOMEM; 694 } 695 696 /* Init the RX tail register. 
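	 * All descriptors were just filled with fresh mbufs, so point the
	 * tail at the last entry to hand them to hardware.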
*/ 697 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 698 699 err = ice_switch_rx_queue(hw, rxq->reg_idx, true); 700 if (err) { 701 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", 702 rx_queue_id); 703 704 rxq->rx_rel_mbufs(rxq); 705 ice_reset_rx_queue(rxq); 706 return -EINVAL; 707 } 708 709 dev->data->rx_queue_state[rx_queue_id] = 710 RTE_ETH_QUEUE_STATE_STARTED; 711 712 return 0; 713 } 714 715 int 716 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 717 { 718 struct ice_rx_queue *rxq; 719 int err; 720 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 721 722 if (rx_queue_id < dev->data->nb_rx_queues) { 723 rxq = dev->data->rx_queues[rx_queue_id]; 724 725 if (dev->data->rx_queue_state[rx_queue_id] == 726 RTE_ETH_QUEUE_STATE_STOPPED) 727 return 0; 728 729 err = ice_switch_rx_queue(hw, rxq->reg_idx, false); 730 if (err) { 731 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", 732 rx_queue_id); 733 return -EINVAL; 734 } 735 rxq->rx_rel_mbufs(rxq); 736 ice_reset_rx_queue(rxq); 737 dev->data->rx_queue_state[rx_queue_id] = 738 RTE_ETH_QUEUE_STATE_STOPPED; 739 } 740 741 return 0; 742 } 743 744 int 745 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 746 { 747 struct ci_tx_queue *txq; 748 int err; 749 struct ice_vsi *vsi; 750 struct ice_hw *hw; 751 struct ice_pf *pf; 752 struct ice_aqc_add_tx_qgrp *txq_elem; 753 struct ice_tlan_ctx tx_ctx; 754 int buf_len; 755 756 PMD_INIT_FUNC_TRACE(); 757 758 if (tx_queue_id >= dev->data->nb_tx_queues) { 759 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u", 760 tx_queue_id, dev->data->nb_tx_queues); 761 return -EINVAL; 762 } 763 764 txq = dev->data->tx_queues[tx_queue_id]; 765 if (!txq || !txq->q_set) { 766 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup", 767 tx_queue_id); 768 return -EINVAL; 769 } 770 771 if (dev->data->tx_queue_state[tx_queue_id] == 772 RTE_ETH_QUEUE_STATE_STARTED) 773 return 0; 774 775 buf_len = ice_struct_size(txq_elem, txqs, 1); 776 txq_elem = ice_malloc(hw, buf_len); 777 if (!txq_elem) 778 return -ENOMEM; 779 780 vsi = txq->ice_vsi; 781 hw = ICE_VSI_TO_HW(vsi); 782 pf = ICE_VSI_TO_PF(vsi); 783 784 memset(&tx_ctx, 0, sizeof(tx_ctx)); 785 txq_elem->num_txqs = 1; 786 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx); 787 788 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 789 tx_ctx.qlen = txq->nb_tx_desc; 790 tx_ctx.pf_num = hw->pf_id; 791 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 792 tx_ctx.src_vsi = vsi->vsi_id; 793 tx_ctx.port_num = hw->port_info->lport; 794 tx_ctx.tso_ena = 1; /* tso enable */ 795 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */ 796 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */ 797 tx_ctx.tsyn_ena = 1; 798 799 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx, 800 ice_tlan_ctx_info); 801 802 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx); 803 804 /* Init the Tx tail register*/ 805 ICE_PCI_REG_WRITE(txq->qtx_tail, 0); 806 807 /* Fix me, we assume TC always 0 here */ 808 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1, 809 txq_elem, buf_len, NULL); 810 if (err) { 811 PMD_DRV_LOG(ERR, "Failed to add lan txq"); 812 rte_free(txq_elem); 813 return -EIO; 814 } 815 /* store the schedule node id */ 816 txq->q_teid = txq_elem->txqs[0].q_teid; 817 818 /* move the queue to correct position in hierarchy, if explicit hierarchy configured */ 819 if (pf->tm_conf.committed) 820 if (ice_tm_setup_txq_node(pf, hw, tx_queue_id, txq->q_teid) != 0) { 821 PMD_DRV_LOG(ERR, 
"Failed to set up txq traffic management node"); 822 rte_free(txq_elem); 823 return -EIO; 824 } 825 826 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 827 828 rte_free(txq_elem); 829 return 0; 830 } 831 832 static int 833 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq) 834 { 835 struct ice_vsi *vsi = rxq->vsi; 836 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 837 uint32_t rxdid = ICE_RXDID_LEGACY_1; 838 struct ice_rlan_ctx rx_ctx; 839 uint32_t regval; 840 int err; 841 842 rxq->rx_hdr_len = 0; 843 rxq->rx_buf_len = 1024; 844 845 memset(&rx_ctx, 0, sizeof(rx_ctx)); 846 847 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 848 rx_ctx.qlen = rxq->nb_rx_desc; 849 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; 850 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; 851 rx_ctx.dtype = 0; /* No Buffer Split mode */ 852 rx_ctx.dsize = 1; /* 32B descriptors */ 853 rx_ctx.rxmax = ICE_ETH_MAX_LEN; 854 /* TPH: Transaction Layer Packet (TLP) processing hints */ 855 rx_ctx.tphrdesc_ena = 1; 856 rx_ctx.tphwdesc_ena = 1; 857 rx_ctx.tphdata_ena = 1; 858 rx_ctx.tphhead_ena = 1; 859 /* Low Receive Queue Threshold defined in 64 descriptors units. 860 * When the number of free descriptors goes below the lrxqthresh, 861 * an immediate interrupt is triggered. 862 */ 863 rx_ctx.lrxqthresh = 2; 864 /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/ 865 rx_ctx.l2tsel = 1; 866 rx_ctx.showiv = 0; 867 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; 868 869 /* Enable Flexible Descriptors in the queue context which 870 * allows this driver to select a specific receive descriptor format 871 */ 872 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 873 QRXFLXP_CNTXT_RXDID_IDX_M; 874 875 /* increasing context priority to pick up profile ID; 876 * default is 0x01; setting to 0x03 to ensure profile 877 * is programming if prev context is of same priority 878 */ 879 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 880 QRXFLXP_CNTXT_RXDID_PRIO_M; 881 882 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); 883 884 err = ice_clear_rxq_ctx(hw, rxq->reg_idx); 885 if (err) { 886 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context", 887 rxq->queue_id); 888 return -EINVAL; 889 } 890 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx); 891 if (err) { 892 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context", 893 rxq->queue_id); 894 return -EINVAL; 895 } 896 897 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx); 898 899 /* Init the Rx tail register*/ 900 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 901 902 return 0; 903 } 904 905 int 906 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 907 { 908 struct ice_rx_queue *rxq; 909 int err; 910 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 911 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 912 913 PMD_INIT_FUNC_TRACE(); 914 915 rxq = pf->fdir.rxq; 916 if (!rxq || !rxq->q_set) { 917 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup", 918 rx_queue_id); 919 return -EINVAL; 920 } 921 922 err = ice_fdir_program_hw_rx_queue(rxq); 923 if (err) { 924 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u", 925 rx_queue_id); 926 return -EIO; 927 } 928 929 /* Init the RX tail register. 
*/ 930 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 931 932 err = ice_switch_rx_queue(hw, rxq->reg_idx, true); 933 if (err) { 934 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on", 935 rx_queue_id); 936 937 ice_reset_rx_queue(rxq); 938 return -EINVAL; 939 } 940 941 return 0; 942 } 943 944 int 945 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 946 { 947 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 948 struct ci_tx_queue *txq; 949 int err; 950 struct ice_vsi *vsi; 951 struct ice_hw *hw; 952 struct ice_aqc_add_tx_qgrp *txq_elem; 953 struct ice_tlan_ctx tx_ctx; 954 int buf_len; 955 956 PMD_INIT_FUNC_TRACE(); 957 958 txq = pf->fdir.txq; 959 if (!txq || !txq->q_set) { 960 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup", 961 tx_queue_id); 962 return -EINVAL; 963 } 964 965 buf_len = ice_struct_size(txq_elem, txqs, 1); 966 txq_elem = ice_malloc(hw, buf_len); 967 if (!txq_elem) 968 return -ENOMEM; 969 970 vsi = txq->ice_vsi; 971 hw = ICE_VSI_TO_HW(vsi); 972 973 memset(&tx_ctx, 0, sizeof(tx_ctx)); 974 txq_elem->num_txqs = 1; 975 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx); 976 977 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 978 tx_ctx.qlen = txq->nb_tx_desc; 979 tx_ctx.pf_num = hw->pf_id; 980 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 981 tx_ctx.src_vsi = vsi->vsi_id; 982 tx_ctx.port_num = hw->port_info->lport; 983 tx_ctx.tso_ena = 1; /* tso enable */ 984 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */ 985 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */ 986 987 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx, 988 ice_tlan_ctx_info); 989 990 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx); 991 992 /* Init the Tx tail register*/ 993 ICE_PCI_REG_WRITE(txq->qtx_tail, 0); 994 995 /* Fix me, we assume TC always 0 here */ 996 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1, 997 txq_elem, buf_len, NULL); 998 if (err) { 999 PMD_DRV_LOG(ERR, "Failed to add FDIR txq"); 1000 rte_free(txq_elem); 1001 return -EIO; 1002 } 1003 /* store the schedule node id */ 1004 txq->q_teid = txq_elem->txqs[0].q_teid; 1005 1006 rte_free(txq_elem); 1007 return 0; 1008 } 1009 1010 /* Free all mbufs for descriptors in tx queue */ 1011 static void 1012 _ice_tx_queue_release_mbufs(struct ci_tx_queue *txq) 1013 { 1014 uint16_t i; 1015 1016 if (!txq || !txq->sw_ring) { 1017 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL"); 1018 return; 1019 } 1020 1021 for (i = 0; i < txq->nb_tx_desc; i++) { 1022 if (txq->sw_ring[i].mbuf) { 1023 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); 1024 txq->sw_ring[i].mbuf = NULL; 1025 } 1026 } 1027 } 1028 1029 static void 1030 ice_reset_tx_queue(struct ci_tx_queue *txq) 1031 { 1032 struct ci_tx_entry *txe; 1033 uint16_t i, prev, size; 1034 1035 if (!txq) { 1036 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); 1037 return; 1038 } 1039 1040 txe = txq->sw_ring; 1041 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc; 1042 for (i = 0; i < size; i++) 1043 ((volatile char *)txq->ice_tx_ring)[i] = 0; 1044 1045 prev = (uint16_t)(txq->nb_tx_desc - 1); 1046 for (i = 0; i < txq->nb_tx_desc; i++) { 1047 volatile struct ice_tx_desc *txd = &txq->ice_tx_ring[i]; 1048 1049 txd->cmd_type_offset_bsz = 1050 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE); 1051 txe[i].mbuf = NULL; 1052 txe[i].last_id = i; 1053 txe[prev].next_id = i; 1054 prev = i; 1055 } 1056 1057 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); 1058 txq->tx_next_rs = 
(uint16_t)(txq->tx_rs_thresh - 1); 1059 1060 txq->tx_tail = 0; 1061 txq->nb_tx_used = 0; 1062 1063 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); 1064 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); 1065 } 1066 1067 int 1068 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 1069 { 1070 struct ci_tx_queue *txq; 1071 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1072 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1073 struct ice_vsi *vsi = pf->main_vsi; 1074 uint16_t q_ids[1]; 1075 uint32_t q_teids[1]; 1076 uint16_t q_handle = tx_queue_id; 1077 int status; 1078 1079 if (tx_queue_id >= dev->data->nb_tx_queues) { 1080 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u", 1081 tx_queue_id, dev->data->nb_tx_queues); 1082 return -EINVAL; 1083 } 1084 1085 txq = dev->data->tx_queues[tx_queue_id]; 1086 if (!txq) { 1087 PMD_DRV_LOG(ERR, "TX queue %u is not available", 1088 tx_queue_id); 1089 return -EINVAL; 1090 } 1091 1092 if (dev->data->tx_queue_state[tx_queue_id] == 1093 RTE_ETH_QUEUE_STATE_STOPPED) 1094 return 0; 1095 1096 q_ids[0] = txq->reg_idx; 1097 q_teids[0] = txq->q_teid; 1098 1099 /* Fix me, we assume TC always 0 here */ 1100 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle, 1101 q_ids, q_teids, ICE_NO_RESET, 0, NULL); 1102 if (status != ICE_SUCCESS) { 1103 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue"); 1104 return -EINVAL; 1105 } 1106 1107 txq->tx_rel_mbufs(txq); 1108 ice_reset_tx_queue(txq); 1109 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 1110 1111 return 0; 1112 } 1113 1114 int 1115 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1116 { 1117 struct ice_rx_queue *rxq; 1118 int err; 1119 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1120 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1121 1122 rxq = pf->fdir.rxq; 1123 1124 err = ice_switch_rx_queue(hw, rxq->reg_idx, false); 1125 if (err) { 1126 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off", 1127 rx_queue_id); 1128 return -EINVAL; 1129 } 1130 rxq->rx_rel_mbufs(rxq); 1131 1132 return 0; 1133 } 1134 1135 int 1136 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 1137 { 1138 struct ci_tx_queue *txq; 1139 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1140 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1141 struct ice_vsi *vsi = pf->main_vsi; 1142 uint16_t q_ids[1]; 1143 uint32_t q_teids[1]; 1144 uint16_t q_handle = tx_queue_id; 1145 int status; 1146 1147 txq = pf->fdir.txq; 1148 if (!txq) { 1149 PMD_DRV_LOG(ERR, "TX queue %u is not available", 1150 tx_queue_id); 1151 return -EINVAL; 1152 } 1153 if (txq->qtx_tail == NULL) { 1154 PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id); 1155 return 0; 1156 } 1157 vsi = txq->ice_vsi; 1158 1159 q_ids[0] = txq->reg_idx; 1160 q_teids[0] = txq->q_teid; 1161 1162 /* Fix me, we assume TC always 0 here */ 1163 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle, 1164 q_ids, q_teids, ICE_NO_RESET, 0, NULL); 1165 if (status != ICE_SUCCESS) { 1166 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue"); 1167 return -EINVAL; 1168 } 1169 1170 txq->tx_rel_mbufs(txq); 1171 txq->qtx_tail = NULL; 1172 1173 return 0; 1174 } 1175 1176 int 1177 ice_rx_queue_setup(struct rte_eth_dev *dev, 1178 uint16_t queue_idx, 1179 uint16_t nb_desc, 1180 unsigned int socket_id, 1181 const struct rte_eth_rxconf *rx_conf, 1182 struct rte_mempool *mp) 1183 { 1184 struct 
ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1185 struct ice_adapter *ad = 1186 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 1187 struct ice_vsi *vsi = pf->main_vsi; 1188 struct ice_rx_queue *rxq; 1189 const struct rte_memzone *rz; 1190 uint32_t ring_size; 1191 uint16_t len; 1192 int use_def_burst_func = 1; 1193 uint64_t offloads; 1194 uint16_t n_seg = rx_conf->rx_nseg; 1195 uint16_t i; 1196 1197 if (nb_desc % ICE_ALIGN_RING_DESC != 0 || 1198 nb_desc > ICE_MAX_RING_DESC || 1199 nb_desc < ICE_MIN_RING_DESC) { 1200 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is " 1201 "invalid", nb_desc); 1202 return -EINVAL; 1203 } 1204 1205 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; 1206 1207 if (mp) 1208 n_seg = 1; 1209 1210 if (n_seg > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 1211 PMD_INIT_LOG(ERR, "port %u queue index %u split offload not configured", 1212 dev->data->port_id, queue_idx); 1213 return -EINVAL; 1214 } 1215 1216 /* Free memory if needed */ 1217 if (dev->data->rx_queues[queue_idx]) { 1218 ice_rx_queue_release(dev->data->rx_queues[queue_idx]); 1219 dev->data->rx_queues[queue_idx] = NULL; 1220 } 1221 1222 /* Allocate the rx queue data structure */ 1223 rxq = rte_zmalloc_socket(NULL, 1224 sizeof(struct ice_rx_queue), 1225 RTE_CACHE_LINE_SIZE, 1226 socket_id); 1227 1228 if (!rxq) { 1229 PMD_INIT_LOG(ERR, "Failed to allocate memory for " 1230 "rx queue data structure"); 1231 return -ENOMEM; 1232 } 1233 1234 rxq->rxseg_nb = n_seg; 1235 if (n_seg > 1) { 1236 for (i = 0; i < n_seg; i++) 1237 memcpy(&rxq->rxseg[i], &rx_conf->rx_seg[i].split, 1238 sizeof(struct rte_eth_rxseg_split)); 1239 1240 rxq->mp = rxq->rxseg[0].mp; 1241 } else { 1242 rxq->mp = mp; 1243 } 1244 1245 rxq->nb_rx_desc = nb_desc; 1246 rxq->rx_free_thresh = rx_conf->rx_free_thresh; 1247 rxq->queue_id = queue_idx; 1248 rxq->offloads = offloads; 1249 1250 rxq->reg_idx = vsi->base_queue + queue_idx; 1251 rxq->port_id = dev->data->port_id; 1252 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) 1253 rxq->crc_len = RTE_ETHER_CRC_LEN; 1254 else 1255 rxq->crc_len = 0; 1256 1257 rxq->drop_en = rx_conf->rx_drop_en; 1258 rxq->vsi = vsi; 1259 rxq->rx_deferred_start = rx_conf->rx_deferred_start; 1260 rxq->proto_xtr = pf->proto_xtr != NULL ? 1261 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE; 1262 if (rxq->proto_xtr != PROTO_XTR_NONE && 1263 ad->devargs.xtr_flag_offs[rxq->proto_xtr] != 0xff) 1264 rxq->xtr_ol_flag = 1ULL << ad->devargs.xtr_flag_offs[rxq->proto_xtr]; 1265 rxq->xtr_field_offs = ad->devargs.xtr_field_offs; 1266 1267 /* Allocate the maximum number of RX ring hardware descriptor. */ 1268 len = ICE_MAX_RING_DESC; 1269 1270 /** 1271 * Allocating a little more memory because vectorized/bulk_alloc Rx 1272 * functions doesn't check boundaries each time. 1273 */ 1274 len += ICE_RX_MAX_BURST; 1275 1276 /* Allocate the maximum number of RX ring hardware descriptor. */ 1277 ring_size = sizeof(union ice_rx_flex_desc) * len; 1278 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 1279 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, 1280 ring_size, ICE_RING_BASE_ALIGN, 1281 socket_id); 1282 if (!rz) { 1283 ice_rx_queue_release(rxq); 1284 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX"); 1285 return -ENOMEM; 1286 } 1287 1288 rxq->mz = rz; 1289 /* Zero all the descriptors in the ring. 
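	 * The memzone was reserved for ICE_MAX_RING_DESC plus ICE_RX_MAX_BURST
	 * descriptors, so clear the whole area rather than only the nb_desc
	 * entries currently in use.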
*/ 1290 memset(rz->addr, 0, ring_size); 1291 1292 rxq->rx_ring_dma = rz->iova; 1293 rxq->rx_ring = rz->addr; 1294 1295 /* always reserve more for bulk alloc */ 1296 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST); 1297 1298 /* Allocate the software ring. */ 1299 rxq->sw_ring = rte_zmalloc_socket(NULL, 1300 sizeof(struct ice_rx_entry) * len, 1301 RTE_CACHE_LINE_SIZE, 1302 socket_id); 1303 if (!rxq->sw_ring) { 1304 ice_rx_queue_release(rxq); 1305 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring"); 1306 return -ENOMEM; 1307 } 1308 1309 ice_reset_rx_queue(rxq); 1310 rxq->q_set = true; 1311 dev->data->rx_queues[queue_idx] = rxq; 1312 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs; 1313 1314 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq); 1315 1316 if (!use_def_burst_func) { 1317 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " 1318 "satisfied. Rx Burst Bulk Alloc function will be " 1319 "used on port=%d, queue=%d.", 1320 rxq->port_id, rxq->queue_id); 1321 } else { 1322 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " 1323 "not satisfied, Scattered Rx is requested. " 1324 "on port=%d, queue=%d.", 1325 rxq->port_id, rxq->queue_id); 1326 ad->rx_bulk_alloc_allowed = false; 1327 } 1328 1329 return 0; 1330 } 1331 1332 void 1333 ice_rx_queue_release(void *rxq) 1334 { 1335 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq; 1336 1337 if (!q) { 1338 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); 1339 return; 1340 } 1341 1342 if (q->rx_rel_mbufs != NULL) 1343 q->rx_rel_mbufs(q); 1344 rte_free(q->sw_ring); 1345 rte_memzone_free(q->mz); 1346 rte_free(q); 1347 } 1348 1349 int 1350 ice_tx_queue_setup(struct rte_eth_dev *dev, 1351 uint16_t queue_idx, 1352 uint16_t nb_desc, 1353 unsigned int socket_id, 1354 const struct rte_eth_txconf *tx_conf) 1355 { 1356 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1357 struct ice_vsi *vsi = pf->main_vsi; 1358 struct ci_tx_queue *txq; 1359 const struct rte_memzone *tz; 1360 uint32_t ring_size; 1361 uint16_t tx_rs_thresh, tx_free_thresh; 1362 uint64_t offloads; 1363 1364 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1365 1366 if (nb_desc % ICE_ALIGN_RING_DESC != 0 || 1367 nb_desc > ICE_MAX_RING_DESC || 1368 nb_desc < ICE_MIN_RING_DESC) { 1369 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is " 1370 "invalid", nb_desc); 1371 return -EINVAL; 1372 } 1373 1374 /** 1375 * The following two parameters control the setting of the RS bit on 1376 * transmit descriptors. TX descriptors will have their RS bit set 1377 * after txq->tx_rs_thresh descriptors have been used. The TX 1378 * descriptor ring will be cleaned after txq->tx_free_thresh 1379 * descriptors are used or if the number of descriptors required to 1380 * transmit a packet is greater than the number of free TX descriptors. 1381 * 1382 * The following constraints must be satisfied: 1383 * - tx_rs_thresh must be greater than 0. 1384 * - tx_rs_thresh must be less than the size of the ring minus 2. 1385 * - tx_rs_thresh must be less than or equal to tx_free_thresh. 1386 * - tx_rs_thresh must be a divisor of the ring size. 1387 * - tx_free_thresh must be greater than 0. 1388 * - tx_free_thresh must be less than the size of the ring minus 3. 1389 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc. 1390 * 1391 * One descriptor in the TX ring is used as a sentinel to avoid a H/W 1392 * race condition, hence the maximum threshold constraints. When set 1393 * to zero use default values. 
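	 *
	 * As a concrete illustration (values chosen only as an example):
	 * nb_desc = 1024, tx_rs_thresh = 32 and tx_free_thresh = 64 satisfy
	 * every constraint above: 32 > 0, 32 < 1022, 32 <= 64,
	 * 1024 % 32 == 0, 64 > 0, 64 < 1021 and 32 + 64 <= 1024.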
	 */
	tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
				    tx_conf->tx_free_thresh :
				    ICE_DEFAULT_TX_FREE_THRESH);
	/* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
	tx_rs_thresh =
		(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
			nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
	if (tx_conf->tx_rs_thresh)
		tx_rs_thresh = tx_conf->tx_rs_thresh;
	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
			     "exceed nb_desc. (tx_rs_thresh=%u "
			     "tx_free_thresh=%u nb_desc=%u port=%d queue=%d)",
			     (unsigned int)tx_rs_thresh,
			     (unsigned int)tx_free_thresh,
			     (unsigned int)nb_desc,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}
	if (tx_rs_thresh >= (nb_desc - 2)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
			     "number of TX descriptors minus 2. "
			     "(tx_rs_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}
	if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
			     "number of TX descriptors minus 3. "
			     "(tx_free_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}
	if (tx_rs_thresh > tx_free_thresh) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
			     "equal to tx_free_thresh. (tx_free_thresh=%u"
			     " tx_rs_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_free_thresh,
			     (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}
	if ((nb_desc % tx_rs_thresh) != 0) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
			     "number of TX descriptors. (tx_rs_thresh=%u"
			     " port=%d queue=%d)",
			     (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}
	if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
			     "tx_rs_thresh is greater than 1. "
			     "(tx_rs_thresh=%u port=%d queue=%d)",
			     (unsigned int)tx_rs_thresh,
			     (int)dev->data->port_id,
			     (int)queue_idx);
		return -EINVAL;
	}

	/* Free memory if needed. */
	if (dev->data->tx_queues[queue_idx]) {
		ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* Allocate the TX queue data structure. */
	txq = rte_zmalloc_socket(NULL,
				 sizeof(struct ci_tx_queue),
				 RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (!txq) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
			     "tx queue structure");
		return -ENOMEM;
	}

	/* Allocate TX hardware ring descriptors.
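	 * Note: as with the Rx ring above, the memzone below is sized for
	 * ICE_MAX_RING_DESC entries rather than nb_desc.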
*/ 1481 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC; 1482 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 1483 tz = rte_eth_dma_zone_reserve(dev, "ice_tx_ring", queue_idx, 1484 ring_size, ICE_RING_BASE_ALIGN, 1485 socket_id); 1486 if (!tz) { 1487 ice_tx_queue_release(txq); 1488 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX"); 1489 return -ENOMEM; 1490 } 1491 1492 txq->mz = tz; 1493 txq->nb_tx_desc = nb_desc; 1494 txq->tx_rs_thresh = tx_rs_thresh; 1495 txq->tx_free_thresh = tx_free_thresh; 1496 txq->queue_id = queue_idx; 1497 1498 txq->reg_idx = vsi->base_queue + queue_idx; 1499 txq->port_id = dev->data->port_id; 1500 txq->offloads = offloads; 1501 txq->ice_vsi = vsi; 1502 txq->tx_deferred_start = tx_conf->tx_deferred_start; 1503 1504 txq->tx_ring_dma = tz->iova; 1505 txq->ice_tx_ring = tz->addr; 1506 1507 /* Allocate software ring */ 1508 txq->sw_ring = 1509 rte_zmalloc_socket(NULL, 1510 sizeof(struct ci_tx_entry) * nb_desc, 1511 RTE_CACHE_LINE_SIZE, 1512 socket_id); 1513 if (!txq->sw_ring) { 1514 ice_tx_queue_release(txq); 1515 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring"); 1516 return -ENOMEM; 1517 } 1518 1519 ice_reset_tx_queue(txq); 1520 txq->q_set = true; 1521 dev->data->tx_queues[queue_idx] = txq; 1522 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs; 1523 ice_set_tx_function_flag(dev, txq); 1524 1525 return 0; 1526 } 1527 1528 void 1529 ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 1530 { 1531 ice_rx_queue_release(dev->data->rx_queues[qid]); 1532 } 1533 1534 void 1535 ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 1536 { 1537 ice_tx_queue_release(dev->data->tx_queues[qid]); 1538 } 1539 1540 void 1541 ice_tx_queue_release(void *txq) 1542 { 1543 struct ci_tx_queue *q = (struct ci_tx_queue *)txq; 1544 1545 if (!q) { 1546 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL"); 1547 return; 1548 } 1549 1550 if (q->tx_rel_mbufs != NULL) 1551 q->tx_rel_mbufs(q); 1552 rte_free(q->sw_ring); 1553 rte_memzone_free(q->mz); 1554 rte_free(q); 1555 } 1556 1557 void 1558 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 1559 struct rte_eth_rxq_info *qinfo) 1560 { 1561 struct ice_rx_queue *rxq; 1562 1563 rxq = dev->data->rx_queues[queue_id]; 1564 1565 qinfo->mp = rxq->mp; 1566 qinfo->scattered_rx = dev->data->scattered_rx; 1567 qinfo->nb_desc = rxq->nb_rx_desc; 1568 1569 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 1570 qinfo->conf.rx_drop_en = rxq->drop_en; 1571 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 1572 } 1573 1574 void 1575 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 1576 struct rte_eth_txq_info *qinfo) 1577 { 1578 struct ci_tx_queue *txq; 1579 1580 txq = dev->data->tx_queues[queue_id]; 1581 1582 qinfo->nb_desc = txq->nb_tx_desc; 1583 1584 qinfo->conf.tx_thresh.pthresh = ICE_DEFAULT_TX_PTHRESH; 1585 qinfo->conf.tx_thresh.hthresh = ICE_DEFAULT_TX_HTHRESH; 1586 qinfo->conf.tx_thresh.wthresh = ICE_DEFAULT_TX_WTHRESH; 1587 1588 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 1589 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; 1590 qinfo->conf.offloads = txq->offloads; 1591 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1592 } 1593 1594 uint32_t 1595 ice_rx_queue_count(void *rx_queue) 1596 { 1597 #define ICE_RXQ_SCAN_INTERVAL 4 1598 volatile union ice_rx_flex_desc *rxdp; 1599 struct ice_rx_queue *rxq; 1600 uint16_t desc = 0; 1601 1602 rxq = rx_queue; 1603 rxdp = &rxq->rx_ring[rxq->rx_tail]; 1604 while ((desc < rxq->nb_rx_desc) && 1605 
rte_le_to_cpu_16(rxdp->wb.status_error0) & 1606 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) { 1607 /** 1608 * Check the DD bit of a rx descriptor of each 4 in a group, 1609 * to avoid checking too frequently and downgrading performance 1610 * too much. 1611 */ 1612 desc += ICE_RXQ_SCAN_INTERVAL; 1613 rxdp += ICE_RXQ_SCAN_INTERVAL; 1614 if (rxq->rx_tail + desc >= rxq->nb_rx_desc) 1615 rxdp = &(rxq->rx_ring[rxq->rx_tail + 1616 desc - rxq->nb_rx_desc]); 1617 } 1618 1619 return desc; 1620 } 1621 1622 #define ICE_RX_FLEX_ERR0_BITS \ 1623 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \ 1624 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \ 1625 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \ 1626 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \ 1627 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \ 1628 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S)) 1629 1630 /* Rx L3/L4 checksum */ 1631 static inline uint64_t 1632 ice_rxd_error_to_pkt_flags(uint16_t stat_err0) 1633 { 1634 uint64_t flags = 0; 1635 1636 /* check if HW has decoded the packet and checksum */ 1637 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))) 1638 return 0; 1639 1640 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) { 1641 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | 1642 RTE_MBUF_F_RX_L4_CKSUM_GOOD | 1643 RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD); 1644 return flags; 1645 } 1646 1647 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))) 1648 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; 1649 else 1650 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; 1651 1652 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))) 1653 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; 1654 else 1655 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; 1656 1657 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))) 1658 flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD; 1659 1660 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))) 1661 flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD; 1662 else 1663 flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD; 1664 1665 return flags; 1666 } 1667 1668 static inline void 1669 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp) 1670 { 1671 if (rte_le_to_cpu_16(rxdp->wb.status_error0) & 1672 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) { 1673 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 1674 mb->vlan_tci = 1675 rte_le_to_cpu_16(rxdp->wb.l2tag1); 1676 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u", 1677 rte_le_to_cpu_16(rxdp->wb.l2tag1)); 1678 } else { 1679 mb->vlan_tci = 0; 1680 } 1681 1682 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1683 if (rte_le_to_cpu_16(rxdp->wb.status_error1) & 1684 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) { 1685 mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ | 1686 RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN; 1687 mb->vlan_tci_outer = mb->vlan_tci; 1688 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd); 1689 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", 1690 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st), 1691 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd)); 1692 } else { 1693 mb->vlan_tci_outer = 0; 1694 } 1695 #endif 1696 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u", 1697 mb->vlan_tci, mb->vlan_tci_outer); 1698 } 1699 1700 #define ICE_LOOK_AHEAD 8 1701 #if (ICE_LOOK_AHEAD != 8) 1702 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n" 1703 #endif 1704 1705 #define ICE_PTP_TS_VALID 0x1 1706 1707 static inline int 1708 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) 1709 { 1710 volatile union ice_rx_flex_desc *rxdp; 1711 struct ice_rx_entry 
*rxep; 1712 struct rte_mbuf *mb; 1713 uint16_t stat_err0; 1714 uint16_t pkt_len, hdr_len; 1715 int32_t s[ICE_LOOK_AHEAD], nb_dd; 1716 int32_t i, j, nb_rx = 0; 1717 uint64_t pkt_flags = 0; 1718 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; 1719 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1720 bool is_tsinit = false; 1721 uint64_t ts_ns; 1722 struct ice_vsi *vsi = rxq->vsi; 1723 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 1724 struct ice_adapter *ad = rxq->vsi->adapter; 1725 #endif 1726 rxdp = &rxq->rx_ring[rxq->rx_tail]; 1727 rxep = &rxq->sw_ring[rxq->rx_tail]; 1728 1729 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); 1730 1731 /* Make sure there is at least 1 packet to receive */ 1732 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) 1733 return 0; 1734 1735 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1736 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { 1737 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); 1738 1739 if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) 1740 is_tsinit = 1; 1741 } 1742 #endif 1743 1744 /** 1745 * Scan LOOK_AHEAD descriptors at a time to determine which 1746 * descriptors reference packets that are ready to be received. 1747 */ 1748 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD, 1749 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) { 1750 /* Read desc statuses backwards to avoid race condition */ 1751 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--) 1752 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0); 1753 1754 rte_smp_rmb(); 1755 1756 /* Compute how many status bits were set */ 1757 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++) 1758 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S); 1759 1760 nb_rx += nb_dd; 1761 1762 /* Translate descriptor info to mbuf parameters */ 1763 for (j = 0; j < nb_dd; j++) { 1764 mb = rxep[j].mbuf; 1765 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & 1766 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 1767 mb->data_len = pkt_len; 1768 mb->pkt_len = pkt_len; 1769 1770 if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 1771 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & 1772 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 1773 mb->data_len = pkt_len; 1774 mb->pkt_len = pkt_len; 1775 } else { 1776 mb->nb_segs = (uint16_t)(mb->nb_segs + mb->next->nb_segs); 1777 mb->next->next = NULL; 1778 hdr_len = rte_le_to_cpu_16(rxdp[j].wb.hdr_len_sph_flex_flags1) & 1779 ICE_RX_FLEX_DESC_HEADER_LEN_M; 1780 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & 1781 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 1782 mb->data_len = hdr_len; 1783 mb->pkt_len = hdr_len + pkt_len; 1784 mb->next->data_len = pkt_len; 1785 #ifdef RTE_ETHDEV_DEBUG_RX 1786 rte_pktmbuf_dump(stdout, mb, rte_pktmbuf_pkt_len(mb)); 1787 #endif 1788 } 1789 1790 mb->ol_flags = 0; 1791 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0); 1792 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0); 1793 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & 1794 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)]; 1795 ice_rxd_to_vlan_tci(mb, &rxdp[j]); 1796 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]); 1797 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1798 if (ice_timestamp_dynflag > 0 && 1799 (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 1800 rxq->time_high = 1801 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); 1802 if (unlikely(is_tsinit)) { 1803 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, 1804 rxq->time_high); 1805 rxq->hw_time_low = (uint32_t)ts_ns; 1806 rxq->hw_time_high = (uint32_t)(ts_ns >> 32); 1807 is_tsinit = false; 1808 } else { 1809 if 
(rxq->time_high < rxq->hw_time_low) 1810 rxq->hw_time_high += 1; 1811 ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; 1812 rxq->hw_time_low = rxq->time_high; 1813 } 1814 rxq->hw_time_update = rte_get_timer_cycles() / 1815 (rte_get_timer_hz() / 1000); 1816 *RTE_MBUF_DYNFIELD(mb, 1817 ice_timestamp_dynfield_offset, 1818 rte_mbuf_timestamp_t *) = ts_ns; 1819 pkt_flags |= ice_timestamp_dynflag; 1820 } 1821 1822 if (ad->ptp_ena && ((mb->packet_type & 1823 RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) { 1824 rxq->time_high = 1825 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); 1826 mb->timesync = rxq->queue_id; 1827 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 1828 if (rxdp[j].wb.time_stamp_low & 1829 ICE_PTP_TS_VALID) 1830 pkt_flags |= 1831 RTE_MBUF_F_RX_IEEE1588_TMST; 1832 } 1833 #endif 1834 mb->ol_flags |= pkt_flags; 1835 } 1836 1837 for (j = 0; j < ICE_LOOK_AHEAD; j++) 1838 rxq->rx_stage[i + j] = rxep[j].mbuf; 1839 1840 if (nb_dd != ICE_LOOK_AHEAD) 1841 break; 1842 } 1843 1844 /* Clear software ring entries */ 1845 for (i = 0; i < nb_rx; i++) 1846 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL; 1847 1848 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: " 1849 "port_id=%u, queue_id=%u, nb_rx=%d", 1850 rxq->port_id, rxq->queue_id, nb_rx); 1851 1852 return nb_rx; 1853 } 1854 1855 static inline uint16_t 1856 ice_rx_fill_from_stage(struct ice_rx_queue *rxq, 1857 struct rte_mbuf **rx_pkts, 1858 uint16_t nb_pkts) 1859 { 1860 uint16_t i; 1861 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail]; 1862 1863 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); 1864 1865 for (i = 0; i < nb_pkts; i++) 1866 rx_pkts[i] = stage[i]; 1867 1868 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); 1869 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); 1870 1871 return nb_pkts; 1872 } 1873 1874 static inline int 1875 ice_rx_alloc_bufs(struct ice_rx_queue *rxq) 1876 { 1877 volatile union ice_rx_flex_desc *rxdp; 1878 struct ice_rx_entry *rxep; 1879 struct rte_mbuf *mb; 1880 uint16_t alloc_idx, i; 1881 uint64_t dma_addr; 1882 int diag, diag_pay; 1883 uint64_t pay_addr; 1884 struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh]; 1885 1886 /* Allocate buffers in bulk */ 1887 alloc_idx = (uint16_t)(rxq->rx_free_trigger - 1888 (rxq->rx_free_thresh - 1)); 1889 rxep = &rxq->sw_ring[alloc_idx]; 1890 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep, 1891 rxq->rx_free_thresh); 1892 if (unlikely(diag != 0)) { 1893 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk"); 1894 return -ENOMEM; 1895 } 1896 1897 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 1898 diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp, 1899 (void *)mbufs_pay, rxq->rx_free_thresh); 1900 if (unlikely(diag_pay != 0)) { 1901 rte_mempool_put_bulk(rxq->mp, (void *)rxep, 1902 rxq->rx_free_thresh); 1903 PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk"); 1904 return -ENOMEM; 1905 } 1906 } 1907 1908 rxdp = &rxq->rx_ring[alloc_idx]; 1909 for (i = 0; i < rxq->rx_free_thresh; i++) { 1910 if (likely(i < (rxq->rx_free_thresh - 1))) 1911 /* Prefetch next mbuf */ 1912 rte_prefetch0(rxep[i + 1].mbuf); 1913 1914 mb = rxep[i].mbuf; 1915 rte_mbuf_refcnt_set(mb, 1); 1916 mb->data_off = RTE_PKTMBUF_HEADROOM; 1917 mb->nb_segs = 1; 1918 mb->port = rxq->port_id; 1919 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb)); 1920 1921 if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 1922 mb->next = NULL; 1923 rxdp[i].read.hdr_addr = 0; 1924 rxdp[i].read.pkt_addr = dma_addr; 1925 } else { 1926 mb->next = mbufs_pay[i]; 1927 pay_addr = 
rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i])); 1928 rxdp[i].read.hdr_addr = dma_addr; 1929 rxdp[i].read.pkt_addr = pay_addr; 1930 } 1931 } 1932 1933 /* Update Rx tail register */ 1934 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger); 1935 1936 rxq->rx_free_trigger = 1937 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh); 1938 if (rxq->rx_free_trigger >= rxq->nb_rx_desc) 1939 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); 1940 1941 return 0; 1942 } 1943 1944 static inline uint16_t 1945 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 1946 { 1947 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue; 1948 uint16_t nb_rx = 0; 1949 1950 if (!nb_pkts) 1951 return 0; 1952 1953 if (rxq->rx_nb_avail) 1954 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); 1955 1956 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq); 1957 rxq->rx_next_avail = 0; 1958 rxq->rx_nb_avail = nb_rx; 1959 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); 1960 1961 if (rxq->rx_tail > rxq->rx_free_trigger) { 1962 if (ice_rx_alloc_bufs(rxq) != 0) { 1963 uint16_t i, j; 1964 1965 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed += 1966 rxq->rx_free_thresh; 1967 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for " 1968 "port_id=%u, queue_id=%u", 1969 rxq->port_id, rxq->queue_id); 1970 rxq->rx_nb_avail = 0; 1971 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); 1972 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) 1973 rxq->sw_ring[j].mbuf = rxq->rx_stage[i]; 1974 1975 return 0; 1976 } 1977 } 1978 1979 if (rxq->rx_tail >= rxq->nb_rx_desc) 1980 rxq->rx_tail = 0; 1981 1982 if (rxq->rx_nb_avail) 1983 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); 1984 1985 return 0; 1986 } 1987 1988 static uint16_t 1989 ice_recv_pkts_bulk_alloc(void *rx_queue, 1990 struct rte_mbuf **rx_pkts, 1991 uint16_t nb_pkts) 1992 { 1993 uint16_t nb_rx = 0; 1994 uint16_t n; 1995 uint16_t count; 1996 1997 if (unlikely(nb_pkts == 0)) 1998 return nb_rx; 1999 2000 if (likely(nb_pkts <= ICE_RX_MAX_BURST)) 2001 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts); 2002 2003 while (nb_pkts) { 2004 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST); 2005 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); 2006 nb_rx = (uint16_t)(nb_rx + count); 2007 nb_pkts = (uint16_t)(nb_pkts - count); 2008 if (count < n) 2009 break; 2010 } 2011 2012 return nb_rx; 2013 } 2014 2015 static uint16_t 2016 ice_recv_scattered_pkts(void *rx_queue, 2017 struct rte_mbuf **rx_pkts, 2018 uint16_t nb_pkts) 2019 { 2020 struct ice_rx_queue *rxq = rx_queue; 2021 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring; 2022 volatile union ice_rx_flex_desc *rxdp; 2023 union ice_rx_flex_desc rxd; 2024 struct ice_rx_entry *sw_ring = rxq->sw_ring; 2025 struct ice_rx_entry *rxe; 2026 struct rte_mbuf *first_seg = rxq->pkt_first_seg; 2027 struct rte_mbuf *last_seg = rxq->pkt_last_seg; 2028 struct rte_mbuf *nmb; /* new allocated mbuf */ 2029 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */ 2030 uint16_t rx_id = rxq->rx_tail; 2031 uint16_t nb_rx = 0; 2032 uint16_t nb_hold = 0; 2033 uint16_t rx_packet_len; 2034 uint16_t rx_stat_err0; 2035 uint64_t dma_addr; 2036 uint64_t pkt_flags; 2037 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; 2038 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2039 bool is_tsinit = false; 2040 uint64_t ts_ns; 2041 struct ice_vsi *vsi = rxq->vsi; 2042 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 2043 struct ice_adapter *ad = rxq->vsi->adapter; 2044 2045 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { 2046 uint64_t sw_cur_time = 
rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); 2047 2048 if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) 2049 is_tsinit = true; 2050 } 2051 #endif 2052 2053 while (nb_rx < nb_pkts) { 2054 rxdp = &rx_ring[rx_id]; 2055 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); 2056 2057 /* Check the DD bit first */ 2058 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) 2059 break; 2060 2061 /* allocate mbuf */ 2062 nmb = rte_mbuf_raw_alloc(rxq->mp); 2063 if (unlikely(!nmb)) { 2064 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++; 2065 break; 2066 } 2067 rxd = *rxdp; /* copy descriptor in ring to temp variable*/ 2068 2069 nb_hold++; 2070 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */ 2071 rx_id++; 2072 if (unlikely(rx_id == rxq->nb_rx_desc)) 2073 rx_id = 0; 2074 2075 /* Prefetch next mbuf */ 2076 rte_prefetch0(sw_ring[rx_id].mbuf); 2077 2078 /** 2079 * When next RX descriptor is on a cache line boundary, 2080 * prefetch the next 4 RX descriptors and next 8 pointers 2081 * to mbufs. 2082 */ 2083 if ((rx_id & 0x3) == 0) { 2084 rte_prefetch0(&rx_ring[rx_id]); 2085 rte_prefetch0(&sw_ring[rx_id]); 2086 } 2087 2088 rxm = rxe->mbuf; 2089 rxe->mbuf = nmb; 2090 dma_addr = 2091 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); 2092 2093 /* Set data buffer address and data length of the mbuf */ 2094 rxdp->read.hdr_addr = 0; 2095 rxdp->read.pkt_addr = dma_addr; 2096 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) & 2097 ICE_RX_FLX_DESC_PKT_LEN_M; 2098 rxm->data_len = rx_packet_len; 2099 rxm->data_off = RTE_PKTMBUF_HEADROOM; 2100 2101 /** 2102 * If this is the first buffer of the received packet, set the 2103 * pointer to the first mbuf of the packet and initialize its 2104 * context. Otherwise, update the total length and the number 2105 * of segments of the current scattered packet, and update the 2106 * pointer to the last mbuf of the current packet. 2107 */ 2108 if (!first_seg) { 2109 first_seg = rxm; 2110 first_seg->nb_segs = 1; 2111 first_seg->pkt_len = rx_packet_len; 2112 } else { 2113 first_seg->pkt_len = 2114 (uint16_t)(first_seg->pkt_len + 2115 rx_packet_len); 2116 first_seg->nb_segs++; 2117 last_seg->next = rxm; 2118 } 2119 2120 /** 2121 * If this is not the last buffer of the received packet, 2122 * update the pointer to the last mbuf of the current scattered 2123 * packet and continue to parse the RX ring. 2124 */ 2125 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) { 2126 last_seg = rxm; 2127 continue; 2128 } 2129 2130 /** 2131 * This is the last buffer of the received packet. If the CRC 2132 * is not stripped by the hardware: 2133 * - Subtract the CRC length from the total packet length. 2134 * - If the last buffer only contains the whole CRC or a part 2135 * of it, free the mbuf associated to the last buffer. If part 2136 * of the CRC is also contained in the previous mbuf, subtract 2137 * the length of that CRC part from the data length of the 2138 * previous mbuf. 
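 * For example (illustrative numbers): with the 4-byte RTE_ETHER_CRC_LEN and a last
 * buffer that holds only 2 bytes, that buffer is freed and the remaining 2 CRC bytes
 * are trimmed from the previous segment's data_len.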
2139 */ 2140 rxm->next = NULL; 2141 if (unlikely(rxq->crc_len > 0)) { 2142 first_seg->pkt_len -= RTE_ETHER_CRC_LEN; 2143 if (rx_packet_len <= RTE_ETHER_CRC_LEN) { 2144 rte_pktmbuf_free_seg(rxm); 2145 first_seg->nb_segs--; 2146 last_seg->data_len = 2147 (uint16_t)(last_seg->data_len - 2148 (RTE_ETHER_CRC_LEN - rx_packet_len)); 2149 last_seg->next = NULL; 2150 } else 2151 rxm->data_len = (uint16_t)(rx_packet_len - 2152 RTE_ETHER_CRC_LEN); 2153 } else if (rx_packet_len == 0) { 2154 rte_pktmbuf_free_seg(rxm); 2155 first_seg->nb_segs--; 2156 last_seg->next = NULL; 2157 } 2158 2159 first_seg->port = rxq->port_id; 2160 first_seg->ol_flags = 0; 2161 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & 2162 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; 2163 ice_rxd_to_vlan_tci(first_seg, &rxd); 2164 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd); 2165 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); 2166 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2167 if (ice_timestamp_dynflag > 0 && 2168 (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 2169 rxq->time_high = 2170 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2171 if (unlikely(is_tsinit)) { 2172 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high); 2173 rxq->hw_time_low = (uint32_t)ts_ns; 2174 rxq->hw_time_high = (uint32_t)(ts_ns >> 32); 2175 is_tsinit = false; 2176 } else { 2177 if (rxq->time_high < rxq->hw_time_low) 2178 rxq->hw_time_high += 1; 2179 ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; 2180 rxq->hw_time_low = rxq->time_high; 2181 } 2182 rxq->hw_time_update = rte_get_timer_cycles() / 2183 (rte_get_timer_hz() / 1000); 2184 *RTE_MBUF_DYNFIELD(first_seg, 2185 (ice_timestamp_dynfield_offset), 2186 rte_mbuf_timestamp_t *) = ts_ns; 2187 pkt_flags |= ice_timestamp_dynflag; 2188 } 2189 2190 if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK) 2191 == RTE_PTYPE_L2_ETHER_TIMESYNC)) { 2192 rxq->time_high = 2193 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2194 first_seg->timesync = rxq->queue_id; 2195 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 2196 } 2197 #endif 2198 first_seg->ol_flags |= pkt_flags; 2199 /* Prefetch data of first segment, if configured to do so. */ 2200 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr, 2201 first_seg->data_off)); 2202 rx_pkts[nb_rx++] = first_seg; 2203 first_seg = NULL; 2204 } 2205 2206 /* Record index of the next RX descriptor to probe. */ 2207 rxq->rx_tail = rx_id; 2208 rxq->pkt_first_seg = first_seg; 2209 rxq->pkt_last_seg = last_seg; 2210 2211 /** 2212 * If the number of free RX descriptors is greater than the RX free 2213 * threshold of the queue, advance the Receive Descriptor Tail (RDT) 2214 * register. Update the RDT with the value of the last processed RX 2215 * descriptor minus 1, to guarantee that the RDT register is never 2216 * equal to the RDH register, which creates a "full" ring situation 2217 * from the hardware point of view. 2218 */ 2219 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); 2220 if (nb_hold > rxq->rx_free_thresh) { 2221 rx_id = (uint16_t)(rx_id == 0 ? 
2222 (rxq->nb_rx_desc - 1) : (rx_id - 1)); 2223 /* write TAIL register */ 2224 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id); 2225 nb_hold = 0; 2226 } 2227 rxq->nb_rx_hold = nb_hold; 2228 2229 /* return received packet in the burst */ 2230 return nb_rx; 2231 } 2232 2233 const uint32_t * 2234 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements) 2235 { 2236 struct ice_adapter *ad = 2237 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 2238 const uint32_t *ptypes; 2239 2240 static const uint32_t ptypes_os[] = { 2241 /* refers to ice_get_default_pkt_type() */ 2242 RTE_PTYPE_L2_ETHER, 2243 RTE_PTYPE_L2_ETHER_TIMESYNC, 2244 RTE_PTYPE_L2_ETHER_LLDP, 2245 RTE_PTYPE_L2_ETHER_ARP, 2246 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2247 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2248 RTE_PTYPE_L4_FRAG, 2249 RTE_PTYPE_L4_ICMP, 2250 RTE_PTYPE_L4_NONFRAG, 2251 RTE_PTYPE_L4_SCTP, 2252 RTE_PTYPE_L4_TCP, 2253 RTE_PTYPE_L4_UDP, 2254 RTE_PTYPE_TUNNEL_GRENAT, 2255 RTE_PTYPE_TUNNEL_IP, 2256 RTE_PTYPE_INNER_L2_ETHER, 2257 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2258 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2259 RTE_PTYPE_INNER_L4_FRAG, 2260 RTE_PTYPE_INNER_L4_ICMP, 2261 RTE_PTYPE_INNER_L4_NONFRAG, 2262 RTE_PTYPE_INNER_L4_SCTP, 2263 RTE_PTYPE_INNER_L4_TCP, 2264 RTE_PTYPE_INNER_L4_UDP, 2265 }; 2266 2267 static const uint32_t ptypes_comms[] = { 2268 /* refers to ice_get_default_pkt_type() */ 2269 RTE_PTYPE_L2_ETHER, 2270 RTE_PTYPE_L2_ETHER_TIMESYNC, 2271 RTE_PTYPE_L2_ETHER_LLDP, 2272 RTE_PTYPE_L2_ETHER_ARP, 2273 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2274 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2275 RTE_PTYPE_L4_FRAG, 2276 RTE_PTYPE_L4_ICMP, 2277 RTE_PTYPE_L4_NONFRAG, 2278 RTE_PTYPE_L4_SCTP, 2279 RTE_PTYPE_L4_TCP, 2280 RTE_PTYPE_L4_UDP, 2281 RTE_PTYPE_TUNNEL_GRENAT, 2282 RTE_PTYPE_TUNNEL_IP, 2283 RTE_PTYPE_INNER_L2_ETHER, 2284 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2285 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2286 RTE_PTYPE_INNER_L4_FRAG, 2287 RTE_PTYPE_INNER_L4_ICMP, 2288 RTE_PTYPE_INNER_L4_NONFRAG, 2289 RTE_PTYPE_INNER_L4_SCTP, 2290 RTE_PTYPE_INNER_L4_TCP, 2291 RTE_PTYPE_INNER_L4_UDP, 2292 RTE_PTYPE_TUNNEL_GTPC, 2293 RTE_PTYPE_TUNNEL_GTPU, 2294 RTE_PTYPE_L2_ETHER_PPPOE, 2295 }; 2296 2297 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) { 2298 *no_of_elements = RTE_DIM(ptypes_comms); 2299 ptypes = ptypes_comms; 2300 } else { 2301 *no_of_elements = RTE_DIM(ptypes_os); 2302 ptypes = ptypes_os; 2303 } 2304 2305 if (dev->rx_pkt_burst == ice_recv_pkts || 2306 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc || 2307 dev->rx_pkt_burst == ice_recv_scattered_pkts) 2308 return ptypes; 2309 2310 #ifdef RTE_ARCH_X86 2311 if (dev->rx_pkt_burst == ice_recv_pkts_vec || 2312 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec || 2313 #ifdef CC_AVX512_SUPPORT 2314 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 || 2315 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload || 2316 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 || 2317 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload || 2318 #endif 2319 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 || 2320 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload || 2321 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 || 2322 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload) 2323 return ptypes; 2324 #endif 2325 2326 return NULL; 2327 } 2328 2329 int 2330 ice_rx_descriptor_status(void *rx_queue, uint16_t offset) 2331 { 2332 volatile union ice_rx_flex_desc *rxdp; 2333 struct ice_rx_queue *rxq = rx_queue; 2334 uint32_t desc; 2335 2336 if (unlikely(offset >= rxq->nb_rx_desc)) 2337 return 
-EINVAL; 2338 2339 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) 2340 return RTE_ETH_RX_DESC_UNAVAIL; 2341 2342 desc = rxq->rx_tail + offset; 2343 if (desc >= rxq->nb_rx_desc) 2344 desc -= rxq->nb_rx_desc; 2345 2346 rxdp = &rxq->rx_ring[desc]; 2347 if (rte_le_to_cpu_16(rxdp->wb.status_error0) & 2348 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) 2349 return RTE_ETH_RX_DESC_DONE; 2350 2351 return RTE_ETH_RX_DESC_AVAIL; 2352 } 2353 2354 int 2355 ice_tx_descriptor_status(void *tx_queue, uint16_t offset) 2356 { 2357 struct ci_tx_queue *txq = tx_queue; 2358 volatile uint64_t *status; 2359 uint64_t mask, expect; 2360 uint32_t desc; 2361 2362 if (unlikely(offset >= txq->nb_tx_desc)) 2363 return -EINVAL; 2364 2365 desc = txq->tx_tail + offset; 2366 /* go to next desc that has the RS bit */ 2367 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * 2368 txq->tx_rs_thresh; 2369 if (desc >= txq->nb_tx_desc) { 2370 desc -= txq->nb_tx_desc; 2371 if (desc >= txq->nb_tx_desc) 2372 desc -= txq->nb_tx_desc; 2373 } 2374 2375 status = &txq->ice_tx_ring[desc].cmd_type_offset_bsz; 2376 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M); 2377 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE << 2378 ICE_TXD_QW1_DTYPE_S); 2379 if ((*status & mask) == expect) 2380 return RTE_ETH_TX_DESC_DONE; 2381 2382 return RTE_ETH_TX_DESC_FULL; 2383 } 2384 2385 void 2386 ice_free_queues(struct rte_eth_dev *dev) 2387 { 2388 uint16_t i; 2389 2390 PMD_INIT_FUNC_TRACE(); 2391 2392 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2393 if (!dev->data->rx_queues[i]) 2394 continue; 2395 ice_rx_queue_release(dev->data->rx_queues[i]); 2396 dev->data->rx_queues[i] = NULL; 2397 } 2398 dev->data->nb_rx_queues = 0; 2399 2400 for (i = 0; i < dev->data->nb_tx_queues; i++) { 2401 if (!dev->data->tx_queues[i]) 2402 continue; 2403 ice_tx_queue_release(dev->data->tx_queues[i]); 2404 dev->data->tx_queues[i] = NULL; 2405 } 2406 dev->data->nb_tx_queues = 0; 2407 } 2408 2409 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC 2410 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC 2411 2412 int 2413 ice_fdir_setup_tx_resources(struct ice_pf *pf) 2414 { 2415 struct ci_tx_queue *txq; 2416 const struct rte_memzone *tz = NULL; 2417 uint32_t ring_size; 2418 struct rte_eth_dev *dev; 2419 2420 if (!pf) { 2421 PMD_DRV_LOG(ERR, "PF is not available"); 2422 return -EINVAL; 2423 } 2424 2425 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id]; 2426 2427 /* Allocate the TX queue data structure. */ 2428 txq = rte_zmalloc_socket("ice fdir tx queue", 2429 sizeof(struct ci_tx_queue), 2430 RTE_CACHE_LINE_SIZE, 2431 SOCKET_ID_ANY); 2432 if (!txq) { 2433 PMD_DRV_LOG(ERR, "Failed to allocate memory for " 2434 "tx queue structure."); 2435 return -ENOMEM; 2436 } 2437 2438 /* Allocate TX hardware ring descriptors. 
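 * ICE_FDIR_NUM_TX_DESC is the minimum ring size (ICE_MIN_RING_DESC); assuming the
 * usual 64-entry minimum ring and the 16-byte ice_tx_desc layout, ring_size below
 * works out to 1 KiB before the ICE_DMA_MEM_ALIGN round-up (numbers quoted for
 * illustration only, see ice_rxtx.h for the authoritative definitions).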
*/ 2439 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC; 2440 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 2441 2442 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring", 2443 ICE_FDIR_QUEUE_ID, ring_size, 2444 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); 2445 if (!tz) { 2446 ice_tx_queue_release(txq); 2447 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX."); 2448 return -ENOMEM; 2449 } 2450 2451 txq->mz = tz; 2452 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC; 2453 txq->queue_id = ICE_FDIR_QUEUE_ID; 2454 txq->reg_idx = pf->fdir.fdir_vsi->base_queue; 2455 txq->ice_vsi = pf->fdir.fdir_vsi; 2456 2457 txq->tx_ring_dma = tz->iova; 2458 txq->ice_tx_ring = (struct ice_tx_desc *)tz->addr; 2459 /* 2460 * don't need to allocate software ring and reset for the fdir 2461 * program queue just set the queue has been configured. 2462 */ 2463 txq->q_set = true; 2464 pf->fdir.txq = txq; 2465 2466 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs; 2467 2468 return ICE_SUCCESS; 2469 } 2470 2471 int 2472 ice_fdir_setup_rx_resources(struct ice_pf *pf) 2473 { 2474 struct ice_rx_queue *rxq; 2475 const struct rte_memzone *rz = NULL; 2476 uint32_t ring_size; 2477 struct rte_eth_dev *dev; 2478 2479 if (!pf) { 2480 PMD_DRV_LOG(ERR, "PF is not available"); 2481 return -EINVAL; 2482 } 2483 2484 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id]; 2485 2486 /* Allocate the RX queue data structure. */ 2487 rxq = rte_zmalloc_socket("ice fdir rx queue", 2488 sizeof(struct ice_rx_queue), 2489 RTE_CACHE_LINE_SIZE, 2490 SOCKET_ID_ANY); 2491 if (!rxq) { 2492 PMD_DRV_LOG(ERR, "Failed to allocate memory for " 2493 "rx queue structure."); 2494 return -ENOMEM; 2495 } 2496 2497 /* Allocate RX hardware ring descriptors. */ 2498 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC; 2499 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 2500 2501 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring", 2502 ICE_FDIR_QUEUE_ID, ring_size, 2503 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); 2504 if (!rz) { 2505 ice_rx_queue_release(rxq); 2506 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX."); 2507 return -ENOMEM; 2508 } 2509 2510 rxq->mz = rz; 2511 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC; 2512 rxq->queue_id = ICE_FDIR_QUEUE_ID; 2513 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue; 2514 rxq->vsi = pf->fdir.fdir_vsi; 2515 2516 rxq->rx_ring_dma = rz->iova; 2517 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC * 2518 sizeof(union ice_32byte_rx_desc)); 2519 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr; 2520 2521 /* 2522 * Don't need to allocate software ring and reset for the fdir 2523 * rx queue, just set the queue has been configured. 
*/ 2524 2525 rxq->q_set = true; 2526 pf->fdir.rxq = rxq; 2527 2528 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs; 2529 2530 return ICE_SUCCESS; 2531 } 2532 2533 uint16_t 2534 ice_recv_pkts(void *rx_queue, 2535 struct rte_mbuf **rx_pkts, 2536 uint16_t nb_pkts) 2537 { 2538 struct ice_rx_queue *rxq = rx_queue; 2539 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring; 2540 volatile union ice_rx_flex_desc *rxdp; 2541 union ice_rx_flex_desc rxd; 2542 struct ice_rx_entry *sw_ring = rxq->sw_ring; 2543 struct ice_rx_entry *rxe; 2544 struct rte_mbuf *nmb; /* newly allocated mbuf */ 2545 struct rte_mbuf *nmb_pay; /* newly allocated payload mbuf */ 2546 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */ 2547 uint16_t rx_id = rxq->rx_tail; 2548 uint16_t nb_rx = 0; 2549 uint16_t nb_hold = 0; 2550 uint16_t rx_packet_len; 2551 uint16_t rx_header_len; 2552 uint16_t rx_stat_err0; 2553 uint64_t dma_addr; 2554 uint64_t pkt_flags; 2555 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; 2556 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2557 bool is_tsinit = false; 2558 uint64_t ts_ns; 2559 struct ice_vsi *vsi = rxq->vsi; 2560 struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 2561 struct ice_adapter *ad = rxq->vsi->adapter; 2562 2563 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { 2564 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); 2565 2566 if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) 2567 is_tsinit = true; 2568 } 2569 #endif 2570 2571 while (nb_rx < nb_pkts) { 2572 rxdp = &rx_ring[rx_id]; 2573 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); 2574 2575 /* Check the DD bit first */ 2576 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) 2577 break; 2578 2579 /* allocate header mbuf */ 2580 nmb = rte_mbuf_raw_alloc(rxq->mp); 2581 if (unlikely(!nmb)) { 2582 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++; 2583 break; 2584 } 2585 2586 rxd = *rxdp; /* copy the ring descriptor to a temp variable */ 2587 2588 nb_hold++; 2589 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */ 2590 rx_id++; 2591 if (unlikely(rx_id == rxq->nb_rx_desc)) 2592 rx_id = 0; 2593 rxm = rxe->mbuf; 2594 rxe->mbuf = nmb; 2595 dma_addr = 2596 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); 2597 2598 if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 2599 /** 2600 * fill the read format of the descriptor with the physical address of 2601 * the newly allocated mbuf: nmb 2602 */ 2603 rxdp->read.hdr_addr = 0; 2604 rxdp->read.pkt_addr = dma_addr; 2605 } else { 2606 /* allocate payload mbuf */ 2607 nmb_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp); 2608 if (unlikely(!nmb_pay)) { 2609 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++; 2610 rxe->mbuf = NULL; 2611 nb_hold--; 2612 if (unlikely(rx_id == 0)) 2613 rx_id = rxq->nb_rx_desc; 2614 2615 rx_id--; 2616 rte_pktmbuf_free(nmb); 2617 break; 2618 } 2619 2620 nmb->next = nmb_pay; 2621 nmb_pay->next = NULL; 2622 2623 /** 2624 * fill the read format of the descriptor with the physical addresses of 2625 * the newly allocated header (nmb) and payload (nmb_pay) mbufs 2626 */ 2627 rxdp->read.hdr_addr = dma_addr; 2628 rxdp->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb_pay)); 2629 } 2630 2631 /* fill old mbuf with received descriptor: rxd */ 2632 rxm->data_off = RTE_PKTMBUF_HEADROOM; 2633 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM)); 2634 if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 2635 rxm->nb_segs = 1; 2636 rxm->next = NULL; 2637 /* calculate rx_packet_len of the received pkt */ 2638 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) & 2639
ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 2640 rxm->data_len = rx_packet_len; 2641 rxm->pkt_len = rx_packet_len; 2642 } else { 2643 rxm->nb_segs = (uint16_t)(rxm->nb_segs + rxm->next->nb_segs); 2644 rxm->next->next = NULL; 2645 /* calculate rx_packet_len of the received pkt */ 2646 rx_header_len = rte_le_to_cpu_16(rxd.wb.hdr_len_sph_flex_flags1) & 2647 ICE_RX_FLEX_DESC_HEADER_LEN_M; 2648 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) & 2649 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 2650 rxm->data_len = rx_header_len; 2651 rxm->pkt_len = rx_header_len + rx_packet_len; 2652 rxm->next->data_len = rx_packet_len; 2653 2654 #ifdef RTE_ETHDEV_DEBUG_RX 2655 rte_pktmbuf_dump(stdout, rxm, rte_pktmbuf_pkt_len(rxm)); 2656 #endif 2657 } 2658 2659 rxm->port = rxq->port_id; 2660 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & 2661 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; 2662 ice_rxd_to_vlan_tci(rxm, &rxd); 2663 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd); 2664 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); 2665 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2666 if (ice_timestamp_dynflag > 0 && 2667 (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 2668 rxq->time_high = 2669 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2670 if (unlikely(is_tsinit)) { 2671 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high); 2672 rxq->hw_time_low = (uint32_t)ts_ns; 2673 rxq->hw_time_high = (uint32_t)(ts_ns >> 32); 2674 is_tsinit = false; 2675 } else { 2676 if (rxq->time_high < rxq->hw_time_low) 2677 rxq->hw_time_high += 1; 2678 ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; 2679 rxq->hw_time_low = rxq->time_high; 2680 } 2681 rxq->hw_time_update = rte_get_timer_cycles() / 2682 (rte_get_timer_hz() / 1000); 2683 *RTE_MBUF_DYNFIELD(rxm, 2684 (ice_timestamp_dynfield_offset), 2685 rte_mbuf_timestamp_t *) = ts_ns; 2686 pkt_flags |= ice_timestamp_dynflag; 2687 } 2688 2689 if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) == 2690 RTE_PTYPE_L2_ETHER_TIMESYNC)) { 2691 rxq->time_high = 2692 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2693 rxm->timesync = rxq->queue_id; 2694 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 2695 } 2696 #endif 2697 rxm->ol_flags |= pkt_flags; 2698 /* copy old mbuf to rx_pkts */ 2699 rx_pkts[nb_rx++] = rxm; 2700 } 2701 2702 rxq->rx_tail = rx_id; 2703 /** 2704 * If the number of free RX descriptors is greater than the RX free 2705 * threshold of the queue, advance the receive tail register of queue. 2706 * Update that register with the value of the last processed RX 2707 * descriptor minus 1. 2708 */ 2709 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); 2710 if (nb_hold > rxq->rx_free_thresh) { 2711 rx_id = (uint16_t)(rx_id == 0 ? 
2712 (rxq->nb_rx_desc - 1) : (rx_id - 1)); 2713 /* write TAIL register */ 2714 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id); 2715 nb_hold = 0; 2716 } 2717 rxq->nb_rx_hold = nb_hold; 2718 2719 /* return received packet in the burst */ 2720 return nb_rx; 2721 } 2722 2723 static inline void 2724 ice_parse_tunneling_params(uint64_t ol_flags, 2725 union ice_tx_offload tx_offload, 2726 uint32_t *cd_tunneling) 2727 { 2728 /* EIPT: External (outer) IP header type */ 2729 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) 2730 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4; 2731 else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) 2732 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM; 2733 else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) 2734 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6; 2735 2736 /* EIPLEN: External (outer) IP header length, in DWords */ 2737 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) << 2738 ICE_TXD_CTX_QW0_EIPLEN_S; 2739 2740 /* L4TUNT: L4 Tunneling Type */ 2741 switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { 2742 case RTE_MBUF_F_TX_TUNNEL_IPIP: 2743 /* for non UDP / GRE tunneling, set to 00b */ 2744 break; 2745 case RTE_MBUF_F_TX_TUNNEL_VXLAN: 2746 case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE: 2747 case RTE_MBUF_F_TX_TUNNEL_GTP: 2748 case RTE_MBUF_F_TX_TUNNEL_GENEVE: 2749 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING; 2750 break; 2751 case RTE_MBUF_F_TX_TUNNEL_GRE: 2752 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING; 2753 break; 2754 default: 2755 PMD_TX_LOG(ERR, "Tunnel type not supported"); 2756 return; 2757 } 2758 2759 /* L4TUNLEN: L4 Tunneling Length, in Words 2760 * 2761 * We depend on app to set rte_mbuf.l2_len correctly. 2762 * For IP in GRE it should be set to the length of the GRE 2763 * header; 2764 * For MAC in GRE or MAC in UDP it should be set to the length 2765 * of the GRE or UDP headers plus the inner MAC up to including 2766 * its last Ethertype. 2767 * If MPLS labels exists, it should include them as well. 2768 */ 2769 *cd_tunneling |= (tx_offload.l2_len >> 1) << 2770 ICE_TXD_CTX_QW0_NATLEN_S; 2771 2772 /** 2773 * Calculate the tunneling UDP checksum. 
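 * (For reference: L4TUNT = 01b is the UDP tunneling type programmed above for
 * VXLAN, VXLAN-GPE, GTP and GENEVE, and EIPT is non-zero whenever an outer
 * IPv4/IPv6 header was described via the RTE_MBUF_F_TX_OUTER_* flags.)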
2774 * Shall be set only if L4TUNT = 01b and EIPT is not zero 2775 */ 2776 if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) && 2777 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && 2778 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) 2779 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; 2780 } 2781 2782 static inline void 2783 ice_txd_enable_checksum(uint64_t ol_flags, 2784 uint32_t *td_cmd, 2785 uint32_t *td_offset, 2786 union ice_tx_offload tx_offload) 2787 { 2788 /* Set MACLEN */ 2789 if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) 2790 *td_offset |= (tx_offload.l2_len >> 1) 2791 << ICE_TX_DESC_LEN_MACLEN_S; 2792 2793 /* Enable L3 checksum offloads */ 2794 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { 2795 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 2796 *td_offset |= (tx_offload.l3_len >> 2) << 2797 ICE_TX_DESC_LEN_IPLEN_S; 2798 } else if (ol_flags & RTE_MBUF_F_TX_IPV4) { 2799 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 2800 *td_offset |= (tx_offload.l3_len >> 2) << 2801 ICE_TX_DESC_LEN_IPLEN_S; 2802 } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { 2803 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 2804 *td_offset |= (tx_offload.l3_len >> 2) << 2805 ICE_TX_DESC_LEN_IPLEN_S; 2806 } 2807 2808 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2809 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 2810 *td_offset |= (tx_offload.l4_len >> 2) << 2811 ICE_TX_DESC_LEN_L4_LEN_S; 2812 return; 2813 } 2814 2815 if (ol_flags & RTE_MBUF_F_TX_UDP_SEG) { 2816 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 2817 *td_offset |= (tx_offload.l4_len >> 2) << 2818 ICE_TX_DESC_LEN_L4_LEN_S; 2819 return; 2820 } 2821 2822 /* Enable L4 checksum offloads */ 2823 switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) { 2824 case RTE_MBUF_F_TX_TCP_CKSUM: 2825 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 2826 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << 2827 ICE_TX_DESC_LEN_L4_LEN_S; 2828 break; 2829 case RTE_MBUF_F_TX_SCTP_CKSUM: 2830 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 2831 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << 2832 ICE_TX_DESC_LEN_L4_LEN_S; 2833 break; 2834 case RTE_MBUF_F_TX_UDP_CKSUM: 2835 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 2836 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << 2837 ICE_TX_DESC_LEN_L4_LEN_S; 2838 break; 2839 default: 2840 break; 2841 } 2842 } 2843 2844 static inline int 2845 ice_xmit_cleanup(struct ci_tx_queue *txq) 2846 { 2847 struct ci_tx_entry *sw_ring = txq->sw_ring; 2848 volatile struct ice_tx_desc *txd = txq->ice_tx_ring; 2849 uint16_t last_desc_cleaned = txq->last_desc_cleaned; 2850 uint16_t nb_tx_desc = txq->nb_tx_desc; 2851 uint16_t desc_to_clean_to; 2852 uint16_t nb_tx_to_clean; 2853 2854 /* Determine the last descriptor needing to be cleaned */ 2855 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); 2856 if (desc_to_clean_to >= nb_tx_desc) 2857 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); 2858 2859 /* Check to make sure the last descriptor to clean is done */ 2860 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; 2861 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz & 2862 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) { 2863 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done " 2864 "(port=%d queue=%d) value=0x%"PRIx64, 2865 desc_to_clean_to, 2866 txq->port_id, txq->queue_id, 2867 txd[desc_to_clean_to].cmd_type_offset_bsz); 2868 /* Failed to clean any descriptors */ 2869 return -1; 2870 } 2871 2872 /* Figure out how many descriptors will be cleaned */ 2873 if (last_desc_cleaned > desc_to_clean_to) 2874 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + 2875 desc_to_clean_to); 2876 else 
2877 nb_tx_to_clean = (uint16_t)(desc_to_clean_to - 2878 last_desc_cleaned); 2879 2880 /* The last descriptor to clean is done, so that means all the 2881 * descriptors from the last descriptor that was cleaned 2882 * up to the last descriptor with the RS bit set 2883 * are done. Only reset the threshold descriptor. 2884 */ 2885 txd[desc_to_clean_to].cmd_type_offset_bsz = 0; 2886 2887 /* Update the txq to reflect the last descriptor that was cleaned */ 2888 txq->last_desc_cleaned = desc_to_clean_to; 2889 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); 2890 2891 return 0; 2892 } 2893 2894 /* Construct the tx flags */ 2895 static inline uint64_t 2896 ice_build_ctob(uint32_t td_cmd, 2897 uint32_t td_offset, 2898 uint16_t size, 2899 uint32_t td_tag) 2900 { 2901 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | 2902 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | 2903 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | 2904 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) | 2905 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); 2906 } 2907 2908 /* Check if the context descriptor is needed for TX offloading */ 2909 static inline uint16_t 2910 ice_calc_context_desc(uint64_t flags) 2911 { 2912 static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG | 2913 RTE_MBUF_F_TX_UDP_SEG | 2914 RTE_MBUF_F_TX_QINQ | 2915 RTE_MBUF_F_TX_OUTER_IP_CKSUM | 2916 RTE_MBUF_F_TX_TUNNEL_MASK | 2917 RTE_MBUF_F_TX_IEEE1588_TMST; 2918 2919 return (flags & mask) ? 1 : 0; 2920 } 2921 2922 /* set ice TSO context descriptor */ 2923 static inline uint64_t 2924 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload) 2925 { 2926 uint64_t ctx_desc = 0; 2927 uint32_t cd_cmd, hdr_len, cd_tso_len; 2928 2929 if (!tx_offload.l4_len) { 2930 PMD_TX_LOG(DEBUG, "L4 length set to 0"); 2931 return ctx_desc; 2932 } 2933 2934 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len; 2935 hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? 2936 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0; 2937 2938 cd_cmd = ICE_TX_CTX_DESC_TSO; 2939 cd_tso_len = mbuf->pkt_len - hdr_len; 2940 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) | 2941 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | 2942 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S); 2943 2944 return ctx_desc; 2945 } 2946 2947 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. 
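 * i.e. ICE_MAX_DATA_PER_TXD below evaluates to 16383 bytes (the buffer-size field
 * mask shifted down to bit 0); ice_xmit_pkts() splits any larger TSO segment across
 * several data descriptors.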
*/ 2948 #define ICE_MAX_DATA_PER_TXD \ 2949 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S) 2950 /* Calculate the number of TX descriptors needed for each pkt */ 2951 static inline uint16_t 2952 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt) 2953 { 2954 struct rte_mbuf *txd = tx_pkt; 2955 uint16_t count = 0; 2956 2957 while (txd != NULL) { 2958 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD); 2959 txd = txd->next; 2960 } 2961 2962 return count; 2963 } 2964 2965 uint16_t 2966 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 2967 { 2968 struct ci_tx_queue *txq; 2969 volatile struct ice_tx_desc *ice_tx_ring; 2970 volatile struct ice_tx_desc *txd; 2971 struct ci_tx_entry *sw_ring; 2972 struct ci_tx_entry *txe, *txn; 2973 struct rte_mbuf *tx_pkt; 2974 struct rte_mbuf *m_seg; 2975 uint32_t cd_tunneling_params; 2976 uint16_t tx_id; 2977 uint16_t nb_tx; 2978 uint16_t nb_used; 2979 uint16_t nb_ctx; 2980 uint32_t td_cmd = 0; 2981 uint32_t td_offset = 0; 2982 uint32_t td_tag = 0; 2983 uint16_t tx_last; 2984 uint16_t slen; 2985 uint64_t buf_dma_addr; 2986 uint64_t ol_flags; 2987 union ice_tx_offload tx_offload = {0}; 2988 2989 txq = tx_queue; 2990 sw_ring = txq->sw_ring; 2991 ice_tx_ring = txq->ice_tx_ring; 2992 tx_id = txq->tx_tail; 2993 txe = &sw_ring[tx_id]; 2994 2995 /* Check if the descriptor ring needs to be cleaned. */ 2996 if (txq->nb_tx_free < txq->tx_free_thresh) 2997 (void)ice_xmit_cleanup(txq); 2998 2999 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { 3000 tx_pkt = *tx_pkts++; 3001 3002 td_cmd = 0; 3003 td_tag = 0; 3004 td_offset = 0; 3005 ol_flags = tx_pkt->ol_flags; 3006 tx_offload.l2_len = tx_pkt->l2_len; 3007 tx_offload.l3_len = tx_pkt->l3_len; 3008 tx_offload.outer_l2_len = tx_pkt->outer_l2_len; 3009 tx_offload.outer_l3_len = tx_pkt->outer_l3_len; 3010 tx_offload.l4_len = tx_pkt->l4_len; 3011 tx_offload.tso_segsz = tx_pkt->tso_segsz; 3012 /* Calculate the number of context descriptors needed. */ 3013 nb_ctx = ice_calc_context_desc(ol_flags); 3014 3015 /* The number of descriptors that must be allocated for 3016 * a packet equals to the number of the segments of that 3017 * packet plus the number of context descriptor if needed. 3018 * Recalculate the needed tx descs when TSO enabled in case 3019 * the mbuf data size exceeds max data size that hw allows 3020 * per tx desc. 
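 * Illustration (hypothetical sizes): a TSO packet made of two mbuf segments with
 * data_len 40000 and 9000 costs DIV_ROUND_UP(40000, 16383) + DIV_ROUND_UP(9000, 16383)
 * = 3 + 1 = 4 data descriptors in ice_calc_pkt_desc(), plus one context descriptor.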
3021 */ 3022 if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) 3023 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) + 3024 nb_ctx); 3025 else 3026 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx); 3027 tx_last = (uint16_t)(tx_id + nb_used - 1); 3028 3029 /* Circular ring */ 3030 if (tx_last >= txq->nb_tx_desc) 3031 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); 3032 3033 if (nb_used > txq->nb_tx_free) { 3034 if (ice_xmit_cleanup(txq) != 0) { 3035 if (nb_tx == 0) 3036 return 0; 3037 goto end_of_tx; 3038 } 3039 if (unlikely(nb_used > txq->tx_rs_thresh)) { 3040 while (nb_used > txq->nb_tx_free) { 3041 if (ice_xmit_cleanup(txq) != 0) { 3042 if (nb_tx == 0) 3043 return 0; 3044 goto end_of_tx; 3045 } 3046 } 3047 } 3048 } 3049 3050 /* Descriptor based VLAN insertion */ 3051 if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) { 3052 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1; 3053 td_tag = tx_pkt->vlan_tci; 3054 } 3055 3056 /* Fill in tunneling parameters if necessary */ 3057 cd_tunneling_params = 0; 3058 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { 3059 td_offset |= (tx_offload.outer_l2_len >> 1) 3060 << ICE_TX_DESC_LEN_MACLEN_S; 3061 ice_parse_tunneling_params(ol_flags, tx_offload, 3062 &cd_tunneling_params); 3063 } 3064 3065 /* Enable checksum offloading */ 3066 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) 3067 ice_txd_enable_checksum(ol_flags, &td_cmd, 3068 &td_offset, tx_offload); 3069 3070 if (nb_ctx) { 3071 /* Setup TX context descriptor if required */ 3072 volatile struct ice_tx_ctx_desc *ctx_txd = 3073 (volatile struct ice_tx_ctx_desc *) 3074 &ice_tx_ring[tx_id]; 3075 uint16_t cd_l2tag2 = 0; 3076 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX; 3077 3078 txn = &sw_ring[txe->next_id]; 3079 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); 3080 if (txe->mbuf) { 3081 rte_pktmbuf_free_seg(txe->mbuf); 3082 txe->mbuf = NULL; 3083 } 3084 3085 if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) 3086 cd_type_cmd_tso_mss |= 3087 ice_set_tso_ctx(tx_pkt, tx_offload); 3088 else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) 3089 cd_type_cmd_tso_mss |= 3090 ((uint64_t)ICE_TX_CTX_DESC_TSYN << 3091 ICE_TXD_CTX_QW1_CMD_S) | 3092 (((uint64_t)txq->ice_vsi->adapter->ptp_tx_index << 3093 ICE_TXD_CTX_QW1_TSYN_S) & ICE_TXD_CTX_QW1_TSYN_M); 3094 3095 ctx_txd->tunneling_params = 3096 rte_cpu_to_le_32(cd_tunneling_params); 3097 3098 /* TX context descriptor based double VLAN insert */ 3099 if (ol_flags & RTE_MBUF_F_TX_QINQ) { 3100 cd_l2tag2 = tx_pkt->vlan_tci_outer; 3101 cd_type_cmd_tso_mss |= 3102 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 << 3103 ICE_TXD_CTX_QW1_CMD_S); 3104 } 3105 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2); 3106 ctx_txd->qw1 = 3107 rte_cpu_to_le_64(cd_type_cmd_tso_mss); 3108 3109 txe->last_id = tx_last; 3110 tx_id = txe->next_id; 3111 txe = txn; 3112 } 3113 m_seg = tx_pkt; 3114 3115 do { 3116 txd = &ice_tx_ring[tx_id]; 3117 txn = &sw_ring[txe->next_id]; 3118 3119 if (txe->mbuf) 3120 rte_pktmbuf_free_seg(txe->mbuf); 3121 txe->mbuf = m_seg; 3122 3123 /* Setup TX Descriptor */ 3124 slen = m_seg->data_len; 3125 buf_dma_addr = rte_mbuf_data_iova(m_seg); 3126 3127 while ((ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) && 3128 unlikely(slen > ICE_MAX_DATA_PER_TXD)) { 3129 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); 3130 txd->cmd_type_offset_bsz = 3131 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | 3132 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | 3133 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | 3134 ((uint64_t)ICE_MAX_DATA_PER_TXD << 3135 ICE_TXD_QW1_TX_BUF_SZ_S) | 3136 
((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); 3137 3138 buf_dma_addr += ICE_MAX_DATA_PER_TXD; 3139 slen -= ICE_MAX_DATA_PER_TXD; 3140 3141 txe->last_id = tx_last; 3142 tx_id = txe->next_id; 3143 txe = txn; 3144 txd = &ice_tx_ring[tx_id]; 3145 txn = &sw_ring[txe->next_id]; 3146 } 3147 3148 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); 3149 txd->cmd_type_offset_bsz = 3150 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | 3151 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | 3152 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | 3153 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) | 3154 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); 3155 3156 txe->last_id = tx_last; 3157 tx_id = txe->next_id; 3158 txe = txn; 3159 m_seg = m_seg->next; 3160 } while (m_seg); 3161 3162 /* fill the last descriptor with End of Packet (EOP) bit */ 3163 td_cmd |= ICE_TX_DESC_CMD_EOP; 3164 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); 3165 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); 3166 3167 /* set RS bit on the last descriptor of one packet */ 3168 if (txq->nb_tx_used >= txq->tx_rs_thresh) { 3169 PMD_TX_LOG(DEBUG, 3170 "Setting RS bit on TXD id=" 3171 "%4u (port=%d queue=%d)", 3172 tx_last, txq->port_id, txq->queue_id); 3173 3174 td_cmd |= ICE_TX_DESC_CMD_RS; 3175 3176 /* Update txq RS bit counters */ 3177 txq->nb_tx_used = 0; 3178 } 3179 txd->cmd_type_offset_bsz |= 3180 rte_cpu_to_le_64(((uint64_t)td_cmd) << 3181 ICE_TXD_QW1_CMD_S); 3182 } 3183 end_of_tx: 3184 /* update Tail register */ 3185 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id); 3186 txq->tx_tail = tx_id; 3187 3188 return nb_tx; 3189 } 3190 3191 static __rte_always_inline int 3192 ice_tx_free_bufs(struct ci_tx_queue *txq) 3193 { 3194 struct ci_tx_entry *txep; 3195 uint16_t i; 3196 3197 if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & 3198 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) != 3199 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) 3200 return 0; 3201 3202 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]; 3203 3204 for (i = 0; i < txq->tx_rs_thresh; i++) 3205 rte_prefetch0((txep + i)->mbuf); 3206 3207 if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) { 3208 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { 3209 rte_mempool_put(txep->mbuf->pool, txep->mbuf); 3210 txep->mbuf = NULL; 3211 } 3212 } else { 3213 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { 3214 rte_pktmbuf_free_seg(txep->mbuf); 3215 txep->mbuf = NULL; 3216 } 3217 } 3218 3219 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); 3220 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); 3221 if (txq->tx_next_dd >= txq->nb_tx_desc) 3222 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); 3223 3224 return txq->tx_rs_thresh; 3225 } 3226 3227 static int 3228 ice_tx_done_cleanup_full(struct ci_tx_queue *txq, 3229 uint32_t free_cnt) 3230 { 3231 struct ci_tx_entry *swr_ring = txq->sw_ring; 3232 uint16_t i, tx_last, tx_id; 3233 uint16_t nb_tx_free_last; 3234 uint16_t nb_tx_to_clean; 3235 uint32_t pkt_cnt; 3236 3237 /* Start freeing mbufs from the entry after tx_tail */ 3238 tx_last = txq->tx_tail; 3239 tx_id = swr_ring[tx_last].next_id; 3240 3241 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq)) 3242 return 0; 3243 3244 nb_tx_to_clean = txq->nb_tx_free; 3245 nb_tx_free_last = txq->nb_tx_free; 3246 if (!free_cnt) 3247 free_cnt = txq->nb_tx_desc; 3248 3249 /* Loop through swr_ring to count the amount of 3250 * freeable mbufs and packets.
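 * A packet is counted once its last segment is freed (swr_ring[tx_id].last_id == tx_id);
 * ice_xmit_cleanup() is re-run between passes to release further completed descriptors.
 * This routine typically backs rte_eth_tx_done_cleanup() when neither the vector nor the
 * simple Tx path is selected (see ice_tx_done_cleanup() below).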
3251 */ 3252 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { 3253 for (i = 0; i < nb_tx_to_clean && 3254 pkt_cnt < free_cnt && 3255 tx_id != tx_last; i++) { 3256 if (swr_ring[tx_id].mbuf != NULL) { 3257 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); 3258 swr_ring[tx_id].mbuf = NULL; 3259 3260 /* 3261 * last segment in the packet, 3262 * increment packet count 3263 */ 3264 pkt_cnt += (swr_ring[tx_id].last_id == tx_id); 3265 } 3266 3267 tx_id = swr_ring[tx_id].next_id; 3268 } 3269 3270 if (txq->tx_rs_thresh > txq->nb_tx_desc - 3271 txq->nb_tx_free || tx_id == tx_last) 3272 break; 3273 3274 if (pkt_cnt < free_cnt) { 3275 if (ice_xmit_cleanup(txq)) 3276 break; 3277 3278 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last; 3279 nb_tx_free_last = txq->nb_tx_free; 3280 } 3281 } 3282 3283 return (int)pkt_cnt; 3284 } 3285 3286 #ifdef RTE_ARCH_X86 3287 static int 3288 ice_tx_done_cleanup_vec(struct ci_tx_queue *txq __rte_unused, 3289 uint32_t free_cnt __rte_unused) 3290 { 3291 return -ENOTSUP; 3292 } 3293 #endif 3294 3295 static int 3296 ice_tx_done_cleanup_simple(struct ci_tx_queue *txq, 3297 uint32_t free_cnt) 3298 { 3299 int i, n, cnt; 3300 3301 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) 3302 free_cnt = txq->nb_tx_desc; 3303 3304 cnt = free_cnt - free_cnt % txq->tx_rs_thresh; 3305 3306 for (i = 0; i < cnt; i += n) { 3307 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh) 3308 break; 3309 3310 n = ice_tx_free_bufs(txq); 3311 3312 if (n == 0) 3313 break; 3314 } 3315 3316 return i; 3317 } 3318 3319 int 3320 ice_tx_done_cleanup(void *txq, uint32_t free_cnt) 3321 { 3322 struct ci_tx_queue *q = (struct ci_tx_queue *)txq; 3323 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id]; 3324 struct ice_adapter *ad = 3325 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3326 3327 #ifdef RTE_ARCH_X86 3328 if (ad->tx_vec_allowed) 3329 return ice_tx_done_cleanup_vec(q, free_cnt); 3330 #endif 3331 if (ad->tx_simple_allowed) 3332 return ice_tx_done_cleanup_simple(q, free_cnt); 3333 else 3334 return ice_tx_done_cleanup_full(q, free_cnt); 3335 } 3336 3337 /* Populate 4 descriptors with data from 4 mbufs */ 3338 static inline void 3339 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts) 3340 { 3341 uint64_t dma_addr; 3342 uint32_t i; 3343 3344 for (i = 0; i < 4; i++, txdp++, pkts++) { 3345 dma_addr = rte_mbuf_data_iova(*pkts); 3346 txdp->buf_addr = rte_cpu_to_le_64(dma_addr); 3347 txdp->cmd_type_offset_bsz = 3348 ice_build_ctob((uint32_t)ICE_TD_CMD, 0, 3349 (*pkts)->data_len, 0); 3350 } 3351 } 3352 3353 /* Populate 1 descriptor with data from 1 mbuf */ 3354 static inline void 3355 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts) 3356 { 3357 uint64_t dma_addr; 3358 3359 dma_addr = rte_mbuf_data_iova(*pkts); 3360 txdp->buf_addr = rte_cpu_to_le_64(dma_addr); 3361 txdp->cmd_type_offset_bsz = 3362 ice_build_ctob((uint32_t)ICE_TD_CMD, 0, 3363 (*pkts)->data_len, 0); 3364 } 3365 3366 static inline void 3367 ice_tx_fill_hw_ring(struct ci_tx_queue *txq, struct rte_mbuf **pkts, 3368 uint16_t nb_pkts) 3369 { 3370 volatile struct ice_tx_desc *txdp = &txq->ice_tx_ring[txq->tx_tail]; 3371 struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail]; 3372 const int N_PER_LOOP = 4; 3373 const int N_PER_LOOP_MASK = N_PER_LOOP - 1; 3374 int mainpart, leftover; 3375 int i, j; 3376 3377 /** 3378 * Process most of the packets in chunks of N pkts. Any 3379 * leftover packets will get processed one at a time. 
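 * e.g. nb_pkts = 7 with N_PER_LOOP = 4 gives mainpart = 7 & ~3 = 4 and
 * leftover = 7 & 3 = 3: one tx4() call followed by three tx1() calls.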
3380 */ 3381 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK); 3382 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK); 3383 for (i = 0; i < mainpart; i += N_PER_LOOP) { 3384 /* Copy N mbuf pointers to the S/W ring */ 3385 for (j = 0; j < N_PER_LOOP; ++j) 3386 (txep + i + j)->mbuf = *(pkts + i + j); 3387 tx4(txdp + i, pkts + i); 3388 } 3389 3390 if (unlikely(leftover > 0)) { 3391 for (i = 0; i < leftover; ++i) { 3392 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i); 3393 tx1(txdp + mainpart + i, pkts + mainpart + i); 3394 } 3395 } 3396 } 3397 3398 static inline uint16_t 3399 tx_xmit_pkts(struct ci_tx_queue *txq, 3400 struct rte_mbuf **tx_pkts, 3401 uint16_t nb_pkts) 3402 { 3403 volatile struct ice_tx_desc *txr = txq->ice_tx_ring; 3404 uint16_t n = 0; 3405 3406 /** 3407 * Begin scanning the H/W ring for done descriptors when the number 3408 * of available descriptors drops below tx_free_thresh. For each done 3409 * descriptor, free the associated buffer. 3410 */ 3411 if (txq->nb_tx_free < txq->tx_free_thresh) 3412 ice_tx_free_bufs(txq); 3413 3414 /* Use available descriptor only */ 3415 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); 3416 if (unlikely(!nb_pkts)) 3417 return 0; 3418 3419 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); 3420 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) { 3421 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail); 3422 ice_tx_fill_hw_ring(txq, tx_pkts, n); 3423 txr[txq->tx_next_rs].cmd_type_offset_bsz |= 3424 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << 3425 ICE_TXD_QW1_CMD_S); 3426 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); 3427 txq->tx_tail = 0; 3428 } 3429 3430 /* Fill hardware descriptor ring with mbuf data */ 3431 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); 3432 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); 3433 3434 /* Determine if RS bit needs to be set */ 3435 if (txq->tx_tail > txq->tx_next_rs) { 3436 txr[txq->tx_next_rs].cmd_type_offset_bsz |= 3437 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << 3438 ICE_TXD_QW1_CMD_S); 3439 txq->tx_next_rs = 3440 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); 3441 if (txq->tx_next_rs >= txq->nb_tx_desc) 3442 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); 3443 } 3444 3445 if (txq->tx_tail >= txq->nb_tx_desc) 3446 txq->tx_tail = 0; 3447 3448 /* Update the tx tail register */ 3449 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail); 3450 3451 return nb_pkts; 3452 } 3453 3454 static uint16_t 3455 ice_xmit_pkts_simple(void *tx_queue, 3456 struct rte_mbuf **tx_pkts, 3457 uint16_t nb_pkts) 3458 { 3459 uint16_t nb_tx = 0; 3460 3461 if (likely(nb_pkts <= ICE_TX_MAX_BURST)) 3462 return tx_xmit_pkts((struct ci_tx_queue *)tx_queue, 3463 tx_pkts, nb_pkts); 3464 3465 while (nb_pkts) { 3466 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts, 3467 ICE_TX_MAX_BURST); 3468 3469 ret = tx_xmit_pkts((struct ci_tx_queue *)tx_queue, 3470 &tx_pkts[nb_tx], num); 3471 nb_tx = (uint16_t)(nb_tx + ret); 3472 nb_pkts = (uint16_t)(nb_pkts - ret); 3473 if (ret < num) 3474 break; 3475 } 3476 3477 return nb_tx; 3478 } 3479 3480 void __rte_cold 3481 ice_set_rx_function(struct rte_eth_dev *dev) 3482 { 3483 PMD_INIT_FUNC_TRACE(); 3484 struct ice_adapter *ad = 3485 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3486 #ifdef RTE_ARCH_X86 3487 struct ice_rx_queue *rxq; 3488 int i; 3489 int rx_check_ret = -1; 3490 3491 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3492 ad->rx_use_avx512 = false; 3493 ad->rx_use_avx2 = false; 3494 rx_check_ret = ice_rx_vec_dev_check(dev); 3495 if 
(ad->ptp_ena) 3496 rx_check_ret = -1; 3497 ad->rx_vec_offload_support = 3498 (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH); 3499 if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed && 3500 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 3501 ad->rx_vec_allowed = true; 3502 for (i = 0; i < dev->data->nb_rx_queues; i++) { 3503 rxq = dev->data->rx_queues[i]; 3504 if (rxq && ice_rxq_vec_setup(rxq)) { 3505 ad->rx_vec_allowed = false; 3506 break; 3507 } 3508 } 3509 3510 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 && 3511 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 && 3512 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1) 3513 #ifdef CC_AVX512_SUPPORT 3514 ad->rx_use_avx512 = true; 3515 #else 3516 PMD_DRV_LOG(NOTICE, 3517 "AVX512 is not supported in build env"); 3518 #endif 3519 if (!ad->rx_use_avx512 && 3520 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || 3521 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && 3522 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) 3523 ad->rx_use_avx2 = true; 3524 3525 } else { 3526 ad->rx_vec_allowed = false; 3527 } 3528 } 3529 3530 if (ad->rx_vec_allowed) { 3531 if (dev->data->scattered_rx) { 3532 if (ad->rx_use_avx512) { 3533 #ifdef CC_AVX512_SUPPORT 3534 if (ad->rx_vec_offload_support) { 3535 PMD_DRV_LOG(NOTICE, 3536 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).", 3537 dev->data->port_id); 3538 dev->rx_pkt_burst = 3539 ice_recv_scattered_pkts_vec_avx512_offload; 3540 } else { 3541 PMD_DRV_LOG(NOTICE, 3542 "Using AVX512 Vector Scattered Rx (port %d).", 3543 dev->data->port_id); 3544 dev->rx_pkt_burst = 3545 ice_recv_scattered_pkts_vec_avx512; 3546 } 3547 #endif 3548 } else if (ad->rx_use_avx2) { 3549 if (ad->rx_vec_offload_support) { 3550 PMD_DRV_LOG(NOTICE, 3551 "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).", 3552 dev->data->port_id); 3553 dev->rx_pkt_burst = 3554 ice_recv_scattered_pkts_vec_avx2_offload; 3555 } else { 3556 PMD_DRV_LOG(NOTICE, 3557 "Using AVX2 Vector Scattered Rx (port %d).", 3558 dev->data->port_id); 3559 dev->rx_pkt_burst = 3560 ice_recv_scattered_pkts_vec_avx2; 3561 } 3562 } else { 3563 PMD_DRV_LOG(DEBUG, 3564 "Using Vector Scattered Rx (port %d).", 3565 dev->data->port_id); 3566 dev->rx_pkt_burst = ice_recv_scattered_pkts_vec; 3567 } 3568 } else { 3569 if (ad->rx_use_avx512) { 3570 #ifdef CC_AVX512_SUPPORT 3571 if (ad->rx_vec_offload_support) { 3572 PMD_DRV_LOG(NOTICE, 3573 "Using AVX512 OFFLOAD Vector Rx (port %d).", 3574 dev->data->port_id); 3575 dev->rx_pkt_burst = 3576 ice_recv_pkts_vec_avx512_offload; 3577 } else { 3578 PMD_DRV_LOG(NOTICE, 3579 "Using AVX512 Vector Rx (port %d).", 3580 dev->data->port_id); 3581 dev->rx_pkt_burst = 3582 ice_recv_pkts_vec_avx512; 3583 } 3584 #endif 3585 } else if (ad->rx_use_avx2) { 3586 if (ad->rx_vec_offload_support) { 3587 PMD_DRV_LOG(NOTICE, 3588 "Using AVX2 OFFLOAD Vector Rx (port %d).", 3589 dev->data->port_id); 3590 dev->rx_pkt_burst = 3591 ice_recv_pkts_vec_avx2_offload; 3592 } else { 3593 PMD_DRV_LOG(NOTICE, 3594 "Using AVX2 Vector Rx (port %d).", 3595 dev->data->port_id); 3596 dev->rx_pkt_burst = 3597 ice_recv_pkts_vec_avx2; 3598 } 3599 } else { 3600 PMD_DRV_LOG(DEBUG, 3601 "Using Vector Rx (port %d).", 3602 dev->data->port_id); 3603 dev->rx_pkt_burst = ice_recv_pkts_vec; 3604 } 3605 } 3606 return; 3607 } 3608 3609 #endif 3610 3611 if (dev->data->scattered_rx) { 3612 /* Set the non-LRO scattered function */ 3613 PMD_INIT_LOG(DEBUG, 3614 "Using a Scattered function on port %d.", 3615 dev->data->port_id); 3616 dev->rx_pkt_burst = 
ice_recv_scattered_pkts; 3617 } else if (ad->rx_bulk_alloc_allowed) { 3618 PMD_INIT_LOG(DEBUG, 3619 "Rx Burst Bulk Alloc Preconditions are " 3620 "satisfied. Rx Burst Bulk Alloc function " 3621 "will be used on port %d.", 3622 dev->data->port_id); 3623 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc; 3624 } else { 3625 PMD_INIT_LOG(DEBUG, 3626 "Rx Burst Bulk Alloc Preconditions are not " 3627 "satisfied, Normal Rx will be used on port %d.", 3628 dev->data->port_id); 3629 dev->rx_pkt_burst = ice_recv_pkts; 3630 } 3631 } 3632 3633 static const struct { 3634 eth_rx_burst_t pkt_burst; 3635 const char *info; 3636 } ice_rx_burst_infos[] = { 3637 { ice_recv_scattered_pkts, "Scalar Scattered" }, 3638 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" }, 3639 { ice_recv_pkts, "Scalar" }, 3640 #ifdef RTE_ARCH_X86 3641 #ifdef CC_AVX512_SUPPORT 3642 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" }, 3643 { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" }, 3644 { ice_recv_pkts_vec_avx512, "Vector AVX512" }, 3645 { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" }, 3646 #endif 3647 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" }, 3648 { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" }, 3649 { ice_recv_pkts_vec_avx2, "Vector AVX2" }, 3650 { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" }, 3651 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" }, 3652 { ice_recv_pkts_vec, "Vector SSE" }, 3653 #endif 3654 }; 3655 3656 int 3657 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3658 struct rte_eth_burst_mode *mode) 3659 { 3660 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 3661 int ret = -EINVAL; 3662 unsigned int i; 3663 3664 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) { 3665 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) { 3666 snprintf(mode->info, sizeof(mode->info), "%s", 3667 ice_rx_burst_infos[i].info); 3668 ret = 0; 3669 break; 3670 } 3671 } 3672 3673 return ret; 3674 } 3675 3676 void __rte_cold 3677 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ci_tx_queue *txq) 3678 { 3679 struct ice_adapter *ad = 3680 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3681 3682 /* Use a simple Tx queue if possible (only fast free is allowed) */ 3683 ad->tx_simple_allowed = 3684 (txq->offloads == 3685 (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) && 3686 txq->tx_rs_thresh >= ICE_TX_MAX_BURST); 3687 3688 if (ad->tx_simple_allowed) 3689 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.", 3690 txq->queue_id); 3691 else 3692 PMD_INIT_LOG(DEBUG, 3693 "Simple Tx can NOT be enabled on Tx queue %u.", 3694 txq->queue_id); 3695 } 3696 3697 /********************************************************************* 3698 * 3699 * TX prep functions 3700 * 3701 **********************************************************************/ 3702 /* The default values of TSO MSS */ 3703 #define ICE_MIN_TSO_MSS 64 3704 #define ICE_MAX_TSO_MSS 9728 3705 #define ICE_MAX_TSO_FRAME_SIZE 262144 3706 3707 /*Check for empty mbuf*/ 3708 static inline uint16_t 3709 ice_check_empty_mbuf(struct rte_mbuf *tx_pkt) 3710 { 3711 struct rte_mbuf *txd = tx_pkt; 3712 3713 while (txd != NULL) { 3714 if (txd->data_len == 0) 3715 return -1; 3716 txd = txd->next; 3717 } 3718 3719 return 0; 3720 } 3721 3722 /* Tx mbuf check */ 3723 static uint16_t 3724 ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 3725 { 3726 struct ci_tx_queue *txq = tx_queue; 3727 uint16_t idx; 
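	/* Diagnostic Tx wrapper: installed in place of the regular burst function only
	 * when the mbuf_check devargs is enabled (see ice_set_tx_function()); packets
	 * that pass the selected checks are handed to the real routine saved in
	 * adapter->tx_pkt_burst.
	 */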
3728 struct rte_mbuf *mb; 3729 bool pkt_error = false; 3730 uint16_t good_pkts = nb_pkts; 3731 const char *reason = NULL; 3732 struct ice_adapter *adapter = txq->ice_vsi->adapter; 3733 uint64_t ol_flags; 3734 3735 for (idx = 0; idx < nb_pkts; idx++) { 3736 mb = tx_pkts[idx]; 3737 ol_flags = mb->ol_flags; 3738 3739 if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_MBUF) && 3740 (rte_mbuf_check(mb, 1, &reason) != 0)) { 3741 PMD_TX_LOG(ERR, "INVALID mbuf: %s", reason); 3742 pkt_error = true; 3743 break; 3744 } 3745 3746 if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SIZE) && 3747 (mb->data_len > mb->pkt_len || 3748 mb->data_len < ICE_TX_MIN_PKT_LEN || 3749 mb->data_len > ICE_FRAME_SIZE_MAX)) { 3750 PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out of range, reasonable range (%d - %d)", 3751 mb->data_len, ICE_TX_MIN_PKT_LEN, ICE_FRAME_SIZE_MAX); 3752 pkt_error = true; 3753 break; 3754 } 3755 3756 if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SEGMENT) { 3757 if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { 3758 /** 3759 * No TSO case: nb_segs and pkt_len must not exceed 3760 * the limits. 3761 */ 3762 if (mb->nb_segs > ICE_TX_MTU_SEG_MAX) { 3763 PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds HW limit, maximum allowed value is %d", 3764 mb->nb_segs, ICE_TX_MTU_SEG_MAX); 3765 pkt_error = true; 3766 break; 3767 } 3768 if (mb->pkt_len > ICE_FRAME_SIZE_MAX) { 3769 PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%u) exceeds HW limit, maximum allowed value is %d", 3770 mb->pkt_len, ICE_FRAME_SIZE_MAX); 3771 pkt_error = true; 3772 break; 3773 } 3774 } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 3775 /** TSO case: tso_segsz, nb_segs and pkt_len must not exceed 3776 * the limits. 3777 */ 3778 if (mb->tso_segsz < ICE_MIN_TSO_MSS || 3779 mb->tso_segsz > ICE_MAX_TSO_MSS) { 3780 /** 3781 * An MSS outside this range is considered malicious 3782 */ 3783 PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out of range, reasonable range (%d - %u)", 3784 mb->tso_segsz, ICE_MIN_TSO_MSS, ICE_MAX_TSO_MSS); 3785 pkt_error = true; 3786 break; 3787 } 3788 if (mb->nb_segs > ((struct ci_tx_queue *)tx_queue)->nb_tx_desc) { 3789 PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out of ring length"); 3790 pkt_error = true; 3791 break; 3792 } 3793 } 3794 } 3795 3796 if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_OFFLOAD) { 3797 if (ol_flags & ICE_TX_OFFLOAD_NOTSUP_MASK) { 3798 PMD_TX_LOG(ERR, "INVALID mbuf: TX offload is not supported"); 3799 pkt_error = true; 3800 break; 3801 } 3802 3803 if (!rte_validate_tx_offload(mb)) { 3804 PMD_TX_LOG(ERR, "INVALID mbuf: TX offload setup error"); 3805 pkt_error = true; 3806 break; 3807 } 3808 } 3809 } 3810 3811 if (pkt_error) { 3812 txq->mbuf_errors++; 3813 good_pkts = idx; 3814 if (good_pkts == 0) 3815 return 0; 3816 } 3817 3818 return adapter->tx_pkt_burst(tx_queue, tx_pkts, good_pkts); 3819 } 3820 3821 uint16_t 3822 ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 3823 uint16_t nb_pkts) 3824 { 3825 int i, ret; 3826 uint64_t ol_flags; 3827 struct rte_mbuf *m; 3828 3829 for (i = 0; i < nb_pkts; i++) { 3830 m = tx_pkts[i]; 3831 ol_flags = m->ol_flags; 3832 3833 if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG) && 3834 /** 3835 * No TSO case: nb_segs and pkt_len must not exceed 3836 * the limits. 3837 */ 3838 (m->nb_segs > ICE_TX_MTU_SEG_MAX || 3839 m->pkt_len > ICE_FRAME_SIZE_MAX)) { 3840 rte_errno = EINVAL; 3841 return i; 3842 } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && 3843 /** TSO case: tso_segsz, nb_segs and pkt_len must not exceed 3844 * the limits.
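 * (i.e. ICE_MIN_TSO_MSS <= tso_segsz <= ICE_MAX_TSO_MSS, nb_segs no larger than the
 * Tx ring, and pkt_len <= ICE_MAX_TSO_FRAME_SIZE, matching the checks below).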
			    */
			   (m->tso_segsz < ICE_MIN_TSO_MSS ||
			    m->tso_segsz > ICE_MAX_TSO_MSS ||
			    m->nb_segs >
				((struct ci_tx_queue *)tx_queue)->nb_tx_desc ||
			    m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
			/**
			 * An MSS outside this range is considered malicious.
			 */
			rte_errno = EINVAL;
			return i;
		}

		if (m->pkt_len < ICE_TX_MIN_PKT_LEN) {
			rte_errno = EINVAL;
			return i;
		}

#ifdef RTE_ETHDEV_DEBUG_TX
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}

		if (ice_check_empty_mbuf(m) != 0) {
			rte_errno = EINVAL;
			return i;
		}
	}
	return i;
}

void __rte_cold
ice_set_tx_function(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	int mbuf_check = ad->devargs.mbuf_check;
#ifdef RTE_ARCH_X86
	struct ci_tx_queue *txq;
	int i;
	int tx_check_ret = -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ad->tx_use_avx2 = false;
		ad->tx_use_avx512 = false;
		tx_check_ret = ice_tx_vec_dev_check(dev);
		if (tx_check_ret >= 0 &&
		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
			ad->tx_vec_allowed = true;

			if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
#ifdef CC_AVX512_SUPPORT
				ad->tx_use_avx512 = true;
#else
				PMD_DRV_LOG(NOTICE,
					    "AVX512 is not supported in build env");
#endif
			if (!ad->tx_use_avx512 &&
			    (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
			     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
			    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
				ad->tx_use_avx2 = true;

			if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
			    tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
				ad->tx_vec_allowed = false;

			if (ad->tx_vec_allowed) {
				for (i = 0; i < dev->data->nb_tx_queues; i++) {
					txq = dev->data->tx_queues[i];
					if (txq && ice_txq_vec_setup(txq)) {
						ad->tx_vec_allowed = false;
						break;
					}
				}
			}
		} else {
			ad->tx_vec_allowed = false;
		}
	}

	if (ad->tx_vec_allowed) {
		dev->tx_pkt_prepare = NULL;
		if (ad->tx_use_avx512) {
#ifdef CC_AVX512_SUPPORT
			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
				PMD_DRV_LOG(NOTICE,
					    "Using AVX512 OFFLOAD Vector Tx (port %d).",
					    dev->data->port_id);
				dev->tx_pkt_burst =
					ice_xmit_pkts_vec_avx512_offload;
				dev->tx_pkt_prepare = ice_prep_pkts;
			} else {
				PMD_DRV_LOG(NOTICE,
					    "Using AVX512 Vector Tx (port %d).",
					    dev->data->port_id);
				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
			}
#endif
		} else {
			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
				PMD_DRV_LOG(NOTICE,
					    "Using AVX2 OFFLOAD Vector Tx (port %d).",
					    dev->data->port_id);
				dev->tx_pkt_burst =
					ice_xmit_pkts_vec_avx2_offload;
				dev->tx_pkt_prepare = ice_prep_pkts;
			} else {
				PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
					    ad->tx_use_avx2 ? "avx2 " : "",
					    dev->data->port_id);
				dev->tx_pkt_burst = ad->tx_use_avx2 ?
						    ice_xmit_pkts_vec_avx2 :
						    ice_xmit_pkts_vec;
			}
		}

		if (mbuf_check) {
			ad->tx_pkt_burst = dev->tx_pkt_burst;
			dev->tx_pkt_burst = ice_xmit_pkts_check;
		}
		return;
	}
#endif

	if (ad->tx_simple_allowed) {
		PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
		dev->tx_pkt_burst = ice_xmit_pkts_simple;
		dev->tx_pkt_prepare = NULL;
	} else {
		PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
		dev->tx_pkt_burst = ice_xmit_pkts;
		dev->tx_pkt_prepare = ice_prep_pkts;
	}

	if (mbuf_check) {
		ad->tx_pkt_burst = dev->tx_pkt_burst;
		dev->tx_pkt_burst = ice_xmit_pkts_check;
	}
}

static const struct {
	eth_tx_burst_t pkt_burst;
	const char *info;
} ice_tx_burst_infos[] = {
	{ ice_xmit_pkts_simple, "Scalar Simple" },
	{ ice_xmit_pkts, "Scalar" },
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
	{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
#endif
	{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
	{ ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" },
	{ ice_xmit_pkts_vec, "Vector SSE" },
#endif
};

int
ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
		      struct rte_eth_burst_mode *mode)
{
	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
	int ret = -EINVAL;
	unsigned int i;

	for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
		if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 ice_tx_burst_infos[i].info);
			ret = 0;
			break;
		}
	}

	return ret;
}

/* For each value below, the hardware datasheet can tell more details.
 *
 * @note: fix ice_dev_supported_ptypes_get() if any change here.
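 *
 * The table is indexed by the packet type value reported by the hardware in
 * the Rx descriptor; indexes that are not listed below are left at 0 and so
 * resolve to RTE_PTYPE_UNKNOWN.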
4036 */ 4037 static inline uint32_t 4038 ice_get_default_pkt_type(uint16_t ptype) 4039 { 4040 static const alignas(RTE_CACHE_LINE_SIZE) uint32_t type_table[ICE_MAX_PKT_TYPE] = { 4041 /* L2 types */ 4042 /* [0] reserved */ 4043 [1] = RTE_PTYPE_L2_ETHER, 4044 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, 4045 /* [3] - [5] reserved */ 4046 [6] = RTE_PTYPE_L2_ETHER_LLDP, 4047 /* [7] - [10] reserved */ 4048 [11] = RTE_PTYPE_L2_ETHER_ARP, 4049 /* [12] - [21] reserved */ 4050 4051 /* Non tunneled IPv4 */ 4052 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4053 RTE_PTYPE_L4_FRAG, 4054 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4055 RTE_PTYPE_L4_NONFRAG, 4056 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4057 RTE_PTYPE_L4_UDP, 4058 /* [25] reserved */ 4059 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4060 RTE_PTYPE_L4_TCP, 4061 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4062 RTE_PTYPE_L4_SCTP, 4063 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4064 RTE_PTYPE_L4_ICMP, 4065 4066 /* IPv4 --> IPv4 */ 4067 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4068 RTE_PTYPE_TUNNEL_IP | 4069 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4070 RTE_PTYPE_INNER_L4_FRAG, 4071 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4072 RTE_PTYPE_TUNNEL_IP | 4073 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4074 RTE_PTYPE_INNER_L4_NONFRAG, 4075 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4076 RTE_PTYPE_TUNNEL_IP | 4077 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4078 RTE_PTYPE_INNER_L4_UDP, 4079 /* [32] reserved */ 4080 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4081 RTE_PTYPE_TUNNEL_IP | 4082 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4083 RTE_PTYPE_INNER_L4_TCP, 4084 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4085 RTE_PTYPE_TUNNEL_IP | 4086 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4087 RTE_PTYPE_INNER_L4_SCTP, 4088 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4089 RTE_PTYPE_TUNNEL_IP | 4090 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4091 RTE_PTYPE_INNER_L4_ICMP, 4092 4093 /* IPv4 --> IPv6 */ 4094 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4095 RTE_PTYPE_TUNNEL_IP | 4096 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4097 RTE_PTYPE_INNER_L4_FRAG, 4098 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4099 RTE_PTYPE_TUNNEL_IP | 4100 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4101 RTE_PTYPE_INNER_L4_NONFRAG, 4102 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4103 RTE_PTYPE_TUNNEL_IP | 4104 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4105 RTE_PTYPE_INNER_L4_UDP, 4106 /* [39] reserved */ 4107 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4108 RTE_PTYPE_TUNNEL_IP | 4109 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4110 RTE_PTYPE_INNER_L4_TCP, 4111 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4112 RTE_PTYPE_TUNNEL_IP | 4113 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4114 RTE_PTYPE_INNER_L4_SCTP, 4115 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4116 RTE_PTYPE_TUNNEL_IP | 4117 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4118 RTE_PTYPE_INNER_L4_ICMP, 4119 4120 /* IPv4 --> GRE/Teredo/VXLAN */ 4121 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4122 RTE_PTYPE_TUNNEL_GRENAT, 4123 4124 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ 4125 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4126 RTE_PTYPE_TUNNEL_GRENAT | 4127 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4128 RTE_PTYPE_INNER_L4_FRAG, 4129 [45] = RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4130 RTE_PTYPE_TUNNEL_GRENAT | 4131 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4132 RTE_PTYPE_INNER_L4_NONFRAG, 4133 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4134 RTE_PTYPE_TUNNEL_GRENAT | 4135 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4136 RTE_PTYPE_INNER_L4_UDP, 4137 /* [47] reserved */ 4138 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4139 RTE_PTYPE_TUNNEL_GRENAT | 4140 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4141 RTE_PTYPE_INNER_L4_TCP, 4142 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4143 RTE_PTYPE_TUNNEL_GRENAT | 4144 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4145 RTE_PTYPE_INNER_L4_SCTP, 4146 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4147 RTE_PTYPE_TUNNEL_GRENAT | 4148 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4149 RTE_PTYPE_INNER_L4_ICMP, 4150 4151 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ 4152 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4153 RTE_PTYPE_TUNNEL_GRENAT | 4154 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4155 RTE_PTYPE_INNER_L4_FRAG, 4156 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4157 RTE_PTYPE_TUNNEL_GRENAT | 4158 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4159 RTE_PTYPE_INNER_L4_NONFRAG, 4160 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4161 RTE_PTYPE_TUNNEL_GRENAT | 4162 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4163 RTE_PTYPE_INNER_L4_UDP, 4164 /* [54] reserved */ 4165 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4166 RTE_PTYPE_TUNNEL_GRENAT | 4167 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4168 RTE_PTYPE_INNER_L4_TCP, 4169 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4170 RTE_PTYPE_TUNNEL_GRENAT | 4171 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4172 RTE_PTYPE_INNER_L4_SCTP, 4173 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4174 RTE_PTYPE_TUNNEL_GRENAT | 4175 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4176 RTE_PTYPE_INNER_L4_ICMP, 4177 4178 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ 4179 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4180 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, 4181 4182 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ 4183 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4184 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4185 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4186 RTE_PTYPE_INNER_L4_FRAG, 4187 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4188 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4189 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4190 RTE_PTYPE_INNER_L4_NONFRAG, 4191 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4192 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4193 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4194 RTE_PTYPE_INNER_L4_UDP, 4195 /* [62] reserved */ 4196 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4197 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4198 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4199 RTE_PTYPE_INNER_L4_TCP, 4200 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4201 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4202 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4203 RTE_PTYPE_INNER_L4_SCTP, 4204 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4205 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4206 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4207 RTE_PTYPE_INNER_L4_ICMP, 4208 4209 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ 4210 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4211 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4212 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4213 RTE_PTYPE_INNER_L4_FRAG, 4214 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4215 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4216 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4217 RTE_PTYPE_INNER_L4_NONFRAG, 4218 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4219 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4220 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4221 RTE_PTYPE_INNER_L4_UDP, 4222 /* [69] reserved */ 4223 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4224 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4225 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4226 RTE_PTYPE_INNER_L4_TCP, 4227 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4228 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4229 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4230 RTE_PTYPE_INNER_L4_SCTP, 4231 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4232 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4233 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4234 RTE_PTYPE_INNER_L4_ICMP, 4235 /* [73] - [87] reserved */ 4236 4237 /* Non tunneled IPv6 */ 4238 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4239 RTE_PTYPE_L4_FRAG, 4240 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4241 RTE_PTYPE_L4_NONFRAG, 4242 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4243 RTE_PTYPE_L4_UDP, 4244 /* [91] reserved */ 4245 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4246 RTE_PTYPE_L4_TCP, 4247 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4248 RTE_PTYPE_L4_SCTP, 4249 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4250 RTE_PTYPE_L4_ICMP, 4251 4252 /* IPv6 --> IPv4 */ 4253 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4254 RTE_PTYPE_TUNNEL_IP | 4255 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4256 RTE_PTYPE_INNER_L4_FRAG, 4257 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4258 RTE_PTYPE_TUNNEL_IP | 4259 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4260 RTE_PTYPE_INNER_L4_NONFRAG, 4261 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4262 RTE_PTYPE_TUNNEL_IP | 4263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4264 RTE_PTYPE_INNER_L4_UDP, 4265 /* [98] reserved */ 4266 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4267 RTE_PTYPE_TUNNEL_IP | 4268 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4269 RTE_PTYPE_INNER_L4_TCP, 4270 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4271 RTE_PTYPE_TUNNEL_IP | 4272 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4273 RTE_PTYPE_INNER_L4_SCTP, 4274 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4275 RTE_PTYPE_TUNNEL_IP | 4276 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4277 RTE_PTYPE_INNER_L4_ICMP, 4278 4279 /* IPv6 --> IPv6 */ 4280 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4281 RTE_PTYPE_TUNNEL_IP | 4282 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4283 RTE_PTYPE_INNER_L4_FRAG, 4284 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4285 RTE_PTYPE_TUNNEL_IP | 4286 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4287 RTE_PTYPE_INNER_L4_NONFRAG, 4288 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4289 RTE_PTYPE_TUNNEL_IP | 4290 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4291 RTE_PTYPE_INNER_L4_UDP, 4292 /* [105] reserved */ 4293 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4294 RTE_PTYPE_TUNNEL_IP | 4295 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4296 RTE_PTYPE_INNER_L4_TCP, 4297 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4298 
RTE_PTYPE_TUNNEL_IP | 4299 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4300 RTE_PTYPE_INNER_L4_SCTP, 4301 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4302 RTE_PTYPE_TUNNEL_IP | 4303 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4304 RTE_PTYPE_INNER_L4_ICMP, 4305 4306 /* IPv6 --> GRE/Teredo/VXLAN */ 4307 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4308 RTE_PTYPE_TUNNEL_GRENAT, 4309 4310 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ 4311 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4312 RTE_PTYPE_TUNNEL_GRENAT | 4313 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4314 RTE_PTYPE_INNER_L4_FRAG, 4315 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4316 RTE_PTYPE_TUNNEL_GRENAT | 4317 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4318 RTE_PTYPE_INNER_L4_NONFRAG, 4319 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4320 RTE_PTYPE_TUNNEL_GRENAT | 4321 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4322 RTE_PTYPE_INNER_L4_UDP, 4323 /* [113] reserved */ 4324 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4325 RTE_PTYPE_TUNNEL_GRENAT | 4326 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4327 RTE_PTYPE_INNER_L4_TCP, 4328 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4329 RTE_PTYPE_TUNNEL_GRENAT | 4330 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4331 RTE_PTYPE_INNER_L4_SCTP, 4332 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4333 RTE_PTYPE_TUNNEL_GRENAT | 4334 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4335 RTE_PTYPE_INNER_L4_ICMP, 4336 4337 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ 4338 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4339 RTE_PTYPE_TUNNEL_GRENAT | 4340 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4341 RTE_PTYPE_INNER_L4_FRAG, 4342 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4343 RTE_PTYPE_TUNNEL_GRENAT | 4344 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4345 RTE_PTYPE_INNER_L4_NONFRAG, 4346 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4347 RTE_PTYPE_TUNNEL_GRENAT | 4348 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4349 RTE_PTYPE_INNER_L4_UDP, 4350 /* [120] reserved */ 4351 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4352 RTE_PTYPE_TUNNEL_GRENAT | 4353 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4354 RTE_PTYPE_INNER_L4_TCP, 4355 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4356 RTE_PTYPE_TUNNEL_GRENAT | 4357 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4358 RTE_PTYPE_INNER_L4_SCTP, 4359 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4360 RTE_PTYPE_TUNNEL_GRENAT | 4361 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4362 RTE_PTYPE_INNER_L4_ICMP, 4363 4364 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ 4365 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4366 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, 4367 4368 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ 4369 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4370 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4371 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4372 RTE_PTYPE_INNER_L4_FRAG, 4373 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4374 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4375 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4376 RTE_PTYPE_INNER_L4_NONFRAG, 4377 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4378 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4379 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4380 RTE_PTYPE_INNER_L4_UDP, 4381 /* [128] reserved */ 4382 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4383 RTE_PTYPE_TUNNEL_GRENAT | 
RTE_PTYPE_INNER_L2_ETHER | 4384 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4385 RTE_PTYPE_INNER_L4_TCP, 4386 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4387 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4388 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4389 RTE_PTYPE_INNER_L4_SCTP, 4390 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4391 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4392 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4393 RTE_PTYPE_INNER_L4_ICMP, 4394 4395 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ 4396 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4397 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4398 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4399 RTE_PTYPE_INNER_L4_FRAG, 4400 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4401 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4402 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4403 RTE_PTYPE_INNER_L4_NONFRAG, 4404 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4405 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4406 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4407 RTE_PTYPE_INNER_L4_UDP, 4408 /* [135] reserved */ 4409 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4410 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4411 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4412 RTE_PTYPE_INNER_L4_TCP, 4413 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4414 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4415 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4416 RTE_PTYPE_INNER_L4_SCTP, 4417 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4418 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4419 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4420 RTE_PTYPE_INNER_L4_ICMP, 4421 /* [139] - [299] reserved */ 4422 4423 /* PPPoE */ 4424 [300] = RTE_PTYPE_L2_ETHER_PPPOE, 4425 [301] = RTE_PTYPE_L2_ETHER_PPPOE, 4426 4427 /* PPPoE --> IPv4 */ 4428 [302] = RTE_PTYPE_L2_ETHER_PPPOE | 4429 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4430 RTE_PTYPE_L4_FRAG, 4431 [303] = RTE_PTYPE_L2_ETHER_PPPOE | 4432 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4433 RTE_PTYPE_L4_NONFRAG, 4434 [304] = RTE_PTYPE_L2_ETHER_PPPOE | 4435 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4436 RTE_PTYPE_L4_UDP, 4437 [305] = RTE_PTYPE_L2_ETHER_PPPOE | 4438 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4439 RTE_PTYPE_L4_TCP, 4440 [306] = RTE_PTYPE_L2_ETHER_PPPOE | 4441 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4442 RTE_PTYPE_L4_SCTP, 4443 [307] = RTE_PTYPE_L2_ETHER_PPPOE | 4444 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4445 RTE_PTYPE_L4_ICMP, 4446 4447 /* PPPoE --> IPv6 */ 4448 [308] = RTE_PTYPE_L2_ETHER_PPPOE | 4449 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4450 RTE_PTYPE_L4_FRAG, 4451 [309] = RTE_PTYPE_L2_ETHER_PPPOE | 4452 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4453 RTE_PTYPE_L4_NONFRAG, 4454 [310] = RTE_PTYPE_L2_ETHER_PPPOE | 4455 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4456 RTE_PTYPE_L4_UDP, 4457 [311] = RTE_PTYPE_L2_ETHER_PPPOE | 4458 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4459 RTE_PTYPE_L4_TCP, 4460 [312] = RTE_PTYPE_L2_ETHER_PPPOE | 4461 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4462 RTE_PTYPE_L4_SCTP, 4463 [313] = RTE_PTYPE_L2_ETHER_PPPOE | 4464 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4465 RTE_PTYPE_L4_ICMP, 4466 /* [314] - [324] reserved */ 4467 4468 /* IPv4/IPv6 --> GTPC/GTPU */ 4469 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4470 RTE_PTYPE_TUNNEL_GTPC, 4471 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4472 RTE_PTYPE_TUNNEL_GTPC, 4473 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4474 RTE_PTYPE_TUNNEL_GTPC, 4475 [328] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4476 RTE_PTYPE_TUNNEL_GTPC, 4477 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4478 RTE_PTYPE_TUNNEL_GTPU, 4479 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4480 RTE_PTYPE_TUNNEL_GTPU, 4481 4482 /* IPv4 --> GTPU --> IPv4 */ 4483 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4484 RTE_PTYPE_TUNNEL_GTPU | 4485 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4486 RTE_PTYPE_INNER_L4_FRAG, 4487 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4488 RTE_PTYPE_TUNNEL_GTPU | 4489 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4490 RTE_PTYPE_INNER_L4_NONFRAG, 4491 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4492 RTE_PTYPE_TUNNEL_GTPU | 4493 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4494 RTE_PTYPE_INNER_L4_UDP, 4495 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4496 RTE_PTYPE_TUNNEL_GTPU | 4497 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4498 RTE_PTYPE_INNER_L4_TCP, 4499 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4500 RTE_PTYPE_TUNNEL_GTPU | 4501 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4502 RTE_PTYPE_INNER_L4_ICMP, 4503 4504 /* IPv6 --> GTPU --> IPv4 */ 4505 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4506 RTE_PTYPE_TUNNEL_GTPU | 4507 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4508 RTE_PTYPE_INNER_L4_FRAG, 4509 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4510 RTE_PTYPE_TUNNEL_GTPU | 4511 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4512 RTE_PTYPE_INNER_L4_NONFRAG, 4513 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4514 RTE_PTYPE_TUNNEL_GTPU | 4515 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4516 RTE_PTYPE_INNER_L4_UDP, 4517 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4518 RTE_PTYPE_TUNNEL_GTPU | 4519 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4520 RTE_PTYPE_INNER_L4_TCP, 4521 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4522 RTE_PTYPE_TUNNEL_GTPU | 4523 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4524 RTE_PTYPE_INNER_L4_ICMP, 4525 4526 /* IPv4 --> GTPU --> IPv6 */ 4527 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4528 RTE_PTYPE_TUNNEL_GTPU | 4529 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4530 RTE_PTYPE_INNER_L4_FRAG, 4531 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4532 RTE_PTYPE_TUNNEL_GTPU | 4533 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4534 RTE_PTYPE_INNER_L4_NONFRAG, 4535 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4536 RTE_PTYPE_TUNNEL_GTPU | 4537 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4538 RTE_PTYPE_INNER_L4_UDP, 4539 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4540 RTE_PTYPE_TUNNEL_GTPU | 4541 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4542 RTE_PTYPE_INNER_L4_TCP, 4543 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4544 RTE_PTYPE_TUNNEL_GTPU | 4545 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4546 RTE_PTYPE_INNER_L4_ICMP, 4547 4548 /* IPv6 --> GTPU --> IPv6 */ 4549 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4550 RTE_PTYPE_TUNNEL_GTPU | 4551 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4552 RTE_PTYPE_INNER_L4_FRAG, 4553 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4554 RTE_PTYPE_TUNNEL_GTPU | 4555 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4556 RTE_PTYPE_INNER_L4_NONFRAG, 4557 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4558 RTE_PTYPE_TUNNEL_GTPU | 4559 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4560 RTE_PTYPE_INNER_L4_UDP, 4561 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4562 RTE_PTYPE_TUNNEL_GTPU | 4563 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4564 RTE_PTYPE_INNER_L4_TCP, 4565 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4566 RTE_PTYPE_TUNNEL_GTPU | 4567 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4568 RTE_PTYPE_INNER_L4_ICMP, 4569 4570 /* IPv4 --> UDP ECPRI */ 4571 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4572 RTE_PTYPE_L4_UDP, 4573 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4574 RTE_PTYPE_L4_UDP, 4575 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4576 RTE_PTYPE_L4_UDP, 4577 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4578 RTE_PTYPE_L4_UDP, 4579 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4580 RTE_PTYPE_L4_UDP, 4581 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4582 RTE_PTYPE_L4_UDP, 4583 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4584 RTE_PTYPE_L4_UDP, 4585 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4586 RTE_PTYPE_L4_UDP, 4587 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4588 RTE_PTYPE_L4_UDP, 4589 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4590 RTE_PTYPE_L4_UDP, 4591 4592 /* IPV6 --> UDP ECPRI */ 4593 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4594 RTE_PTYPE_L4_UDP, 4595 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4596 RTE_PTYPE_L4_UDP, 4597 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4598 RTE_PTYPE_L4_UDP, 4599 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4600 RTE_PTYPE_L4_UDP, 4601 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4602 RTE_PTYPE_L4_UDP, 4603 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4604 RTE_PTYPE_L4_UDP, 4605 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4606 RTE_PTYPE_L4_UDP, 4607 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4608 RTE_PTYPE_L4_UDP, 4609 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4610 RTE_PTYPE_L4_UDP, 4611 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4612 RTE_PTYPE_L4_UDP, 4613 /* All others reserved */ 4614 }; 4615 4616 return type_table[ptype]; 4617 } 4618 4619 void __rte_cold 4620 ice_set_default_ptype_table(struct rte_eth_dev *dev) 4621 { 4622 struct ice_adapter *ad = 4623 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 4624 int i; 4625 4626 for (i = 0; i < ICE_MAX_PKT_TYPE; i++) 4627 ad->ptype_tbl[i] = ice_get_default_pkt_type(i); 4628 } 4629 4630 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1 4631 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \ 4632 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S) 4633 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0 4634 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1 4635 4636 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4 4637 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \ 4638 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S) 4639 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5 4640 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \ 4641 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S) 4642 4643 /* 4644 * check the programming status descriptor in rx queue. 
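 * (The hardware reports the outcome of an FDIR add/delete by writing a
 * programming status descriptor back to the FDIR rx ring; that write-back
 * is what is inspected here.)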
 * done after a Flow Director filter has been programmed on the
 * tx queue.
 */
static inline int
ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
{
	volatile union ice_32byte_rx_desc *rxdp;
	uint64_t qword1;
	uint32_t rx_status;
	uint32_t error;
	uint32_t id;
	int ret = -EAGAIN;

	rxdp = (volatile union ice_32byte_rx_desc *)
		(&rxq->rx_ring[rxq->rx_tail]);
	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
	rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
			>> ICE_RXD_QW1_STATUS_S;

	if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
		ret = 0;
		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
		id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
			ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
		if (error) {
			if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
				PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
			else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
				PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
			ret = -EINVAL;
			goto err;
		}
		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
		if (error) {
			PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
			ret = -EINVAL;
		}
err:
		rxdp->wb.qword1.status_error_len = 0;
		rxq->rx_tail++;
		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
			rxq->rx_tail = 0;
		if (rxq->rx_tail == 0)
			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
		else
			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
	}

	return ret;
}

#define ICE_FDIR_MAX_WAIT_US 10000

int
ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
{
	struct ci_tx_queue *txq = pf->fdir.txq;
	struct ice_rx_queue *rxq = pf->fdir.rxq;
	volatile struct ice_fltr_desc *fdirdp;
	volatile struct ice_tx_desc *txdp;
	uint32_t td_cmd;
	uint16_t i;

	fdirdp = (volatile struct ice_fltr_desc *)
		(&txq->ice_tx_ring[txq->tx_tail]);
	fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
	fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;

	txdp = &txq->ice_tx_ring[txq->tx_tail + 1];
	txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
	td_cmd = ICE_TX_DESC_CMD_EOP |
		 ICE_TX_DESC_CMD_RS |
		 ICE_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2;
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
	for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
		if ((txdp->cmd_type_offset_bsz &
		     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
		    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
			break;
		rte_delay_us(1);
	}
	if (i >= ICE_FDIR_MAX_WAIT_US) {
		PMD_DRV_LOG(ERR,
			    "Failed to program FDIR filter: timed out waiting for DD on the tx queue.");
		return -ETIMEDOUT;
	}

	for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
		int ret;

		ret = ice_check_fdir_programming_status(rxq);
		if (ret == -EAGAIN)
			rte_delay_us(1);
		else
			return ret;
	}

	PMD_DRV_LOG(ERR,
		    "Failed to program FDIR filter: programming status was not reported in time.");
	return -ETIMEDOUT;
}
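
/*
 * Illustrative sketch only, not used anywhere in the driver: when
 * ice_prep_pkts is installed as the tx_pkt_prepare callback (see
 * ice_set_tx_function above), an application is expected to run its bursts
 * through rte_eth_tx_prepare() before rte_eth_tx_burst(), roughly as below.
 * The function name and the "port_id"/"queue_id" parameters are assumptions
 * made for the example; they stand for any configured and started Tx queue
 * on an ice port.
 */
static inline uint16_t
ice_example_prepare_and_xmit(uint16_t port_id, uint16_t queue_id,
			     struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	struct rte_eth_burst_mode mode;
	uint16_t nb_prep;

	/* Report which Tx burst implementation was selected for this queue */
	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		PMD_DRV_LOG(DEBUG, "port %u Tx burst mode: %s",
			    port_id, mode.info);

	/*
	 * Validate offload flags, TSO limits and checksum pseudo-headers;
	 * packets from index nb_prep onwards were rejected and rte_errno
	 * is set by the prepare callback.
	 */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep < nb_pkts)
		PMD_DRV_LOG(WARNING, "Tx prepare stopped at mbuf %u: %s",
			    nb_prep, rte_strerror(rte_errno));

	/* Transmit only the packets that passed preparation */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}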