/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2021 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <dev_driver.h>
#include <rte_hexdump.h>

#include <bus_fslmc_driver.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;

static inline rte_mbuf_timestamp_t *
dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}

#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)

static inline void __rte_hot
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
		       void *hw_annot_addr)
{
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;

	if (dpaa2_enable_ts[m->port]) {
		*dpaa2_timestamp_dynfield(m) = annotation->word2;
		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(m));
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
		"ol_flags =0x%" PRIx64 "",
		frc, m->packet_type, m->ol_flags);
}

static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

#if defined(RTE_LIBRTE_IEEE1588)
	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
	}
#endif

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
				L2_MPLS_N_PRESENT))
		pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
			L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
			L3_PROTO_ESP_PRESENT))
			pkt_type |= RTE_PTYPE_TUNNEL_ESP;

	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
		    L3_PROTO_ESP_PRESENT))
			pkt_type |= RTE_PTYPE_TUNNEL_ESP;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}

static inline uint32_t __rte_hot
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	if (dpaa2_enable_ts[mbuf->port]) {
		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(mbuf));
	}

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}

static inline struct rte_mbuf *__rte_hot
eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
		  int port_id)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	void *hw_annot_addr;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	first_seg->port = port_id;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
	else
		first_seg->packet_type =
			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);

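	/* Added note (not in the original source): the reference count is
	 * reset here because the buffer is handed back to the application
	 * as a freshly received mbuf.
	 */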
	rte_mbuf_refcnt_set(first_seg, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg),
			(void **)&first_seg, 1, 1);
#endif
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		rte_mempool_check_cookies(rte_mempool_from_obj((void *)next_seg),
				(void **)&next_seg, 1, 1);
#endif
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
			(void **)&temp, 1, 1);
#endif
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}

static inline struct rte_mbuf *__rte_hot
eth_fd_to_mbuf(const struct qbman_fd *fd,
	       int port_id)
{
	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed during the last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->port = port_id;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
			(void **)&mbuf, 1, 1);
#endif

	/* Parse the packet */
	/* parse results for LX2 are there in FRC field of FD.
	 * For other DPAA2 platforms, parse results are after
	 * the private - sw annotation area
	 */

	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}

static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd,
		  struct sw_buf_free *free_buf,
		  uint32_t *free_count,
		  uint32_t pkt_id,
		  uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i, offset = 0;

#ifdef RTE_LIBRTE_IEEE1588
	/* annotation area for timestamp in first buffer */
	offset = 0x64;
#endif
	if (RTE_MBUF_DIRECT(mbuf) &&
	    (mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
	     + offset))) {
		temp = mbuf;
		if (rte_mbuf_refcnt_read(temp) > 1) {
			/* If refcnt > 1, invalid bpid is set to ensure
			 * buffer is not freed by HW
			 */
			fd->simple.bpid_offset = 0;
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(temp, -1);
		} else {
			DPAA2_SET_ONLY_FD_BPID(fd, bpid);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
			rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
					(void **)&temp, 1, 0);
#endif
		}
		DPAA2_SET_FD_OFFSET(fd, offset);
	} else {
		temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
		if (temp == NULL) {
			DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table");
			return -ENOMEM;
		}
		DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
		DPAA2_SET_FD_OFFSET(fd, temp->data_off);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
				(void **)&temp, 1, 0);
#endif
	}
	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	DPAA2_RESET_FD_FRC(fd);
	DPAA2_RESET_FD_CTRL(fd);
	DPAA2_RESET_FD_FLC(fd);
	/*Set Scatter gather table and Scatter gather entries*/
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/*Resetting the buffer pool id and offset field*/
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(cur_seg));
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			/* if we are using inline SGT in same buffers
			 * set the FLE FMT as Frame Data Section
			 */
			if (temp == cur_seg) {
				DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
				DPAA2_SET_FLE_IVP(sge);
			} else {
				if (rte_mbuf_refcnt_read(cur_seg) > 1) {
					/* If refcnt > 1, invalid bpid is set to ensure
					 * buffer is not freed by HW
					 */
					DPAA2_SET_FLE_IVP(sge);
					rte_mbuf_refcnt_update(cur_seg, -1);
				} else {
					DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
					rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
							(void **)&cur_seg, 1, 0);
#endif
				}
			}
		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
			free_buf[*free_count].seg = cur_seg;
			free_buf[*free_count].pkt_id = pkt_id;
			++*free_count;
			DPAA2_SET_FLE_IVP(sge);
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			free_buf[*free_count].seg = cur_seg;
			free_buf[*free_count].pkt_id = pkt_id;
			++*free_count;
		}
		cur_seg = cur_seg->next;
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}

static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd,
	       struct sw_buf_free *buf_to_free,
	       uint32_t *free_count,
	       uint32_t pkt_id,
	       uint16_t bpid) __rte_unused;

static void __rte_noinline __rte_hot
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd,
	       struct sw_buf_free *buf_to_free,
	       uint32_t *free_count,
	       uint32_t pkt_id,
	       uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		else
			rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
				(void **)&mbuf, 1, 0);
#endif
	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
		buf_to_free[*free_count].seg = mbuf;
		buf_to_free[*free_count].pkt_id = pkt_id;
		++*free_count;
		DPAA2_SET_FD_IVP(fd);
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);

		buf_to_free[*free_count].seg = mbuf;
		buf_to_free[*free_count].pkt_id = pkt_id;
		++*free_count;
	}
}

static inline int __rte_hot
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
		(void *)((char *)mbuf->buf_addr + mbuf->data_off),
		mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
		(void **)&m, 1, 0);
#endif
	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static void
dump_err_pkts(struct dpaa2_queue *dpaa2_q)
{
	/* Function receives frames for a given device and VQ */
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	uint32_t lcore_id = rte_lcore_id();
	void *v_addr, *hw_annot_addr;
	struct dpaa2_fas *fas;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
	qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Check if the previous issued command is completed. */
	while (!qbman_check_command_complete(dq_storage))
		;

	pending = 1;
	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;

		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status &
				QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);
		v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
		hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
		fas = hw_annot_addr;

		DPAA2_PMD_ERR("[%d] error packet on port[%d]:"
			" fd_off: %d, fd_err: %x, fas_status: %x",
			rte_lcore_id(), eth_data->port_id,
			DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
			fas->status);
		rte_hexdump(stderr, "Error packet", v_addr,
			DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));

		dq_storage++;
		num_rx++;
	} while (pending);

	dpaa2_q->err_pkts += num_rx;
}

/* This function assumes that the caller will keep the same value of nb_pkts
 * across calls per queue; if that is not the case, better use the non-prefetch
 * version of the rx call.
 * It will return the packets as requested in the previous call without honoring
 * the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receives frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;

	if (unlikely(dpaa2_enable_err_queue))
		dump_err_pkts(priv->rx_err_vq);

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
	    rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		if (dpaa2_svr_family != SVR_LX2160A) {
			const struct qbman_fd *next_fd =
				qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
		}
#endif

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
		if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
			priv->rx_timestamp =
				*dpaa2_timestamp_dynfield(bufs[num_rx]);
		}
#endif

		if (eth_data->dev_conf.rxmode.offloads &
				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
864 "QBMAN is busy (2)"); 865 continue; 866 } 867 break; 868 } 869 q_storage->active_dqs = dq_storage1; 870 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; 871 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1); 872 873 dpaa2_q->rx_pkts += num_rx; 874 875 return num_rx; 876 } 877 878 void __rte_hot 879 dpaa2_dev_process_parallel_event(struct qbman_swp *swp, 880 const struct qbman_fd *fd, 881 const struct qbman_result *dq, 882 struct dpaa2_queue *rxq, 883 struct rte_event *ev) 884 { 885 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + 886 DPAA2_FD_PTA_SIZE + 16)); 887 888 ev->flow_id = rxq->ev.flow_id; 889 ev->sub_event_type = rxq->ev.sub_event_type; 890 ev->event_type = RTE_EVENT_TYPE_ETHDEV; 891 ev->op = RTE_EVENT_OP_NEW; 892 ev->sched_type = rxq->ev.sched_type; 893 ev->queue_id = rxq->ev.queue_id; 894 ev->priority = rxq->ev.priority; 895 896 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id); 897 898 qbman_swp_dqrr_consume(swp, dq); 899 } 900 901 void __rte_hot 902 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused, 903 const struct qbman_fd *fd, 904 const struct qbman_result *dq, 905 struct dpaa2_queue *rxq, 906 struct rte_event *ev) 907 { 908 uint8_t dqrr_index; 909 910 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + 911 DPAA2_FD_PTA_SIZE + 16)); 912 913 ev->flow_id = rxq->ev.flow_id; 914 ev->sub_event_type = rxq->ev.sub_event_type; 915 ev->event_type = RTE_EVENT_TYPE_ETHDEV; 916 ev->op = RTE_EVENT_OP_NEW; 917 ev->sched_type = rxq->ev.sched_type; 918 ev->queue_id = rxq->ev.queue_id; 919 ev->priority = rxq->ev.priority; 920 921 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id); 922 923 dqrr_index = qbman_get_dqrr_idx(dq); 924 *dpaa2_seqn(ev->mbuf) = dqrr_index + 1; 925 DPAA2_PER_LCORE_DQRR_SIZE++; 926 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 927 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf; 928 } 929 930 void __rte_hot 931 dpaa2_dev_process_ordered_event(struct qbman_swp *swp, 932 const struct qbman_fd *fd, 933 const struct qbman_result *dq, 934 struct dpaa2_queue *rxq, 935 struct rte_event *ev) 936 { 937 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + 938 DPAA2_FD_PTA_SIZE + 16)); 939 940 ev->flow_id = rxq->ev.flow_id; 941 ev->sub_event_type = rxq->ev.sub_event_type; 942 ev->event_type = RTE_EVENT_TYPE_ETHDEV; 943 ev->op = RTE_EVENT_OP_NEW; 944 ev->sched_type = rxq->ev.sched_type; 945 ev->queue_id = rxq->ev.queue_id; 946 ev->priority = rxq->ev.priority; 947 948 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id); 949 950 *dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP; 951 *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT; 952 *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT; 953 954 qbman_swp_dqrr_consume(swp, dq); 955 } 956 957 uint16_t 958 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) 959 { 960 /* Function receive frames for a given device and VQ */ 961 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; 962 struct qbman_result *dq_storage; 963 uint32_t fqid = dpaa2_q->fqid; 964 int ret, num_rx = 0, next_pull = nb_pkts, num_pulled; 965 uint8_t pending, status; 966 struct qbman_swp *swp; 967 const struct qbman_fd *fd; 968 struct qbman_pull_desc pulldesc; 969 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; 970 struct dpaa2_dev_priv *priv = eth_data->dev_private; 971 972 if (unlikely(dpaa2_enable_err_queue)) 973 dump_err_pkts(priv->rx_err_vq); 974 975 if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 976 ret = 
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
				dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command is not issued. QBMAN is busy");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			if (dpaa2_svr_family != SVR_LX2160A) {
				const struct qbman_fd *next_fd =
					qbman_result_DQ_fd(dq_storage + 1);

				/* Prefetch Annotation address for the parse
				 * results.
				 */
				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
					DPAA2_GET_FD_ADDR(next_fd) +
					DPAA2_FD_PTA_SIZE + 16)));
			}
#endif

			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
							eth_data->port_id);
			else
				bufs[num_rx] = eth_fd_to_mbuf(fd,
							eth_data->port_id);

#if defined(RTE_LIBRTE_IEEE1588)
			if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
				priv->rx_timestamp =
					*dpaa2_timestamp_dynfield(bufs[num_rx]);
			}
#endif

			if (eth_data->dev_conf.rxmode.offloads &
					RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
				rte_vlan_strip(bufs[num_rx]);
			}

			dq_storage++;
			num_rx++;
			num_pulled++;
		} while (pending);
	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}

uint16_t dpaa2_dev_tx_conf(void *queue)
{
	/* Function receives frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_tx_conf = 0, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct qbman_release_desc releasedesc;
	uint32_t bpid;
	uint64_t buf;
#if defined(RTE_LIBRTE_IEEE1588)
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_annot_hdr *annotation;
	void *v_addr;
	struct rte_mbuf *mbuf;
#endif

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0((void *)(size_t)
				(DPAA2_GET_FD_ADDR(next_fd) +
				 DPAA2_FD_PTA_SIZE + 16));

			bpid = DPAA2_GET_FD_BPID(fd);

			/* Create a release descriptor required for releasing
			 * buffers into QBMAN
			 */
			qbman_release_desc_clear(&releasedesc);
			qbman_release_desc_set_bpid(&releasedesc, bpid);

			buf = DPAA2_GET_FD_ADDR(fd);
			/* feed them to bman */
			do {
				ret = qbman_swp_release(swp, &releasedesc,
							&buf, 1);
			} while (ret == -EBUSY);

			dq_storage++;
			num_tx_conf++;
			num_pulled++;
#if defined(RTE_LIBRTE_IEEE1588)
			v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
			mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
				rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

			if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
				annotation = (struct dpaa2_annot_hdr *)((size_t)
					DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
					DPAA2_FD_PTA_SIZE);
				priv->tx_timestamp = annotation->word2;
			}
#endif
		} while (pending);

	/* Last VDQ provided all packets and more packets are requested */
	} while (num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_tx_conf;

	return num_tx_conf;
}

/* Configure the egress frame annotation for timestamp update */
static void enable_tx_tstamp(struct qbman_fd *fd)
{
	struct dpaa2_faead *fd_faead;

	/* Set frame annotation status field as valid */
	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;

	/* Set frame annotation egress action descriptor as valid */
	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;

	/* Set Annotation Length as 128B */
	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;

	/* enable update of confirmation frame annotation */
	fd_faead = (struct dpaa2_faead *)((size_t)
			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
			 DPAA2_ANNOT_FAEAD_UPD;
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ*/
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
	uint32_t free_count = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
			eth_data, dpaa2_q->fqid);

#ifdef RTE_LIBRTE_IEEE1588
	/* IEEE1588 driver needs a pointer to the tx confirmation queue
	 * corresponding to the last packet transmitted, for reading
	 * the timestamp
	 */
	if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
		priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
		dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
		priv->tx_timestamp = 0;
	}
#endif

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);

	/*Clear the unused FD fields before sending*/
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if (*dpaa2_seqn(*bufs)) {
				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
					priv->bp_list->dpaa2_ops_index &&
					(*bufs)->nb_segs == 1 &&
					rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& RTE_MBUF_F_TX_VLAN) ||
						(eth_data->dev_conf.txmode.offloads
						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop], mempool_to_bpid(mp));
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
					rte_mempool_check_cookies
						(rte_mempool_from_obj((void *)*bufs),
						(void **)bufs, 1, 0);
#endif
					bufs++;
#ifdef RTE_LIBRTE_IEEE1588
					enable_tx_tstamp(&fd_arr[loop]);
#endif
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}

			if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
				if (unlikely((*bufs)->nb_segs > 1)) {
					mp = (*bufs)->pool;
					if (eth_mbuf_to_sg_fd(*bufs,
							      &fd_arr[loop],
							      buf_to_free,
							      &free_count,
							      loop,
							      mempool_to_bpid(mp)))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop],
						       buf_to_free,
						       &free_count,
						       loop, 0);
				}
				bufs++;
#ifdef RTE_LIBRTE_IEEE1588
				enable_tx_tstamp(&fd_arr[loop]);
#endif
				continue;
			}

			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
				(eth_data->dev_conf.txmode.offloads
				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop],
							buf_to_free,
							&free_count,
							loop,
							bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop],
						       buf_to_free,
						       &free_count,
						       loop, bpid);
				}
			}
#ifdef RTE_LIBRTE_IEEE1588
			enable_tx_tstamp(&fd_arr[loop]);
#endif
			bufs++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_pkts -= loop;
					goto send_n_return;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_pkts -= loop;
	}
	dpaa2_q->tx_pkts += num_tx;

	for (loop = 0; loop < free_count; loop++) {
		if (buf_to_free[loop].pkt_id < num_tx)
			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
	}

	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		retry_count = 0;
		while (i < loop) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[i],
							 &flags[i],
							 loop - i);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					break;
			} else {
				i += ret;
				retry_count = 0;
			}
		}
		num_tx += i;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;

	for (loop = 0; loop < free_count; loop++) {
		if (buf_to_free[loop].pkt_id < num_tx)
			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
	}

	return num_tx;
}

void
dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci,
			  __rte_unused struct dpaa2_queue *dpaa2_q)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct qbman_fd *fd;
	struct rte_mbuf *m;

	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);

	/* Setting port id does not matter as we are to free the mbuf */
	m = eth_fd_to_mbuf(fd, 0);
	rte_pktmbuf_free(m);
}

static void
dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
			     struct rte_mbuf *m,
			     struct qbman_eq_desc *eqdesc)
{
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);

	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
			DPAA2_EQCR_OPRID_SHIFT;
		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
			DPAA2_EQCR_SEQNUM_SHIFT;

		if (!priv->en_loose_ordered) {
			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
			qbman_eq_desc_set_response(eqdesc, (uint64_t)
				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
				dpio_dev->eqresp_pi]), 1);
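			/* Added note (not in the original source): a non-zero
			 * token is set so that a written enqueue response entry
			 * can later be told apart from an unused one.
			 */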
			qbman_eq_desc_set_token(eqdesc, 1);

			eqresp_meta = &dpio_dev->eqresp_meta[
					dpio_dev->eqresp_pi];
			eqresp_meta->dpaa2_q = dpaa2_q;
			eqresp_meta->mp = m->pool;

			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
				dpio_dev->eqresp_pi++ :
				(dpio_dev->eqresp_pi = 0);
		} else {
			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
		}
	} else {
		dq_idx = *dpaa2_seqn(m) - 1;
		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
		DPAA2_PER_LCORE_DQRR_SIZE--;
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
	}
	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}

uint16_t
dpaa2_dev_tx_multi_txq_ordered(void **queue,
			       struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to multiple queues respectively.*/
	uint32_t loop, i, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, num_free_eq_desc = 0;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
	struct qbman_swp *swp;
	uint16_t bpid;
	struct rte_mbuf *mi;
	struct rte_eth_dev_data *eth_data;
	struct dpaa2_dev_priv *priv;
	struct dpaa2_queue *order_sendq;
	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
	uint32_t free_count = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
		dpaa2_eqcr_size : nb_pkts;

	for (loop = 0; loop < frames_to_send; loop++) {
		dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
		eth_data = dpaa2_q[loop]->eth_data;
		priv = eth_data->dev_private;
		if (!priv->en_loose_ordered) {
			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
				if (!num_free_eq_desc) {
					num_free_eq_desc = dpaa2_free_eq_descriptors();
					if (!num_free_eq_desc)
						goto send_frames;
				}
				num_free_eq_desc--;
			}
		}

		DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
				eth_data, dpaa2_q[loop]->fqid);

		/* Check if the queue is congested */
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto send_frames;
		}

		/* Prepare enqueue descriptor */
		qbman_eq_desc_clear(&eqdesc[loop]);

		if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
			order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
			dpaa2_set_enqueue_descriptor(order_sendq,
						     (*bufs),
						     &eqdesc[loop]);
		} else {
			qbman_eq_desc_set_no_orp(&eqdesc[loop],
						 DPAA2_EQ_RESP_ERR_FQ);
			qbman_eq_desc_set_fq(&eqdesc[loop],
					     dpaa2_q[loop]->fqid);
		}

		if (likely(RTE_MBUF_DIRECT(*bufs))) {
			mp = (*bufs)->pool;
			/* Check the basic scenario and set
			 * the FD appropriately here itself.
			 */
			if (likely(mp && mp->ops_index ==
				priv->bp_list->dpaa2_ops_index &&
				(*bufs)->nb_segs == 1 &&
				rte_mbuf_refcnt_read((*bufs)) == 1)) {
				if (unlikely((*bufs)->ol_flags
					& RTE_MBUF_F_TX_VLAN)) {
					ret = rte_vlan_insert(bufs);
					if (ret)
						goto send_frames;
				}
				DPAA2_MBUF_TO_CONTIG_FD((*bufs),
					&fd_arr[loop],
					mempool_to_bpid(mp));
				bufs++;
				continue;
			}
		} else {
			mi = rte_mbuf_from_indirect(*bufs);
			mp = mi->pool;
		}
		/* Not a hw_pkt pool allocated frame */
		if (unlikely(!mp || !priv->bp_list)) {
			DPAA2_PMD_ERR("Err: No buffer pool attached");
			goto send_frames;
		}

		if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
			DPAA2_PMD_WARN("Non DPAA2 buffer pool");
			/* alloc should be from the default buffer pool
			 * attached to this interface
			 */
			bpid = priv->bp_list->buf_pool.bpid;

			if (unlikely((*bufs)->nb_segs > 1)) {
				DPAA2_PMD_ERR(
					"S/G not supp for non hw offload buffer");
				goto send_frames;
			}
			if (eth_copy_mbuf_to_fd(*bufs,
						&fd_arr[loop], bpid)) {
				goto send_frames;
			}
			/* free the original packet */
			rte_pktmbuf_free(*bufs);
		} else {
			bpid = mempool_to_bpid(mp);
			if (unlikely((*bufs)->nb_segs > 1)) {
				if (eth_mbuf_to_sg_fd(*bufs,
						      &fd_arr[loop],
						      buf_to_free,
						      &free_count,
						      loop,
						      bpid))
					goto send_frames;
			} else {
				eth_mbuf_to_fd(*bufs,
					       &fd_arr[loop],
					       buf_to_free,
					       &free_count,
					       loop, bpid);
			}
		}

		bufs++;
	}

send_frames:
	frames_to_send = loop;
	loop = 0;
	retry_count = 0;
	while (loop < frames_to_send) {
		ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
				&fd_arr[loop],
				frames_to_send - loop);
		if (likely(ret > 0)) {
			loop += ret;
			retry_count = 0;
		} else {
			retry_count++;
			if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
				break;
		}
	}

	for (i = 0; i < free_count; i++) {
		if (buf_to_free[i].pkt_id < loop)
			rte_pktmbuf_free_seg(buf_to_free[i].seg);
	}
	return loop;
}

/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	struct qbman_swp *swp;
	uint32_t frames_to_send, num_free_eq_desc;
	uint32_t loop, retry_count;
	int32_t ret;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
	uint32_t free_count = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
			   eth_data, dpaa2_q->fqid);

	/* This would also handle normal and atomic queues as any type
	 * of packet can be enqueued when ordered queues are being used.
	 */
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		if (!priv->en_loose_ordered) {
			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
				num_free_eq_desc = dpaa2_free_eq_descriptors();
				if (num_free_eq_desc < frames_to_send)
					frames_to_send = num_free_eq_desc;
			}
		}

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Prepare enqueue descriptor*/
			qbman_eq_desc_clear(&eqdesc[loop]);

			if (*dpaa2_seqn(*bufs)) {
				/* Use only queue 0 for Tx in case of atomic/
				 * ordered packets as packets can get unordered
				 * when being transmitted out from the interface
				 */
				dpaa2_set_enqueue_descriptor(order_sendq,
							     (*bufs),
							     &eqdesc[loop]);
			} else {
				qbman_eq_desc_set_no_orp(&eqdesc[loop],
							 DPAA2_EQ_RESP_ERR_FQ);
				qbman_eq_desc_set_fq(&eqdesc[loop],
						     dpaa2_q->fqid);
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
					priv->bp_list->dpaa2_ops_index &&
					(*bufs)->nb_segs == 1 &&
					rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely((*bufs)->ol_flags
						& RTE_MBUF_F_TX_VLAN)) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR(
						"S/G not supp for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							      &fd_arr[loop],
							      buf_to_free,
							      &free_count,
							      loop,
							      bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop],
						       buf_to_free,
						       &free_count,
						       loop, bpid);
				}
			}
			bufs++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_pkts -= loop;
					goto send_n_return;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_pkts -= loop;
	}
	dpaa2_q->tx_pkts += num_tx;
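	/* Added note (not in the original source): only the software-tracked
	 * segments (external and indirect buffers) belonging to frames that
	 * were actually enqueued are freed here.
	 */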
	for (loop = 0; loop < free_count; loop++) {
		if (buf_to_free[loop].pkt_id < num_tx)
			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
	}

	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		retry_count = 0;
		while (i < loop) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[i], &fd_arr[i], loop - i);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					break;
			} else {
				i += ret;
				retry_count = 0;
			}
		}
		num_tx += i;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	for (loop = 0; loop < free_count; loop++) {
		if (buf_to_free[loop].pkt_id < num_tx)
			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
	}

	return num_tx;
}

#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

/* This function loops back all the received packets.*/
uint16_t
dpaa2_dev_loopback_rx(void *queue,
		      struct rte_mbuf **bufs __rte_unused,
		      uint16_t nb_pkts)
{
	/* Function receives frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, num_tx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct qbman_eq_desc eqdesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *tx_q = priv->tx_vq[0];
	/* todo - currently we are using 1st TX queue only for loopback*/

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command not issued. QBMAN busy");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);

		dq_storage++;
		num_rx++;
	} while (pending);

	while (num_tx < num_rx) {
		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
				&fd[num_tx], 0, num_rx - num_tx);
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;
	dpaa2_q->tx_pkts += num_tx;

	return 0;
}
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic pop
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic pop
#endif