/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2018 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)

static inline void __attribute__((hot))
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
{
	struct dpaa2_annot_hdr *annotation;
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m,
		  (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= PKT_RX_RSS_HASH;

	if (dpaa2_enable_ts == PMD_DPAA2_ENABLE_TS) {
		annotation = (struct dpaa2_annot_hdr *)
			((size_t)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FD_ADDR(fd)) + DPAA2_FD_PTA_SIZE);
		m->timestamp = annotation->word2;
		m->ol_flags |= PKT_RX_TIMESTAMP;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp);
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
		"ol_flags =0x%" PRIx64 "",
		frc, m->packet_type, m->ol_flags);
}

/* Slow-path Rx parser: walks the full hardware annotation words
 * (word3/word4/word5/word8) to derive the packet type and to set the
 * VLAN and checksum offload flags whenever the fast FRC/word4 lookup
 * cannot classify the frame on its own.
 */
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
			L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		  L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}

static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	mbuf->ol_flags |= PKT_RX_TIMESTAMP;
	mbuf->timestamp = annotation->word2;
	DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "", mbuf->timestamp);

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
/* Convert a scatter/gather frame descriptor into a chained mbuf. The first
 * SG entry becomes the head segment and the remaining entries are linked as
 * further segments; the buffer that carried the SG table itself is released
 * back to its pool once the chain has been built.
 */
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd);
	else
		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}

static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* Need to repopulate some of the fields,
	 * as they may have changed during the last transmission.
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* Parse results for LX2 are in the FRC field of the FD.
	 * For other DPAA2 platforms, parse results follow the
	 * private SW annotation area.
	 */

	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
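/* Build a scatter/gather FD from a multi-segment mbuf. A buffer is taken
 * from the mbuf's pool to hold the SG table, and each segment is described
 * by one SG entry. Segments that are still referenced elsewhere (refcnt > 1)
 * or that are indirect get an invalid bpid so the hardware does not free
 * the underlying buffer.
 */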
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	DPAA2_RESET_FD_FRC(fd);
	DPAA2_RESET_FD_CTRL(fd);
	/*Set Scatter gather table and Scatter gather entries*/
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/*Resetting the buffer pool id and offset field*/
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}

static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
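/* Fallback for mbufs that do not come from a DPAA2 hardware-backed pool:
 * a buffer is allocated from the interface's default pool, the packet data
 * and the relevant metadata are copied into it, and the FD is built from
 * that copy. Callers reject multi-segment packets before taking this path.
 */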
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
		mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

/* This function assumes that the caller keeps the same nb_pkts value across
 * calls for a given queue; if that is not the case, use the non-prefetch
 * version of the Rx call instead.
 * It returns the number of packets requested in the previous call, without
 * honoring the current nb_pkts or the space available in bufs.
 */
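/* Two dequeue storage areas are used in a ping-pong fashion: the pull for
 * the next burst is issued on the inactive storage before the frames from
 * the previously issued pull are processed, so the hardware dequeue overlaps
 * with software processing of the current burst.
 */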
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare the next pull descriptor. This gives room for the
	 * prefetching done on the DQRR entries.
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previously issued command has completed.
	 * The SWP also appears to be shared between the Ethernet driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until dq_storage is updated with
		 * a new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether the last pull command has expired and,
		 * if so, set the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		if (dpaa2_svr_family != SVR_LX2160A) {
			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(
				      next_fd) + DPAA2_FD_PTA_SIZE + 16));
		}

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd);
		bufs[num_rx]->port = eth_data->port_id;

		if (eth_data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* Issue a volatile dequeue command for the next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
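/* Event-mode Rx handlers: convert a dequeued FD into an rte_event carrying
 * the mbuf. For parallel queues the DQRR entry is consumed immediately; for
 * atomic queues it is held (tracked in the per-lcore DQRR bookkeeping) and
 * consumed later via DCA when the packet is enqueued on Tx.
 */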
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	qbman_swp_dqrr_consume(swp, dq);
}

void __attribute__((hot))
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	dqrr_index = qbman_get_dqrr_idx(dq);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
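/* For ordered queues the ODP id and sequence number from the dequeue result
 * are packed into mbuf->seqn (together with DPAA2_ENQUEUE_FLAG_ORP) so that
 * the Tx path can later enqueue the frame through the order restoration
 * point and preserve the original frame order.
 */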
void __attribute__((hot))
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
	ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
	ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}

/*
 * Callback to handle sending packets through a WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);
	/*Clear the unused FD fields before sending*/
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*bufs)->seqn) {
				uint8_t dqrr_index = (*bufs)->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* For the common case, set up
				 * the FD directly here.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT) ||
						(eth_data->dev_conf.txmode.offloads
						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
					&fd_arr[loop], mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
				(eth_data->dev_conf.txmode.offloads
				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[i],
							&flags[i],
							loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}

void
dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct qbman_fd *fd;
	struct rte_mbuf *m;

	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
	m = eth_fd_to_mbuf(fd);
	rte_pktmbuf_free(m);
}
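/* Fill an enqueue descriptor for an atomic/ordered frame from the context
 * stashed in mbuf->seqn: either order restoration (ORP id and sequence
 * number) or DQRR consume (DCA index). With strict ordering an enqueue
 * response entry is also reserved and its metadata recorded so the enqueue
 * result can be examined later (see dpaa2_dev_free_eqresp_buf() above).
 */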
static void
dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
			     struct rte_mbuf *m,
			     struct qbman_eq_desc *eqdesc)
{
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

	qbman_eq_desc_set_qd(eqdesc, priv->qdid, dpaa2_q->flow_id,
			     dpaa2_q->tc_index);

	if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
		orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >>
			DPAA2_EQCR_OPRID_SHIFT;
		seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >>
			DPAA2_EQCR_SEQNUM_SHIFT;

		if (!priv->en_loose_ordered) {
			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
			qbman_eq_desc_set_response(eqdesc, (uint64_t)
				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
				dpio_dev->eqresp_pi]), 1);
			qbman_eq_desc_set_token(eqdesc, 1);

			eqresp_meta = &dpio_dev->eqresp_meta[
				dpio_dev->eqresp_pi];
			eqresp_meta->dpaa2_q = dpaa2_q;
			eqresp_meta->mp = m->pool;

			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
				dpio_dev->eqresp_pi++ :
				(dpio_dev->eqresp_pi = 0);
		} else {
			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
		}
	} else {
		dq_idx = m->seqn - 1;
		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
		DPAA2_PER_LCORE_DQRR_SIZE--;
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
	}
	m->seqn = DPAA2_INVALID_MBUF_SEQN;
}

/* Callback to handle sending ordered packets through WRIOP based interface */
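/* One enqueue descriptor is prepared per frame (rather than one shared
 * descriptor per burst) because each frame may carry its own ORP/DCA
 * context; frames without a sequence number fall back to a plain no-ORP
 * enqueue on this queue.
 */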
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	struct qbman_swp *swp;
	uint32_t frames_to_send, num_free_eq_desc;
	uint32_t loop, retry_count;
	int32_t ret;
	uint16_t num_tx = 0;
	uint16_t bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

	/* This would also handle normal and atomic queues as any type
	 * of packet can be enqueued when ordered queues are being used.
	 */
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		if (!priv->en_loose_ordered) {
			if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
				num_free_eq_desc = dpaa2_free_eq_descriptors();
				if (num_free_eq_desc < frames_to_send)
					frames_to_send = num_free_eq_desc;
			}
		}

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Prepare enqueue descriptor*/
			qbman_eq_desc_clear(&eqdesc[loop]);

			if ((*bufs)->seqn) {
				/* Use only queue 0 for Tx in case of atomic/
				 * ordered packets as packets can get unordered
				 * when being transmitted out from the interface
				 */
				dpaa2_set_enqueue_descriptor(order_sendq,
							     (*bufs),
							     &eqdesc[loop]);
			} else {
				qbman_eq_desc_set_no_orp(&eqdesc[loop],
							 DPAA2_EQ_RESP_ERR_FQ);
				qbman_eq_desc_set_qd(&eqdesc[loop], priv->qdid,
						     dpaa2_q->flow_id,
						     dpaa2_q->tc_index);
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* For the common case, set up
				 * the FD directly here.
				 */
				if (likely(mp && mp->ops_index ==
					priv->bp_list->dpaa2_ops_index &&
					(*bufs)->nb_segs == 1 &&
					rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT)) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR(
						"S/G not supported for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							      &fd_arr[loop],
							      bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple_desc(swp, &eqdesc[i],
							&fd_arr[i], loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param[in] bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	(void)queue;
	(void)bufs;
	(void)nb_pkts;
	return 0;
}
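/* The loopback handler below casts away the const qualifier of the FDs
 * returned by qbman_result_DQ_fd() so they can be re-enqueued directly on
 * the Tx queue; the cast-qual warning is silenced around it for that reason.
 */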
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

/* This function loops back all the received packets. */
uint16_t
dpaa2_dev_loopback_rx(void *queue,
		      struct rte_mbuf **bufs __rte_unused,
		      uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, num_tx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct qbman_eq_desc eqdesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *tx_q = priv->tx_vq[0];
	/* todo - currently we are using 1st TX queue only for loopback */

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command not issued. QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));
	/* Prepare the next pull descriptor. This gives room for the
	 * prefetching done on the DQRR entries.
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);

	/* Check if the previously issued command has completed.
	 * The SWP also appears to be shared between the Ethernet driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until dq_storage is updated with
		 * a new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether the last pull command has expired and,
		 * if so, set the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);

		dq_storage++;
		num_rx++;
	} while (pending);

	while (num_tx < num_rx) {
		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
				&fd[num_tx], 0, num_rx - num_tx);
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* Issue a volatile dequeue command for the next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;
	dpaa2_q->tx_pkts += num_tx;

	return 0;
}
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic pop
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic pop
#endif