1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016-2018 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 11 #include <rte_mbuf.h> 12 #include <rte_ethdev_driver.h> 13 #include <rte_malloc.h> 14 #include <rte_memcpy.h> 15 #include <rte_string_fns.h> 16 #include <rte_dev.h> 17 18 #include <rte_fslmc.h> 19 #include <fslmc_vfio.h> 20 #include <dpaa2_hw_pvt.h> 21 #include <dpaa2_hw_dpio.h> 22 #include <dpaa2_hw_mempool.h> 23 24 #include "dpaa2_pmd_logs.h" 25 #include "dpaa2_ethdev.h" 26 #include "base/dpaa2_hw_dpni_annot.h" 27 28 static inline uint32_t __attribute__((hot)) 29 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf, 30 struct dpaa2_annot_hdr *annotation); 31 32 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \ 33 DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \ 34 DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \ 35 DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \ 36 DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \ 37 DPAA2_SET_FD_FRC(_fd, 0); \ 38 DPAA2_RESET_FD_CTRL(_fd); \ 39 DPAA2_RESET_FD_FLC(_fd); \ 40 } while (0) 41 42 static inline void __attribute__((hot)) 43 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd) 44 { 45 struct dpaa2_annot_hdr *annotation; 46 uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd); 47 48 m->packet_type = RTE_PTYPE_UNKNOWN; 49 switch (frc) { 50 case DPAA2_PKT_TYPE_ETHER: 51 m->packet_type = RTE_PTYPE_L2_ETHER; 52 break; 53 case DPAA2_PKT_TYPE_IPV4: 54 m->packet_type = RTE_PTYPE_L2_ETHER | 55 RTE_PTYPE_L3_IPV4; 56 break; 57 case DPAA2_PKT_TYPE_IPV6: 58 m->packet_type = RTE_PTYPE_L2_ETHER | 59 RTE_PTYPE_L3_IPV6; 60 break; 61 case DPAA2_PKT_TYPE_IPV4_EXT: 62 m->packet_type = RTE_PTYPE_L2_ETHER | 63 RTE_PTYPE_L3_IPV4_EXT; 64 break; 65 case DPAA2_PKT_TYPE_IPV6_EXT: 66 m->packet_type = RTE_PTYPE_L2_ETHER | 67 RTE_PTYPE_L3_IPV6_EXT; 68 break; 69 case DPAA2_PKT_TYPE_IPV4_TCP: 70 m->packet_type = RTE_PTYPE_L2_ETHER | 71 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP; 72 break; 73 case DPAA2_PKT_TYPE_IPV6_TCP: 74 m->packet_type = RTE_PTYPE_L2_ETHER | 75 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP; 76 break; 77 case DPAA2_PKT_TYPE_IPV4_UDP: 78 m->packet_type = RTE_PTYPE_L2_ETHER | 79 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP; 80 break; 81 case DPAA2_PKT_TYPE_IPV6_UDP: 82 m->packet_type = RTE_PTYPE_L2_ETHER | 83 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; 84 break; 85 case DPAA2_PKT_TYPE_IPV4_SCTP: 86 m->packet_type = RTE_PTYPE_L2_ETHER | 87 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP; 88 break; 89 case DPAA2_PKT_TYPE_IPV6_SCTP: 90 m->packet_type = RTE_PTYPE_L2_ETHER | 91 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP; 92 break; 93 case DPAA2_PKT_TYPE_IPV4_ICMP: 94 m->packet_type = RTE_PTYPE_L2_ETHER | 95 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP; 96 break; 97 case DPAA2_PKT_TYPE_IPV6_ICMP: 98 m->packet_type = RTE_PTYPE_L2_ETHER | 99 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP; 100 break; 101 default: 102 m->packet_type = dpaa2_dev_rx_parse_slow(m, 103 (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) 104 + DPAA2_FD_PTA_SIZE)); 105 } 106 m->hash.rss = fd->simple.flc_hi; 107 m->ol_flags |= PKT_RX_RSS_HASH; 108 109 if (dpaa2_enable_ts == PMD_DPAA2_ENABLE_TS) { 110 annotation = (struct dpaa2_annot_hdr *) 111 ((size_t)DPAA2_IOVA_TO_VADDR( 112 DPAA2_GET_FD_ADDR(fd)) + DPAA2_FD_PTA_SIZE); 113 m->timestamp = annotation->word2; 114 m->ol_flags |= PKT_RX_TIMESTAMP; 115 DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp); 116 } 117 118 DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x " 119 "ol_flags =0x%" PRIx64 "", 120 frc, m->packet_type, m->ol_flags); 121 } 122 123 static inline uint32_t __attribute__((hot)) 124 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf, 125 struct dpaa2_annot_hdr *annotation) 126 { 127 uint32_t pkt_type = RTE_PTYPE_UNKNOWN; 128 uint16_t *vlan_tci; 129 130 DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t" 131 "(4)=0x%" PRIx64 "\t", 132 annotation->word3, annotation->word4); 133 134 if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) { 135 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *, 136 (VLAN_TCI_OFFSET_1(annotation->word5) >> 16)); 137 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci); 138 mbuf->ol_flags |= PKT_RX_VLAN; 139 pkt_type |= RTE_PTYPE_L2_ETHER_VLAN; 140 } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) { 141 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *, 142 (VLAN_TCI_OFFSET_1(annotation->word5) >> 16)); 143 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci); 144 mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ; 145 pkt_type |= RTE_PTYPE_L2_ETHER_QINQ; 146 } 147 148 if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) { 149 pkt_type |= RTE_PTYPE_L2_ETHER_ARP; 150 goto parse_done; 151 } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) { 152 pkt_type |= RTE_PTYPE_L2_ETHER; 153 } else { 154 goto parse_done; 155 } 156 157 if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT | 158 L3_IPV4_N_PRESENT)) { 159 pkt_type |= RTE_PTYPE_L3_IPV4; 160 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT | 161 L3_IP_N_OPT_PRESENT)) 162 pkt_type |= RTE_PTYPE_L3_IPV4_EXT; 163 164 } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT | 165 L3_IPV6_N_PRESENT)) { 166 pkt_type |= RTE_PTYPE_L3_IPV6; 167 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT | 168 L3_IP_N_OPT_PRESENT)) 169 pkt_type |= RTE_PTYPE_L3_IPV6_EXT; 170 } else { 171 goto parse_done; 172 } 173 174 if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) 175 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; 176 else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) 177 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; 178 179 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT | 180 L3_IP_1_MORE_FRAGMENT | 181 L3_IP_N_FIRST_FRAGMENT | 182 L3_IP_N_MORE_FRAGMENT)) { 183 pkt_type |= RTE_PTYPE_L4_FRAG; 184 goto parse_done; 185 } else { 186 pkt_type |= RTE_PTYPE_L4_NONFRAG; 187 } 188 189 if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT)) 190 pkt_type |= RTE_PTYPE_L4_UDP; 191 192 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT)) 193 pkt_type |= RTE_PTYPE_L4_TCP; 194 195 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT)) 196 pkt_type |= RTE_PTYPE_L4_SCTP; 197 198 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT)) 199 pkt_type |= RTE_PTYPE_L4_ICMP; 200 201 else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL)) 202 pkt_type |= RTE_PTYPE_UNKNOWN; 203 204 parse_done: 205 return pkt_type; 206 } 207 208 static inline uint32_t __attribute__((hot)) 209 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr) 210 { 211 struct dpaa2_annot_hdr *annotation = 212 (struct dpaa2_annot_hdr *)hw_annot_addr; 213 214 DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t", 215 annotation->word4); 216 217 if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) 218 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; 219 else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) 220 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; 221 222 mbuf->ol_flags |= PKT_RX_TIMESTAMP; 223 mbuf->timestamp = annotation->word2; 224 DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "", mbuf->timestamp); 225 226 /* Check detailed parsing requirement */ 227 if (annotation->word3 & 0x7FFFFC3FFFF) 228 return dpaa2_dev_rx_parse_slow(mbuf, annotation); 229 230 /* Return some common types from parse processing */ 231 switch (annotation->word4) { 232 case DPAA2_L3_IPv4: 233 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; 234 case DPAA2_L3_IPv6: 235 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; 236 case DPAA2_L3_IPv4_TCP: 237 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | 238 RTE_PTYPE_L4_TCP; 239 case DPAA2_L3_IPv4_UDP: 240 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | 241 RTE_PTYPE_L4_UDP; 242 case DPAA2_L3_IPv6_TCP: 243 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | 244 RTE_PTYPE_L4_TCP; 245 case DPAA2_L3_IPv6_UDP: 246 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | 247 RTE_PTYPE_L4_UDP; 248 default: 249 break; 250 } 251 252 return dpaa2_dev_rx_parse_slow(mbuf, annotation); 253 } 254 255 static inline struct rte_mbuf *__attribute__((hot)) 256 eth_sg_fd_to_mbuf(const struct qbman_fd *fd) 257 { 258 struct qbman_sge *sgt, *sge; 259 size_t sg_addr, fd_addr; 260 int i = 0; 261 struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp; 262 263 fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 264 265 /* Get Scatter gather table address */ 266 sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd)); 267 268 sge = &sgt[i++]; 269 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge)); 270 271 /* First Scatter gather entry */ 272 first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, 273 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 274 /* Prepare all the metadata for first segment */ 275 first_seg->buf_addr = (uint8_t *)sg_addr; 276 first_seg->ol_flags = 0; 277 first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge); 278 first_seg->data_len = sge->length & 0x1FFFF; 279 first_seg->pkt_len = DPAA2_GET_FD_LEN(fd); 280 first_seg->nb_segs = 1; 281 first_seg->next = NULL; 282 if (dpaa2_svr_family == SVR_LX2160A) 283 dpaa2_dev_rx_parse_new(first_seg, fd); 284 else 285 first_seg->packet_type = dpaa2_dev_rx_parse(first_seg, 286 (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) 287 + DPAA2_FD_PTA_SIZE)); 288 289 rte_mbuf_refcnt_set(first_seg, 1); 290 cur_seg = first_seg; 291 while (!DPAA2_SG_IS_FINAL(sge)) { 292 sge = &sgt[i++]; 293 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR( 294 DPAA2_GET_FLE_ADDR(sge)); 295 next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, 296 rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size); 297 next_seg->buf_addr = (uint8_t *)sg_addr; 298 next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge); 299 next_seg->data_len = sge->length & 0x1FFFF; 300 first_seg->nb_segs += 1; 301 rte_mbuf_refcnt_set(next_seg, 1); 302 cur_seg->next = next_seg; 303 next_seg->next = NULL; 304 cur_seg = next_seg; 305 } 306 temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr, 307 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 308 rte_mbuf_refcnt_set(temp, 1); 309 rte_pktmbuf_free_seg(temp); 310 311 return (void *)first_seg; 312 } 313 314 static inline struct rte_mbuf *__attribute__((hot)) 315 eth_fd_to_mbuf(const struct qbman_fd *fd) 316 { 317 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 318 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 319 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 320 321 /* need to repopulated some of the fields, 322 * as they may have changed in last transmission 323 */ 324 mbuf->nb_segs = 1; 325 mbuf->ol_flags = 0; 326 mbuf->data_off = DPAA2_GET_FD_OFFSET(fd); 327 mbuf->data_len = DPAA2_GET_FD_LEN(fd); 328 mbuf->pkt_len = mbuf->data_len; 329 mbuf->next = NULL; 330 rte_mbuf_refcnt_set(mbuf, 1); 331 332 /* Parse the packet */ 333 /* parse results for LX2 are there in FRC field of FD. 334 * For other DPAA2 platforms , parse results are after 335 * the private - sw annotation area 336 */ 337 338 if (dpaa2_svr_family == SVR_LX2160A) 339 dpaa2_dev_rx_parse_new(mbuf, fd); 340 else 341 mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, 342 (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) 343 + DPAA2_FD_PTA_SIZE)); 344 345 DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d," 346 "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", 347 mbuf, mbuf->buf_addr, mbuf->data_off, 348 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), 349 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 350 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); 351 352 return mbuf; 353 } 354 355 static int __attribute__ ((noinline)) __attribute__((hot)) 356 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, 357 struct qbman_fd *fd, uint16_t bpid) 358 { 359 struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp; 360 struct qbman_sge *sgt, *sge = NULL; 361 int i; 362 363 temp = rte_pktmbuf_alloc(mbuf->pool); 364 if (temp == NULL) { 365 DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n"); 366 return -ENOMEM; 367 } 368 369 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp)); 370 DPAA2_SET_FD_LEN(fd, mbuf->pkt_len); 371 DPAA2_SET_ONLY_FD_BPID(fd, bpid); 372 DPAA2_SET_FD_OFFSET(fd, temp->data_off); 373 DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL); 374 DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg); 375 /*Set Scatter gather table and Scatter gather entries*/ 376 sgt = (struct qbman_sge *)( 377 (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) 378 + DPAA2_GET_FD_OFFSET(fd)); 379 380 for (i = 0; i < mbuf->nb_segs; i++) { 381 sge = &sgt[i]; 382 /*Resetting the buffer pool id and offset field*/ 383 sge->fin_bpid_offset = 0; 384 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg)); 385 DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off); 386 sge->length = cur_seg->data_len; 387 if (RTE_MBUF_DIRECT(cur_seg)) { 388 if (rte_mbuf_refcnt_read(cur_seg) > 1) { 389 /* If refcnt > 1, invalid bpid is set to ensure 390 * buffer is not freed by HW 391 */ 392 DPAA2_SET_FLE_IVP(sge); 393 rte_mbuf_refcnt_update(cur_seg, -1); 394 } else 395 DPAA2_SET_FLE_BPID(sge, 396 mempool_to_bpid(cur_seg->pool)); 397 cur_seg = cur_seg->next; 398 } else { 399 /* Get owner MBUF from indirect buffer */ 400 mi = rte_mbuf_from_indirect(cur_seg); 401 if (rte_mbuf_refcnt_read(mi) > 1) { 402 /* If refcnt > 1, invalid bpid is set to ensure 403 * owner buffer is not freed by HW 404 */ 405 DPAA2_SET_FLE_IVP(sge); 406 } else { 407 DPAA2_SET_FLE_BPID(sge, 408 mempool_to_bpid(mi->pool)); 409 rte_mbuf_refcnt_update(mi, 1); 410 } 411 prev_seg = cur_seg; 412 cur_seg = cur_seg->next; 413 prev_seg->next = NULL; 414 rte_pktmbuf_free(prev_seg); 415 } 416 } 417 DPAA2_SG_SET_FINAL(sge, true); 418 return 0; 419 } 420 421 static void 422 eth_mbuf_to_fd(struct rte_mbuf *mbuf, 423 struct qbman_fd *fd, uint16_t bpid) __attribute__((unused)); 424 425 static void __attribute__ ((noinline)) __attribute__((hot)) 426 eth_mbuf_to_fd(struct rte_mbuf *mbuf, 427 struct qbman_fd *fd, uint16_t bpid) 428 { 429 DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid); 430 431 DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d," 432 "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", 433 mbuf, mbuf->buf_addr, mbuf->data_off, 434 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), 435 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 436 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); 437 if (RTE_MBUF_DIRECT(mbuf)) { 438 if (rte_mbuf_refcnt_read(mbuf) > 1) { 439 DPAA2_SET_FD_IVP(fd); 440 rte_mbuf_refcnt_update(mbuf, -1); 441 } 442 } else { 443 struct rte_mbuf *mi; 444 445 mi = rte_mbuf_from_indirect(mbuf); 446 if (rte_mbuf_refcnt_read(mi) > 1) 447 DPAA2_SET_FD_IVP(fd); 448 else 449 rte_mbuf_refcnt_update(mi, 1); 450 rte_pktmbuf_free(mbuf); 451 } 452 } 453 454 static inline int __attribute__((hot)) 455 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, 456 struct qbman_fd *fd, uint16_t bpid) 457 { 458 struct rte_mbuf *m; 459 void *mb = NULL; 460 461 if (rte_dpaa2_mbuf_alloc_bulk( 462 rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) { 463 DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n"); 464 return -1; 465 } 466 m = (struct rte_mbuf *)mb; 467 memcpy((char *)m->buf_addr + mbuf->data_off, 468 (void *)((char *)mbuf->buf_addr + mbuf->data_off), 469 mbuf->pkt_len); 470 471 /* Copy required fields */ 472 m->data_off = mbuf->data_off; 473 m->ol_flags = mbuf->ol_flags; 474 m->packet_type = mbuf->packet_type; 475 m->tx_offload = mbuf->tx_offload; 476 477 DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid); 478 479 DPAA2_PMD_DP_DEBUG( 480 "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d," 481 " meta: %d, off: %d, len: %d\n", 482 (void *)mbuf, 483 mbuf->buf_addr, 484 DPAA2_GET_FD_ADDR(fd), 485 DPAA2_GET_FD_BPID(fd), 486 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 487 DPAA2_GET_FD_OFFSET(fd), 488 DPAA2_GET_FD_LEN(fd)); 489 490 return 0; 491 } 492 493 /* This function assumes that caller will be keep the same value for nb_pkts 494 * across calls per queue, if that is not the case, better use non-prefetch 495 * version of rx call. 496 * It will return the packets as requested in previous call without honoring 497 * the current nb_pkts or bufs space. 498 */ 499 uint16_t 500 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) 501 { 502 /* Function receive frames for a given device and VQ*/ 503 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; 504 struct qbman_result *dq_storage, *dq_storage1 = NULL; 505 uint32_t fqid = dpaa2_q->fqid; 506 int ret, num_rx = 0, pull_size; 507 uint8_t pending, status; 508 struct qbman_swp *swp; 509 const struct qbman_fd *fd, *next_fd; 510 struct qbman_pull_desc pulldesc; 511 struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; 512 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; 513 514 if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) { 515 ret = dpaa2_affine_qbman_ethrx_swp(); 516 if (ret) { 517 DPAA2_PMD_ERR("Failure in affining portal"); 518 return 0; 519 } 520 } 521 522 if (unlikely(!rte_dpaa2_bpid_info && 523 rte_eal_process_type() == RTE_PROC_SECONDARY)) 524 rte_dpaa2_bpid_info = dpaa2_q->bp_array; 525 526 swp = DPAA2_PER_LCORE_ETHRX_PORTAL; 527 pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts; 528 if (unlikely(!q_storage->active_dqs)) { 529 q_storage->toggle = 0; 530 dq_storage = q_storage->dq_storage[q_storage->toggle]; 531 q_storage->last_num_pkts = pull_size; 532 qbman_pull_desc_clear(&pulldesc); 533 qbman_pull_desc_set_numframes(&pulldesc, 534 q_storage->last_num_pkts); 535 qbman_pull_desc_set_fq(&pulldesc, fqid); 536 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 537 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); 538 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { 539 while (!qbman_check_command_complete( 540 get_swp_active_dqs( 541 DPAA2_PER_LCORE_ETHRX_DPIO->index))) 542 ; 543 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); 544 } 545 while (1) { 546 if (qbman_swp_pull(swp, &pulldesc)) { 547 DPAA2_PMD_DP_DEBUG("VDQ command is not issued." 548 " QBMAN is busy (1)\n"); 549 /* Portal was busy, try again */ 550 continue; 551 } 552 break; 553 } 554 q_storage->active_dqs = dq_storage; 555 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; 556 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, 557 dq_storage); 558 } 559 560 dq_storage = q_storage->active_dqs; 561 rte_prefetch0((void *)(size_t)(dq_storage)); 562 rte_prefetch0((void *)(size_t)(dq_storage + 1)); 563 564 /* Prepare next pull descriptor. This will give space for the 565 * prefething done on DQRR entries 566 */ 567 q_storage->toggle ^= 1; 568 dq_storage1 = q_storage->dq_storage[q_storage->toggle]; 569 qbman_pull_desc_clear(&pulldesc); 570 qbman_pull_desc_set_numframes(&pulldesc, pull_size); 571 qbman_pull_desc_set_fq(&pulldesc, fqid); 572 qbman_pull_desc_set_storage(&pulldesc, dq_storage1, 573 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); 574 575 /* Check if the previous issued command is completed. 576 * Also seems like the SWP is shared between the Ethernet Driver 577 * and the SEC driver. 578 */ 579 while (!qbman_check_command_complete(dq_storage)) 580 ; 581 if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id)) 582 clear_swp_active_dqs(q_storage->active_dpio_id); 583 584 pending = 1; 585 586 do { 587 /* Loop until the dq_storage is updated with 588 * new token by QBMAN 589 */ 590 while (!qbman_check_new_result(dq_storage)) 591 ; 592 rte_prefetch0((void *)((size_t)(dq_storage + 2))); 593 /* Check whether Last Pull command is Expired and 594 * setting Condition for Loop termination 595 */ 596 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 597 pending = 0; 598 /* Check for valid frame. */ 599 status = qbman_result_DQ_flags(dq_storage); 600 if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) 601 continue; 602 } 603 fd = qbman_result_DQ_fd(dq_storage); 604 605 if (dpaa2_svr_family != SVR_LX2160A) { 606 next_fd = qbman_result_DQ_fd(dq_storage + 1); 607 /* Prefetch Annotation address for the parse results */ 608 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR( 609 next_fd) + DPAA2_FD_PTA_SIZE + 16)); 610 } 611 612 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg)) 613 bufs[num_rx] = eth_sg_fd_to_mbuf(fd); 614 else 615 bufs[num_rx] = eth_fd_to_mbuf(fd); 616 bufs[num_rx]->port = eth_data->port_id; 617 618 if (eth_data->dev_conf.rxmode.offloads & 619 DEV_RX_OFFLOAD_VLAN_STRIP) 620 rte_vlan_strip(bufs[num_rx]); 621 622 dq_storage++; 623 num_rx++; 624 } while (pending); 625 626 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { 627 while (!qbman_check_command_complete( 628 get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index))) 629 ; 630 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); 631 } 632 /* issue a volatile dequeue command for next pull */ 633 while (1) { 634 if (qbman_swp_pull(swp, &pulldesc)) { 635 DPAA2_PMD_DP_DEBUG("VDQ command is not issued." 636 "QBMAN is busy (2)\n"); 637 continue; 638 } 639 break; 640 } 641 q_storage->active_dqs = dq_storage1; 642 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; 643 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1); 644 645 dpaa2_q->rx_pkts += num_rx; 646 647 return num_rx; 648 } 649 650 void __attribute__((hot)) 651 dpaa2_dev_process_parallel_event(struct qbman_swp *swp, 652 const struct qbman_fd *fd, 653 const struct qbman_result *dq, 654 struct dpaa2_queue *rxq, 655 struct rte_event *ev) 656 { 657 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + 658 DPAA2_FD_PTA_SIZE + 16)); 659 660 ev->flow_id = rxq->ev.flow_id; 661 ev->sub_event_type = rxq->ev.sub_event_type; 662 ev->event_type = RTE_EVENT_TYPE_ETHDEV; 663 ev->op = RTE_EVENT_OP_NEW; 664 ev->sched_type = rxq->ev.sched_type; 665 ev->queue_id = rxq->ev.queue_id; 666 ev->priority = rxq->ev.priority; 667 668 ev->mbuf = eth_fd_to_mbuf(fd); 669 670 qbman_swp_dqrr_consume(swp, dq); 671 } 672 673 void __attribute__((hot)) 674 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), 675 const struct qbman_fd *fd, 676 const struct qbman_result *dq, 677 struct dpaa2_queue *rxq, 678 struct rte_event *ev) 679 { 680 uint8_t dqrr_index; 681 682 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + 683 DPAA2_FD_PTA_SIZE + 16)); 684 685 ev->flow_id = rxq->ev.flow_id; 686 ev->sub_event_type = rxq->ev.sub_event_type; 687 ev->event_type = RTE_EVENT_TYPE_ETHDEV; 688 ev->op = RTE_EVENT_OP_NEW; 689 ev->sched_type = rxq->ev.sched_type; 690 ev->queue_id = rxq->ev.queue_id; 691 ev->priority = rxq->ev.priority; 692 693 ev->mbuf = eth_fd_to_mbuf(fd); 694 695 dqrr_index = qbman_get_dqrr_idx(dq); 696 ev->mbuf->seqn = dqrr_index + 1; 697 DPAA2_PER_LCORE_DQRR_SIZE++; 698 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 699 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf; 700 } 701 702 /* 703 * Callback to handle sending packets through WRIOP based interface 704 */ 705 uint16_t 706 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) 707 { 708 /* Function to transmit the frames to given device and VQ*/ 709 uint32_t loop, retry_count; 710 int32_t ret; 711 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 712 struct rte_mbuf *mi; 713 uint32_t frames_to_send; 714 struct rte_mempool *mp; 715 struct qbman_eq_desc eqdesc; 716 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; 717 struct qbman_swp *swp; 718 uint16_t num_tx = 0; 719 uint16_t bpid; 720 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; 721 struct dpaa2_dev_priv *priv = eth_data->dev_private; 722 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 723 724 if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 725 ret = dpaa2_affine_qbman_swp(); 726 if (ret) { 727 DPAA2_PMD_ERR("Failure in affining portal"); 728 return 0; 729 } 730 } 731 swp = DPAA2_PER_LCORE_PORTAL; 732 733 DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n", 734 eth_data, dpaa2_q->fqid); 735 736 /*Prepare enqueue descriptor*/ 737 qbman_eq_desc_clear(&eqdesc); 738 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 739 qbman_eq_desc_set_qd(&eqdesc, priv->qdid, 740 dpaa2_q->flow_id, dpaa2_q->tc_index); 741 /*Clear the unused FD fields before sending*/ 742 while (nb_pkts) { 743 /*Check if the queue is congested*/ 744 retry_count = 0; 745 while (qbman_result_SCN_state(dpaa2_q->cscn)) { 746 retry_count++; 747 /* Retry for some time before giving up */ 748 if (retry_count > CONG_RETRY_COUNT) 749 goto skip_tx; 750 } 751 752 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? 753 dpaa2_eqcr_size : nb_pkts; 754 755 for (loop = 0; loop < frames_to_send; loop++) { 756 if ((*bufs)->seqn) { 757 uint8_t dqrr_index = (*bufs)->seqn - 1; 758 759 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | 760 dqrr_index; 761 DPAA2_PER_LCORE_DQRR_SIZE--; 762 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); 763 (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN; 764 } 765 766 if (likely(RTE_MBUF_DIRECT(*bufs))) { 767 mp = (*bufs)->pool; 768 /* Check the basic scenario and set 769 * the FD appropriately here itself. 770 */ 771 if (likely(mp && mp->ops_index == 772 priv->bp_list->dpaa2_ops_index && 773 (*bufs)->nb_segs == 1 && 774 rte_mbuf_refcnt_read((*bufs)) == 1)) { 775 if (unlikely(((*bufs)->ol_flags 776 & PKT_TX_VLAN_PKT) || 777 (eth_data->dev_conf.txmode.offloads 778 & DEV_TX_OFFLOAD_VLAN_INSERT))) { 779 ret = rte_vlan_insert(bufs); 780 if (ret) 781 goto send_n_return; 782 } 783 DPAA2_MBUF_TO_CONTIG_FD((*bufs), 784 &fd_arr[loop], mempool_to_bpid(mp)); 785 bufs++; 786 continue; 787 } 788 } else { 789 mi = rte_mbuf_from_indirect(*bufs); 790 mp = mi->pool; 791 } 792 /* Not a hw_pkt pool allocated frame */ 793 if (unlikely(!mp || !priv->bp_list)) { 794 DPAA2_PMD_ERR("Err: No buffer pool attached"); 795 goto send_n_return; 796 } 797 798 if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) || 799 (eth_data->dev_conf.txmode.offloads 800 & DEV_TX_OFFLOAD_VLAN_INSERT))) { 801 int ret = rte_vlan_insert(bufs); 802 if (ret) 803 goto send_n_return; 804 } 805 if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { 806 DPAA2_PMD_WARN("Non DPAA2 buffer pool"); 807 /* alloc should be from the default buffer pool 808 * attached to this interface 809 */ 810 bpid = priv->bp_list->buf_pool.bpid; 811 812 if (unlikely((*bufs)->nb_segs > 1)) { 813 DPAA2_PMD_ERR("S/G support not added" 814 " for non hw offload buffer"); 815 goto send_n_return; 816 } 817 if (eth_copy_mbuf_to_fd(*bufs, 818 &fd_arr[loop], bpid)) { 819 goto send_n_return; 820 } 821 /* free the original packet */ 822 rte_pktmbuf_free(*bufs); 823 } else { 824 bpid = mempool_to_bpid(mp); 825 if (unlikely((*bufs)->nb_segs > 1)) { 826 if (eth_mbuf_to_sg_fd(*bufs, 827 &fd_arr[loop], bpid)) 828 goto send_n_return; 829 } else { 830 eth_mbuf_to_fd(*bufs, 831 &fd_arr[loop], bpid); 832 } 833 } 834 bufs++; 835 } 836 loop = 0; 837 while (loop < frames_to_send) { 838 loop += qbman_swp_enqueue_multiple(swp, &eqdesc, 839 &fd_arr[loop], &flags[loop], 840 frames_to_send - loop); 841 } 842 843 num_tx += frames_to_send; 844 nb_pkts -= frames_to_send; 845 } 846 dpaa2_q->tx_pkts += num_tx; 847 return num_tx; 848 849 send_n_return: 850 /* send any already prepared fd */ 851 if (loop) { 852 unsigned int i = 0; 853 854 while (i < loop) { 855 i += qbman_swp_enqueue_multiple(swp, &eqdesc, 856 &fd_arr[i], 857 &flags[loop], 858 loop - i); 859 } 860 num_tx += loop; 861 } 862 skip_tx: 863 dpaa2_q->tx_pkts += num_tx; 864 return num_tx; 865 } 866 867 /** 868 * Dummy DPDK callback for TX. 869 * 870 * This function is used to temporarily replace the real callback during 871 * unsafe control operations on the queue, or in case of error. 872 * 873 * @param dpdk_txq 874 * Generic pointer to TX queue structure. 875 * @param[in] pkts 876 * Packets to transmit. 877 * @param pkts_n 878 * Number of packets in array. 879 * 880 * @return 881 * Number of packets successfully transmitted (<= pkts_n). 882 */ 883 uint16_t 884 dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) 885 { 886 (void)queue; 887 (void)bufs; 888 (void)nb_pkts; 889 return 0; 890 } 891