1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 11 #include <rte_mbuf.h> 12 #include <rte_ethdev_driver.h> 13 #include <rte_malloc.h> 14 #include <rte_memcpy.h> 15 #include <rte_string_fns.h> 16 #include <rte_dev.h> 17 18 #include <rte_fslmc.h> 19 #include <fslmc_logs.h> 20 #include <fslmc_vfio.h> 21 #include <dpaa2_hw_pvt.h> 22 #include <dpaa2_hw_dpio.h> 23 #include <dpaa2_hw_mempool.h> 24 #include <dpaa2_eventdev.h> 25 26 #include "dpaa2_ethdev.h" 27 #include "base/dpaa2_hw_dpni_annot.h" 28 29 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \ 30 DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \ 31 DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \ 32 DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \ 33 DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \ 34 DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \ 35 } while (0) 36 37 static inline void __attribute__((hot)) 38 dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc) 39 { 40 PMD_RX_LOG(DEBUG, "frc = 0x%x ", frc); 41 42 m->packet_type = RTE_PTYPE_UNKNOWN; 43 switch (frc) { 44 case DPAA2_PKT_TYPE_ETHER: 45 m->packet_type = RTE_PTYPE_L2_ETHER; 46 break; 47 case DPAA2_PKT_TYPE_IPV4: 48 m->packet_type = RTE_PTYPE_L2_ETHER | 49 RTE_PTYPE_L3_IPV4; 50 break; 51 case DPAA2_PKT_TYPE_IPV6: 52 m->packet_type = RTE_PTYPE_L2_ETHER | 53 RTE_PTYPE_L3_IPV6; 54 break; 55 case DPAA2_PKT_TYPE_IPV4_EXT: 56 m->packet_type = RTE_PTYPE_L2_ETHER | 57 RTE_PTYPE_L3_IPV4_EXT; 58 break; 59 case DPAA2_PKT_TYPE_IPV6_EXT: 60 m->packet_type = RTE_PTYPE_L2_ETHER | 61 RTE_PTYPE_L3_IPV6_EXT; 62 break; 63 case DPAA2_PKT_TYPE_IPV4_TCP: 64 m->packet_type = RTE_PTYPE_L2_ETHER | 65 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP; 66 break; 67 case DPAA2_PKT_TYPE_IPV6_TCP: 68 m->packet_type = RTE_PTYPE_L2_ETHER | 69 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP; 70 break; 71 case DPAA2_PKT_TYPE_IPV4_UDP: 72 m->packet_type = RTE_PTYPE_L2_ETHER | 73 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP; 74 break; 75 case DPAA2_PKT_TYPE_IPV6_UDP: 76 m->packet_type = RTE_PTYPE_L2_ETHER | 77 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; 78 break; 79 case DPAA2_PKT_TYPE_IPV4_SCTP: 80 m->packet_type = RTE_PTYPE_L2_ETHER | 81 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP; 82 break; 83 case DPAA2_PKT_TYPE_IPV6_SCTP: 84 m->packet_type = RTE_PTYPE_L2_ETHER | 85 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP; 86 break; 87 case DPAA2_PKT_TYPE_IPV4_ICMP: 88 m->packet_type = RTE_PTYPE_L2_ETHER | 89 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP; 90 break; 91 case DPAA2_PKT_TYPE_IPV6_ICMP: 92 m->packet_type = RTE_PTYPE_L2_ETHER | 93 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP; 94 break; 95 case DPAA2_PKT_TYPE_VLAN_1: 96 case DPAA2_PKT_TYPE_VLAN_2: 97 m->ol_flags |= PKT_RX_VLAN; 98 break; 99 /* More switch cases can be added */ 100 /* TODO: Add handling for checksum error check from FRC */ 101 default: 102 m->packet_type = RTE_PTYPE_UNKNOWN; 103 } 104 } 105 106 static inline uint32_t __attribute__((hot)) 107 dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr) 108 { 109 uint32_t pkt_type = RTE_PTYPE_UNKNOWN; 110 struct dpaa2_annot_hdr *annotation = 111 (struct dpaa2_annot_hdr *)hw_annot_addr; 112 113 PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4); 114 if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) { 115 pkt_type = RTE_PTYPE_L2_ETHER_ARP; 116 goto parse_done; 117 } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) { 118 pkt_type = RTE_PTYPE_L2_ETHER; 119 } else { 120 goto parse_done; 121 } 122 123 if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT | 124 L3_IPV4_N_PRESENT)) { 125 pkt_type |= RTE_PTYPE_L3_IPV4; 126 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT | 127 L3_IP_N_OPT_PRESENT)) 128 pkt_type |= RTE_PTYPE_L3_IPV4_EXT; 129 130 } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT | 131 L3_IPV6_N_PRESENT)) { 132 pkt_type |= RTE_PTYPE_L3_IPV6; 133 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT | 134 L3_IP_N_OPT_PRESENT)) 135 pkt_type |= RTE_PTYPE_L3_IPV6_EXT; 136 } else { 137 goto parse_done; 138 } 139 140 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT | 141 L3_IP_1_MORE_FRAGMENT | 142 L3_IP_N_FIRST_FRAGMENT | 143 L3_IP_N_MORE_FRAGMENT)) { 144 pkt_type |= RTE_PTYPE_L4_FRAG; 145 goto parse_done; 146 } else { 147 pkt_type |= RTE_PTYPE_L4_NONFRAG; 148 } 149 150 if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT)) 151 pkt_type |= RTE_PTYPE_L4_UDP; 152 153 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT)) 154 pkt_type |= RTE_PTYPE_L4_TCP; 155 156 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT)) 157 pkt_type |= RTE_PTYPE_L4_SCTP; 158 159 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT)) 160 pkt_type |= RTE_PTYPE_L4_ICMP; 161 162 else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL)) 163 pkt_type |= RTE_PTYPE_UNKNOWN; 164 165 parse_done: 166 return pkt_type; 167 } 168 169 static inline uint32_t __attribute__((hot)) 170 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr) 171 { 172 struct dpaa2_annot_hdr *annotation = 173 (struct dpaa2_annot_hdr *)hw_annot_addr; 174 175 PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4); 176 177 /* Check offloads first */ 178 if (BIT_ISSET_AT_POS(annotation->word3, 179 L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT)) 180 mbuf->ol_flags |= PKT_RX_VLAN; 181 182 if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) 183 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; 184 else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) 185 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; 186 187 /* Return some common types from parse processing */ 188 switch (annotation->word4) { 189 case DPAA2_L3_IPv4: 190 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; 191 case DPAA2_L3_IPv6: 192 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; 193 case DPAA2_L3_IPv4_TCP: 194 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | 195 RTE_PTYPE_L4_TCP; 196 case DPAA2_L3_IPv4_UDP: 197 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | 198 RTE_PTYPE_L4_UDP; 199 case DPAA2_L3_IPv6_TCP: 200 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | 201 RTE_PTYPE_L4_TCP; 202 case DPAA2_L3_IPv6_UDP: 203 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | 204 RTE_PTYPE_L4_UDP; 205 default: 206 PMD_RX_LOG(DEBUG, "Slow parse the parsing results\n"); 207 break; 208 } 209 210 return dpaa2_dev_rx_parse_slow(hw_annot_addr); 211 } 212 213 static inline struct rte_mbuf *__attribute__((hot)) 214 eth_sg_fd_to_mbuf(const struct qbman_fd *fd) 215 { 216 struct qbman_sge *sgt, *sge; 217 dma_addr_t sg_addr; 218 int i = 0; 219 uint64_t fd_addr; 220 struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp; 221 222 fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 223 224 /* Get Scatter gather table address */ 225 sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd)); 226 227 sge = &sgt[i++]; 228 sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge)); 229 230 /* First Scatter gather entry */ 231 first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, 232 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 233 /* Prepare all the metadata for first segment */ 234 first_seg->buf_addr = (uint8_t *)sg_addr; 235 first_seg->ol_flags = 0; 236 first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge); 237 first_seg->data_len = sge->length & 0x1FFFF; 238 first_seg->pkt_len = DPAA2_GET_FD_LEN(fd); 239 first_seg->nb_segs = 1; 240 first_seg->next = NULL; 241 if (dpaa2_svr_family == SVR_LX2160A) 242 dpaa2_dev_rx_parse_frc(first_seg, 243 DPAA2_GET_FD_FRC_PARSE_SUM(fd)); 244 else 245 first_seg->packet_type = dpaa2_dev_rx_parse(first_seg, 246 (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) 247 + DPAA2_FD_PTA_SIZE); 248 249 rte_mbuf_refcnt_set(first_seg, 1); 250 cur_seg = first_seg; 251 while (!DPAA2_SG_IS_FINAL(sge)) { 252 sge = &sgt[i++]; 253 sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR( 254 DPAA2_GET_FLE_ADDR(sge)); 255 next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, 256 rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size); 257 next_seg->buf_addr = (uint8_t *)sg_addr; 258 next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge); 259 next_seg->data_len = sge->length & 0x1FFFF; 260 first_seg->nb_segs += 1; 261 rte_mbuf_refcnt_set(next_seg, 1); 262 cur_seg->next = next_seg; 263 next_seg->next = NULL; 264 cur_seg = next_seg; 265 } 266 temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr, 267 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 268 rte_mbuf_refcnt_set(temp, 1); 269 rte_pktmbuf_free_seg(temp); 270 271 return (void *)first_seg; 272 } 273 274 static inline struct rte_mbuf *__attribute__((hot)) 275 eth_fd_to_mbuf(const struct qbman_fd *fd) 276 { 277 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 278 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 279 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 280 281 /* need to repopulated some of the fields, 282 * as they may have changed in last transmission 283 */ 284 mbuf->nb_segs = 1; 285 mbuf->ol_flags = 0; 286 mbuf->data_off = DPAA2_GET_FD_OFFSET(fd); 287 mbuf->data_len = DPAA2_GET_FD_LEN(fd); 288 mbuf->pkt_len = mbuf->data_len; 289 mbuf->next = NULL; 290 rte_mbuf_refcnt_set(mbuf, 1); 291 292 /* Parse the packet */ 293 /* parse results for LX2 are there in FRC field of FD. 294 * For other DPAA2 platforms , parse results are after 295 * the private - sw annotation area 296 */ 297 298 if (dpaa2_svr_family == SVR_LX2160A) 299 dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd)); 300 else 301 mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, 302 (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) 303 + DPAA2_FD_PTA_SIZE); 304 305 PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d," 306 "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n", 307 mbuf, mbuf->buf_addr, mbuf->data_off, 308 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), 309 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 310 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); 311 312 return mbuf; 313 } 314 315 static int __attribute__ ((noinline)) __attribute__((hot)) 316 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, 317 struct qbman_fd *fd, uint16_t bpid) 318 { 319 struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp; 320 struct qbman_sge *sgt, *sge = NULL; 321 int i; 322 323 if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) { 324 int ret = rte_vlan_insert(&mbuf); 325 if (ret) 326 return ret; 327 } 328 329 temp = rte_pktmbuf_alloc(mbuf->pool); 330 if (temp == NULL) { 331 PMD_TX_LOG(ERR, "No memory to allocate S/G table"); 332 return -ENOMEM; 333 } 334 335 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp)); 336 DPAA2_SET_FD_LEN(fd, mbuf->pkt_len); 337 DPAA2_SET_FD_OFFSET(fd, temp->data_off); 338 DPAA2_SET_FD_BPID(fd, bpid); 339 DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL); 340 DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg); 341 /*Set Scatter gather table and Scatter gather entries*/ 342 sgt = (struct qbman_sge *)( 343 (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) 344 + DPAA2_GET_FD_OFFSET(fd)); 345 346 for (i = 0; i < mbuf->nb_segs; i++) { 347 sge = &sgt[i]; 348 /*Resetting the buffer pool id and offset field*/ 349 sge->fin_bpid_offset = 0; 350 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg)); 351 DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off); 352 sge->length = cur_seg->data_len; 353 if (RTE_MBUF_DIRECT(cur_seg)) { 354 if (rte_mbuf_refcnt_read(cur_seg) > 1) { 355 /* If refcnt > 1, invalid bpid is set to ensure 356 * buffer is not freed by HW 357 */ 358 DPAA2_SET_FLE_IVP(sge); 359 rte_mbuf_refcnt_update(cur_seg, -1); 360 } else 361 DPAA2_SET_FLE_BPID(sge, 362 mempool_to_bpid(cur_seg->pool)); 363 cur_seg = cur_seg->next; 364 } else { 365 /* Get owner MBUF from indirect buffer */ 366 mi = rte_mbuf_from_indirect(cur_seg); 367 if (rte_mbuf_refcnt_read(mi) > 1) { 368 /* If refcnt > 1, invalid bpid is set to ensure 369 * owner buffer is not freed by HW 370 */ 371 DPAA2_SET_FLE_IVP(sge); 372 } else { 373 DPAA2_SET_FLE_BPID(sge, 374 mempool_to_bpid(mi->pool)); 375 rte_mbuf_refcnt_update(mi, 1); 376 } 377 prev_seg = cur_seg; 378 cur_seg = cur_seg->next; 379 prev_seg->next = NULL; 380 rte_pktmbuf_free(prev_seg); 381 } 382 } 383 DPAA2_SG_SET_FINAL(sge, true); 384 return 0; 385 } 386 387 static void 388 eth_mbuf_to_fd(struct rte_mbuf *mbuf, 389 struct qbman_fd *fd, uint16_t bpid) __attribute__((unused)); 390 391 static void __attribute__ ((noinline)) __attribute__((hot)) 392 eth_mbuf_to_fd(struct rte_mbuf *mbuf, 393 struct qbman_fd *fd, uint16_t bpid) 394 { 395 if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) { 396 if (rte_vlan_insert(&mbuf)) { 397 rte_pktmbuf_free(mbuf); 398 return; 399 } 400 } 401 402 DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid); 403 404 PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d," 405 "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n", 406 mbuf, mbuf->buf_addr, mbuf->data_off, 407 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), 408 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 409 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); 410 if (RTE_MBUF_DIRECT(mbuf)) { 411 if (rte_mbuf_refcnt_read(mbuf) > 1) { 412 DPAA2_SET_FD_IVP(fd); 413 rte_mbuf_refcnt_update(mbuf, -1); 414 } 415 } else { 416 struct rte_mbuf *mi; 417 418 mi = rte_mbuf_from_indirect(mbuf); 419 if (rte_mbuf_refcnt_read(mi) > 1) 420 DPAA2_SET_FD_IVP(fd); 421 else 422 rte_mbuf_refcnt_update(mi, 1); 423 rte_pktmbuf_free(mbuf); 424 } 425 } 426 427 static inline int __attribute__((hot)) 428 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, 429 struct qbman_fd *fd, uint16_t bpid) 430 { 431 struct rte_mbuf *m; 432 void *mb = NULL; 433 434 if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) { 435 int ret = rte_vlan_insert(&mbuf); 436 if (ret) 437 return ret; 438 } 439 440 if (rte_dpaa2_mbuf_alloc_bulk( 441 rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) { 442 PMD_TX_LOG(WARNING, "Unable to allocated DPAA2 buffer"); 443 return -1; 444 } 445 m = (struct rte_mbuf *)mb; 446 memcpy((char *)m->buf_addr + mbuf->data_off, 447 (void *)((char *)mbuf->buf_addr + mbuf->data_off), 448 mbuf->pkt_len); 449 450 /* Copy required fields */ 451 m->data_off = mbuf->data_off; 452 m->ol_flags = mbuf->ol_flags; 453 m->packet_type = mbuf->packet_type; 454 m->tx_offload = mbuf->tx_offload; 455 456 DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid); 457 458 PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p", 459 (void *)mbuf, mbuf->buf_addr); 460 461 PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d", 462 DPAA2_GET_FD_ADDR(fd), 463 DPAA2_GET_FD_BPID(fd), 464 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 465 DPAA2_GET_FD_OFFSET(fd), 466 DPAA2_GET_FD_LEN(fd)); 467 468 return 0; 469 } 470 471 uint16_t 472 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) 473 { 474 /* Function receive frames for a given device and VQ*/ 475 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; 476 struct qbman_result *dq_storage, *dq_storage1 = 0; 477 uint32_t fqid = dpaa2_q->fqid; 478 int ret, num_rx = 0, next_pull = 0, num_pulled, num_to_pull; 479 uint8_t pending, is_repeat, status; 480 struct qbman_swp *swp; 481 const struct qbman_fd *fd, *next_fd; 482 struct qbman_pull_desc pulldesc; 483 struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; 484 struct rte_eth_dev *dev = dpaa2_q->dev; 485 486 if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 487 ret = dpaa2_affine_qbman_swp(); 488 if (ret) { 489 RTE_LOG(ERR, PMD, "Failure in affining portal\n"); 490 return 0; 491 } 492 } 493 swp = DPAA2_PER_LCORE_PORTAL; 494 495 /* if the original request for this q was from another portal */ 496 if (unlikely(DPAA2_PER_LCORE_DPIO->index != 497 q_storage->active_dpio_id)) { 498 if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) { 499 while (!qbman_check_command_complete(get_swp_active_dqs 500 (DPAA2_PER_LCORE_DPIO->index))) 501 ; 502 clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index); 503 } 504 q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index; 505 } 506 507 if (unlikely(!q_storage->active_dqs)) { 508 q_storage->toggle = 0; 509 dq_storage = q_storage->dq_storage[q_storage->toggle]; 510 q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ? 511 DPAA2_DQRR_RING_SIZE : nb_pkts; 512 qbman_pull_desc_clear(&pulldesc); 513 qbman_pull_desc_set_numframes(&pulldesc, 514 q_storage->last_num_pkts); 515 qbman_pull_desc_set_fq(&pulldesc, fqid); 516 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 517 (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); 518 while (1) { 519 if (qbman_swp_pull(swp, &pulldesc)) { 520 PMD_RX_LOG(WARNING, 521 "VDQ command not issued.QBMAN busy\n"); 522 /* Portal was busy, try again */ 523 continue; 524 } 525 break; 526 } 527 q_storage->active_dqs = dq_storage; 528 set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage); 529 } 530 531 /* pkt to pull in current pull request */ 532 num_to_pull = q_storage->last_num_pkts; 533 534 /* Number of packet requested is more than current pull request */ 535 if (nb_pkts > num_to_pull) 536 next_pull = nb_pkts - num_to_pull; 537 538 dq_storage = q_storage->active_dqs; 539 /* Check if the previous issued command is completed. 540 * Also seems like the SWP is shared between the Ethernet Driver 541 * and the SEC driver. 542 */ 543 while (!qbman_check_command_complete(dq_storage)) 544 ; 545 if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id)) 546 clear_swp_active_dqs(q_storage->active_dpio_id); 547 548 repeat: 549 is_repeat = 0; 550 551 /* issue the deq command one more time to get another set of packets */ 552 if (next_pull) { 553 q_storage->toggle ^= 1; 554 dq_storage1 = q_storage->dq_storage[q_storage->toggle]; 555 qbman_pull_desc_clear(&pulldesc); 556 557 if (next_pull > DPAA2_DQRR_RING_SIZE) { 558 qbman_pull_desc_set_numframes(&pulldesc, 559 DPAA2_DQRR_RING_SIZE); 560 next_pull = next_pull - DPAA2_DQRR_RING_SIZE; 561 q_storage->last_num_pkts = DPAA2_DQRR_RING_SIZE; 562 } else { 563 qbman_pull_desc_set_numframes(&pulldesc, next_pull); 564 q_storage->last_num_pkts = next_pull; 565 next_pull = 0; 566 } 567 qbman_pull_desc_set_fq(&pulldesc, fqid); 568 qbman_pull_desc_set_storage(&pulldesc, dq_storage1, 569 (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); 570 while (1) { 571 if (qbman_swp_pull(swp, &pulldesc)) { 572 PMD_RX_LOG(WARNING, 573 "VDQ command not issued.QBMAN busy\n"); 574 /* Portal was busy, try again */ 575 continue; 576 } 577 break; 578 } 579 is_repeat = 1; 580 q_storage->active_dqs = dq_storage1; 581 set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1); 582 } 583 584 rte_prefetch0((void *)((uint64_t)(dq_storage + 1))); 585 586 num_pulled = 0; 587 pending = 1; 588 589 do { 590 /* Loop until the dq_storage is updated with 591 * new token by QBMAN 592 */ 593 while (!qbman_check_new_result(dq_storage)) 594 ; 595 rte_prefetch0((void *)((uint64_t)(dq_storage + 2))); 596 /* Check whether Last Pull command is Expired and 597 * setting Condition for Loop termination 598 */ 599 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 600 pending = 0; 601 /* Check for valid frame. */ 602 status = qbman_result_DQ_flags(dq_storage); 603 if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) 604 continue; 605 } 606 fd = qbman_result_DQ_fd(dq_storage); 607 608 next_fd = qbman_result_DQ_fd(dq_storage + 1); 609 /* Prefetch Annotation address for the parse results */ 610 rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(next_fd) 611 + DPAA2_FD_PTA_SIZE + 16)); 612 613 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg)) 614 bufs[num_rx] = eth_sg_fd_to_mbuf(fd); 615 else 616 bufs[num_rx] = eth_fd_to_mbuf(fd); 617 bufs[num_rx]->port = dev->data->port_id; 618 619 if (dev->data->dev_conf.rxmode.hw_vlan_strip) 620 rte_vlan_strip(bufs[num_rx]); 621 622 dq_storage++; 623 num_rx++; 624 num_pulled++; 625 } while (pending); 626 627 /* Another VDQ request pending and this request returned full */ 628 if (is_repeat) { 629 /* all packets pulled from this pull request */ 630 if (num_pulled == num_to_pull) { 631 /* pkt to pull in current pull request */ 632 num_to_pull = q_storage->last_num_pkts; 633 634 dq_storage = dq_storage1; 635 636 while (!qbman_check_command_complete(dq_storage)) 637 ; 638 goto repeat; 639 } else { 640 /* if this request did not returned all pkts */ 641 goto next_time; 642 } 643 } 644 645 q_storage->toggle ^= 1; 646 dq_storage = q_storage->dq_storage[q_storage->toggle]; 647 q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ? 648 DPAA2_DQRR_RING_SIZE : nb_pkts; 649 qbman_pull_desc_clear(&pulldesc); 650 qbman_pull_desc_set_numframes(&pulldesc, q_storage->last_num_pkts); 651 qbman_pull_desc_set_fq(&pulldesc, fqid); 652 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 653 (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); 654 /* issue a volatile dequeue command for next pull */ 655 while (1) { 656 if (qbman_swp_pull(swp, &pulldesc)) { 657 PMD_RX_LOG(WARNING, "VDQ command is not issued." 658 "QBMAN is busy\n"); 659 continue; 660 } 661 break; 662 } 663 q_storage->active_dqs = dq_storage; 664 set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage); 665 666 next_time: 667 dpaa2_q->rx_pkts += num_rx; 668 669 return num_rx; 670 } 671 672 void __attribute__((hot)) 673 dpaa2_dev_process_parallel_event(struct qbman_swp *swp, 674 const struct qbman_fd *fd, 675 const struct qbman_result *dq, 676 struct dpaa2_queue *rxq, 677 struct rte_event *ev) 678 { 679 ev->mbuf = eth_fd_to_mbuf(fd); 680 681 ev->flow_id = rxq->ev.flow_id; 682 ev->sub_event_type = rxq->ev.sub_event_type; 683 ev->event_type = RTE_EVENT_TYPE_ETHDEV; 684 ev->op = RTE_EVENT_OP_NEW; 685 ev->sched_type = rxq->ev.sched_type; 686 ev->queue_id = rxq->ev.queue_id; 687 ev->priority = rxq->ev.priority; 688 689 qbman_swp_dqrr_consume(swp, dq); 690 } 691 692 void dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), 693 const struct qbman_fd *fd, 694 const struct qbman_result *dq, 695 struct dpaa2_queue *rxq, 696 struct rte_event *ev) 697 { 698 uint8_t dqrr_index = qbman_get_dqrr_idx(dq); 699 700 ev->mbuf = eth_fd_to_mbuf(fd); 701 702 ev->flow_id = rxq->ev.flow_id; 703 ev->sub_event_type = rxq->ev.sub_event_type; 704 ev->event_type = RTE_EVENT_TYPE_ETHDEV; 705 ev->op = RTE_EVENT_OP_NEW; 706 ev->sched_type = rxq->ev.sched_type; 707 ev->queue_id = rxq->ev.queue_id; 708 ev->priority = rxq->ev.priority; 709 710 ev->mbuf->seqn = dqrr_index + 1; 711 DPAA2_PER_LCORE_DQRR_SIZE++; 712 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 713 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf; 714 } 715 716 /* 717 * Callback to handle sending packets through WRIOP based interface 718 */ 719 uint16_t 720 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) 721 { 722 /* Function to transmit the frames to given device and VQ*/ 723 uint32_t loop, retry_count; 724 int32_t ret; 725 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 726 struct rte_mbuf *mi; 727 uint32_t frames_to_send; 728 struct rte_mempool *mp; 729 struct qbman_eq_desc eqdesc; 730 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; 731 struct qbman_swp *swp; 732 uint16_t num_tx = 0; 733 uint16_t bpid; 734 struct rte_eth_dev *dev = dpaa2_q->dev; 735 struct dpaa2_dev_priv *priv = dev->data->dev_private; 736 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 737 738 if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 739 ret = dpaa2_affine_qbman_swp(); 740 if (ret) { 741 RTE_LOG(ERR, PMD, "Failure in affining portal\n"); 742 return 0; 743 } 744 } 745 swp = DPAA2_PER_LCORE_PORTAL; 746 747 PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid); 748 749 /*Prepare enqueue descriptor*/ 750 qbman_eq_desc_clear(&eqdesc); 751 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 752 qbman_eq_desc_set_response(&eqdesc, 0, 0); 753 qbman_eq_desc_set_qd(&eqdesc, priv->qdid, 754 dpaa2_q->flow_id, dpaa2_q->tc_index); 755 /*Clear the unused FD fields before sending*/ 756 while (nb_pkts) { 757 /*Check if the queue is congested*/ 758 retry_count = 0; 759 while (qbman_result_SCN_state(dpaa2_q->cscn)) { 760 retry_count++; 761 /* Retry for some time before giving up */ 762 if (retry_count > CONG_RETRY_COUNT) 763 goto skip_tx; 764 } 765 766 frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts; 767 768 for (loop = 0; loop < frames_to_send; loop++) { 769 if ((*bufs)->seqn) { 770 uint8_t dqrr_index = (*bufs)->seqn - 1; 771 772 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | 773 dqrr_index; 774 DPAA2_PER_LCORE_DQRR_SIZE--; 775 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); 776 (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN; 777 } 778 779 fd_arr[loop].simple.bpid_offset = 0; 780 fd_arr[loop].simple.frc = 0; 781 DPAA2_RESET_FD_CTRL((&fd_arr[loop])); 782 DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL); 783 if (likely(RTE_MBUF_DIRECT(*bufs))) { 784 mp = (*bufs)->pool; 785 /* Check the basic scenario and set 786 * the FD appropriately here itself. 787 */ 788 if (likely(mp && mp->ops_index == 789 priv->bp_list->dpaa2_ops_index && 790 (*bufs)->nb_segs == 1 && 791 rte_mbuf_refcnt_read((*bufs)) == 1)) { 792 if (unlikely((*bufs)->ol_flags 793 & PKT_TX_VLAN_PKT)) { 794 ret = rte_vlan_insert(bufs); 795 if (ret) 796 goto send_n_return; 797 } 798 DPAA2_MBUF_TO_CONTIG_FD((*bufs), 799 &fd_arr[loop], mempool_to_bpid(mp)); 800 bufs++; 801 continue; 802 } 803 } else { 804 mi = rte_mbuf_from_indirect(*bufs); 805 mp = mi->pool; 806 } 807 /* Not a hw_pkt pool allocated frame */ 808 if (unlikely(!mp || !priv->bp_list)) { 809 PMD_TX_LOG(ERR, "err: no bpool attached"); 810 goto send_n_return; 811 } 812 813 if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { 814 PMD_TX_LOG(ERR, "non hw offload bufffer "); 815 /* alloc should be from the default buffer pool 816 * attached to this interface 817 */ 818 bpid = priv->bp_list->buf_pool.bpid; 819 820 if (unlikely((*bufs)->nb_segs > 1)) { 821 PMD_TX_LOG(ERR, "S/G support not added" 822 " for non hw offload buffer"); 823 goto send_n_return; 824 } 825 if (eth_copy_mbuf_to_fd(*bufs, 826 &fd_arr[loop], bpid)) { 827 goto send_n_return; 828 } 829 /* free the original packet */ 830 rte_pktmbuf_free(*bufs); 831 } else { 832 bpid = mempool_to_bpid(mp); 833 if (unlikely((*bufs)->nb_segs > 1)) { 834 if (eth_mbuf_to_sg_fd(*bufs, 835 &fd_arr[loop], bpid)) 836 goto send_n_return; 837 } else { 838 eth_mbuf_to_fd(*bufs, 839 &fd_arr[loop], bpid); 840 } 841 } 842 bufs++; 843 } 844 loop = 0; 845 while (loop < frames_to_send) { 846 loop += qbman_swp_enqueue_multiple(swp, &eqdesc, 847 &fd_arr[loop], &flags[loop], 848 frames_to_send - loop); 849 } 850 851 num_tx += frames_to_send; 852 nb_pkts -= frames_to_send; 853 } 854 dpaa2_q->tx_pkts += num_tx; 855 return num_tx; 856 857 send_n_return: 858 /* send any already prepared fd */ 859 if (loop) { 860 unsigned int i = 0; 861 862 while (i < loop) { 863 i += qbman_swp_enqueue_multiple(swp, &eqdesc, 864 &fd_arr[i], 865 &flags[loop], 866 loop - i); 867 } 868 num_tx += loop; 869 } 870 skip_tx: 871 dpaa2_q->tx_pkts += num_tx; 872 return num_tx; 873 } 874 875 /** 876 * Dummy DPDK callback for TX. 877 * 878 * This function is used to temporarily replace the real callback during 879 * unsafe control operations on the queue, or in case of error. 880 * 881 * @param dpdk_txq 882 * Generic pointer to TX queue structure. 883 * @param[in] pkts 884 * Packets to transmit. 885 * @param pkts_n 886 * Number of packets in array. 887 * 888 * @return 889 * Number of packets successfully transmitted (<= pkts_n). 890 */ 891 uint16_t 892 dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) 893 { 894 (void)queue; 895 (void)bufs; 896 (void)nb_pkts; 897 return 0; 898 } 899