/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \
} while (0)

static inline void __attribute__((hot))
dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
{
	DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_VLAN_1:
	case DPAA2_PKT_TYPE_VLAN_2:
		m->ol_flags |= PKT_RX_VLAN;
		break;
	/* More switch cases can be added */
	/* TODO: Add handling for checksum error check from FRC */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
	}
}

static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;

	DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);
	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
				     L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
				     L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
			     L3_IP_1_MORE_FRAGMENT |
			     L3_IP_N_FIRST_FRAGMENT |
			     L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}

static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	/* Check offloads first */
	if (BIT_ISSET_AT_POS(annotation->word3,
			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
		mbuf->ol_flags |= PKT_RX_VLAN;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(annotation);
}

static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_frc(first_seg,
				DPAA2_GET_FD_FRC_PARSE_SUM(fd));
	else
		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}

static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in the last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* Parse results for LX2 are in the FRC field of the FD.
	 * For other DPAA2 platforms, the parse results are placed after
	 * the private/SW annotation area.
	 */

	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}

static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	/* Set Scatter gather table and Scatter gather entries */
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Resetting the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}

static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}

static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
	       mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev *dev = dpaa2_q->dev;

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;

	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
					       DPAA2_DQRR_RING_SIZE : nb_pkts;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previously issued command has completed.
	 * The SWP also appears to be shared between the Ethernet driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until dq_storage is updated with
		 * a new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether the last pull command has expired and
		 * set the condition for loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		next_fd = qbman_result_DQ_fd(dq_storage + 1);
		/* Prefetch Annotation address for the parse results */
		rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
				+ DPAA2_FD_PTA_SIZE + 16));

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd);
		bufs[num_rx]->port = dev->data->port_id;

		if (dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* Issue a volatile dequeue command for the next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}

void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	qbman_swp_dqrr_consume(swp, dq);
}

void __attribute__((hot))
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	dqrr_index = qbman_get_dqrr_idx(dq);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit frames to the given device and VQ */
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev *dev = dpaa2_q->dev;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);
	/* Clear the unused FD fields before sending */
	while (nb_pkts) {
		/* Check if the queue is congested */
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts >> 3) ?
			MAX_TX_RING_SLOTS : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*bufs)->seqn) {
				uint8_t dqrr_index = (*bufs)->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			fd_arr[loop].simple.frc = 0;
			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
			DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL);
			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT) ||
						(dev->data->dev_conf.txmode.offloads
						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
				(dev->data->dev_conf.txmode.offloads
				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[i],
							&flags[i],
							loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param[in] bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	(void)queue;
	(void)bufs;
	(void)nb_pkts;
	return 0;
}