/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017,2019 NXP
 *
 */

/* System headers */
#include <inttypes.h>
#include <unistd.h>
#include <stdio.h>
#include <limits.h>
#include <sched.h>
#include <pthread.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_net.h>
#include <rte_eventdev.h>

#include "dpaa_ethdev.h"
#include "dpaa_rxtx.h"
#include <rte_dpaa_bus.h>
#include <dpaa_mempool.h>

#include <qman.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <dpaa_of.h>
#include <netcfg.h>

#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
	do { \
		(_fd)->cmd = 0; \
		(_fd)->opaque_addr = 0; \
		(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
		(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
		(_fd)->opaque |= (_mbuf)->pkt_len; \
		(_fd)->addr = (_mbuf)->buf_iova; \
		(_fd)->bpid = _bpid; \
	} while (0)
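
/*
 * Note: DPAA_MBUF_TO_CONTIG_FD builds a contiguous-format frame descriptor
 * directly from a single-segment mbuf. The 'opaque' word packs the FD format,
 * the data offset (taken from mbuf->data_off) and the frame length
 * (mbuf->pkt_len); 'addr' carries the buffer IOVA and 'bpid' selects the
 * buffer pool the hardware may release the buffer to. A bpid of 0xff (an
 * invalid pool id) is used elsewhere in this file to tell the hardware not to
 * free the buffer.
 */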

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
#define DISPLAY_PRINT printf
static void dpaa_display_frame_info(const struct qm_fd *fd,
			uint32_t fqid, bool rx)
{
	int ii;
	char *ptr;
	struct annotations_t *annot = rte_dpaa_mem_ptov(fd->addr);
	uint8_t format;

	if (!fd->status) {
		/* Do not display packets without errors. */
		return;
	}

	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
				DPAA_FD_FORMAT_SHIFT;

	DISPLAY_PRINT("fqid %d bpid %d addr 0x%lx, format %d\r\n",
		      fqid, fd->bpid, (unsigned long)fd->addr, fd->format);
	DISPLAY_PRINT("off %d, len %d stat 0x%x\r\n",
		      fd->offset, fd->length20, fd->status);
	if (rx) {
		ptr = (char *)&annot->parse;
		DISPLAY_PRINT("RX parser result:\r\n");
		for (ii = 0; ii < (int)sizeof(struct dpaa_eth_parse_results_t);
			ii++) {
			DISPLAY_PRINT("%02x ", ptr[ii]);
			if (((ii + 1) % 16) == 0)
				DISPLAY_PRINT("\n");
		}
		DISPLAY_PRINT("\n");
	}

	if (unlikely(format == qm_fd_sg)) {
		/* TBD: S/G display: to be implemented */
		return;
	}

	DISPLAY_PRINT("Frame payload:\r\n");
	ptr = (char *)annot;
	ptr += fd->offset;
	for (ii = 0; ii < fd->length20; ii++) {
		DISPLAY_PRINT("%02x ", ptr[ii]);
		if (((ii + 1) % 16) == 0)
			printf("\n");
	}
	DISPLAY_PRINT("\n");
}
#else
#define dpaa_display_frame_info(a, b, c)
#endif

static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
				     uint64_t prs __rte_unused)
{
	DPAA_DP_LOG(DEBUG, "Slow parsing");
	/* TBD:XXX: to be implemented */
}

static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
{
	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
	uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;

	DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);

	switch (prs) {
	case DPAA_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA_PKT_TYPE_IPV4_FRAG:
	case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV6_FRAG:
	case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA_PKT_TYPE_NONE:
		m->packet_type = 0;
		break;
	/* More switch cases can be added */
	default:
		dpaa_slow_parsing(m, prs);
	}

	m->tx_offload = annot->parse.ip_off[0];
	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
					<< DPAA_PKT_L3_LEN_SHIFT;

	/* Set the hash values */
	m->hash.rss = (uint32_t)(annot->hash);
	/* All packets with a bad checksum are dropped by the interface (and a
	 * corresponding notification is issued to the RX error queues).
	 */
	m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD;

	/* Check if VLAN is present */
	if (prs & DPAA_PARSE_VLAN_MASK)
		m->ol_flags |= PKT_RX_VLAN;
	/* Packet received without stripping the VLAN */
}
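
/*
 * dpaa_checksum() below is the software fallback for TX checksum offload: it
 * is used when the mbuf does not have enough headroom in front of the data to
 * hold the parse results that the hardware checksum engine consumes (see
 * dpaa_unsegmented_checksum()).
 */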

static inline void dpaa_checksum(struct rte_mbuf *mbuf)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
	char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
	struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
	struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;

	DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);

	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
	    RTE_PTYPE_L3_IPV4_EXT)) {
		ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6) ||
		   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6_EXT))
		ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;

	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
		struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr +
					       mbuf->l3_len);
		tcp_hdr->cksum = 0;
		if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
			tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
							       tcp_hdr);
		else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
			tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
							       tcp_hdr);
	} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
		   RTE_PTYPE_L4_UDP) {
		struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr +
							     mbuf->l3_len);
		udp_hdr->dgram_cksum = 0;
		if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
			udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
								     udp_hdr);
		else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
			udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
								     udp_hdr);
	}
}

static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
					 struct qm_fd *fd, char *prs_buf)
{
	struct dpaa_eth_parse_results_t *prs;

	DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);

	prs = GET_TX_PRS(prs_buf);
	prs->l3r = 0;
	prs->l4r = 0;
	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
	    RTE_PTYPE_L3_IPV4_EXT))
		prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
	else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6) ||
		 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6_EXT))
		prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;

	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
		prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
	else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
		prs->l4r = DPAA_L4_PARSE_RESULT_UDP;

	prs->ip_off[0] = mbuf->l2_len;
	prs->l4_off = mbuf->l3_len + mbuf->l2_len;
	/* Enable L3 (and L4, if TCP or UDP) HW checksum */
	fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
}

static inline void
dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr)
{
	if (!mbuf->packet_type) {
		struct rte_net_hdr_lens hdr_lens;

		mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
				RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
				| RTE_PTYPE_L4_MASK);
		mbuf->l2_len = hdr_lens.l2_len;
		mbuf->l3_len = hdr_lens.l3_len;
	}
	if (mbuf->data_off < (DEFAULT_TX_ICEOF +
			      sizeof(struct dpaa_eth_parse_results_t))) {
		DPAA_DP_LOG(DEBUG, "Checksum offload error: not enough "
			    "headroom for checksum offload; "
			    "calculating checksum in software.");
		dpaa_checksum(mbuf);
	} else {
		dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
	}
}

struct rte_mbuf *
dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
	struct qm_sg_entry *sgt, *sg_temp;
	void *vaddr, *sg_vaddr;
	int i = 0;
	uint16_t fd_offset = fd->offset;

	vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
	if (!vaddr) {
		DPAA_PMD_ERR("unable to convert physical address");
		return NULL;
	}
	sgt = vaddr + fd_offset;
	sg_temp = &sgt[i++];
	hw_sg_to_cpu(sg_temp);
	temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
	sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp));

	first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
	first_seg->data_off = sg_temp->offset;
	first_seg->data_len = sg_temp->length;
	first_seg->pkt_len = sg_temp->length;
	rte_mbuf_refcnt_set(first_seg, 1);

	first_seg->port = ifid;
	first_seg->nb_segs = 1;
	first_seg->ol_flags = 0;
	prev_seg = first_seg;
	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		hw_sg_to_cpu(sg_temp);
		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
					     qm_sg_entry_get64(sg_temp));
		cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						      bp_info->meta_data_size);
		cur_seg->data_off = sg_temp->offset;
		cur_seg->data_len = sg_temp->length;
		first_seg->pkt_len += sg_temp->length;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(cur_seg, 1);
		prev_seg->next = cur_seg;
		if (sg_temp->final) {
			cur_seg->next = NULL;
			break;
		}
		prev_seg = cur_seg;
	}
	DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d",
			first_seg->pkt_len, first_seg->nb_segs);

	dpaa_eth_packet_info(first_seg, vaddr);
	rte_pktmbuf_free_seg(temp);

	return first_seg;
}

static inline struct rte_mbuf *
dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
	struct rte_mbuf *mbuf;
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	void *ptr;
	uint8_t format =
		(fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
	uint16_t offset;
	uint32_t length;

	if (unlikely(format == qm_fd_sg))
		return dpaa_eth_sg_to_mbuf(fd, ifid);

	offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
	length = fd->opaque & DPAA_FD_LENGTH_MASK;

	DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length);

	/* Ignoring case when format != qm_fd_contig */
	ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));

	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
	/* Prefetch the Parse results and packet data to L1 */
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));

	mbuf->data_off = offset;
	mbuf->data_len = length;
	mbuf->pkt_len = length;

	mbuf->port = ifid;
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);
	dpaa_eth_packet_info(mbuf, mbuf->buf_addr);

	return mbuf;
}
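
/*
 * Note on the pointer arithmetic used above and below: buffers handed to the
 * hardware come from a dpaa mempool in which the rte_mbuf header (and private
 * metadata) sits immediately in front of the data buffer the FD points to.
 * Subtracting bp_info->meta_data_size from the buffer's virtual address
 * therefore recovers the owning mbuf without any lookup.
 */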

uint16_t
dpaa_free_mbuf(const struct qm_fd *fd)
{
	struct rte_mbuf *mbuf;
	struct dpaa_bp_info *bp_info;
	uint8_t format;
	void *ptr;

	bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
	if (unlikely(format == qm_fd_sg)) {
		struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
		struct qm_sg_entry *sgt, *sg_temp;
		void *vaddr, *sg_vaddr;
		int i = 0;
		uint16_t fd_offset = fd->offset;

		vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
		if (!vaddr) {
			DPAA_PMD_ERR("unable to convert physical address");
			return -1;
		}
		sgt = vaddr + fd_offset;
		sg_temp = &sgt[i++];
		hw_sg_to_cpu(sg_temp);
		temp = (struct rte_mbuf *)
			((char *)vaddr - bp_info->meta_data_size);
		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
					     qm_sg_entry_get64(sg_temp));

		first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
		first_seg->nb_segs = 1;
		prev_seg = first_seg;
		while (i < DPAA_SGT_MAX_ENTRIES) {
			sg_temp = &sgt[i++];
			hw_sg_to_cpu(sg_temp);
			sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
						qm_sg_entry_get64(sg_temp));
			cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						      bp_info->meta_data_size);
			first_seg->nb_segs += 1;
			prev_seg->next = cur_seg;
			if (sg_temp->final) {
				cur_seg->next = NULL;
				break;
			}
			prev_seg = cur_seg;
		}

		rte_pktmbuf_free_seg(temp);
		rte_pktmbuf_free_seg(first_seg);
		return 0;
	}

	ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);

	rte_pktmbuf_free(mbuf);

	return 0;
}

/* Specific for LS1043 */
void
dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
		       void **bufs, int num_bufs)
{
	struct rte_mbuf *mbuf;
	struct dpaa_bp_info *bp_info;
	const struct qm_fd *fd;
	void *ptr;
	struct dpaa_if *dpaa_intf;
	uint16_t offset, i;
	uint32_t length;
	uint8_t format;

	bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
	ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
	bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);

	for (i = 0; i < num_bufs; i++) {
		if (i < num_bufs - 1) {
			bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid);
			ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd));
			rte_prefetch0((void *)((uint8_t *)ptr +
				      DEFAULT_RX_ICEOF));
			bufs[i + 1] = (struct rte_mbuf *)((char *)ptr -
					bp_info->meta_data_size);
		}

		fd = &dqrr[i]->fd;
		dpaa_intf = fq[0]->dpaa_intf;
		format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
				DPAA_FD_FORMAT_SHIFT;
		if (unlikely(format == qm_fd_sg)) {
			bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
			continue;
		}

		offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
				DPAA_FD_OFFSET_SHIFT;
		length = fd->opaque & DPAA_FD_LENGTH_MASK;

		mbuf = bufs[i];
		mbuf->data_off = offset;
		mbuf->data_len = length;
		mbuf->pkt_len = length;
		mbuf->port = dpaa_intf->ifid;

		mbuf->nb_segs = 1;
		mbuf->ol_flags = 0;
		mbuf->next = NULL;
		rte_mbuf_refcnt_set(mbuf, 1);
		dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
		dpaa_display_frame_info(fd, fq[0]->fqid, true);
	}
}
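
/*
 * In the LS1043-specific callback above, the annotation area of frame i + 1
 * is software-prefetched while frame i is being processed, hiding part of
 * the memory latency that hardware stashing would otherwise cover.
 */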

void
dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
	   void **bufs, int num_bufs)
{
	struct rte_mbuf *mbuf;
	const struct qm_fd *fd;
	struct dpaa_if *dpaa_intf;
	uint16_t offset, i;
	uint32_t length;
	uint8_t format;

	for (i = 0; i < num_bufs; i++) {
		fd = &dqrr[i]->fd;
		dpaa_intf = fq[0]->dpaa_intf;
		format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
				DPAA_FD_FORMAT_SHIFT;
		if (unlikely(format == qm_fd_sg)) {
			bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
			continue;
		}

		offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
				DPAA_FD_OFFSET_SHIFT;
		length = fd->opaque & DPAA_FD_LENGTH_MASK;

		mbuf = bufs[i];
		mbuf->data_off = offset;
		mbuf->data_len = length;
		mbuf->pkt_len = length;
		mbuf->port = dpaa_intf->ifid;

		mbuf->nb_segs = 1;
		mbuf->ol_flags = 0;
		mbuf->next = NULL;
		rte_mbuf_refcnt_set(mbuf, 1);
		dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
		dpaa_display_frame_info(fd, fq[0]->fqid, true);
	}
}

void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid);
	void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));

	/* On LS1046, annotation stashing is disabled because the L2 cache
	 * becomes a bottleneck in multicore scenarios on this platform.
	 * So we prefetch the annotation beforehand, so that it is available
	 * in cache when accessed.
	 */
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));

	*bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
}

static uint16_t
dpaa_eth_queue_portal_rx(struct qman_fq *fq,
			 struct rte_mbuf **bufs,
			 uint16_t nb_bufs)
{
	int ret;

	if (unlikely(!fq->qp_initialized)) {
		ret = rte_dpaa_portal_fq_init((void *)0, fq);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal %d", ret);
			return 0;
		}
		fq->qp_initialized = 1;
	}

	return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp);
}

enum qman_cb_dqrr_result
dpaa_rx_cb_parallel(void *event,
		    struct qman_portal *qm __always_unused,
		    struct qman_fq *fq,
		    const struct qm_dqrr_entry *dqrr,
		    void **bufs)
{
	u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
	struct rte_mbuf *mbuf;
	struct rte_event *ev = (struct rte_event *)event;

	mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
	ev->event_ptr = (void *)mbuf;
	ev->flow_id = fq->ev.flow_id;
	ev->sub_event_type = fq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = fq->ev.sched_type;
	ev->queue_id = fq->ev.queue_id;
	ev->priority = fq->ev.priority;
	ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN;
	mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
	*bufs = mbuf;

	return qman_cb_dqrr_consume;
}
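
/*
 * The atomic variant below differs from dpaa_rx_cb_parallel() in one point:
 * instead of letting QMan consume the DQRR entry immediately, it records the
 * entry's index in the per-lcore "held" mask and returns qman_cb_dqrr_defer,
 * so the entry is only released later via discrete consumption
 * acknowledgement (DCA) on the TX enqueue path in dpaa_eth_queue_tx(), which
 * is what preserves atomic-queue ordering.
 */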

enum qman_cb_dqrr_result
dpaa_rx_cb_atomic(void *event,
		  struct qman_portal *qm __always_unused,
		  struct qman_fq *fq,
		  const struct qm_dqrr_entry *dqrr,
		  void **bufs)
{
	u8 index;
	u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
	struct rte_mbuf *mbuf;
	struct rte_event *ev = (struct rte_event *)event;

	mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
	ev->event_ptr = (void *)mbuf;
	ev->flow_id = fq->ev.flow_id;
	ev->sub_event_type = fq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = fq->ev.sched_type;
	ev->queue_id = fq->ev.queue_id;
	ev->priority = fq->ev.priority;

	/* Save active dqrr entries */
	index = DQRR_PTR2IDX(dqrr);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
	ev->impl_opaque = index + 1;
	mbuf->seqn = (uint32_t)index + 1;
	*bufs = mbuf;

	return qman_cb_dqrr_defer;
}

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
static inline void dpaa_eth_err_queue(struct dpaa_if *dpaa_intf)
{
	struct rte_mbuf *mbuf;
	struct qman_fq *debug_fq;
	int ret, i;
	struct qm_dqrr_entry *dq;
	struct qm_fd *fd;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal");
			return;
		}
	}
	for (i = 0; i <= DPAA_DEBUG_FQ_TX_ERROR; i++) {
		debug_fq = &dpaa_intf->debug_queues[i];
		ret = qman_set_vdq(debug_fq, 4, QM_VDQCR_EXACT);
		if (ret)
			return;

		do {
			dq = qman_dequeue(debug_fq);
			if (!dq)
				continue;
			fd = &dq->fd;
			if (i == DPAA_DEBUG_FQ_RX_ERROR)
				DPAA_PMD_ERR("RX ERROR status: 0x%08x",
					fd->status);
			else
				DPAA_PMD_ERR("TX ERROR status: 0x%08x",
					fd->status);
			dpaa_display_frame_info(fd, debug_fq->fqid,
				i == DPAA_DEBUG_FQ_RX_ERROR);

			mbuf = dpaa_eth_fd_to_mbuf(fd, dpaa_intf->ifid);
			rte_pktmbuf_free(mbuf);
			qman_dqrr_consume(debug_fq, dq);
		} while (debug_fq->flags & QMAN_FQ_STATE_VDQCR);
	}
}
#endif

uint16_t dpaa_eth_queue_rx(void *q,
			   struct rte_mbuf **bufs,
			   uint16_t nb_bufs)
{
	struct qman_fq *fq = q;
	struct qm_dqrr_entry *dq;
	uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
	int num_rx_bufs, ret;
	uint32_t vdqcr_flags = 0;

	if (unlikely(rte_dpaa_bpid_info == NULL &&
				rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa_bpid_info = fq->bp_array;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	if (fq->fqid == ((struct dpaa_if *)fq->dpaa_intf)->rx_queues[0].fqid)
		dpaa_eth_err_queue((struct dpaa_if *)fq->dpaa_intf);
#endif

	if (likely(fq->is_static))
		return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	/* For requests of fewer than four buffers, we set the QM_VDQCR_EXACT
	 * flag so that exactly the requested number of buffers is provided.
	 * Otherwise the flag is not set, and the dequeue may return up to two
	 * more buffers than requested, so we request two fewer in that case.
	 */
	if (nb_bufs < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_bufs;
	} else {
		num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		dq = qman_dequeue(fq);
		if (!dq)
			continue;
		bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
		dpaa_display_frame_info(&dq->fd, fq->fqid, true);
		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return num_rx;
}
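
/*
 * For multi-segment transmits, the FD does not point at packet data directly;
 * it points at a scatter/gather table. The helper below allocates one extra
 * buffer from the pool to hold that table (and, when checksum offload is
 * requested, the parse results), then fills one qm_sg_entry per segment. The
 * bpid written into each entry decides whether the hardware may free that
 * segment after transmission.
 */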

int
dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		struct qm_fd *fd,
		uint32_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
	struct rte_mbuf *temp, *mi;
	struct qm_sg_entry *sg_temp, *sgt;
	int i = 0;

	DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");

	temp = rte_pktmbuf_alloc(bp_info->mp);
	if (!temp) {
		DPAA_PMD_ERR("Failure in allocation of mbuf");
		return -1;
	}
	if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
				+ temp->data_off)) {
		DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
		return -1;
	}

	fd->cmd = 0;
	fd->opaque_addr = 0;

	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
		if (!mbuf->packet_type) {
			struct rte_net_hdr_lens hdr_lens;

			mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
					RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
					| RTE_PTYPE_L4_MASK);
			mbuf->l2_len = hdr_lens.l2_len;
			mbuf->l3_len = hdr_lens.l3_len;
		}
		if (temp->data_off < DEFAULT_TX_ICEOF
			+ sizeof(struct dpaa_eth_parse_results_t))
			temp->data_off = DEFAULT_TX_ICEOF
				+ sizeof(struct dpaa_eth_parse_results_t);
		dcbz_64(temp->buf_addr);
		dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
	}

	sgt = temp->buf_addr + temp->data_off;
	fd->format = QM_FD_SG;
	fd->addr = temp->buf_iova;
	fd->offset = temp->data_off;
	fd->bpid = bpid;
	fd->length20 = mbuf->pkt_len;

	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		sg_temp->opaque = 0;
		sg_temp->val = 0;
		sg_temp->addr = cur_seg->buf_iova;
		sg_temp->offset = cur_seg->data_off;
		sg_temp->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW.
				 */
				sg_temp->bpid = 0xff;
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else {
				sg_temp->bpid =
					DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
			}
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW.
				 */
				sg_temp->bpid = 0xff;
			} else {
				sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
		if (cur_seg == NULL) {
			sg_temp->final = 1;
			cpu_to_hw_sg(sg_temp);
			break;
		}
		cpu_to_hw_sg(sg_temp);
	}
	return 0;
}

/* Handle mbufs which are not segmented (non SG) */
static inline void
tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
			    struct dpaa_bp_info *bp_info,
			    struct qm_fd *fd_arr)
{
	struct rte_mbuf *mi = NULL;

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			/* In case of direct mbuf and mbuf being cloned,
			 * BMAN should _not_ release buffer.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
			/* Buffer should be released by EAL */
			rte_mbuf_refcnt_update(mbuf, -1);
		} else {
			/* In case of direct mbuf and no cloning, mbuf can be
			 * released by BMAN.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
		}
	} else {
		/* This is data-containing core mbuf: 'mi' */
		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1) {
			/* In case of indirect mbuf, and mbuf being cloned,
			 * BMAN should _not_ release it and let EAL release
			 * it through pktmbuf_free below.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
		} else {
			/* In case of indirect mbuf, and no cloning, core mbuf
			 * should be released by BMAN.
			 * Increase refcnt of core mbuf so that when
			 * pktmbuf_free is called and mbuf is released, EAL
			 * doesn't try to release core mbuf which would have
			 * been released by BMAN.
			 */
			rte_mbuf_refcnt_update(mi, 1);
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
		}
		rte_pktmbuf_free(mbuf);
	}

	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
		dpaa_unsegmented_checksum(mbuf, fd_arr);
}

/* Handle all mbufs on dpaa BMAN managed pool */
static inline uint16_t
tx_on_dpaa_pool(struct rte_mbuf *mbuf,
		struct dpaa_bp_info *bp_info,
		struct qm_fd *fd_arr)
{
	DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);

	if (mbuf->nb_segs == 1) {
		/* Case for non-segmented buffers */
		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
	} else if (mbuf->nb_segs > 1 &&
		   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
			DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
			return 1;
		}
	} else {
		DPAA_PMD_DEBUG("Number of Segments not supported");
		return 1;
	}

	return 0;
}

/* Handle all mbufs on an external pool (non-dpaa) */
static inline struct rte_mbuf *
reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf)
{
	struct dpaa_if *dpaa_intf = txq->dpaa_intf;
	struct dpaa_bp_info *bp_info = dpaa_intf->bp_info;
	struct rte_mbuf *new_mbufs[DPAA_SGT_MAX_ENTRIES + 1] = {0};
	struct rte_mbuf *temp_mbuf;
	int num_new_segs, mbuf_greater, ret, extra_seg = 0, i = 0;
	uint64_t mbufs_size, bytes_to_copy, offset1 = 0, offset2 = 0;
	char *data;

	DPAA_DP_LOG(DEBUG, "Reallocating transmit buffer");

	mbufs_size = bp_info->size -
		bp_info->meta_data_size - RTE_PKTMBUF_HEADROOM;
	extra_seg = !!(mbuf->pkt_len % mbufs_size);
	num_new_segs = (mbuf->pkt_len / mbufs_size) + extra_seg;

	ret = rte_pktmbuf_alloc_bulk(bp_info->mp, new_mbufs, num_new_segs);
	if (ret != 0) {
		DPAA_DP_LOG(DEBUG, "Allocation for new buffers failed");
		return NULL;
	}

	temp_mbuf = mbuf;

	while (temp_mbuf) {
		/* If mbuf data is less than new mbuf remaining memory */
		if ((temp_mbuf->data_len - offset1) < (mbufs_size - offset2)) {
			bytes_to_copy = temp_mbuf->data_len - offset1;
			mbuf_greater = -1;
		/* If mbuf data is greater than new mbuf remaining memory */
		} else if ((temp_mbuf->data_len - offset1) >
			   (mbufs_size - offset2)) {
			bytes_to_copy = mbufs_size - offset2;
			mbuf_greater = 1;
		/* If mbuf data is equal to new mbuf remaining memory */
		} else {
			bytes_to_copy = temp_mbuf->data_len - offset1;
			mbuf_greater = 0;
		}

		/* Copy the data */
		data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy);

		rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(mbuf,
			   void *, offset1), bytes_to_copy);

		/* Set new offsets and the temp buffers */
		if (mbuf_greater == -1) {
			offset1 = 0;
			offset2 += bytes_to_copy;
			temp_mbuf = temp_mbuf->next;
		} else if (mbuf_greater == 1) {
			offset2 = 0;
			offset1 += bytes_to_copy;
			new_mbufs[i]->next = new_mbufs[i + 1];
			new_mbufs[0]->nb_segs++;
			i++;
		} else {
			offset1 = 0;
			offset2 = 0;
			temp_mbuf = temp_mbuf->next;
			new_mbufs[i]->next = new_mbufs[i + 1];
			if (new_mbufs[i + 1])
				new_mbufs[0]->nb_segs++;
			i++;
		}
	}

	/* Copy other required fields */
	new_mbufs[0]->ol_flags = mbuf->ol_flags;
	new_mbufs[0]->packet_type = mbuf->packet_type;
	new_mbufs[0]->tx_offload = mbuf->tx_offload;

	rte_pktmbuf_free(mbuf);

	return new_mbufs[0];
}

uint16_t
dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct rte_mbuf *mbuf, *mi = NULL;
	struct rte_mempool *mp;
	struct dpaa_bp_info *bp_info;
	struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
	uint32_t frames_to_send, loop, sent = 0;
	uint16_t state;
	int ret, realloc_mbuf = 0;
	uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);

	while (nb_bufs) {
		frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
				DPAA_TX_BURST_SIZE : nb_bufs;
		for (loop = 0; loop < frames_to_send; loop++) {
			mbuf = *(bufs++);
			/* If the data offset is not a multiple of 16, FMAN
			 * can stall because of an errata, so reallocate the
			 * buffer in that case.
			 */
			if (dpaa_svr_family == SVR_LS1043A_FAMILY &&
					(mbuf->data_off & 0x7F) != 0x0)
				realloc_mbuf = 1;
			seqn = mbuf->seqn;
			if (seqn != DPAA_INVALID_MBUF_SEQN) {
				index = seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					flags[loop] =
					   ((index & QM_EQCR_DCA_IDXMASK) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			if (likely(RTE_MBUF_DIRECT(mbuf))) {
				mp = mbuf->pool;
				bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
				if (likely(mp->ops_index ==
						bp_info->dpaa_ops_index &&
					mbuf->nb_segs == 1 &&
					realloc_mbuf == 0 &&
					rte_mbuf_refcnt_read(mbuf) == 1)) {
					DPAA_MBUF_TO_CONTIG_FD(mbuf,
						&fd_arr[loop], bp_info->bpid);
					if (mbuf->ol_flags &
						DPAA_TX_CKSUM_OFFLOAD_MASK)
						dpaa_unsegmented_checksum(mbuf,
							&fd_arr[loop]);
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(mbuf);
				mp = mi->pool;
			}

			bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
			if (unlikely(mp->ops_index != bp_info->dpaa_ops_index ||
				     realloc_mbuf == 1)) {
				struct rte_mbuf *temp_mbuf;

				temp_mbuf = reallocate_mbuf(q, mbuf);
				if (!temp_mbuf) {
					/* Set frames_to_send & nb_bufs so
					 * that packets are transmitted till
					 * previous frame.
					 */
					frames_to_send = loop;
					nb_bufs = loop;
					goto send_pkts;
				}
				mbuf = temp_mbuf;
				realloc_mbuf = 0;
			}

			state = tx_on_dpaa_pool(mbuf, bp_info,
						&fd_arr[loop]);
			if (unlikely(state)) {
				/* Set frames_to_send & nb_bufs so
				 * that packets are transmitted till
				 * previous frame.
				 */
				frames_to_send = loop;
				nb_bufs = loop;
				goto send_pkts;
			}
		}

send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi(q, &fd_arr[loop],
						   &flags[loop],
						   frames_to_send - loop);
		}
		nb_bufs -= frames_to_send;
		sent += frames_to_send;
	}

	DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);

	return sent;
}

uint16_t
dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	qman_ern_poll_free();

	return dpaa_eth_queue_tx(q, bufs, nb_bufs);
}

uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
			      struct rte_mbuf **bufs __rte_unused,
			      uint16_t nb_bufs __rte_unused)
{
	DPAA_DP_LOG(DEBUG, "Drop all packets");

	/* Drop all incoming packets. There is no need to free packets here
	 * because the rte_eth framework frees them via the tx_buffer callback
	 * when this function returns a count smaller than nb_bufs.
	 */
	return 0;
}