1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2017,2019-2024 NXP 5 * 6 */ 7 8 /* System headers */ 9 #include <inttypes.h> 10 #include <unistd.h> 11 #include <stdio.h> 12 #include <limits.h> 13 #include <sched.h> 14 #include <pthread.h> 15 16 #include <rte_byteorder.h> 17 #include <rte_common.h> 18 #include <rte_interrupts.h> 19 #include <rte_log.h> 20 #include <rte_debug.h> 21 #include <rte_pci.h> 22 #include <rte_atomic.h> 23 #include <rte_branch_prediction.h> 24 #include <rte_memory.h> 25 #include <rte_tailq.h> 26 #include <rte_eal.h> 27 #include <rte_alarm.h> 28 #include <rte_ether.h> 29 #include <ethdev_driver.h> 30 #include <rte_malloc.h> 31 #include <rte_ring.h> 32 #include <rte_ip.h> 33 #include <rte_tcp.h> 34 #include <rte_udp.h> 35 #include <rte_net.h> 36 #include <rte_eventdev.h> 37 38 #include "dpaa_ethdev.h" 39 #include "dpaa_rxtx.h" 40 #include <bus_dpaa_driver.h> 41 #include <dpaa_mempool.h> 42 43 #include <qman.h> 44 #include <fsl_usd.h> 45 #include <fsl_qman.h> 46 #include <fsl_bman.h> 47 #include <dpaa_of.h> 48 #include <netcfg.h> 49 50 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 51 static int s_force_display_frm; 52 #endif 53 54 #define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \ 55 do { \ 56 (_fd)->opaque_addr = 0; \ 57 (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \ 58 (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \ 59 (_fd)->opaque |= (_mbuf)->pkt_len; \ 60 (_fd)->addr = (_mbuf)->buf_iova; \ 61 (_fd)->bpid = _bpid; \ 62 } while (0) 63 64 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 65 void 66 dpaa_force_display_frame_set(int set) 67 { 68 s_force_display_frm = set; 69 } 70 71 #define DISPLAY_PRINT printf 72 static void 73 dpaa_display_frame_info(const struct qm_fd *fd, 74 uint32_t fqid, bool rx) 75 { 76 int pos, offset = 0; 77 char *ptr, info[1024]; 78 struct annotations_t *annot = rte_dpaa_mem_ptov(fd->addr); 79 uint8_t format; 80 const struct dpaa_eth_parse_results_t *psr; 81 82 if (!fd->status && !s_force_display_frm) { 83 /* Do not display correct packets unless force display.*/ 84 return; 85 } 86 psr = &annot->parse; 87 88 format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; 89 if (format == qm_fd_contig) 90 sprintf(info, "simple"); 91 else if (format == qm_fd_sg) 92 sprintf(info, "sg"); 93 else 94 sprintf(info, "unknown format(%d)", format); 95 96 DISPLAY_PRINT("%s: fqid=%08x, bpid=%d, phy addr=0x%lx ", 97 rx ? "RX" : "TX", fqid, fd->bpid, (unsigned long)fd->addr); 98 DISPLAY_PRINT("format=%s offset=%d, len=%d, stat=0x%x\r\n", 99 info, fd->offset, fd->length20, fd->status); 100 if (rx) { 101 DISPLAY_PRINT("Display usual RX parser result:\r\n"); 102 if (psr->eth_frame_type == 0) 103 offset += sprintf(&info[offset], "unicast"); 104 else if (psr->eth_frame_type == 1) 105 offset += sprintf(&info[offset], "multicast"); 106 else if (psr->eth_frame_type == 3) 107 offset += sprintf(&info[offset], "broadcast"); 108 else 109 offset += sprintf(&info[offset], "unknown eth type(%d)", 110 psr->eth_frame_type); 111 if (psr->l2r_err) { 112 offset += sprintf(&info[offset], " L2 error(%d)", 113 psr->l2r_err); 114 } else { 115 offset += sprintf(&info[offset], " L2 non error"); 116 } 117 DISPLAY_PRINT("L2: %s, %s, ethernet type:%s\r\n", 118 psr->ethernet ? "is ethernet" : "non ethernet", 119 psr->vlan ? "is vlan" : "non vlan", info); 120 121 offset = 0; 122 DISPLAY_PRINT("L3: %s/%s, %s/%s, %s, %s\r\n", 123 psr->first_ipv4 ? "first IPv4" : "non first IPv4", 124 psr->last_ipv4 ? "last IPv4" : "non last IPv4", 125 psr->first_ipv6 ? "first IPv6" : "non first IPv6", 126 psr->last_ipv6 ? "last IPv6" : "non last IPv6", 127 psr->gre ? "GRE" : "non GRE", 128 psr->l3_err ? "L3 has error" : "L3 non error"); 129 130 if (psr->l4_type == DPAA_PR_L4_TCP_TYPE) { 131 offset += sprintf(&info[offset], "tcp"); 132 } else if (psr->l4_type == DPAA_PR_L4_UDP_TYPE) { 133 offset += sprintf(&info[offset], "udp"); 134 } else if (psr->l4_type == DPAA_PR_L4_IPSEC_TYPE) { 135 offset += sprintf(&info[offset], "IPSec "); 136 if (psr->esp_sum) 137 offset += sprintf(&info[offset], "ESP"); 138 if (psr->ah) 139 offset += sprintf(&info[offset], "AH"); 140 } else if (psr->l4_type == DPAA_PR_L4_SCTP_TYPE) { 141 offset += sprintf(&info[offset], "sctp"); 142 } else if (psr->l4_type == DPAA_PR_L4_DCCP_TYPE) { 143 offset += sprintf(&info[offset], "dccp"); 144 } else { 145 offset += sprintf(&info[offset], "unknown l4 type(%d)", 146 psr->l4_type); 147 } 148 DISPLAY_PRINT("L4: type:%s, L4 validation %s\r\n", 149 info, psr->l4cv ? "Performed" : "NOT performed"); 150 151 offset = 0; 152 if (psr->ethernet) { 153 offset += sprintf(&info[offset], 154 "Eth offset=%d, ethtype offset=%d, ", 155 psr->eth_off, psr->etype_off); 156 } 157 if (psr->vlan) { 158 offset += sprintf(&info[offset], "vLAN offset=%d, ", 159 psr->vlan_off[0]); 160 } 161 if (psr->first_ipv4 || psr->first_ipv6) { 162 offset += sprintf(&info[offset], "first IP offset=%d, ", 163 psr->ip_off[0]); 164 } 165 if (psr->last_ipv4 || psr->last_ipv6) { 166 offset += sprintf(&info[offset], "last IP offset=%d, ", 167 psr->ip_off[1]); 168 } 169 if (psr->gre) { 170 offset += sprintf(&info[offset], "GRE offset=%d, ", 171 psr->gre_off); 172 } 173 if (psr->l4_type >= DPAA_PR_L4_TCP_TYPE) { 174 offset += sprintf(&info[offset], "L4 offset=%d, ", 175 psr->l4_off); 176 } 177 offset += sprintf(&info[offset], "Next HDR(0x%04x) offset=%d.", 178 rte_be_to_cpu_16(psr->nxthdr), psr->nxthdr_off); 179 180 DISPLAY_PRINT("%s\r\n", info); 181 } 182 183 if (unlikely(format == qm_fd_sg)) { 184 /*TBD:S/G display: to be implemented*/ 185 return; 186 } 187 188 DISPLAY_PRINT("Frame payload:\r\n"); 189 ptr = (char *)annot; 190 ptr += fd->offset; 191 for (pos = 0; pos < fd->length20; pos++) { 192 DISPLAY_PRINT("%02x ", ptr[pos]); 193 if (((pos + 1) % 16) == 0) 194 DISPLAY_PRINT("\n"); 195 } 196 DISPLAY_PRINT("\n"); 197 } 198 199 #else 200 #define dpaa_display_frame_info(a, b, c) 201 #endif 202 203 static inline void 204 dpaa_slow_parsing(struct rte_mbuf *m, 205 const struct annotations_t *annot) 206 { 207 const struct dpaa_eth_parse_results_t *parse; 208 209 DPAA_DP_LOG(DEBUG, "Slow parsing"); 210 parse = &annot->parse; 211 212 if (parse->ethernet) 213 m->packet_type |= RTE_PTYPE_L2_ETHER; 214 if (parse->vlan) 215 m->packet_type |= RTE_PTYPE_L2_ETHER_VLAN; 216 if (parse->first_ipv4) 217 m->packet_type |= RTE_PTYPE_L3_IPV4; 218 if (parse->first_ipv6) 219 m->packet_type |= RTE_PTYPE_L3_IPV6; 220 if (parse->gre) 221 m->packet_type |= RTE_PTYPE_TUNNEL_GRE; 222 if (parse->last_ipv4) 223 m->packet_type |= RTE_PTYPE_L3_IPV4_EXT; 224 if (parse->last_ipv6) 225 m->packet_type |= RTE_PTYPE_L3_IPV6_EXT; 226 if (parse->l4_type == DPAA_PR_L4_TCP_TYPE) 227 m->packet_type |= RTE_PTYPE_L4_TCP; 228 else if (parse->l4_type == DPAA_PR_L4_UDP_TYPE) 229 m->packet_type |= RTE_PTYPE_L4_UDP; 230 else if (parse->l4_type == DPAA_PR_L4_IPSEC_TYPE && 231 !parse->l4_info_err && parse->esp_sum) 232 m->packet_type |= RTE_PTYPE_TUNNEL_ESP; 233 else if (parse->l4_type == DPAA_PR_L4_SCTP_TYPE) 234 m->packet_type |= RTE_PTYPE_L4_SCTP; 235 } 236 237 static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr) 238 { 239 struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr); 240 uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK; 241 struct rte_ether_hdr *eth_hdr = 242 rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 243 244 DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot); 245 246 m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_GOOD | 247 RTE_MBUF_F_RX_L4_CKSUM_GOOD; 248 249 switch (prs) { 250 case DPAA_PKT_TYPE_IPV4: 251 m->packet_type = RTE_PTYPE_L2_ETHER | 252 RTE_PTYPE_L3_IPV4; 253 break; 254 case DPAA_PKT_TYPE_IPV6: 255 m->packet_type = RTE_PTYPE_L2_ETHER | 256 RTE_PTYPE_L3_IPV6; 257 break; 258 case DPAA_PKT_TYPE_ETHER: 259 m->packet_type = RTE_PTYPE_L2_ETHER; 260 break; 261 case DPAA_PKT_TYPE_IPV4_FRAG: 262 case DPAA_PKT_TYPE_IPV4_FRAG_UDP: 263 case DPAA_PKT_TYPE_IPV4_FRAG_TCP: 264 case DPAA_PKT_TYPE_IPV4_FRAG_SCTP: 265 m->packet_type = RTE_PTYPE_L2_ETHER | 266 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG; 267 break; 268 case DPAA_PKT_TYPE_IPV6_FRAG: 269 case DPAA_PKT_TYPE_IPV6_FRAG_UDP: 270 case DPAA_PKT_TYPE_IPV6_FRAG_TCP: 271 case DPAA_PKT_TYPE_IPV6_FRAG_SCTP: 272 m->packet_type = RTE_PTYPE_L2_ETHER | 273 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG; 274 break; 275 case DPAA_PKT_TYPE_IPV4_EXT: 276 m->packet_type = RTE_PTYPE_L2_ETHER | 277 RTE_PTYPE_L3_IPV4_EXT; 278 break; 279 case DPAA_PKT_TYPE_IPV6_EXT: 280 m->packet_type = RTE_PTYPE_L2_ETHER | 281 RTE_PTYPE_L3_IPV6_EXT; 282 break; 283 case DPAA_PKT_TYPE_IPV4_TCP: 284 m->packet_type = RTE_PTYPE_L2_ETHER | 285 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP; 286 break; 287 case DPAA_PKT_TYPE_IPV6_TCP: 288 m->packet_type = RTE_PTYPE_L2_ETHER | 289 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP; 290 break; 291 case DPAA_PKT_TYPE_IPV4_UDP: 292 m->packet_type = RTE_PTYPE_L2_ETHER | 293 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP; 294 break; 295 case DPAA_PKT_TYPE_IPV6_UDP: 296 m->packet_type = RTE_PTYPE_L2_ETHER | 297 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; 298 break; 299 case DPAA_PKT_TYPE_IPSEC_IPV4: 300 if (*((uintptr_t *)&annot->parse) & DPAA_PARSE_ESP_MASK) 301 m->packet_type = RTE_PTYPE_L2_ETHER | 302 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_ESP; 303 break; 304 case DPAA_PKT_TYPE_IPSEC_IPV6: 305 if (*((uintptr_t *)&annot->parse) & DPAA_PARSE_ESP_MASK) 306 m->packet_type = RTE_PTYPE_L2_ETHER | 307 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_ESP; 308 break; 309 case DPAA_PKT_TYPE_IPV4_EXT_UDP: 310 m->packet_type = RTE_PTYPE_L2_ETHER | 311 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP; 312 break; 313 case DPAA_PKT_TYPE_IPV6_EXT_UDP: 314 m->packet_type = RTE_PTYPE_L2_ETHER | 315 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP; 316 break; 317 case DPAA_PKT_TYPE_IPV4_EXT_TCP: 318 m->packet_type = RTE_PTYPE_L2_ETHER | 319 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP; 320 break; 321 case DPAA_PKT_TYPE_IPV6_EXT_TCP: 322 m->packet_type = RTE_PTYPE_L2_ETHER | 323 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP; 324 break; 325 case DPAA_PKT_TYPE_IPV4_SCTP: 326 m->packet_type = RTE_PTYPE_L2_ETHER | 327 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP; 328 break; 329 case DPAA_PKT_TYPE_IPV6_SCTP: 330 m->packet_type = RTE_PTYPE_L2_ETHER | 331 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP; 332 break; 333 case DPAA_PKT_TYPE_IPV4_CSUM_ERR: 334 case DPAA_PKT_TYPE_IPV6_CSUM_ERR: 335 m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_BAD; 336 break; 337 case DPAA_PKT_TYPE_IPV4_TCP_CSUM_ERR: 338 case DPAA_PKT_TYPE_IPV6_TCP_CSUM_ERR: 339 case DPAA_PKT_TYPE_IPV4_UDP_CSUM_ERR: 340 case DPAA_PKT_TYPE_IPV6_UDP_CSUM_ERR: 341 m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_L4_CKSUM_BAD; 342 break; 343 case DPAA_PKT_TYPE_NONE: 344 m->packet_type = 0; 345 break; 346 /* More switch cases can be added */ 347 default: 348 dpaa_slow_parsing(m, annot); 349 } 350 351 m->tx_offload = annot->parse.ip_off[0]; 352 m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0]) 353 << DPAA_PKT_L3_LEN_SHIFT; 354 355 /* Set the hash values */ 356 m->hash.rss = (uint32_t)(annot->hash); 357 358 /* Check if Vlan is present */ 359 if (prs & DPAA_PARSE_VLAN_MASK) 360 m->ol_flags |= RTE_MBUF_F_RX_VLAN; 361 /* Packet received without stripping the vlan */ 362 363 if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_1588)) { 364 m->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 365 m->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST; 366 } 367 } 368 369 static inline void dpaa_checksum(struct rte_mbuf *mbuf) 370 { 371 struct rte_ether_hdr *eth_hdr = 372 rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *); 373 char *l3_hdr = (char *)eth_hdr + mbuf->l2_len; 374 struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 375 struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 376 377 DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf); 378 379 if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) || 380 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 381 RTE_PTYPE_L3_IPV4_EXT)) { 382 ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 383 ipv4_hdr->hdr_checksum = 0; 384 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); 385 } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 386 RTE_PTYPE_L3_IPV6) || 387 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 388 RTE_PTYPE_L3_IPV6_EXT)) 389 ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 390 391 if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) { 392 struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr + 393 mbuf->l3_len); 394 tcp_hdr->cksum = 0; 395 if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4)) 396 tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, 397 tcp_hdr); 398 else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */ 399 tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, 400 tcp_hdr); 401 } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == 402 RTE_PTYPE_L4_UDP) { 403 struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr + 404 mbuf->l3_len); 405 udp_hdr->dgram_cksum = 0; 406 if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4)) 407 udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, 408 udp_hdr); 409 else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */ 410 udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, 411 udp_hdr); 412 } 413 } 414 415 static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf, 416 struct qm_fd *fd, char *prs_buf) 417 { 418 struct dpaa_eth_parse_results_t *prs; 419 420 DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf); 421 422 prs = GET_TX_PRS(prs_buf); 423 prs->l3r = 0; 424 prs->l4r = 0; 425 if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) || 426 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 427 RTE_PTYPE_L3_IPV4_EXT)) 428 prs->l3r = DPAA_L3_PARSE_RESULT_IPV4; 429 else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 430 RTE_PTYPE_L3_IPV6) || 431 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 432 RTE_PTYPE_L3_IPV6_EXT)) 433 prs->l3r = DPAA_L3_PARSE_RESULT_IPV6; 434 435 if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) 436 prs->l4r = DPAA_L4_PARSE_RESULT_TCP; 437 else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) 438 prs->l4r = DPAA_L4_PARSE_RESULT_UDP; 439 440 prs->ip_off[0] = mbuf->l2_len; 441 prs->l4_off = mbuf->l3_len + mbuf->l2_len; 442 /* Enable L3 (and L4, if TCP or UDP) HW checksum*/ 443 fd->cmd |= DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC; 444 } 445 446 static inline void 447 dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr) 448 { 449 if (!mbuf->packet_type) { 450 struct rte_net_hdr_lens hdr_lens; 451 452 mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, 453 RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK 454 | RTE_PTYPE_L4_MASK); 455 mbuf->l2_len = hdr_lens.l2_len; 456 mbuf->l3_len = hdr_lens.l3_len; 457 } 458 if (mbuf->data_off < (DEFAULT_TX_ICEOF + 459 sizeof(struct dpaa_eth_parse_results_t))) { 460 DPAA_DP_LOG(DEBUG, "Checksum offload Err: " 461 "Not enough Headroom " 462 "space for correct Checksum offload." 463 "So Calculating checksum in Software."); 464 dpaa_checksum(mbuf); 465 } else { 466 dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr); 467 } 468 } 469 470 static struct rte_mbuf * 471 dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid) 472 { 473 struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); 474 struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp; 475 struct qm_sg_entry *sgt, *sg_temp; 476 void *vaddr, *sg_vaddr; 477 int i = 0; 478 uint16_t fd_offset = fd->offset; 479 480 vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 481 if (!vaddr) { 482 DPAA_PMD_ERR("unable to convert physical address"); 483 return NULL; 484 } 485 sgt = vaddr + fd_offset; 486 sg_temp = &sgt[i++]; 487 hw_sg_to_cpu(sg_temp); 488 temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size); 489 sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp)); 490 491 first_seg = (struct rte_mbuf *)((char *)sg_vaddr - 492 bp_info->meta_data_size); 493 first_seg->data_off = sg_temp->offset; 494 first_seg->data_len = sg_temp->length; 495 first_seg->pkt_len = sg_temp->length; 496 rte_mbuf_refcnt_set(first_seg, 1); 497 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 498 rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg), 499 (void **)&first_seg, 1, 1); 500 #endif 501 502 first_seg->port = ifid; 503 first_seg->nb_segs = 1; 504 first_seg->ol_flags = 0; 505 prev_seg = first_seg; 506 while (i < DPAA_SGT_MAX_ENTRIES) { 507 sg_temp = &sgt[i++]; 508 hw_sg_to_cpu(sg_temp); 509 sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, 510 qm_sg_entry_get64(sg_temp)); 511 cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - 512 bp_info->meta_data_size); 513 cur_seg->data_off = sg_temp->offset; 514 cur_seg->data_len = sg_temp->length; 515 first_seg->pkt_len += sg_temp->length; 516 first_seg->nb_segs += 1; 517 rte_mbuf_refcnt_set(cur_seg, 1); 518 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 519 rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg), 520 (void **)&cur_seg, 1, 1); 521 #endif 522 prev_seg->next = cur_seg; 523 if (sg_temp->final) { 524 cur_seg->next = NULL; 525 break; 526 } 527 prev_seg = cur_seg; 528 } 529 DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d", 530 first_seg->pkt_len, first_seg->nb_segs); 531 532 dpaa_eth_packet_info(first_seg, vaddr); 533 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 534 rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp), 535 (void **)&temp, 1, 1); 536 #endif 537 rte_pktmbuf_free_seg(temp); 538 539 return first_seg; 540 } 541 542 static inline struct rte_mbuf * 543 dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid) 544 { 545 struct rte_mbuf *mbuf; 546 struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); 547 void *ptr; 548 uint8_t format = 549 (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; 550 uint16_t offset; 551 uint32_t length; 552 553 if (unlikely(format == qm_fd_sg)) 554 return dpaa_eth_sg_to_mbuf(fd, ifid); 555 556 offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT; 557 length = fd->opaque & DPAA_FD_LENGTH_MASK; 558 559 DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length); 560 561 /* Ignoring case when format != qm_fd_contig */ 562 ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 563 564 mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 565 /* Prefetch the Parse results and packet data to L1 */ 566 rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); 567 568 mbuf->data_off = offset; 569 mbuf->data_len = length; 570 mbuf->pkt_len = length; 571 572 mbuf->port = ifid; 573 mbuf->nb_segs = 1; 574 mbuf->ol_flags = 0; 575 mbuf->next = NULL; 576 rte_mbuf_refcnt_set(mbuf, 1); 577 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 578 rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf), 579 (void **)&mbuf, 1, 1); 580 #endif 581 dpaa_eth_packet_info(mbuf, mbuf->buf_addr); 582 583 return mbuf; 584 } 585 586 uint16_t 587 dpaa_free_mbuf(const struct qm_fd *fd) 588 { 589 struct rte_mbuf *mbuf; 590 struct dpaa_bp_info *bp_info; 591 uint8_t format; 592 void *ptr; 593 594 bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); 595 format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; 596 if (unlikely(format == qm_fd_sg)) { 597 struct rte_mbuf *first_seg, *cur_seg; 598 struct qm_sg_entry *sgt, *sg_temp; 599 void *vaddr, *sg_vaddr; 600 int i = 0; 601 uint16_t fd_offset = fd->offset; 602 603 vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 604 if (!vaddr) { 605 DPAA_PMD_ERR("unable to convert physical address"); 606 return -1; 607 } 608 sgt = vaddr + fd_offset; 609 sg_temp = &sgt[i++]; 610 hw_sg_to_cpu(sg_temp); 611 sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, 612 qm_sg_entry_get64(sg_temp)); 613 first_seg = (struct rte_mbuf *)((char *)sg_vaddr - 614 bp_info->meta_data_size); 615 first_seg->nb_segs = 1; 616 while (i < DPAA_SGT_MAX_ENTRIES) { 617 sg_temp = &sgt[i++]; 618 hw_sg_to_cpu(sg_temp); 619 if (sg_temp->bpid != 0xFF) { 620 bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid); 621 sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, 622 qm_sg_entry_get64(sg_temp)); 623 cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - 624 bp_info->meta_data_size); 625 rte_pktmbuf_free_seg(cur_seg); 626 } 627 if (sg_temp->final) 628 break; 629 } 630 rte_pktmbuf_free_seg(first_seg); 631 return 0; 632 } 633 634 ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 635 mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 636 637 rte_pktmbuf_free(mbuf); 638 639 return 0; 640 } 641 642 /* Specific for LS1043 */ 643 void 644 dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, 645 void **bufs, int num_bufs) 646 { 647 struct rte_mbuf *mbuf; 648 struct dpaa_bp_info *bp_info; 649 const struct qm_fd *fd; 650 void *ptr; 651 struct dpaa_if *dpaa_intf; 652 uint16_t offset, i; 653 uint32_t length; 654 uint8_t format; 655 struct annotations_t *annot; 656 657 bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid); 658 ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd)); 659 rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); 660 bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 661 662 for (i = 0; i < num_bufs; i++) { 663 if (i < num_bufs - 1) { 664 bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid); 665 ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd)); 666 rte_prefetch0((void *)((uint8_t *)ptr + 667 DEFAULT_RX_ICEOF)); 668 bufs[i + 1] = (struct rte_mbuf *)((char *)ptr - 669 bp_info->meta_data_size); 670 } 671 672 fd = &dqrr[i]->fd; 673 dpaa_intf = fq[0]->dpaa_intf; 674 format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> 675 DPAA_FD_FORMAT_SHIFT; 676 if (unlikely(format == qm_fd_sg)) { 677 bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid); 678 continue; 679 } 680 681 offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> 682 DPAA_FD_OFFSET_SHIFT; 683 length = fd->opaque & DPAA_FD_LENGTH_MASK; 684 685 mbuf = bufs[i]; 686 mbuf->data_off = offset; 687 mbuf->data_len = length; 688 mbuf->pkt_len = length; 689 mbuf->port = dpaa_intf->ifid; 690 691 mbuf->nb_segs = 1; 692 mbuf->ol_flags = 0; 693 mbuf->next = NULL; 694 rte_mbuf_refcnt_set(mbuf, 1); 695 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 696 rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf), 697 (void **)&mbuf, 1, 1); 698 #endif 699 dpaa_eth_packet_info(mbuf, mbuf->buf_addr); 700 dpaa_display_frame_info(fd, fq[0]->fqid, true); 701 if (dpaa_ieee_1588) { 702 annot = GET_ANNOTATIONS(mbuf->buf_addr); 703 dpaa_intf->rx_timestamp = 704 rte_cpu_to_be_64(annot->timestamp); 705 } 706 } 707 } 708 709 void 710 dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, 711 void **bufs, int num_bufs) 712 { 713 struct rte_mbuf *mbuf; 714 const struct qm_fd *fd; 715 struct dpaa_if *dpaa_intf; 716 uint16_t offset, i; 717 uint32_t length; 718 uint8_t format; 719 struct annotations_t *annot; 720 721 for (i = 0; i < num_bufs; i++) { 722 fd = &dqrr[i]->fd; 723 dpaa_intf = fq[0]->dpaa_intf; 724 format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> 725 DPAA_FD_FORMAT_SHIFT; 726 if (unlikely(format == qm_fd_sg)) { 727 bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid); 728 continue; 729 } 730 731 offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> 732 DPAA_FD_OFFSET_SHIFT; 733 length = fd->opaque & DPAA_FD_LENGTH_MASK; 734 735 mbuf = bufs[i]; 736 mbuf->data_off = offset; 737 mbuf->data_len = length; 738 mbuf->pkt_len = length; 739 mbuf->port = dpaa_intf->ifid; 740 741 mbuf->nb_segs = 1; 742 mbuf->ol_flags = 0; 743 mbuf->next = NULL; 744 rte_mbuf_refcnt_set(mbuf, 1); 745 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 746 rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf), 747 (void **)&mbuf, 1, 1); 748 #endif 749 dpaa_eth_packet_info(mbuf, mbuf->buf_addr); 750 dpaa_display_frame_info(fd, fq[0]->fqid, true); 751 if (dpaa_ieee_1588) { 752 annot = GET_ANNOTATIONS(mbuf->buf_addr); 753 dpaa_intf->rx_timestamp = 754 rte_cpu_to_be_64(annot->timestamp); 755 } 756 } 757 } 758 759 void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs) 760 { 761 struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid); 762 void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd)); 763 764 /* In case of LS1046, annotation stashing is disabled due to L2 cache 765 * being bottleneck in case of multicore scenario for this platform. 766 * So we prefetch the annotation beforehand, so that it is available 767 * in cache when accessed. 768 */ 769 rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); 770 771 *bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 772 } 773 774 static uint16_t 775 dpaa_eth_queue_portal_rx(struct qman_fq *fq, 776 struct rte_mbuf **bufs, 777 uint16_t nb_bufs) 778 { 779 int ret; 780 781 if (unlikely(!fq->qp_initialized)) { 782 ret = rte_dpaa_portal_fq_init((void *)0, fq); 783 if (ret) { 784 DPAA_PMD_ERR("Failure in affining portal %d", ret); 785 return 0; 786 } 787 fq->qp_initialized = 1; 788 } 789 790 return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp); 791 } 792 793 enum qman_cb_dqrr_result 794 dpaa_rx_cb_parallel(void *event, 795 struct qman_portal *qm __always_unused, 796 struct qman_fq *fq, 797 const struct qm_dqrr_entry *dqrr, 798 void **bufs) 799 { 800 u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; 801 struct rte_mbuf *mbuf; 802 struct rte_event *ev = (struct rte_event *)event; 803 804 mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid); 805 ev->event_ptr = (void *)mbuf; 806 ev->flow_id = fq->ev.flow_id; 807 ev->sub_event_type = fq->ev.sub_event_type; 808 ev->event_type = RTE_EVENT_TYPE_ETHDEV; 809 ev->op = RTE_EVENT_OP_NEW; 810 ev->sched_type = fq->ev.sched_type; 811 ev->queue_id = fq->ev.queue_id; 812 ev->priority = fq->ev.priority; 813 ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN; 814 *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN; 815 *bufs = mbuf; 816 817 return qman_cb_dqrr_consume; 818 } 819 820 enum qman_cb_dqrr_result 821 dpaa_rx_cb_atomic(void *event, 822 struct qman_portal *qm __always_unused, 823 struct qman_fq *fq, 824 const struct qm_dqrr_entry *dqrr, 825 void **bufs) 826 { 827 u8 index; 828 u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; 829 struct rte_mbuf *mbuf; 830 struct rte_event *ev = (struct rte_event *)event; 831 832 mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid); 833 ev->event_ptr = (void *)mbuf; 834 ev->flow_id = fq->ev.flow_id; 835 ev->sub_event_type = fq->ev.sub_event_type; 836 ev->event_type = RTE_EVENT_TYPE_ETHDEV; 837 ev->op = RTE_EVENT_OP_NEW; 838 ev->sched_type = fq->ev.sched_type; 839 ev->queue_id = fq->ev.queue_id; 840 ev->priority = fq->ev.priority; 841 842 /* Save active dqrr entries */ 843 index = DQRR_PTR2IDX(dqrr); 844 DPAA_PER_LCORE_DQRR_SIZE++; 845 DPAA_PER_LCORE_DQRR_HELD |= 1 << index; 846 DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf; 847 ev->impl_opaque = index + 1; 848 *dpaa_seqn(mbuf) = (uint32_t)index + 1; 849 *bufs = mbuf; 850 851 return qman_cb_dqrr_defer; 852 } 853 854 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 855 static inline void 856 dpaa_eth_err_queue(struct qman_fq *fq) 857 { 858 struct rte_mbuf *mbuf; 859 struct qman_fq *debug_fq; 860 int ret, i; 861 struct qm_dqrr_entry *dq; 862 struct qm_fd *fd; 863 struct dpaa_if *dpaa_intf; 864 865 dpaa_intf = fq->dpaa_intf; 866 if (fq != &dpaa_intf->rx_queues[0]) { 867 /* Associate error queues to the first RXQ.*/ 868 return; 869 } 870 871 if (dpaa_intf->cfg->fman_if->is_shared_mac) { 872 /* Error queues of shared MAC are handled in kernel. */ 873 return; 874 } 875 876 if (unlikely(!RTE_PER_LCORE(dpaa_io))) { 877 ret = rte_dpaa_portal_init((void *)0); 878 if (ret) { 879 DPAA_PMD_ERR("Failure in affining portal"); 880 return; 881 } 882 } 883 for (i = 0; i < DPAA_DEBUG_FQ_MAX_NUM; i++) { 884 debug_fq = &dpaa_intf->debug_queues[i]; 885 ret = qman_set_vdq(debug_fq, 4, QM_VDQCR_EXACT); 886 if (ret) 887 return; 888 889 do { 890 dq = qman_dequeue(debug_fq); 891 if (!dq) 892 continue; 893 fd = &dq->fd; 894 if (i == DPAA_DEBUG_FQ_RX_ERROR) 895 DPAA_PMD_ERR("RX ERROR status: 0x%08x", 896 fd->status); 897 else 898 DPAA_PMD_ERR("TX ERROR status: 0x%08x", 899 fd->status); 900 dpaa_display_frame_info(fd, debug_fq->fqid, 901 i == DPAA_DEBUG_FQ_RX_ERROR); 902 903 mbuf = dpaa_eth_fd_to_mbuf(fd, dpaa_intf->ifid); 904 rte_pktmbuf_free(mbuf); 905 qman_dqrr_consume(debug_fq, dq); 906 } while (debug_fq->flags & QMAN_FQ_STATE_VDQCR); 907 } 908 } 909 #endif 910 911 uint16_t dpaa_eth_queue_rx(void *q, 912 struct rte_mbuf **bufs, 913 uint16_t nb_bufs) 914 { 915 struct qman_fq *fq = q; 916 struct qm_dqrr_entry *dq; 917 uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; 918 int num_rx_bufs, ret; 919 uint32_t vdqcr_flags = 0; 920 struct annotations_t *annot; 921 struct dpaa_if *dpaa_intf = fq->dpaa_intf; 922 923 if (unlikely(rte_dpaa_bpid_info == NULL && 924 rte_eal_process_type() == RTE_PROC_SECONDARY)) 925 rte_dpaa_bpid_info = fq->bp_array; 926 927 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 928 dpaa_eth_err_queue(fq); 929 #endif 930 931 if (likely(fq->is_static)) 932 return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs); 933 934 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 935 ret = rte_dpaa_portal_init((void *)0); 936 if (ret) { 937 DPAA_PMD_ERR("Failure in affining portal"); 938 return 0; 939 } 940 } 941 942 /* Until request for four buffers, we provide exact number of buffers. 943 * Otherwise we do not set the QM_VDQCR_EXACT flag. 944 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than 945 * requested, so we request two less in this case. 946 */ 947 if (nb_bufs < 4) { 948 vdqcr_flags = QM_VDQCR_EXACT; 949 num_rx_bufs = nb_bufs; 950 } else { 951 num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ? 952 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2); 953 } 954 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags); 955 if (ret) 956 return 0; 957 958 do { 959 dq = qman_dequeue(fq); 960 if (!dq) 961 continue; 962 bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid); 963 dpaa_display_frame_info(&dq->fd, fq->fqid, true); 964 if (dpaa_ieee_1588) { 965 annot = GET_ANNOTATIONS(bufs[num_rx - 1]->buf_addr); 966 dpaa_intf->rx_timestamp = rte_cpu_to_be_64(annot->timestamp); 967 } 968 qman_dqrr_consume(fq, dq); 969 } while (fq->flags & QMAN_FQ_STATE_VDQCR); 970 971 return num_rx; 972 } 973 974 static int 975 dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, 976 struct qm_fd *fd, 977 struct dpaa_sw_buf_free *free_buf, 978 uint32_t *free_count, 979 uint32_t pkt_id) 980 { 981 struct rte_mbuf *cur_seg = mbuf; 982 struct rte_mbuf *temp, *mi; 983 struct qm_sg_entry *sg_temp, *sgt; 984 int i = 0; 985 986 DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit"); 987 988 temp = rte_pktmbuf_alloc(dpaa_tx_sg_pool); 989 if (!temp) { 990 DPAA_PMD_ERR("Failure in allocation of mbuf"); 991 return -1; 992 } 993 if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry)) 994 + temp->data_off)) { 995 DPAA_PMD_ERR("Insufficient space in mbuf for SG entries"); 996 return -1; 997 } 998 999 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 1000 rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp), 1001 (void **)&temp, 1, 0); 1002 #endif 1003 fd->cmd = 0; 1004 fd->opaque_addr = 0; 1005 1006 if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) { 1007 if (!mbuf->packet_type) { 1008 struct rte_net_hdr_lens hdr_lens; 1009 1010 mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, 1011 RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK 1012 | RTE_PTYPE_L4_MASK); 1013 mbuf->l2_len = hdr_lens.l2_len; 1014 mbuf->l3_len = hdr_lens.l3_len; 1015 } 1016 if (temp->data_off < DEFAULT_TX_ICEOF 1017 + sizeof(struct dpaa_eth_parse_results_t)) 1018 temp->data_off = DEFAULT_TX_ICEOF 1019 + sizeof(struct dpaa_eth_parse_results_t); 1020 dcbz_64(temp->buf_addr); 1021 dpaa_checksum_offload(mbuf, fd, temp->buf_addr); 1022 } 1023 1024 sgt = temp->buf_addr + temp->data_off; 1025 fd->format = QM_FD_SG; 1026 fd->addr = temp->buf_iova; 1027 fd->offset = temp->data_off; 1028 fd->bpid = DPAA_MEMPOOL_TO_BPID(dpaa_tx_sg_pool); 1029 fd->length20 = mbuf->pkt_len; 1030 1031 while (i < DPAA_SGT_MAX_ENTRIES) { 1032 sg_temp = &sgt[i++]; 1033 sg_temp->opaque = 0; 1034 sg_temp->val = 0; 1035 sg_temp->addr = cur_seg->buf_iova; 1036 sg_temp->offset = cur_seg->data_off; 1037 sg_temp->length = cur_seg->data_len; 1038 if (RTE_MBUF_DIRECT(cur_seg)) { 1039 if (rte_mbuf_refcnt_read(cur_seg) > 1) { 1040 /*If refcnt > 1, invalid bpid is set to ensure 1041 * buffer is not freed by HW. 1042 */ 1043 sg_temp->bpid = 0xff; 1044 rte_mbuf_refcnt_update(cur_seg, -1); 1045 } else { 1046 sg_temp->bpid = 1047 DPAA_MEMPOOL_TO_BPID(cur_seg->pool); 1048 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 1049 rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg), 1050 (void **)&cur_seg, 1, 0); 1051 #endif 1052 } 1053 } else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) { 1054 free_buf[*free_count].seg = cur_seg; 1055 free_buf[*free_count].pkt_id = pkt_id; 1056 ++*free_count; 1057 sg_temp->bpid = 0xff; 1058 } else { 1059 /* Get owner MBUF from indirect buffer */ 1060 mi = rte_mbuf_from_indirect(cur_seg); 1061 if (rte_mbuf_refcnt_read(mi) > 1) { 1062 /*If refcnt > 1, invalid bpid is set to ensure 1063 * owner buffer is not freed by HW. 1064 */ 1065 sg_temp->bpid = 0xff; 1066 } else { 1067 sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool); 1068 rte_mbuf_refcnt_update(mi, 1); 1069 } 1070 free_buf[*free_count].seg = cur_seg; 1071 free_buf[*free_count].pkt_id = pkt_id; 1072 ++*free_count; 1073 } 1074 cur_seg = cur_seg->next; 1075 if (cur_seg == NULL) { 1076 sg_temp->final = 1; 1077 cpu_to_hw_sg(sg_temp); 1078 break; 1079 } 1080 cpu_to_hw_sg(sg_temp); 1081 } 1082 return 0; 1083 } 1084 1085 /* Handle mbufs which are not segmented (non SG) */ 1086 static inline void 1087 tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf, 1088 struct dpaa_bp_info *bp_info, 1089 struct qm_fd *fd_arr, 1090 struct dpaa_sw_buf_free *buf_to_free, 1091 uint32_t *free_count, 1092 uint32_t pkt_id) 1093 { 1094 struct rte_mbuf *mi = NULL; 1095 1096 if (RTE_MBUF_DIRECT(mbuf)) { 1097 if (rte_mbuf_refcnt_read(mbuf) > 1) { 1098 /* In case of direct mbuf and mbuf being cloned, 1099 * BMAN should _not_ release buffer. 1100 */ 1101 DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff); 1102 /* Buffer should be releasd by EAL */ 1103 rte_mbuf_refcnt_update(mbuf, -1); 1104 } else { 1105 /* In case of direct mbuf and no cloning, mbuf can be 1106 * released by BMAN. 1107 */ 1108 DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid); 1109 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 1110 rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf), 1111 (void **)&mbuf, 1, 0); 1112 #endif 1113 } 1114 } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) { 1115 buf_to_free[*free_count].seg = mbuf; 1116 buf_to_free[*free_count].pkt_id = pkt_id; 1117 ++*free_count; 1118 DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 1119 bp_info ? bp_info->bpid : 0xff); 1120 } else { 1121 /* This is data-containing core mbuf: 'mi' */ 1122 mi = rte_mbuf_from_indirect(mbuf); 1123 if (rte_mbuf_refcnt_read(mi) > 1) { 1124 /* In case of indirect mbuf, and mbuf being cloned, 1125 * BMAN should _not_ release it and let EAL release 1126 * it through pktmbuf_free below. 1127 */ 1128 DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff); 1129 } else { 1130 /* In case of indirect mbuf, and no cloning, core mbuf 1131 * should be released by BMAN. 1132 * Increate refcnt of core mbuf so that when 1133 * pktmbuf_free is called and mbuf is released, EAL 1134 * doesn't try to release core mbuf which would have 1135 * been released by BMAN. 1136 */ 1137 rte_mbuf_refcnt_update(mi, 1); 1138 DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 1139 bp_info ? bp_info->bpid : 0xff); 1140 } 1141 buf_to_free[*free_count].seg = mbuf; 1142 buf_to_free[*free_count].pkt_id = pkt_id; 1143 ++*free_count; 1144 } 1145 1146 if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) 1147 dpaa_unsegmented_checksum(mbuf, fd_arr); 1148 } 1149 1150 /* Handle all mbufs on dpaa BMAN managed pool */ 1151 static inline uint16_t 1152 tx_on_dpaa_pool(struct rte_mbuf *mbuf, 1153 struct dpaa_bp_info *bp_info, 1154 struct qm_fd *fd_arr, 1155 struct dpaa_sw_buf_free *buf_to_free, 1156 uint32_t *free_count, 1157 uint32_t pkt_id) 1158 { 1159 DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf); 1160 1161 if (mbuf->nb_segs == 1) { 1162 /* Case for non-segmented buffers */ 1163 tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr, 1164 buf_to_free, free_count, pkt_id); 1165 } else if (mbuf->nb_segs > 1 && 1166 mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) { 1167 if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, buf_to_free, 1168 free_count, pkt_id)) { 1169 DPAA_PMD_DEBUG("Unable to create Scatter Gather FD"); 1170 return 1; 1171 } 1172 } else { 1173 DPAA_PMD_DEBUG("Number of Segments not supported"); 1174 return 1; 1175 } 1176 1177 return 0; 1178 } 1179 1180 /* Handle all mbufs on an external pool (non-dpaa) */ 1181 static inline struct rte_mbuf * 1182 reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf) 1183 { 1184 struct dpaa_if *dpaa_intf = txq->dpaa_intf; 1185 struct dpaa_bp_info *bp_info = dpaa_intf->bp_info; 1186 struct rte_mbuf *new_mbufs[DPAA_SGT_MAX_ENTRIES + 1] = {0}; 1187 struct rte_mbuf *temp_mbuf; 1188 int num_new_segs, mbuf_greater, ret, extra_seg = 0, i = 0; 1189 uint64_t mbufs_size, bytes_to_copy, offset1 = 0, offset2 = 0; 1190 char *data; 1191 1192 DPAA_DP_LOG(DEBUG, "Reallocating transmit buffer"); 1193 1194 mbufs_size = bp_info->size - 1195 bp_info->meta_data_size - RTE_PKTMBUF_HEADROOM; 1196 extra_seg = !!(mbuf->pkt_len % mbufs_size); 1197 num_new_segs = (mbuf->pkt_len / mbufs_size) + extra_seg; 1198 1199 ret = rte_pktmbuf_alloc_bulk(bp_info->mp, new_mbufs, num_new_segs); 1200 if (ret != 0) { 1201 DPAA_DP_LOG(DEBUG, "Allocation for new buffers failed"); 1202 return NULL; 1203 } 1204 1205 temp_mbuf = mbuf; 1206 1207 while (temp_mbuf) { 1208 /* If mbuf data is less than new mbuf remaining memory */ 1209 if ((temp_mbuf->data_len - offset1) < (mbufs_size - offset2)) { 1210 bytes_to_copy = temp_mbuf->data_len - offset1; 1211 mbuf_greater = -1; 1212 /* If mbuf data is greater than new mbuf remaining memory */ 1213 } else if ((temp_mbuf->data_len - offset1) > 1214 (mbufs_size - offset2)) { 1215 bytes_to_copy = mbufs_size - offset2; 1216 mbuf_greater = 1; 1217 /* if mbuf data is equal to new mbuf remaining memory */ 1218 } else { 1219 bytes_to_copy = temp_mbuf->data_len - offset1; 1220 mbuf_greater = 0; 1221 } 1222 1223 /* Copy the data */ 1224 data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy); 1225 1226 rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(temp_mbuf, 1227 void *, offset1), bytes_to_copy); 1228 1229 /* Set new offsets and the temp buffers */ 1230 if (mbuf_greater == -1) { 1231 offset1 = 0; 1232 offset2 += bytes_to_copy; 1233 temp_mbuf = temp_mbuf->next; 1234 } else if (mbuf_greater == 1) { 1235 offset2 = 0; 1236 offset1 += bytes_to_copy; 1237 new_mbufs[i]->next = new_mbufs[i + 1]; 1238 new_mbufs[0]->nb_segs++; 1239 i++; 1240 } else { 1241 offset1 = 0; 1242 offset2 = 0; 1243 temp_mbuf = temp_mbuf->next; 1244 new_mbufs[i]->next = new_mbufs[i + 1]; 1245 if (new_mbufs[i + 1]) 1246 new_mbufs[0]->nb_segs++; 1247 i++; 1248 } 1249 } 1250 1251 /* Copy other required fields */ 1252 new_mbufs[0]->ol_flags = mbuf->ol_flags; 1253 new_mbufs[0]->packet_type = mbuf->packet_type; 1254 new_mbufs[0]->tx_offload = mbuf->tx_offload; 1255 1256 rte_pktmbuf_free(mbuf); 1257 1258 return new_mbufs[0]; 1259 } 1260 1261 #ifdef RTE_LIBRTE_DPAA_ERRATA_LS1043_A010022 1262 /* In case the data offset is not multiple of 16, 1263 * FMAN can stall because of an errata. So reallocate 1264 * the buffer in such case. 1265 */ 1266 static inline int 1267 dpaa_eth_ls1043a_mbuf_realloc(struct rte_mbuf *mbuf) 1268 { 1269 uint64_t len, offset; 1270 1271 if (dpaa_svr_family != SVR_LS1043A_FAMILY) 1272 return 0; 1273 1274 while (mbuf) { 1275 len = mbuf->data_len; 1276 offset = mbuf->data_off; 1277 if ((mbuf->next && 1278 !rte_is_aligned((void *)len, 16)) || 1279 !rte_is_aligned((void *)offset, 16)) { 1280 DPAA_PMD_DEBUG("Errata condition hit"); 1281 1282 return 1; 1283 } 1284 mbuf = mbuf->next; 1285 } 1286 return 0; 1287 } 1288 #endif 1289 1290 uint16_t 1291 dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) 1292 { 1293 struct rte_mbuf *mbuf, *mi = NULL; 1294 struct rte_mempool *mp; 1295 struct dpaa_bp_info *bp_info; 1296 struct qm_fd fd_arr[DPAA_TX_BURST_SIZE]; 1297 uint32_t frames_to_send, loop, sent = 0; 1298 uint16_t state; 1299 int ret, realloc_mbuf = 0; 1300 uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0}; 1301 struct dpaa_sw_buf_free buf_to_free[DPAA_MAX_SGS * DPAA_MAX_DEQUEUE_NUM_FRAMES]; 1302 uint32_t free_count = 0; 1303 struct qman_fq *fq = q; 1304 struct dpaa_if *dpaa_intf = fq->dpaa_intf; 1305 struct qman_fq *fq_txconf = fq->tx_conf_queue; 1306 1307 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 1308 ret = rte_dpaa_portal_init((void *)0); 1309 if (ret) { 1310 DPAA_PMD_ERR("Failure in affining portal"); 1311 return 0; 1312 } 1313 } 1314 1315 DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q); 1316 1317 if (dpaa_ieee_1588) { 1318 dpaa_intf->next_tx_conf_queue = fq_txconf; 1319 dpaa_eth_tx_conf(fq_txconf); 1320 dpaa_intf->tx_timestamp = 0; 1321 } 1322 1323 while (nb_bufs) { 1324 frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ? 1325 DPAA_TX_BURST_SIZE : nb_bufs; 1326 for (loop = 0; loop < frames_to_send; loop++) { 1327 mbuf = *(bufs++); 1328 fd_arr[loop].cmd = 0; 1329 if (dpaa_ieee_1588) { 1330 fd_arr[loop].cmd |= DPAA_FD_CMD_FCO | 1331 qman_fq_fqid(fq_txconf); 1332 fd_arr[loop].cmd |= DPAA_FD_CMD_RPD | 1333 DPAA_FD_CMD_UPD; 1334 } 1335 #ifdef RTE_LIBRTE_DPAA_ERRATA_LS1043_A010022 1336 realloc_mbuf = dpaa_eth_ls1043a_mbuf_realloc(mbuf); 1337 #endif 1338 seqn = *dpaa_seqn(mbuf); 1339 if (seqn != DPAA_INVALID_MBUF_SEQN) { 1340 index = seqn - 1; 1341 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) { 1342 flags[loop] = 1343 ((index & QM_EQCR_DCA_IDXMASK) << 8); 1344 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA; 1345 DPAA_PER_LCORE_DQRR_SIZE--; 1346 DPAA_PER_LCORE_DQRR_HELD &= 1347 ~(1 << index); 1348 } 1349 } 1350 1351 if (likely(RTE_MBUF_DIRECT(mbuf))) { 1352 mp = mbuf->pool; 1353 bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); 1354 if (likely(mp->ops_index == 1355 bp_info->dpaa_ops_index && 1356 mbuf->nb_segs == 1 && 1357 realloc_mbuf == 0 && 1358 rte_mbuf_refcnt_read(mbuf) == 1)) { 1359 DPAA_MBUF_TO_CONTIG_FD(mbuf, 1360 &fd_arr[loop], bp_info->bpid); 1361 if (mbuf->ol_flags & 1362 DPAA_TX_CKSUM_OFFLOAD_MASK) 1363 dpaa_unsegmented_checksum(mbuf, 1364 &fd_arr[loop]); 1365 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 1366 rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf), 1367 (void **)&mbuf, 1, 0); 1368 #endif 1369 continue; 1370 } 1371 } else { 1372 mi = rte_mbuf_from_indirect(mbuf); 1373 mp = mi->pool; 1374 } 1375 1376 if (unlikely(RTE_MBUF_HAS_EXTBUF(mbuf))) { 1377 bp_info = NULL; 1378 goto indirect_buf; 1379 } 1380 1381 bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); 1382 if (unlikely(mp->ops_index != bp_info->dpaa_ops_index || 1383 realloc_mbuf == 1)) { 1384 struct rte_mbuf *temp_mbuf; 1385 1386 temp_mbuf = reallocate_mbuf(q, mbuf); 1387 if (!temp_mbuf) { 1388 /* Set frames_to_send & nb_bufs so 1389 * that packets are transmitted till 1390 * previous frame. 1391 */ 1392 frames_to_send = loop; 1393 nb_bufs = loop; 1394 goto send_pkts; 1395 } 1396 mbuf = temp_mbuf; 1397 realloc_mbuf = 0; 1398 } 1399 indirect_buf: 1400 state = tx_on_dpaa_pool(mbuf, bp_info, 1401 &fd_arr[loop], 1402 buf_to_free, 1403 &free_count, 1404 loop); 1405 if (unlikely(state)) { 1406 /* Set frames_to_send & nb_bufs so 1407 * that packets are transmitted till 1408 * previous frame. 1409 */ 1410 frames_to_send = loop; 1411 nb_bufs = loop; 1412 goto send_pkts; 1413 } 1414 } 1415 1416 send_pkts: 1417 loop = 0; 1418 while (loop < frames_to_send) { 1419 loop += qman_enqueue_multi(q, &fd_arr[loop], 1420 &flags[loop], 1421 frames_to_send - loop); 1422 } 1423 nb_bufs -= frames_to_send; 1424 sent += frames_to_send; 1425 } 1426 1427 DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q); 1428 1429 for (loop = 0; loop < free_count; loop++) { 1430 if (buf_to_free[loop].pkt_id < sent) 1431 rte_pktmbuf_free_seg(buf_to_free[loop].seg); 1432 } 1433 1434 return sent; 1435 } 1436 1437 void 1438 dpaa_eth_tx_conf(void *q) 1439 { 1440 struct qman_fq *fq = q; 1441 struct qm_dqrr_entry *dq; 1442 int num_tx_conf, ret, dq_num; 1443 uint32_t vdqcr_flags = 0; 1444 struct dpaa_if *dpaa_intf = fq->dpaa_intf; 1445 struct qm_dqrr_entry *dqrr; 1446 struct dpaa_bp_info *bp_info; 1447 struct rte_mbuf *mbuf; 1448 void *ptr; 1449 struct annotations_t *annot; 1450 1451 if (unlikely(rte_dpaa_bpid_info == NULL && 1452 rte_eal_process_type() == RTE_PROC_SECONDARY)) 1453 rte_dpaa_bpid_info = fq->bp_array; 1454 1455 if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 1456 ret = rte_dpaa_portal_init((void *)0); 1457 if (ret) { 1458 DPAA_PMD_ERR("Failure in affining portal"); 1459 return; 1460 } 1461 } 1462 1463 num_tx_conf = DPAA_MAX_DEQUEUE_NUM_FRAMES - 2; 1464 1465 do { 1466 dq_num = 0; 1467 ret = qman_set_vdq(fq, num_tx_conf, vdqcr_flags); 1468 if (ret) 1469 return; 1470 do { 1471 dq = qman_dequeue(fq); 1472 if (!dq) 1473 continue; 1474 dqrr = dq; 1475 dq_num++; 1476 bp_info = DPAA_BPID_TO_POOL_INFO(dqrr->fd.bpid); 1477 ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr->fd)); 1478 rte_prefetch0((void *)((uint8_t *)ptr 1479 + DEFAULT_RX_ICEOF)); 1480 mbuf = (struct rte_mbuf *) 1481 ((char *)ptr - bp_info->meta_data_size); 1482 1483 if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) { 1484 annot = GET_ANNOTATIONS(mbuf->buf_addr); 1485 dpaa_intf->tx_timestamp = 1486 rte_cpu_to_be_64(annot->timestamp); 1487 } 1488 dpaa_display_frame_info(&dq->fd, fq->fqid, true); 1489 qman_dqrr_consume(fq, dq); 1490 dpaa_free_mbuf(&dq->fd); 1491 } while (fq->flags & QMAN_FQ_STATE_VDQCR); 1492 } while (dq_num == num_tx_conf); 1493 } 1494 1495 uint16_t 1496 dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) 1497 { 1498 qman_ern_poll_free(); 1499 1500 return dpaa_eth_queue_tx(q, bufs, nb_bufs); 1501 } 1502 1503 uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused, 1504 struct rte_mbuf **bufs __rte_unused, 1505 uint16_t nb_bufs __rte_unused) 1506 { 1507 DPAA_DP_LOG(DEBUG, "Drop all packets"); 1508 1509 /* Drop all incoming packets. No need to free packets here 1510 * because the rte_eth f/w frees up the packets through tx_buffer 1511 * callback in case this functions returns count less than nb_bufs 1512 */ 1513 return 0; 1514 } 1515