/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017 NXP
 *
 */

/* System headers */
#include <inttypes.h>
#include <unistd.h>
#include <stdio.h>
#include <limits.h>
#include <sched.h>
#include <pthread.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "dpaa_ethdev.h"
#include "dpaa_rxtx.h"
#include <rte_dpaa_bus.h>
#include <dpaa_mempool.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <of.h>
#include <netcfg.h>

#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
	do { \
		(_fd)->cmd = 0; \
		(_fd)->opaque_addr = 0; \
		(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
		(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
		(_fd)->opaque |= (_mbuf)->pkt_len; \
		(_fd)->addr = (_mbuf)->buf_iova; \
		(_fd)->bpid = _bpid; \
	} while (0)

#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
void dpaa_display_frame(const struct qm_fd *fd)
{
	int ii;
	char *ptr;

	printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n",
	       __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format,
	       fd->offset, fd->length20, fd->status);

	ptr = (char *)rte_dpaa_mem_ptov(fd->addr);
	ptr += fd->offset;
	/* Dump every byte of the frame, breaking the line every 16 bytes */
	for (ii = 0; ii < fd->length20; ii++) {
		printf("%02x ", *ptr);
		if (((ii + 1) % 16) == 0)
			printf("\n");
		ptr++;
	}
	printf("\n");
}
#else
#define dpaa_display_frame(a)
#endif

static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
				     uint64_t prs __rte_unused)
{
	DPAA_DP_LOG(DEBUG, "Slow parsing");
	/*TBD:XXX: to be implemented*/
}
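
/* dpaa_eth_packet_info() below translates the FMan parse results that the
 * hardware writes into the annotation area at the start of the buffer: the
 * masked parse word is matched against the DPAA_PKT_TYPE_* values to fill
 * m->packet_type, the IP/L4 parse offsets are packed into m->tx_offload
 * (l2_len/l3_len), and the hash result is reported as the RSS hash.
 */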
static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
					uint64_t fd_virt_addr)
{
	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
	uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;

	DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);

	switch (prs) {
	case DPAA_PKT_TYPE_NONE:
		m->packet_type = 0;
		break;
	case DPAA_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA_PKT_TYPE_IPV4_FRAG:
	case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV6_FRAG:
	case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	/* More switch cases can be added */
	default:
		dpaa_slow_parsing(m, prs);
	}

	m->tx_offload = annot->parse.ip_off[0];
	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
		<< DPAA_PKT_L3_LEN_SHIFT;

	/* Set the hash values */
	m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
	m->ol_flags = PKT_RX_RSS_HASH;
	/* All packets with Bad checksum are dropped by interface (and
	 * corresponding notification issued to RX error queues).
	 */
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

	/* Check if Vlan is present */
	if (prs & DPAA_PARSE_VLAN_MASK)
		m->ol_flags |= PKT_RX_VLAN;
	/* Packet received without stripping the vlan */
}
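
/* dpaa_checksum() below is the software fallback for the TX checksum offload
 * path: when there is not enough headroom in front of the packet data to hold
 * the hardware parse results (see tx_on_dpaa_pool_unsegmented()), the IPv4
 * header checksum and the TCP/UDP checksum are computed on the CPU instead.
 */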
static inline void dpaa_checksum(struct rte_mbuf *mbuf)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
	struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
	struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;

	DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);

	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
	    RTE_PTYPE_L3_IPV4_EXT)) {
		ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6) ||
		   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6_EXT))
		ipv6_hdr = (struct ipv6_hdr *)l3_hdr;

	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
		struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
					  mbuf->l3_len);
		tcp_hdr->cksum = 0;
		if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
			tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
							       tcp_hdr);
		else /* assume ethertype == ETHER_TYPE_IPv6 */
			tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
							       tcp_hdr);
	} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
		   RTE_PTYPE_L4_UDP) {
		struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
							     mbuf->l3_len);
		udp_hdr->dgram_cksum = 0;
		if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
			udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
								     udp_hdr);
		else /* assume ethertype == ETHER_TYPE_IPv6 */
			udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
								     udp_hdr);
	}
}

static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
					 struct qm_fd *fd, char *prs_buf)
{
	struct dpaa_eth_parse_results_t *prs;

	DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);

	prs = GET_TX_PRS(prs_buf);
	prs->l3r = 0;
	prs->l4r = 0;
	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
	    RTE_PTYPE_L3_IPV4_EXT))
		prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
	else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		  RTE_PTYPE_L3_IPV6) ||
		 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		  RTE_PTYPE_L3_IPV6_EXT))
		prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;

	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
		prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
	else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
		prs->l4r = DPAA_L4_PARSE_RESULT_UDP;

	prs->ip_off[0] = mbuf->l2_len;
	prs->l4_off = mbuf->l3_len + mbuf->l2_len;
	/* Enable L3 (and L4, if TCP or UDP) HW checksum */
	fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
}
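
/* Convert a scatter/gather FD into an mbuf chain. The FD points at a buffer
 * that holds the hardware SG table; each SG entry is turned back into a
 * segment mbuf by stepping back meta_data_size bytes from the buffer address
 * to recover the mbuf header. The buffer carrying the SG table itself is
 * released once the chain has been built.
 */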
struct rte_mbuf *
dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
	struct qm_sg_entry *sgt, *sg_temp;
	void *vaddr, *sg_vaddr;
	int i = 0;
	uint8_t fd_offset = fd->offset;

	DPAA_DP_LOG(DEBUG, "Received an SG frame");

	vaddr = rte_dpaa_mem_ptov(qm_fd_addr(fd));
	if (!vaddr) {
		DPAA_PMD_ERR("unable to convert physical address");
		return NULL;
	}
	sgt = vaddr + fd_offset;
	sg_temp = &sgt[i++];
	hw_sg_to_cpu(sg_temp);
	temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
	sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));

	first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
	first_seg->data_off = sg_temp->offset;
	first_seg->data_len = sg_temp->length;
	first_seg->pkt_len = sg_temp->length;
	rte_mbuf_refcnt_set(first_seg, 1);

	first_seg->port = ifid;
	first_seg->nb_segs = 1;
	first_seg->ol_flags = 0;
	prev_seg = first_seg;
	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		hw_sg_to_cpu(sg_temp);
		sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
		cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
		cur_seg->data_off = sg_temp->offset;
		cur_seg->data_len = sg_temp->length;
		first_seg->pkt_len += sg_temp->length;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(cur_seg, 1);
		prev_seg->next = cur_seg;
		if (sg_temp->final) {
			cur_seg->next = NULL;
			break;
		}
		prev_seg = cur_seg;
	}

	dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
	rte_pktmbuf_free_seg(temp);

	return first_seg;
}

static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
						   uint32_t ifid)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	struct rte_mbuf *mbuf;
	void *ptr;
	uint8_t format =
		(fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
	uint16_t offset =
		(fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
	uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;

	DPAA_DP_LOG(DEBUG, " FD--->MBUF");

	if (unlikely(format == qm_fd_sg))
		return dpaa_eth_sg_to_mbuf(fd, ifid);

	/* Ignoring case when format != qm_fd_contig */
	dpaa_display_frame(fd);
	ptr = rte_dpaa_mem_ptov(fd->addr);
	/* Ignoring case when ptr would be NULL. That is only possible in case
	 * of a corrupted packet
	 */

	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
	/* Prefetch the Parse results and packet data to L1 */
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
	rte_prefetch0((void *)((uint8_t *)ptr + offset));

	mbuf->data_off = offset;
	mbuf->data_len = length;
	mbuf->pkt_len = length;

	mbuf->port = ifid;
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);
	dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);

	return mbuf;
}
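
/* Receive burst: a volatile dequeue command (VDQCR) is issued for up to
 * DPAA_MAX_DEQUEUE_NUM_FRAMES frames and the dequeue response ring is drained
 * until the QMAN_FQ_STATE_VDQCR flag clears, converting each frame descriptor
 * into an mbuf. The calling lcore is affined to a QMan portal through
 * rte_dpaa_portal_init() before any dequeue is attempted.
 */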
uint16_t dpaa_eth_queue_rx(void *q,
			   struct rte_mbuf **bufs,
			   uint16_t nb_bufs)
{
	struct qman_fq *fq = q;
	struct qm_dqrr_entry *dq;
	uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
	int ret;

	ret = rte_dpaa_portal_init((void *)0);
	if (ret) {
		DPAA_PMD_ERR("Failure in affining portal");
		return 0;
	}

	ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
			   DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);
	if (ret)
		return 0;

	do {
		dq = qman_dequeue(fq);
		if (!dq)
			continue;
		bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return num_rx;
}

static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
{
	int ret;
	uint64_t buf = 0;
	struct bm_buffer bufs;

	ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
	if (ret <= 0) {
		DPAA_PMD_WARN("Failed to allocate buffers %d", ret);
		return (void *)buf;
	}

	DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
		    (uint64_t)bufs.addr, bufs.bpid);

	buf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size;
	if (!buf)
		goto out;

out:
	return (void *)buf;
}

static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
					     struct dpaa_if *dpaa_intf)
{
	struct rte_mbuf *dpaa_mbuf;

	/* allocate pktbuffer on bpid for dpaa port */
	dpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);
	if (!dpaa_mbuf)
		return NULL;

	memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)
		((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);

	/* Copy only the required fields */
	dpaa_mbuf->data_off = mbuf->data_off;
	dpaa_mbuf->pkt_len = mbuf->pkt_len;
	dpaa_mbuf->ol_flags = mbuf->ol_flags;
	dpaa_mbuf->packet_type = mbuf->packet_type;
	dpaa_mbuf->tx_offload = mbuf->tx_offload;
	rte_pktmbuf_free(mbuf);
	return dpaa_mbuf;
}
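
/* Build a scatter/gather FD for a multi-segment mbuf. An extra mbuf is taken
 * from the pool to hold the hardware SG table; the FD points at that buffer
 * while each SG entry points at one segment. A bpid of 0xff is deliberately
 * invalid: it tells the hardware not to release that buffer back to BMAN, so
 * cloned (refcnt > 1) or indirect segments remain owned by the application.
 */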
int
dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		struct qm_fd *fd,
		uint32_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
	struct rte_mbuf *temp, *mi;
	struct qm_sg_entry *sg_temp, *sgt;
	int i = 0;

	DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");

	temp = rte_pktmbuf_alloc(bp_info->mp);
	if (!temp) {
		DPAA_PMD_ERR("Failure in allocation of mbuf");
		return -1;
	}
	if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
				+ temp->data_off)) {
		DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
		rte_pktmbuf_free(temp);
		return -1;
	}

	fd->cmd = 0;
	fd->opaque_addr = 0;

	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
		if (temp->data_off < DEFAULT_TX_ICEOF
			+ sizeof(struct dpaa_eth_parse_results_t))
			temp->data_off = DEFAULT_TX_ICEOF
				+ sizeof(struct dpaa_eth_parse_results_t);
		dcbz_64(temp->buf_addr);
		dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
	}

	sgt = temp->buf_addr + temp->data_off;
	fd->format = QM_FD_SG;
	fd->addr = temp->buf_iova;
	fd->offset = temp->data_off;
	fd->bpid = bpid;
	fd->length20 = mbuf->pkt_len;

	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		sg_temp->opaque = 0;
		sg_temp->val = 0;
		sg_temp->addr = cur_seg->buf_iova;
		sg_temp->offset = cur_seg->data_off;
		sg_temp->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW.
				 */
				sg_temp->bpid = 0xff;
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else {
				sg_temp->bpid =
					DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
			}
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW.
				 */
				sg_temp->bpid = 0xff;
			} else {
				sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
		if (cur_seg == NULL) {
			sg_temp->final = 1;
			cpu_to_hw_sg(sg_temp);
			break;
		}
		cpu_to_hw_sg(sg_temp);
	}
	return 0;
}
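
/* For contiguous (single-segment) transmits the buffer release policy depends
 * on who owns the data: a direct, un-cloned mbuf can be handed to BMAN for
 * release after transmission (valid bpid), while cloned or indirect mbufs get
 * the invalid bpid 0xff so the hardware leaves the buffer alone and the EAL
 * mbuf free path stays responsible for it.
 */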
/* Handle mbufs which are not segmented (non SG) */
static inline void
tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
			    struct dpaa_bp_info *bp_info,
			    struct qm_fd *fd_arr)
{
	struct rte_mbuf *mi = NULL;

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			/* In case of direct mbuf and mbuf being cloned,
			 * BMAN should _not_ release buffer.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
			/* Buffer should be released by EAL */
			rte_mbuf_refcnt_update(mbuf, -1);
		} else {
			/* In case of direct mbuf and no cloning, mbuf can be
			 * released by BMAN.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
		}
	} else {
		/* This is data-containing core mbuf: 'mi' */
		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1) {
			/* In case of indirect mbuf, and mbuf being cloned,
			 * BMAN should _not_ release it and let EAL release
			 * it through pktmbuf_free below.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
		} else {
			/* In case of indirect mbuf, and no cloning, core mbuf
			 * should be released by BMAN.
			 * Increase refcnt of core mbuf so that when
			 * pktmbuf_free is called and mbuf is released, EAL
			 * doesn't try to release core mbuf which would have
			 * been released by BMAN.
			 */
			rte_mbuf_refcnt_update(mi, 1);
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
		}
		rte_pktmbuf_free(mbuf);
	}

	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
		if (mbuf->data_off < (DEFAULT_TX_ICEOF +
				      sizeof(struct dpaa_eth_parse_results_t))) {
			DPAA_DP_LOG(DEBUG, "Checksum offload Err: "
				    "Not enough headroom "
				    "space for correct checksum offload. "
				    "So calculating checksum in software.");
			dpaa_checksum(mbuf);
		} else {
			dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
		}
	}
}

/* Handle all mbufs on dpaa BMAN managed pool */
static inline uint16_t
tx_on_dpaa_pool(struct rte_mbuf *mbuf,
		struct dpaa_bp_info *bp_info,
		struct qm_fd *fd_arr)
{
	DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);

	if (mbuf->nb_segs == 1) {
		/* Case for non-segmented buffers */
		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
	} else if (mbuf->nb_segs > 1 &&
		   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
			DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
			return 1;
		}
	} else {
		DPAA_PMD_DEBUG("Number of Segments not supported");
		return 1;
	}

	return 0;
}

/* Handle all mbufs on an external pool (non-dpaa) */
static inline uint16_t
tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
		    struct qm_fd *fd_arr)
{
	struct dpaa_if *dpaa_intf = txq->dpaa_intf;
	struct rte_mbuf *dmable_mbuf;

	DPAA_DP_LOG(DEBUG, "Non-BMAN offloaded buffer. "
		    "Allocating an offloaded buffer");
	dmable_mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);
	if (!dmable_mbuf) {
		DPAA_DP_LOG(DEBUG, "no dpaa buffers.");
		return 1;
	}

	/* Build the FD from the newly allocated DMA-able copy; the original
	 * mbuf has already been freed by dpaa_get_dmable_mbuf().
	 */
	DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);

	return 0;
}

uint16_t
dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct rte_mbuf *mbuf, *mi = NULL;
	struct rte_mempool *mp;
	struct dpaa_bp_info *bp_info;
	struct qm_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, loop, i = 0;
	uint16_t state;
	int ret;

	ret = rte_dpaa_portal_init((void *)0);
	if (ret) {
		DPAA_PMD_ERR("Failure in affining portal");
		return 0;
	}

	DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);

	while (nb_bufs) {
		frames_to_send = (nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs;
		for (loop = 0; loop < frames_to_send; loop++, i++) {
			mbuf = bufs[i];
			if (RTE_MBUF_DIRECT(mbuf)) {
				mp = mbuf->pool;
			} else {
				mi = rte_mbuf_from_indirect(mbuf);
				mp = mi->pool;
			}

			bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
			if (likely(mp->ops_index == bp_info->dpaa_ops_index)) {
				state = tx_on_dpaa_pool(mbuf, bp_info,
							&fd_arr[loop]);
				if (unlikely(state)) {
					/* Set frames_to_send & nb_bufs so
					 * that packets are transmitted till
					 * previous frame.
					 */
					frames_to_send = loop;
					nb_bufs = loop;
					goto send_pkts;
				}
			} else {
				state = tx_on_external_pool(q, mbuf,
							    &fd_arr[loop]);
				if (unlikely(state)) {
					/* Set frames_to_send & nb_bufs so
					 * that packets are transmitted till
					 * previous frame.
					 */
					frames_to_send = loop;
					nb_bufs = loop;
					goto send_pkts;
				}
			}
		}

send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi(q, &fd_arr[loop],
						   frames_to_send - loop);
		}
		nb_bufs -= frames_to_send;
	}

	DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", i, q);

	return i;
}

uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,
			      struct rte_mbuf **bufs __rte_unused,
			      uint16_t nb_bufs __rte_unused)
{
	DPAA_DP_LOG(DEBUG, "Drop all packets");

	/* Drop all incoming packets. No need to free packets here
	 * because the rte_eth f/w frees up the packets through tx_buffer
	 * callback in case this function returns a count less than nb_bufs
	 */
	return 0;
}
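
/* Note: these burst handlers are installed as the ethdev RX/TX callbacks by
 * the init/start code in dpaa_ethdev.c, so an application drives them through
 * the usual DPDK burst API, e.g. (illustrative only):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	uint16_t sent = rte_eth_tx_burst(port_id, 0, pkts, nb);
 *	while (sent < nb)
 *		rte_pktmbuf_free(pkts[sent++]);
 */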