Lines Matching defs:mbuf

52     struct mbuf *);
53 static struct mbuf *ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *,
56 struct mbuf *);
57 static void ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *, bool);
59 struct mbuf **mbuf);
60 static int ena_xmit_mbuf(struct ena_ring *, struct mbuf **);
136 ena_mq_start(if_t ifp, struct mbuf *m)
219 if (unlikely(tx_ring->tx_buffer_info[*req_id].mbuf == NULL)) {
221 "tx_info doesn't have valid mbuf. req_id %hu qid %hu\n",
235 * descriptors. We find the related mbuf chain in a map (index in an array)
268 struct mbuf *mbuf;
276 mbuf = tx_info->mbuf;
278 tx_info->mbuf = NULL;
285 ena_log_io(adapter->pdev, DBG, "tx: q %d mbuf %p completed\n",
286 tx_ring->qid, mbuf);
288 m_freem(mbuf);
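
The excerpt above (source lines 219-288) shows how TX completions are resolved: the completed descriptor's req_id indexes the tx_buffer_info array, the stored mbuf chain is detached from its slot, and the chain is freed. A minimal, hedged sketch of that lookup-and-free pattern follows; struct tx_slot and tx_complete_one() are illustrative names, only m_freem() is the real kernel API.

    /*
     * Sketch only: release the mbuf chain recorded for a completed TX
     * descriptor. struct tx_slot stands in for a tx_buffer_info entry.
     */
    #include <sys/param.h>
    #include <sys/mbuf.h>

    struct tx_slot {
        struct mbuf *mbuf;      /* chain handed to the NIC, NULL when idle */
    };

    static void
    tx_complete_one(struct tx_slot *tx_buffer_info, uint16_t req_id)
    {
        struct tx_slot *tx_info = &tx_buffer_info[req_id];
        struct mbuf *m = tx_info->mbuf;

        if (m == NULL)          /* completion for an empty slot */
            return;

        tx_info->mbuf = NULL;   /* detach before freeing, as in the excerpt */
        m_freem(m);             /* frees the entire chain */
    }
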
347 struct mbuf *mbuf)
352 mbuf->m_pkthdr.flowid = ena_rx_ctx->hash;
362 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
369 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
377 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
380 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
383 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
389 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
392 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
395 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
399 M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
402 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
405 mbuf->m_pkthdr.flowid = rx_ring->qid;
406 M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
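
Source lines 347-406 record the hardware RSS result in the mbuf so the stack can steer the packet without rehashing: the 32-bit hash becomes m_pkthdr.flowid and M_HASHTYPE_SET() notes what the hash covered, with queue-id-as-flowid plus M_HASHTYPE_NONE as the fallback when no usable hash exists. A minimal sketch of that idiom, assuming the caller already knows the hash and whether it covered the TCP/IPv4 tuple; the function and its parameters are illustrative.

    #include <sys/param.h>
    #include <sys/mbuf.h>

    /*
     * Sketch only: publish a hardware RSS hash through the mbuf packet
     * header. 'hash_is_tcp_ipv4' and 'qid' are hypothetical inputs.
     */
    static void
    rx_set_flowid(struct mbuf *m, uint32_t hw_hash, bool hash_is_tcp_ipv4,
        uint32_t qid, bool rss_enabled)
    {
        if (rss_enabled) {
            m->m_pkthdr.flowid = hw_hash;
            M_HASHTYPE_SET(m, hash_is_tcp_ipv4 ?
                M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_OPAQUE_HASH);
        } else {
            /* No usable hash: fall back to the receive queue id. */
            m->m_pkthdr.flowid = qid;
            M_HASHTYPE_SET(m, M_HASHTYPE_NONE);
        }
    }
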
411 * ena_rx_mbuf - assemble mbuf from descriptors
418 static struct mbuf *
422 struct mbuf *mbuf;
436 if (unlikely(rx_info->mbuf == NULL)) {
437 ena_log(pdev, ERR, "NULL mbuf in rx_info. qid %u req_id %u\n",
443 ena_log_io(pdev, DBG, "rx_info %p, mbuf %p, paddr %jx\n", rx_info,
444 rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr);
448 mbuf = rx_info->mbuf;
449 mbuf->m_flags |= M_PKTHDR;
450 mbuf->m_pkthdr.len = len;
451 mbuf->m_len = len;
453 mbuf->m_data = mtodo(mbuf, ena_rx_ctx->pkt_offset);
455 mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp;
457 /* Fill mbuf with hash key and its interpretation for optimization */
458 ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf);
460 ena_log_io(pdev, DBG, "rx mbuf 0x%p, flags=0x%x, len: %d\n", mbuf,
461 mbuf->m_flags, mbuf->m_pkthdr.len);
466 rx_info->mbuf = NULL;
480 if (unlikely(rx_info->mbuf == NULL)) {
481 ena_log(pdev, ERR, "NULL mbuf in rx_info. qid %u req_id %u\n",
493 m_freem(mbuf);
500 if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) {
502 ena_log_io(pdev, WARN, "Failed to append Rx mbuf %p\n",
503 mbuf);
506 ena_log_io(pdev, DBG, "rx mbuf updated. len %d\n",
507 mbuf->m_pkthdr.len);
509 /* Free already appended mbuf, it won't be useful anymore */
511 m_freem(rx_info->mbuf);
512 rx_info->mbuf = NULL;
520 return (mbuf);
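
ena_rx_mbuf() (source lines 411-520) assembles one packet from one or more RX descriptors: the first buffer's mbuf becomes the packet header, each further descriptor's payload is appended with m_append(), and the staging mbuf is freed once its bytes have been copied. The sketch below isolates that assembly loop under the assumption that the caller supplies the per-descriptor mbufs and lengths and that bufs[0] was allocated with packet-header storage; it is not the driver's code.

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <net/if.h>
    #include <net/if_var.h>

    /*
     * Sketch only: build one packet from 'ndescs' staging mbufs. On an
     * m_append() failure the partly built packet is freed and NULL is
     * returned; any remaining staging mbufs stay with the caller.
     */
    static struct mbuf *
    rx_assemble(struct mbuf **bufs, const int *lens, int ndescs, if_t ifp)
    {
        struct mbuf *m = bufs[0];
        int i;

        m->m_flags |= M_PKTHDR;         /* first buffer carries the header */
        m->m_len = m->m_pkthdr.len = lens[0];
        m->m_pkthdr.rcvif = ifp;

        for (i = 1; i < ndescs; i++) {
            /* m_append() copies the data and bumps m_pkthdr.len itself. */
            if (m_append(m, lens[i], mtod(bufs[i], c_caddr_t)) == 0) {
                m_freem(m);
                return (NULL);
            }
            /* Staging mbuf is no longer needed once its data is copied. */
            m_freem(bufs[i]);
            bufs[i] = NULL;
        }
        return (m);
    }
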
524 * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum
528 struct mbuf *mbuf)
536 mbuf->m_pkthdr.csum_flags = 0;
547 mbuf->m_pkthdr.csum_flags = 0;
551 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
552 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
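
ena_rx_checksum() (source lines 524-552) translates the hardware checksum verdict into m_pkthdr.csum_flags: the flags are cleared for a bad or unchecked frame, and CSUM_IP_CHECKED | CSUM_IP_VALID are set when the IP header checked out. A hedged sketch of that mapping, with the usual FreeBSD idiom for a validated L4 checksum included for completeness; the two boolean inputs stand in for the fields of the device's RX context.

    #include <sys/param.h>
    #include <sys/mbuf.h>

    /*
     * Sketch only: map a hardware checksum verdict onto mbuf csum_flags.
     * 'l3_ok' and 'l4_ok' are hypothetical per-packet status inputs.
     */
    static void
    rx_set_csum_flags(struct mbuf *m, bool l3_ok, bool l4_ok)
    {
        if (!l3_ok) {
            /* Bad or unchecked header: let the stack verify it itself. */
            m->m_pkthdr.csum_flags = 0;
            return;
        }
        m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
        if (l4_ok) {
            /* TCP/UDP payload verified; report a "good" pseudo checksum. */
            m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xffff;
        }
    }
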
567 struct mbuf *mbuf;
634 /* Receive mbuf from the ring */
635 mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs, &ena_rx_ctx,
640 if (unlikely(mbuf == NULL)) {
652 ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf);
657 mbuf->m_pkthdr.len);
659 mbuf->m_pkthdr.len);
667 ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
676 (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0))
681 "calling if_input() with mbuf %p\n", mbuf);
682 if_input(ifp, mbuf);
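
The RX cleanup loop (source lines 567-682) hands each completed packet either to LRO, when the checksum was validated and LRO entries are configured, or straight to if_input(). A minimal sketch of that hand-off, assuming a struct lro_ctrl that was already set up with tcp_lro_init().

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <net/if.h>
    #include <net/if_var.h>
    #include <netinet/in.h>
    #include <netinet/tcp_lro.h>

    /*
     * Sketch only: prefer LRO aggregation, fall back to per-packet input.
     * tcp_lro_rx() returns 0 when it takes ownership of the mbuf.
     */
    static void
    rx_input(if_t ifp, struct lro_ctrl *lro, struct mbuf *m, bool csum_valid)
    {
        if (csum_valid && lro->lro_cnt != 0 && tcp_lro_rx(lro, m, 0) == 0)
            return;         /* LRO queued or merged the packet */

        if_input(ifp, m);   /* normal delivery to the stack */
    }
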
708 ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf,
713 struct mbuf *mbuf_next;
726 mss = mbuf->m_pkthdr.tso_segsz;
731 if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0)
734 if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
737 if ((mbuf->m_pkthdr.csum_flags & CSUM6_OFFLOAD) != 0)
751 eh = mtod(mbuf, struct ether_vlan_header *);
760 mbuf_next = m_getptr(mbuf, ehdrlen, &offset);
773 iphlen = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &ipproto);
783 mbuf_next = m_getptr(mbuf, iphlen + ehdrlen, &offset);
786 if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) {
789 if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
796 if ((mbuf->m_pkthdr.csum_flags &
803 if ((mbuf->m_pkthdr.csum_flags &
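
ena_tx_csum() (source lines 708-803) walks the Ethernet and IP headers with mtod()/m_getptr() and inspects m_pkthdr.csum_flags to decide which offloads (TSO, L3/L4 checksum) the packet asks for. The sketch below covers only the generic, device-independent part of that classification and assumes the Ethernet header is contiguous in the first mbuf; the device-context fields themselves are left out.

    #include <sys/param.h>
    #include <sys/socket.h>
    #include <sys/mbuf.h>
    #include <net/ethernet.h>
    #include <netinet/in.h>

    /*
     * Sketch only: VLAN-aware Ethernet header length plus a coarse view
     * of the requested offloads. Assumes m->m_len >= sizeof(*eh).
     */
    static void
    tx_classify_offload(struct mbuf *m, int *ehdrlen, bool *want_tso,
        bool *want_csum)
    {
        struct ether_vlan_header *eh;

        eh = mtod(m, struct ether_vlan_header *);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
            *ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        else
            *ehdrlen = ETHER_HDR_LEN;

        *want_tso = (m->m_pkthdr.csum_flags & CSUM_TSO) != 0;
        *want_csum = (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP |
            CSUM_UDP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) != 0;
    }
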
820 ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
823 struct mbuf *collapsed_mbuf;
827 num_frags = ena_mbuf_count(*mbuf);
834 ((*mbuf)->m_pkthdr.len < tx_ring->tx_max_header_size))
839 collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT,
846 /* If mbuf was collapsed successfully, original mbuf is released. */
847 *mbuf = collapsed_mbuf;
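
ena_check_and_collapse_mbuf() (source lines 820-847) counts the fragments in the outgoing chain and, when the hardware limit would be exceeded, compacts it with m_collapse() before DMA mapping, replacing the caller's pointer only on success. A hedged sketch of that pattern; 'max_frags' is an assumed parameter rather than the ring's real limit, and per the m_collapse(9) contract the original chain stays with the caller on failure.

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/mbuf.h>

    /*
     * Sketch only: defragment a TX chain that exceeds 'max_frags'.
     * m_collapse() compacts in place where it can and returns the chain
     * head on success, or NULL on failure (chain left with the caller).
     */
    static int
    tx_maybe_collapse(struct mbuf **mp, int max_frags)
    {
        struct mbuf *m, *collapsed;
        int frags = 0;

        for (m = *mp; m != NULL; m = m->m_next)
            frags++;
        if (frags <= max_frags)
            return (0);     /* already fits, nothing to do */

        collapsed = m_collapse(*mp, M_NOWAIT, max_frags);
        if (collapsed == NULL)
            return (ENOMEM);

        *mp = collapsed;
        return (0);
    }
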
854 struct mbuf *mbuf, void **push_hdr, u16 *header_len)
864 mbuf_head_len = mbuf->m_len;
865 tx_info->mbuf = mbuf;
869 * For easier maintaining of the DMA map, map the whole mbuf even if
873 tx_info->dmamap, mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
888 * First check if header fits in the mbuf. If not, copy it to
891 *header_len = min_t(uint32_t, mbuf->m_pkthdr.len,
894 /* If header is in linear space, just point into mbuf's data. */
896 *push_hdr = mbuf->m_data;
902 m_copydata(mbuf, 0, *header_len,
910 "mbuf: %p header_buf->vaddr: %p push_len: %d\n",
911 mbuf, *push_hdr, *header_len);
914 if (mbuf->m_pkthdr.len <= tx_ring->tx_max_header_size) {
920 * it and just map the residuum of the mbuf to DMA
944 * mbuf, setting header_len to 0 makes the device ignore
950 /* Map rest of the mbuf */
963 tx_info->mbuf = NULL;
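
ena_tx_map_mbuf() (source lines 854-963) loads the whole chain into the DMA map with bus_dmamap_load_mbuf_sg() and then chooses the "push" header for the device: if enough bytes sit in the first mbuf it simply points at m_data, otherwise it gathers the header into a bounce buffer with m_copydata(). A simplified sketch of that split, assuming the DMA tag, map and bounce buffer were created elsewhere; MAX_TX_SEGS and the function itself are illustrative.

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <machine/bus.h>

    #define MAX_TX_SEGS    32      /* hypothetical per-packet segment limit */

    /*
     * Sketch only: DMA-map a TX chain and pick the header bytes to push.
     * The whole chain is mapped even when the header is sent separately,
     * matching the comment in the excerpt above.
     */
    static int
    tx_map_for_dma(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m,
        int push_limit, caddr_t header_buf, void **push_hdr, int *push_len)
    {
        bus_dma_segment_t segs[MAX_TX_SEGS];
        int error, nsegs;

        error = bus_dmamap_load_mbuf_sg(tag, map, m, segs, &nsegs,
            BUS_DMA_NOWAIT);
        if (error != 0)
            return (error);

        *push_len = MIN(m->m_pkthdr.len, push_limit);
        if (m->m_len >= *push_len) {
            /* Header is contiguous: point straight into the mbuf. */
            *push_hdr = m->m_data;
        } else {
            /* Header spans mbufs: gather it into the bounce buffer. */
            m_copydata(m, 0, *push_len, header_buf);
            *push_hdr = header_buf;
        }
        return (0);
    }
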
968 ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
990 rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
992 ena_log_io(pdev, WARN, "Failed to collapse mbuf! err: %d\n",
997 ena_log_io(pdev, DBG, "Tx: %d bytes\n", (*mbuf)->m_pkthdr.len);
1004 ENA_WARN(tx_info->mbuf != NULL, adapter->ena_dev,
1005 "mbuf isn't NULL for req_id %d\n", req_id);
1007 rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len);
1009 ena_log_io(pdev, WARN, "Failed to map TX mbuf\n");
1020 ena_tx_csum(&ena_tx_ctx, *mbuf, adapter->disable_meta_caching);
1048 (*mbuf)->m_pkthdr.len);
1052 (*mbuf)->m_pkthdr.len);
1095 tx_info->mbuf = NULL;
1104 struct mbuf *mbuf;
1116 while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
1118 "\ndequeued mbuf %p with flags %#x and header csum flags %#jx\n",
1119 mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags);
1122 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1126 if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) {
1128 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1130 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
1132 m_freem(mbuf);
1146 BPF_MTAP(adapter->ifp, mbuf);
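
The transmit loop (source lines 1104-1146) drains the per-ring buf_ring with the peek/putback/advance discipline: peek at the next packet, put it back if it cannot be sent right now, advance past it once it has been queued to the hardware, then tap BPF. A hedged sketch of that loop around a hypothetical per-driver send callback; as in the excerpt, the driver is assumed to keep ownership of the chain until TX completion, which is what makes the BPF tap after hand-off safe.

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/mbuf.h>
    #include <net/if.h>
    #include <net/if_var.h>
    #include <net/bpf.h>

    /*
     * Sketch only: buf_ring drain loop. 'xmit' is a hypothetical send
     * routine; ENOBUFS means "try again later", any other error means
     * the packet is unusable and is dropped here.
     */
    static void
    txq_drain(if_t ifp, struct buf_ring *br,
        int (*xmit)(void *sc, struct mbuf **mp), void *sc)
    {
        struct mbuf *m;
        int error;

        while ((m = drbr_peek(ifp, br)) != NULL) {
            error = xmit(sc, &m);
            if (error != 0) {
                if (error == ENOBUFS) {
                    drbr_putback(ifp, br, m);   /* keep ordering, retry */
                } else {
                    drbr_advance(ifp, br);
                    m_freem(m);                 /* drop unusable packet */
                }
                break;
            }
            drbr_advance(ifp, br);      /* packet queued to the HW */
            BPF_MTAP(ifp, m);           /* driver still owns the chain */
        }
    }
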