Lines matching defs:mb (occurrences of the mbuf pointer mb in the mlx5 EN receive path, listed with their source line numbers)
38 struct mbuf *mb;
47 mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
49 if (unlikely(mb == NULL))
52 mb->m_len = MLX5E_MAX_RX_BYTES;
53 mb->m_pkthdr.len = MLX5E_MAX_RX_BYTES;
58 mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0,
60 if (unlikely(mb == NULL)) {
64 mb->m_len = MLX5E_MAX_RX_BYTES;
68 mb = mb_head;
71 m_adj(mb, MLX5E_NET_IP_ALIGN);
77 mb, segs, &nsegs, BUS_DMA_NOWAIT);
97 rq->mbuf[ix].mbuf = mb;
98 rq->mbuf[ix].data = mb->m_data;
105 m_freem(mb);
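
The fragments above (source lines 38-105) come from the RX WQE allocation path: the first cluster mbuf carries the packet header, further clusters are linked through m_next, the chain is rewound to its head, adjusted by MLX5E_NET_IP_ALIGN, DMA-loaded, and freed on any failure. Below is a minimal, self-contained sketch of that chain-building pattern; the function name alloc_rx_chain() and the MCLBYTES segment size are illustrative assumptions, not the driver's own definitions.

/*
 * Sketch only, not the driver function: build a chain of "nsegs"
 * cluster mbufs the way the fragments above do, and unwind on
 * allocation failure.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

static struct mbuf *
alloc_rx_chain(int nsegs)
{
        struct mbuf *mb_head, *mb;
        int i;

        /* The first segment owns the packet header (M_PKTHDR). */
        mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
        if (mb == NULL)
                return (NULL);
        mb->m_len = MCLBYTES;
        mb->m_pkthdr.len = MCLBYTES;

        /* Remaining segments are plain data mbufs linked via m_next. */
        for (i = 1; i < nsegs; i++) {
                mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0, MCLBYTES);
                if (mb == NULL) {
                        m_freem(mb_head);       /* frees the whole chain */
                        return (NULL);
                }
                mb->m_len = MCLBYTES;
                mb_head->m_pkthdr.len += MCLBYTES;
        }
        return (mb_head);
}
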
132 mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
145 eh = mtod(mb, struct ether_header *);
206 ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
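
Source lines 132-206 are from mlx5e_lro_update_hdr(), which patches the headers of an LRO-merged packet and recomputes the IPv4 header checksum. A hedged sketch of that fixup follows; it uses in_cksum_hdr() over a contiguous header instead of the driver's in_cksum() call over the mbuf, and assumes an untagged IPv4 frame whose headers fit in the first mbuf. fixup_ip4_csum() is an illustrative name.

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <machine/in_cksum.h>

static void
fixup_ip4_csum(struct mbuf *mb)
{
        struct ether_header *eh;
        struct ip *ip4;

        eh = mtod(mb, struct ether_header *);
        if (ntohs(eh->ether_type) != ETHERTYPE_IP)
                return;
        ip4 = (struct ip *)(eh + 1);

        /* Zero the old sum before recomputing it over the IP header. */
        ip4->ip_sum = 0;
        ip4->ip_sum = in_cksum_hdr(ip4);
}
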
281 struct mbuf *mb, struct mlx5e_rq_mbuf *mr, u32 cqe_bcnt)
291 mlx5e_lro_update_hdr(mb, cqe);
296 mb->m_pkthdr.len = cqe_bcnt;
297 for (mb_head = mb; mb != NULL; mb = mb->m_next) {
298 if (mb->m_len > cqe_bcnt)
299 mb->m_len = cqe_bcnt;
300 cqe_bcnt -= mb->m_len;
302 if (likely(mb->m_next != NULL)) {
304 m_freem(mb->m_next);
305 mb->m_next = NULL;
311 mb = mb_head;
315 mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
322 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4);
325 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV4);
328 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV4);
332 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV6);
335 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV6);
338 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV6);
341 M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
345 M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
349 M_HASHTYPE_SETINNER(mb);
352 mb->m_pkthdr.flowid = rq->ix;
353 M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
355 mb->m_pkthdr.rcvif = ifp;
356 mb->m_pkthdr.leaf_rcvif = ifp;
366 mb->m_pkthdr.csum_flags |=
370 mb->m_pkthdr.csum_data = htons(0xffff);
373 mb->m_pkthdr.csum_flags |=
383 mb->m_pkthdr.csum_flags =
386 mb->m_pkthdr.csum_data = htons(0xffff);
392 mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info);
393 mb->m_flags |= M_VLANTAG;
405 mb->m_flags |= M_TSTMP_HPREC;
408 mb->m_pkthdr.rcv_tstmp = tstmp;
409 mb->m_flags |= M_TSTMP;
415 mb->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
425 mlx5e_accel_ipsec_handle_rx(mb, cqe, mr);
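
Source lines 281-425 are from mlx5e_build_rx_mbuf(): the chain is trimmed to the byte count reported by the CQE, the RSS flowid and hash type are taken from the CQE, and checksum, VLAN, timestamp, TLS and IPsec metadata are attached to the packet header. A minimal sketch of the trimming step is below; trim_rx_chain() is an illustrative name, not a driver symbol.

#include <sys/param.h>
#include <sys/mbuf.h>

static void
trim_rx_chain(struct mbuf *mb, uint32_t bcnt)
{
        struct mbuf *m;

        /* The CQE byte count becomes the packet length. */
        mb->m_pkthdr.len = bcnt;
        for (m = mb; m != NULL; m = m->m_next) {
                if (m->m_len > bcnt)
                        m->m_len = bcnt;
                bcnt -= m->m_len;
                if (bcnt == 0) {
                        /* Drop any mbufs past the last received byte. */
                        if (m->m_next != NULL) {
                                m_freem(m->m_next);
                                m->m_next = NULL;
                        }
                        break;
                }
        }
}
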
512 struct mbuf *mb;
543 rq->mbuf[wqe_counter].data, seglen, rq->ifp, &mb);
574 (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
576 mb->m_len = MHLEN - MLX5E_NET_IP_ALIGN;
578 mb->m_data += MLX5E_NET_IP_ALIGN;
580 bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t),
583 mb = rq->mbuf[wqe_counter].mbuf;
590 mlx5e_build_rx_mbuf(cqe, rq, mb, &rq->mbuf[wqe_counter],
595 mb->m_pkthdr.numa_domain = if_getnumadomain(rq->ifp);
599 tcp_lro_queue_mbuf(&rq->lro, mb);
601 if (mb->m_pkthdr.csum_flags == 0 ||
604 tcp_lro_rx(&rq->lro, mb, 0) != 0) {
605 if_input(rq->ifp, mb);
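
Source lines 512-605 are from the completion handler: short frames are copied into a freshly allocated header mbuf so the large RX cluster chain can stay on the ring, longer frames take over the ring mbuf, and the result goes either to the LRO engine (tcp_lro_queue_mbuf()/tcp_lro_rx()) or straight to if_input(). Below is a sketch of the small-frame copy path; copy_small_frame() is an illustrative name and the MLX5E_NET_IP_ALIGN value is an assumption here, defined locally for self-containedness.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

#define MLX5E_NET_IP_ALIGN      2       /* assumed value of the driver constant */

static struct mbuf *
copy_small_frame(const void *data, int len)
{
        struct mbuf *mb;

        /* Only frames that fit a header mbuf are copied; the caller
         * keeps using the ring mbuf otherwise. */
        if (len > MHLEN - MLX5E_NET_IP_ALIGN)
                return (NULL);

        mb = m_gethdr(M_NOWAIT, MT_DATA);
        if (mb == NULL)
                return (NULL);

        /* Reserve the alignment bytes, then copy the received frame. */
        mb->m_data += MLX5E_NET_IP_ALIGN;
        bcopy(data, mtod(mb, caddr_t), len);
        mb->m_pkthdr.len = mb->m_len = len;
        return (mb);
}
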
632 struct mbuf *mb = m_gethdr(M_NOWAIT, MT_DATA);
634 if (mb != NULL) {
636 mb->m_pkthdr.len = mb->m_len = 15;
637 memset(mb->m_data, 255, 14);
638 mb->m_data[14] = rq->ix;
639 mb->m_pkthdr.rcvif = rq->ifp;
640 mb->m_pkthdr.leaf_rcvif = rq->ifp;
641 if_input(rq->ifp, mb);