Lines Matching +full:ctrl +full:- +full:len
(search-result excerpt; the matched lines below appear to come from the Mellanox mlx5en Ethernet transmit path)
1 /*-
2 * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
38 sq->cev_counter++;
40 if (sq->cev_counter >= sq->cev_factor) {
41 sq->cev_counter = 0;
57 u16 pi = sq->pc & sq->wq.sz_m1;
58 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
60 memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));
62 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
63 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
65 wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
67 wqe->ctrl.fm_ce_se = 0;
70 memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
72 sq->mbuf[pi].mbuf = NULL;
73 sq->mbuf[pi].num_bytes = 0;
74 sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
75 sq->pc += sq->mbuf[pi].num_wqebbs;
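
The NOP fragment above ends with the WQEBB accounting that every post in this listing repeats: the WQE's size in 16-byte segments (ds_cnt) is rounded up to 64-byte send-queue building blocks, and the producer counter advances by that many WQEBBs. A minimal sketch of the arithmetic, assuming the usual mlx5 sizes (MLX5_SEND_WQE_BB = 64, MLX5_SEND_WQE_DS = 16, hence MLX5_SEND_WQEBB_NUM_DS = 4):

#include <stdio.h>

#define SEND_WQE_BB	64	/* bytes per send-queue building block (assumed) */
#define SEND_WQE_DS	16	/* bytes per WQE data segment (assumed) */
#define WQEBB_NUM_DS	(SEND_WQE_BB / SEND_WQE_DS)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int
main(void)
{
	/* ds_cnt counts 16-byte WQE segments; each WQEBB holds four. */
	for (unsigned int ds_cnt = 1; ds_cnt <= 8; ds_cnt++)
		printf("ds_cnt=%u -> num_wqebbs=%u\n",
		    ds_cnt, DIV_ROUND_UP(ds_cnt, WQEBB_NUM_DS));
	return (0);
}
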
95 mb_tag = mb->m_pkthdr.snd_tag;
101 switch (mb_tag->sw->type) {
105 struct mlx5e_rl_channel, tag)->sq;
109 mb_tag = container_of(mb_tag, struct mlx5e_tls_tag, tag)->rl_tag;
115 struct mlx5e_channel, tag)->sq[0];
116 KASSERT((mb_tag->refcount > 0),
121 mb_tag = container_of(mb_tag, struct mlx5e_tls_tag, tag)->rl_tag;
130 if (sq != NULL && READ_ONCE(sq->running) != 0)
145 if (mb->m_flags & M_VLANTAG) {
146 tc = (mb->m_pkthdr.ether_vtag >> 13);
147 if (tc >= priv->num_tc)
148 tc = priv->default_vlan_prio;
150 tc = priv->default_vlan_prio;
153 ch = priv->params.num_channels;
160 if (rss_hash2bucket(mb->m_pkthdr.flowid,
165 ch = (mb->m_pkthdr.flowid % 128) % ch;
172 sq = &priv->channel[ch].sq[tc];
173 if (likely(READ_ONCE(sq->running) != 0))
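
The fragment above selects a send queue from a traffic class and a channel. A rough, self-contained sketch of the selection arithmetic visible here (the VLAN PCP bits pick the traffic class, and the flowid is folded into the channel range when no RSS bucket mapping applies); the helper names, defaults, and sample values are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers; names and defaults are not from the driver. */
static unsigned int
pick_tc(uint16_t ether_vtag, unsigned int num_tc, unsigned int default_prio)
{
	unsigned int tc = ether_vtag >> 13;	/* 3-bit 802.1Q PCP */

	return (tc >= num_tc ? default_prio : tc);
}

static unsigned int
pick_channel(uint32_t flowid, unsigned int num_channels)
{
	/* Fold the flow hash into the configured channel range. */
	return ((flowid % 128) % num_channels);
}

int
main(void)
{
	printf("tc=%u ch=%u\n", pick_tc(0x6001, 8, 0),
	    pick_channel(0xdeadbeefU, 8));
	return (0);
}
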
186 if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
188 } else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
189 if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
191 eth_type = ntohs(eh->evl_proto);
194 eth_type = ntohs(eh->evl_encap_proto);
216 if (mb->m_pkthdr.len < min_inline)
221 return (MIN(mb->m_pkthdr.len, sq->max_inline));
248 if (unlikely(mb->m_len < ETHER_HDR_LEN))
250 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
251 if (unlikely(mb->m_len < ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
253 eth_type = ntohs(eh->evl_proto);
256 eth_type = ntohs(eh->evl_encap_proto);
262 ip = (const struct ip *)(mb->m_data + eth_hdr_len);
263 if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip)))
265 switch (ip->ip_p) {
267 ip_hlen = ip->ip_hl << 2;
271 ip_hlen = ip->ip_hl << 2;
280 ip6 = (const struct ip6_hdr *)(mb->m_data + eth_hdr_len);
281 if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip6)))
283 switch (ip6->ip6_nxt) {
299 if (unlikely(mb->m_len < eth_hdr_len + sizeof(*th))) {
300 const struct mbuf *m_th = mb->m_next;
301 if (unlikely(mb->m_len != eth_hdr_len ||
302 m_th == NULL || m_th->m_len < sizeof(*th)))
304 th = (const struct tcphdr *)(m_th->m_data);
306 th = (const struct tcphdr *)(mb->m_data + eth_hdr_len);
308 tcp_hlen = th->th_off << 2;
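
These lines parse the Ethernet, IP, and TCP headers to size the inline header for TSO. A minimal sketch of the length math, assuming the standard 14-byte Ethernet header and 4-byte 802.1Q encapsulation; ip_hl and th_off are 4-bit counts of 32-bit words, hence the "<< 2" conversions above:

#include <stdio.h>

#define ETHER_HDR_LEN		14
#define ETHER_VLAN_ENCAP_LEN	4

/* Hypothetical helper; the field values come from struct ip / struct tcphdr. */
static unsigned int
tso_header_size(int vlan_encap, unsigned int ip_hl, unsigned int th_off)
{
	unsigned int eth_hdr_len = ETHER_HDR_LEN +
	    (vlan_encap ? ETHER_VLAN_ENCAP_LEN : 0);
	unsigned int ip_hlen = ip_hl << 2;	/* 32-bit words -> bytes */
	unsigned int tcp_hlen = th_off << 2;

	return (eth_hdr_len + ip_hlen + tcp_hlen);
}

int
main(void)
{
	/* Untagged frame, 20-byte IPv4 header, 20-byte TCP header -> 54. */
	printf("ihs=%u\n", tso_header_size(0, 5, 5));
	return (0);
}
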
316 if (unlikely(mb->m_pkthdr.len < eth_hdr_len))
334 if (unlikely(mb[0]->m_len == eth_hdr_len)) {
336 if (unlikely((mb[0] = mb[0]->m_next) == NULL))
339 if (unlikely(mb[0]->m_len < eth_hdr_len - poffset[0] + min_len))
341 return (mb[0]->m_data + eth_hdr_len - poffset[0]);
370 pkt_hdr_len = mb->m_pkthdr.len;
371 has_outer_vlan_tag = (mb->m_flags & M_VLANTAG) != 0;
375 if (unlikely(mb->m_len < ETHER_HDR_LEN))
378 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
379 if (unlikely(mb->m_len < ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
381 eth_type = eh->evl_proto;
384 eth_type = eh->evl_encap_proto;
394 ip_type = ip4->ip_p;
397 wqe->eth.swp_outer_l3_offset = eth_hdr_len / 2;
398 wqe->eth.cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
399 ip_hlen = ip4->ip_hl << 2;
405 wqe->eth.swp_outer_l4_offset = eth_hdr_len / 2;
406 wqe->eth.swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_TYPE;
414 ip_type = ip6->ip6_nxt;
417 wqe->eth.swp_outer_l3_offset = eth_hdr_len / 2;
418 wqe->eth.cs_flags = MLX5_ETH_WQE_L4_CSUM;
424 wqe->eth.swp_outer_l4_offset = eth_hdr_len / 2;
425 wqe->eth.swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_TYPE |
448 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
449 if (unlikely(mb->m_len < eth_hdr_len - offset + ETHER_HDR_LEN +
452 eth_type = eh->evl_proto;
455 eth_type = eh->evl_encap_proto;
466 wqe->eth.swp_inner_l3_offset = eth_hdr_len / 2;
467 wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
468 ip_type = ip4->ip_p;
469 ip_hlen = ip4->ip_hl << 2;
477 wqe->eth.swp_inner_l3_offset = eth_hdr_len / 2;
478 wqe->eth.swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_TYPE;
479 ip_type = ip6->ip6_nxt;
499 wqe->eth.swp_inner_l4_offset = (eth_hdr_len / 2);
500 wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
501 wqe->eth.swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_TYPE;
509 wqe->eth.swp_inner_l4_offset = eth_hdr_len / 2;
510 wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
511 tcp_hlen = th->th_off << 2;
523 wqe->eth.swp_outer_l3_offset += ETHER_VLAN_ENCAP_LEN / 2;
524 wqe->eth.swp_outer_l4_offset += ETHER_VLAN_ENCAP_LEN / 2;
525 wqe->eth.swp_inner_l3_offset += ETHER_VLAN_ENCAP_LEN / 2;
526 wqe->eth.swp_inner_l4_offset += ETHER_VLAN_ENCAP_LEN / 2;
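
All of the software-parser (SWP) offsets written above are divided by two because the hardware expects them in units of 16-bit words; an outer VLAN tag inserted by the hardware therefore shifts every offset by ETHER_VLAN_ENCAP_LEN / 2 = 2 words. A small sketch with illustrative VXLAN byte offsets (14/34/64/84, not taken from the excerpt):

#include <stdio.h>

#define ETHER_VLAN_ENCAP_LEN	4

struct swp_offsets {		/* all values in 16-bit words */
	unsigned int outer_l3;
	unsigned int outer_l4;
	unsigned int inner_l3;
	unsigned int inner_l4;
};

static void
swp_account_outer_vlan(struct swp_offsets *swp)
{
	/* A hardware-inserted 802.1Q tag shifts every header by 4 bytes. */
	swp->outer_l3 += ETHER_VLAN_ENCAP_LEN / 2;
	swp->outer_l4 += ETHER_VLAN_ENCAP_LEN / 2;
	swp->inner_l3 += ETHER_VLAN_ENCAP_LEN / 2;
	swp->inner_l4 += ETHER_VLAN_ENCAP_LEN / 2;
}

int
main(void)
{
	/* Byte offsets 14/34/64/84 (outer IP/UDP, inner IP/TCP), in words. */
	struct swp_offsets swp = { 14 / 2, 34 / 2, 64 / 2, 84 / 2 };

	swp_account_outer_vlan(&swp);
	printf("outer_l3=%u outer_l4=%u inner_l3=%u inner_l4=%u (words)\n",
	    swp.outer_l3, swp.outer_l4, swp.inner_l3, swp.inner_l4);
	return (0);
}
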
533 if (wqe->eth.cs_flags & (MLX5_ETH_WQE_L3_INNER_CSUM |
535 wqe->eth.cs_flags &= ~MLX5_ETH_WQE_L4_CSUM;
541 struct mlx5_wqe_ctrl_seg ctrl;
568 pi = sq->pc & sq->wq.sz_m1;
570 sq->mbuf[pi].num_bytes = mb->m_pkthdr.len;
571 sq->mbuf[pi].num_wqebbs = 0;
574 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
578 sq->stats.defragged++;
586 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
594 bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
598 msb = sq->priv->params_ethtool.hw_mtu_msb;
604 bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
612 sq->stats.enobuf++;
613 bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
619 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
620 wqe_last = mlx5_wq_cyc_get_wqe(&sq->wq, sq->wq.sz_m1);
624 u32 len = segs[x].ds_len - off;
627 if (likely(len > msb))
628 len = msb;
630 memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));
633 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
634 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
635 wqe->ctrl.imm = cpu_to_be32(parg->tisn << 8);
638 wqe->data.addr = cpu_to_be64((uint64_t)segs[x].ds_addr + off);
639 wqe->data.lkey = sq->mkey_be;
640 wqe->data.byte_count = cpu_to_be32(len);
644 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, 0);
648 sq->mbuf[pi].num_wqebbs++;
649 sq->pc++;
653 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
654 wqe_last = mlx5_wq_cyc_get_wqe(&sq->wq, (sq->pc - 1) & sq->wq.sz_m1);
657 wqe->ctrl.fm_ce_se |= MLX5_FENCE_MODE_INITIATOR_SMALL;
661 wqe_last->ctrl.fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
664 memcpy(sq->doorbell.d32, wqe_last, sizeof(sq->doorbell.d32));
667 sq->mbuf[pi].mbuf = mb;
668 sq->mbuf[pi].mst = m_snd_tag_ref(parg->mst);
671 sq->stats.packets++;
672 sq->stats.bytes += sq->mbuf[pi].num_bytes;
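
The dump-WQE loop above splits each DMA segment into chunks of at most msb (hw_mtu_msb) bytes and appears to post one small WQE per chunk, advancing num_wqebbs and pc each time. A self-contained sketch of that splitting, counting how many WQEs a scatter list would generate; the segment lengths and the msb value are made up for illustration:

#include <stdint.h>
#include <stdio.h>

static unsigned int
count_dump_wqes(const uint32_t *seg_len, unsigned int nsegs, uint32_t msb)
{
	unsigned int wqes = 0;

	for (unsigned int x = 0; x < nsegs; x++) {
		for (uint32_t off = 0; off < seg_len[x];) {
			uint32_t len = seg_len[x] - off;

			if (len > msb)
				len = msb;	/* clamp to the MTU-derived bound */
			off += len;
			wqes++;
		}
	}
	return (wqes);
}

int
main(void)
{
	uint32_t segs[] = { 9000, 1500, 60 };	/* illustrative lengths */

	printf("wqes=%u\n", count_dump_wqes(segs, 3, 4096));
	return (0);
}
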
678 sq->stats.dropped++;
705 sq->stats.enobuf++;
710 pi = ((~sq->pc) & sq->wq.sz_m1);
711 if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
714 pi = ((~sq->pc) & sq->wq.sz_m1);
715 if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
716 sq->stats.enobuf++;
739 pi = sq->pc & sq->wq.sz_m1;
740 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
741 ifp = sq->ifp;
754 if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
755 wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
757 if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
758 wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
760 if (wqe->eth.cs_flags == 0) {
761 sq->stats.csum_offload_none++;
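
The cs_flags lines above translate the mbuf's checksum-offload requests into WQE flags: an IP checksum or TSO request sets L3 checksum insertion, any TCP/UDP (or TSO) request sets L4 insertion, and a zero result is counted as "no offload". A simplified sketch of that mapping; the flag bit values are assumptions for the sketch, not the kernel's or the device's:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit values for the sketch only; not the kernel's CSUM_* flags. */
#define CSUM_IP		0x01
#define CSUM_TCP	0x02
#define CSUM_UDP	0x04
#define CSUM_TSO	0x08

#define WQE_L3_CSUM	0x40	/* stands in for MLX5_ETH_WQE_L3_CSUM */
#define WQE_L4_CSUM	0x80	/* stands in for MLX5_ETH_WQE_L4_CSUM */

static uint8_t
csum_to_cs_flags(uint32_t csum_flags)
{
	uint8_t cs = 0;

	if (csum_flags & (CSUM_IP | CSUM_TSO))
		cs |= WQE_L3_CSUM;
	if (csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_TSO))
		cs |= WQE_L4_CSUM;
	return (cs);	/* zero means "no offload requested" */
}

int
main(void)
{
	printf("cs_flags=%#x\n",
	    (unsigned int)csum_to_cs_flags(CSUM_IP | CSUM_TCP));
	return (0);
}
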
763 if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
765 u32 mss = mb->m_pkthdr.tso_segsz;
768 wqe->eth.mss = cpu_to_be16(mss);
776 payload_len = mb->m_pkthdr.len - args.ihs;
781 sq->mbuf[pi].num_bytes = payload_len + (num_pkts * args.ihs);
784 sq->stats.tso_packets++;
785 sq->stats.tso_bytes += payload_len;
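
For TSO, the byte count charged to the queue is the payload plus one copy of the inlined headers per generated segment, since the hardware replays the headers for each segment it emits. A minimal sketch of that accounting, assuming num_pkts is derived by dividing the payload by the MSS (that division is not itself shown in the excerpt):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int
tso_wire_bytes(unsigned int pkt_len, unsigned int ihs, unsigned int mss)
{
	unsigned int payload_len = pkt_len - ihs;
	unsigned int num_pkts = (payload_len == 0) ?
	    1 : DIV_ROUND_UP(payload_len, mss);

	/* Headers are replayed by the hardware once per TSO segment. */
	return (payload_len + num_pkts * ihs);
}

int
main(void)
{
	/* 64 KiB of TCP payload behind 54 bytes of headers, 1448-byte MSS. */
	printf("wire bytes=%u\n", tso_wire_bytes(65536 + 54, 54, 1448));
	return (0);
}
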
786 } else if (mb->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN) {
788 if (mb->m_pkthdr.csum_flags & (CSUM_INNER_IP_TSO |
791 u32 mss = mb->m_pkthdr.tso_segsz;
794 wqe->eth.mss = cpu_to_be16(mss);
810 payload_len = mb->m_pkthdr.len - args.ihs;
815 sq->mbuf[pi].num_bytes = payload_len +
818 sq->stats.tso_packets++;
819 sq->stats.tso_bytes += payload_len;
826 if (mb->m_pkthdr.csum_flags &
834 } else if (mb->m_pkthdr.csum_flags & CSUM_INNER_IP) {
852 sq->mbuf[pi].num_bytes = max_t (unsigned int,
853 mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
859 switch (sq->min_inline_mode) {
872 if ((mb->m_flags & M_VLANTAG) != 0 &&
873 (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
875 wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
876 wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
878 } else if ((mb->m_flags & M_VLANTAG) == 0 &&
879 (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
880 /* inlining non-VLAN data is not required */
889 sq->mbuf[pi].num_bytes = max_t (unsigned int,
890 mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
895 } else if ((mb->m_flags & M_VLANTAG) != 0) {
897 wqe->eth.inline_hdr_start;
900 if (unlikely(args.ihs > (sq->max_inline - ETHER_VLAN_ENCAP_LEN))) {
901 if (mb->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_ENCAP_VXLAN)) {
905 args.ihs = (sq->max_inline - ETHER_VLAN_ENCAP_LEN);
913 eh->evl_proto = eh->evl_encap_proto;
914 eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
915 eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
917 m_copydata(mb, 0, args.ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
918 m_adj(mb, args.ihs - ETHER_HDR_LEN);
921 wqe->eth.inline_hdr_sz = cpu_to_be16(args.ihs);
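
When the mbuf carries an out-of-band VLAN tag and the headers are inlined, the tag has to be spliced into the inlined copy: the MAC addresses are copied, evl_encap_proto becomes ETHERTYPE_VLAN, evl_tag takes the mbuf's ether_vtag, and the original EtherType moves behind the tag. A hedged stand-alone sketch using a local stand-in for struct ether_vlan_header:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* htons()/ntohs() */

#define ETHERTYPE_VLAN	0x8100

/* Local stand-in for struct ether_vlan_header. */
struct vlan_ether_header {
	uint8_t  evl_dhost[6];
	uint8_t  evl_shost[6];
	uint16_t evl_encap_proto;	/* becomes 0x8100 */
	uint16_t evl_tag;		/* PCP/DEI/VID from the mbuf */
	uint16_t evl_proto;		/* original EtherType */
} __attribute__((__packed__));

static void
insert_vlan(struct vlan_ether_header *evh, const uint8_t *frame,
    uint16_t ether_vtag)
{
	memcpy(evh->evl_dhost, frame, 6);
	memcpy(evh->evl_shost, frame + 6, 6);
	/* The original EtherType moves behind the inserted 802.1Q tag. */
	memcpy(&evh->evl_proto, frame + 12, 2);
	evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evh->evl_tag = htons(ether_vtag);
}

int
main(void)
{
	uint8_t frame[14] = { 0 };
	struct vlan_ether_header evh;

	frame[12] = 0x08;	/* EtherType 0x0800 (IPv4), network order */
	insert_vlan(&evh, frame, 100);
	printf("encap=0x%04x tag=0x%04x proto=0x%04x\n",
	    ntohs(evh.evl_encap_proto), ntohs(evh.evl_tag),
	    ntohs(evh.evl_proto));
	return (0);
}
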
924 if (unlikely(args.ihs > sq->max_inline)) {
925 if (unlikely(mb->m_pkthdr.csum_flags & (CSUM_TSO |
930 args.ihs = sq->max_inline;
932 m_copydata(mb, 0, args.ihs, wqe->eth.inline_hdr_start);
934 wqe->eth.inline_hdr_sz = cpu_to_be16(args.ihs);
938 if (args.ihs > sizeof(wqe->eth.inline_hdr_start)) {
939 ds_cnt += DIV_ROUND_UP(args.ihs - sizeof(wqe->eth.inline_hdr_start),
942 dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;
944 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
948 sq->stats.defragged++;
956 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
965 bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
969 bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
977 dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
978 dseg->lkey = sq->mkey_be;
979 dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
983 ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));
985 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
986 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
987 wqe->ctrl.imm = cpu_to_be32(args.tisn << 8);
990 wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
992 wqe->ctrl.fm_ce_se = 0;
995 memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
998 sq->mbuf[pi].mbuf = mb;
999 sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
1001 sq->mbuf[pi].mst = m_snd_tag_ref(args.mst);
1003 MPASS(sq->mbuf[pi].mst == NULL);
1005 sq->pc += sq->mbuf[pi].num_wqebbs;
1008 sq->stats.packets++;
1009 sq->stats.bytes += sq->mbuf[pi].num_bytes;
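
Because every WQE segment (control, eth, inline-header padding, scatter entries) is a multiple of 16 bytes, the final ds_cnt above is recovered by plain pointer subtraction from the start of the WQE. A tiny sketch of that pointer arithmetic with a 16-byte stand-in segment type:

#include <stdint.h>
#include <stdio.h>

/* 16-byte stand-in for struct mlx5_wqe_data_seg. */
struct data_seg {
	uint32_t byte_count;
	uint32_t lkey;
	uint64_t addr;
};

int
main(void)
{
	struct data_seg wqe[8];			/* pretend WQE buffer */
	struct data_seg *dseg = &wqe[0];

	dseg += 1;	/* control segment */
	dseg += 1;	/* eth segment (with a small inline header) */
	dseg += 2;	/* two scatter/gather entries */

	/* Pointer subtraction recovers the ds_cnt written into qpn_ds. */
	printf("ds_cnt=%td\n", dseg - &wqe[0]);
	return (0);
}
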
1015 sq->stats.dropped++;
1027 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
1030 sqcc = sq->cc;
1042 cqe = mlx5e_get_cqe(&sq->cq);
1046 mlx5_cqwq_pop(&sq->cq.wq);
1050 mlx5e_dump_err_cqe(&sq->cq, sq->sqn, (const void *)cqe);
1051 sq->stats.cqe_err++;
1055 sqcc_this = be16toh(cqe->wqe_counter);
1059 budget -= sq->cev_factor;
1064 } else if (unlikely(x == sq->cev_factor)) {
1066 sq->stats.cqe_err++;
1069 ci = sqcc & sq->wq.sz_m1;
1070 delta = sqcc_this - sqcc;
1071 match = (delta < sq->mbuf[ci].num_wqebbs);
1072 mb = sq->mbuf[ci].mbuf;
1073 sq->mbuf[ci].mbuf = NULL;
1074 mst = sq->mbuf[ci].mst;
1075 sq->mbuf[ci].mst = NULL;
1078 if (unlikely(sq->mbuf[ci].num_bytes == 0))
1079 sq->stats.nop++;
1081 bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
1083 bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);
1092 sqcc += sq->mbuf[ci].num_wqebbs;
1096 mlx5_cqwq_update_db_record(&sq->cq.wq);
1101 sq->cc = sqcc;
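
The completion path above advances a free-running consumer counter (sqcc) by each entry's num_wqebbs and masks with wq.sz_m1 to get ring indices, so the counters may wrap without special handling. A minimal sketch of that producer/consumer bookkeeping, assuming 16-bit counters and a power-of-two ring size:

#include <stdint.h>
#include <stdio.h>

#define WQ_SIZE		256		/* power of two (assumed) */
#define WQ_SZ_M1	(WQ_SIZE - 1)

int
main(void)
{
	uint16_t pc = 0xfffe;	/* producer counter, about to wrap */
	uint16_t cc = 0xfffc;	/* consumer counter, trailing by 2 WQEBBs */

	/* Masking gives ring indices; subtraction gives outstanding WQEBBs. */
	printf("pi=%u ci=%u outstanding=%u\n",
	    (unsigned int)(pc & WQ_SZ_M1), (unsigned int)(cc & WQ_SZ_M1),
	    (unsigned int)(uint16_t)(pc - cc));
	return (0);
}
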
1110 READ_ONCE(sq->running) == 0)) {
1129 if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
1130 sq->cev_factor != 1)) {
1135 sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
1146 if (mb->m_pkthdr.csum_flags & CSUM_SND_TAG) {
1147 MPASS(mb->m_pkthdr.snd_tag->ifp == ifp);
1164 mtx_lock(&sq->lock);
1166 mtx_unlock(&sq->lock);
1176 mtx_lock(&sq->comp_lock);
1178 mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
1179 mtx_unlock(&sq->comp_lock);