Lines matching defs:sq

36 mlx5e_do_send_cqe_inline(struct mlx5e_sq *sq)
38 sq->cev_counter++;
40 if (sq->cev_counter >= sq->cev_factor) {
41 sq->cev_counter = 0;
48 mlx5e_do_send_cqe(struct mlx5e_sq *sq)
51 return (mlx5e_do_send_cqe_inline(sq));
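The two helpers above implement completion-event moderation: mlx5e_do_send_cqe_inline() counts posted WQEs and requests a completion only on every cev_factor-th one, resetting the counter when it fires. A minimal self-contained sketch of the same counter pattern (the type and function names here are ours, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct moderated_sq {
        unsigned cev_counter;  /* WQEs posted since the last requested CQE */
        unsigned cev_factor;   /* request a CQE every cev_factor WQEs */
    };

    /* Return true when the WQE being posted should request a completion. */
    static bool
    want_cqe(struct moderated_sq *sq)
    {
        if (++sq->cev_counter >= sq->cev_factor) {
            sq->cev_counter = 0;
            return (true);
        }
        return (false);
    }

    int
    main(void)
    {
        struct moderated_sq sq = { .cev_counter = 0, .cev_factor = 4 };

        for (int i = 0; i < 10; i++)
            printf("wqe %d: cqe=%d\n", i, (int)want_cqe(&sq));
        return (0);
    }

With cev_factor = 4 only every fourth send generates a CQE, which is why mlx5e_poll_tx_cq() below charges sq->cev_factor against its budget per completion.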
55 mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
57 u16 pi = sq->pc & sq->wq.sz_m1;
58 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
62 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
63 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
64 if (mlx5e_do_send_cqe_inline(sq))
70 memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
72 sq->mbuf[pi].mbuf = NULL;
73 sq->mbuf[pi].num_bytes = 0;
74 sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
75 sq->pc += sq->mbuf[pi].num_wqebbs;
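mlx5e_send_nop() stamps a NOP control segment at the current producer index and advances sq->pc by the WQE basic blocks (WQEBBs) that ds_cnt occupies. The index math relies on a power-of-two ring: pc is free-running and pc & sz_m1 wraps it. A small sketch of just that arithmetic, assuming MLX5_SEND_WQEBB_NUM_DS = 4 (four 16-byte data segments per 64-byte WQEBB):

    #include <stdint.h>
    #include <stdio.h>

    #define WQEBB_NUM_DS 4 /* 16-byte segments per 64-byte WQEBB */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int
    main(void)
    {
        uint16_t pc = 0xfffe;      /* free-running producer counter */
        uint16_t sz_m1 = 1024 - 1; /* ring size minus one, power of two */
        uint32_t ds_cnt = 6;       /* 16-byte segments in this WQE */

        uint16_t pi = pc & sz_m1;  /* slot index, wraps without a modulo */
        unsigned wqebbs = DIV_ROUND_UP(ds_cnt, WQEBB_NUM_DS);

        printf("pi=%u wqebbs=%u next pc=%u\n", pi, wqebbs,
            (uint16_t)(pc + wqebbs));
        return (0);
    }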
93 struct mlx5e_sq *sq;
104 sq = container_of(mb_tag,
105 struct mlx5e_rl_channel, tag)->sq;
114 sq = &container_of(mb_tag,
115 struct mlx5e_channel, tag)->sq[0];
125 sq = NULL;
130 if (sq != NULL && READ_ONCE(sq->running) != 0)
131 return (sq);
140 struct mlx5e_sq *sq;
172 sq = &priv->channel[ch].sq[tc];
173 if (likely(READ_ONCE(sq->running) != 0))
174 return (sq);
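Both selection paths above recover the structure owning a send tag with container_of() and return the queue only while its running flag reads nonzero; otherwise the caller falls back or drops. A minimal sketch of that embedded-member recovery, with hypothetical type names standing in for mlx5e_channel and its tag:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct send_tag { int id; };

    struct channel {
        int sq;                /* stand-in for the real send queue */
        struct send_tag tag;   /* tag handed out to the network stack */
    };

    int
    main(void)
    {
        struct channel ch = { .sq = 7, .tag = { .id = 1 } };
        struct send_tag *t = &ch.tag;

        /* Walk back from the embedded tag to the channel holding it. */
        struct channel *owner = container_of(t, struct channel, tag);
        printf("sq=%d\n", owner->sq);
        return (0);
    }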
179 mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
221 return (MIN(mb->m_pkthdr.len, sq->max_inline));
548 mlx5e_sq_dump_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **mbp)
568 pi = sq->pc & sq->wq.sz_m1;
570 sq->mbuf[pi].num_bytes = mb->m_pkthdr.len;
571 sq->mbuf[pi].num_wqebbs = 0;
574 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
578 sq->stats.defragged++;
586 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
594 bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
598 msb = sq->priv->params_ethtool.hw_mtu_msb;
604 bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
611 if (unlikely(!mlx5e_sq_has_room_for(sq, xsegs))) {
612 sq->stats.enobuf++;
613 bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
619 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
620 wqe_last = mlx5_wq_cyc_get_wqe(&sq->wq, sq->wq.sz_m1);
633 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
634 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
639 wqe->data.lkey = sq->mkey_be;
644 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, 0);
648 sq->mbuf[pi].num_wqebbs++;
649 sq->pc++;
653 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
654 wqe_last = mlx5_wq_cyc_get_wqe(&sq->wq, (sq->pc - 1) & sq->wq.sz_m1);
660 if (mlx5e_do_send_cqe_inline(sq))
664 memcpy(sq->doorbell.d32, wqe_last, sizeof(sq->doorbell.d32));
667 sq->mbuf[pi].mbuf = mb;
668 sq->mbuf[pi].mst = m_snd_tag_ref(parg->mst);
671 sq->stats.packets++;
672 sq->stats.bytes += sq->mbuf[pi].num_bytes;
678 sq->stats.dropped++;
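mlx5e_sq_dump_xmit() (and mlx5e_sq_xmit() below) maps the mbuf chain with bus_dmamap_load_mbuf_sg(), and when the chain has too many segments it defragments once, bumps stats.defragged, and retries before giving up. A hedged outline of that retry shape; dma_load() and defrag() here are stand-ins, not the real busdma API:

    #include <errno.h>
    #include <stdio.h>

    /* Pretend-load: fails with EFBIG when the chain has too many segments. */
    static int
    dma_load(int nsegs, int max_segs)
    {
        return (nsegs > max_segs ? EFBIG : 0);
    }

    /* Pretend-defrag: a compacted chain fits in a single segment. */
    static int
    defrag(int nsegs)
    {
        (void)nsegs;
        return (1);
    }

    int
    main(void)
    {
        int max_segs = 2, chain = 5, defragged = 0;

        int err = dma_load(chain, max_segs);
        if (err == EFBIG) {
            defragged++;                 /* sq->stats.defragged++ */
            chain = defrag(chain);
            err = dma_load(chain, max_segs);
        }
        printf("err=%d defragged=%d\n", err, defragged);
        return (0);
    }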
685 mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
704 if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
705 sq->stats.enobuf++;
710 pi = ((~sq->pc) & sq->wq.sz_m1);
713 mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
714 pi = ((~sq->pc) & sq->wq.sz_m1);
716 sq->stats.enobuf++;
723 switch (mlx5e_sq_tls_xmit(sq, &args, mbp)) {
739 pi = sq->pc & sq->wq.sz_m1;
740 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
741 ifp = sq->ifp;
761 sq->stats.csum_offload_none++;
781 sq->mbuf[pi].num_bytes = payload_len + (num_pkts * args.ihs);
784 sq->stats.tso_packets++;
785 sq->stats.tso_bytes += payload_len;
815 sq->mbuf[pi].num_bytes = payload_len +
818 sq->stats.tso_packets++;
819 sq->stats.tso_bytes += payload_len;
852 sq->mbuf[pi].num_bytes = max_t(unsigned int,
859 switch (sq->min_inline_mode) {
864 args.ihs = mlx5e_get_l2_header_size(sq, mb);
867 args.ihs = mlx5e_get_l2_header_size(sq, mb);
873 (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
879 (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
884 args.ihs = mlx5e_get_l2_header_size(sq, mb);
889 sq->mbuf[pi].num_bytes = max_t(unsigned int,
900 if (unlikely(args.ihs > (sq->max_inline - ETHER_VLAN_ENCAP_LEN))) {
905 args.ihs = (sq->max_inline - ETHER_VLAN_ENCAP_LEN);
924 if (unlikely(args.ihs > sq->max_inline)) {
930 args.ihs = sq->max_inline;
944 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
948 sq->stats.defragged++;
956 err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
965 bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
969 bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
978 dseg->lkey = sq->mkey_be;
985 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
986 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
989 if (mlx5e_do_send_cqe_inline(sq))
995 memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
998 sq->mbuf[pi].mbuf = mb;
999 sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
1001 sq->mbuf[pi].mst = m_snd_tag_ref(args.mst);
1003 MPASS(sq->mbuf[pi].mst == NULL);
1005 sq->pc += sq->mbuf[pi].num_wqebbs;
1008 sq->stats.packets++;
1009 sq->stats.bytes += sq->mbuf[pi].num_bytes;
1015 sq->stats.dropped++;
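Near its top (source lines 704-716) mlx5e_sq_xmit() checks for 2 * MLX5_SEND_WQE_MAX_WQEBBS of head room and then NOP-pads when the largest possible WQE would straddle the wrap: pi = (~sq->pc) & sq->wq.sz_m1 is the count of contiguous slots left before index zero, minus one, so a NOP of (pi + 1) WQEBBs lands the producer exactly on the wrap. A small sketch of that arithmetic, assuming a power-of-two ring and a stand-in MAX_WQEBBS:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_WQEBBS 16 /* stand-in for MLX5_SEND_WQE_MAX_WQEBBS */

    int
    main(void)
    {
        uint16_t sz_m1 = 64 - 1; /* 64-entry ring */
        uint16_t pc = 60;        /* producer four slots from the top */

        /* Contiguous slots before the wrap, minus one. */
        uint16_t pi = (~pc) & sz_m1;

        if (pi < (MAX_WQEBBS - 1)) {
            /* A NOP of (pi + 1) WQEBBs moves pc exactly to the wrap. */
            pc += pi + 1;
            printf("padded, pc now %u (index %u)\n", pc, pc & sz_m1);
        }
        return (0);
    }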
1022 mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
1027 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
1030 sqcc = sq->cc;
1042 cqe = mlx5e_get_cqe(&sq->cq);
1046 mlx5_cqwq_pop(&sq->cq.wq);
1050 mlx5e_dump_err_cqe(&sq->cq, sq->sqn, (const void *)cqe);
1051 sq->stats.cqe_err++;
1059 budget -= sq->cev_factor;
1064 } else if (unlikely(x == sq->cev_factor)) {
1066 sq->stats.cqe_err++;
1069 ci = sqcc & sq->wq.sz_m1;
1071 match = (delta < sq->mbuf[ci].num_wqebbs);
1072 mb = sq->mbuf[ci].mbuf;
1073 sq->mbuf[ci].mbuf = NULL;
1074 mst = sq->mbuf[ci].mst;
1075 sq->mbuf[ci].mst = NULL;
1078 if (unlikely(sq->mbuf[ci].num_bytes == 0))
1079 sq->stats.nop++;
1081 bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
1083 bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);
1092 sqcc += sq->mbuf[ci].num_wqebbs;
1096 mlx5_cqwq_update_db_record(&sq->cq.wq);
1101 sq->cc = sqcc;
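mlx5e_poll_tx_cq() walks completions with a local copy of the consumer counter, advancing sqcc by each entry's num_wqebbs, and stores it back to sq->cc only after mlx5_cqwq_update_db_record() (per the comment at source line 1027), so slots are never seen as free prematurely. A minimal sketch of that local-then-publish loop, with the CQ machinery elided:

    #include <stdint.h>
    #include <stdio.h>

    struct entry { unsigned num_wqebbs; };

    int
    main(void)
    {
        struct entry ring[8] = { {1}, {2}, {1}, {3} }; /* posted WQE sizes */
        uint16_t sz_m1 = 8 - 1;
        uint16_t cc = 0, pc = 7; /* consumer/producer counters */

        uint16_t sqcc = cc;      /* work on a local copy */
        while (sqcc != pc) {
            uint16_t ci = sqcc & sz_m1;
            sqcc += ring[ci].num_wqebbs; /* retire one posted WQE */
        }
        /* ...CQ doorbell record update happens here, then publish: */
        cc = sqcc;
        printf("cc=%u\n", cc);
        return (0);
    }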
1105 mlx5e_xmit_locked(if_t ifp, struct mlx5e_sq *sq, struct mbuf *mb)
1110 READ_ONCE(sq->running) == 0)) {
1116 if (mlx5e_sq_xmit(sq, &mb) != 0) {
1123 mlx5e_tx_notify_hw(sq, false);
1129 if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
1130 sq->cev_factor != 1)) {
1132 mlx5e_sq_cev_timeout(sq);
1135 sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
1143 struct mlx5e_sq *sq;
1148 sq = mlx5e_select_queue_by_send_tag(ifp, mb);
1149 if (unlikely(sq == NULL)) {
1154 sq = mlx5e_select_queue(ifp, mb);
1155 if (unlikely(sq == NULL)) {
1164 mtx_lock(&sq->lock);
1165 ret = mlx5e_xmit_locked(ifp, sq, mb);
1166 mtx_unlock(&sq->lock);
1174 struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);
1176 mtx_lock(&sq->comp_lock);
1177 mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
1178 mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
1179 mtx_unlock(&sq->comp_lock);
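This completion path runs under sq->comp_lock while the transmit path in mlx5e_xmit() takes sq->lock, so posting and reaping can proceed concurrently on the same queue; the handler polls up to MLX5E_BUDGET_MAX entries and re-arms the CQ before dropping the lock. A tiny sketch of that two-lock split, assuming pthreads:

    #include <pthread.h>
    #include <stdio.h>

    struct sq {
        pthread_mutex_t lock;      /* serializes transmitters */
        pthread_mutex_t comp_lock; /* serializes completion polling */
        int pending;               /* outstanding completions */
    };

    static void
    tx_complete(struct sq *sq)
    {
        pthread_mutex_lock(&sq->comp_lock);
        sq->pending = 0; /* poll the CQ and re-arm it here */
        pthread_mutex_unlock(&sq->comp_lock);
    }

    int
    main(void)
    {
        struct sq sq = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, 3
        };

        tx_complete(&sq); /* transmitters may hold sq.lock meanwhile */
        printf("pending=%d\n", sq.pending);
        return (0);
    }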