Lines Matching +full:ctrl +full:- +full:len

59 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_create_tx_ring()
70 return -ENOMEM; in mlx4_en_create_tx_ring()
75 if ((err = -bus_dma_tag_create( in mlx4_en_create_tx_ring()
76 bus_get_dma_tag(mdev->pdev->dev.bsddev), in mlx4_en_create_tx_ring()
87 &ring->dma_tag))) in mlx4_en_create_tx_ring()
90 ring->size = size; in mlx4_en_create_tx_ring()
91 ring->size_mask = size - 1; in mlx4_en_create_tx_ring()
92 ring->stride = stride; in mlx4_en_create_tx_ring()
93 ring->inline_thold = MAX(MIN_PKT_LEN, MIN(priv->prof->inline_thold, MAX_INLINE)); in mlx4_en_create_tx_ring()
94 mtx_init(&ring->tx_lock, "mlx4 tx", NULL, MTX_DEF); in mlx4_en_create_tx_ring()
95 mtx_init(&ring->comp_lock, "mlx4 comp", NULL, MTX_DEF); in mlx4_en_create_tx_ring()
98 ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node); in mlx4_en_create_tx_ring()
99 if (!ring->tx_info) { in mlx4_en_create_tx_ring()
100 ring->tx_info = kzalloc(tmp, GFP_KERNEL); in mlx4_en_create_tx_ring()
101 if (!ring->tx_info) { in mlx4_en_create_tx_ring()
102 err = -ENOMEM; in mlx4_en_create_tx_ring()
109 err = -bus_dmamap_create(ring->dma_tag, 0, in mlx4_en_create_tx_ring()
110 &ring->tx_info[x].dma_map); in mlx4_en_create_tx_ring()
112 while (x--) { in mlx4_en_create_tx_ring()
113 bus_dmamap_destroy(ring->dma_tag, in mlx4_en_create_tx_ring()
114 ring->tx_info[x].dma_map); in mlx4_en_create_tx_ring()
121 ring->tx_info, tmp); in mlx4_en_create_tx_ring()
123 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); in mlx4_en_create_tx_ring()
126 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, in mlx4_en_create_tx_ring()
133 err = mlx4_en_map_buffer(&ring->wqres.buf); in mlx4_en_create_tx_ring()
139 ring->buf = ring->wqres.buf.direct.buf; in mlx4_en_create_tx_ring()
141 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " in mlx4_en_create_tx_ring()
142 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, in mlx4_en_create_tx_ring()
143 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); in mlx4_en_create_tx_ring()
145 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, in mlx4_en_create_tx_ring()
152 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); in mlx4_en_create_tx_ring()
154 en_err(priv, "Failed allocating qp %d\n", ring->qpn); in mlx4_en_create_tx_ring()
157 ring->qp.event = mlx4_en_sqp_event; in mlx4_en_create_tx_ring()
159 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); in mlx4_en_create_tx_ring()
162 ring->bf.uar = &mdev->priv_uar; in mlx4_en_create_tx_ring()
163 ring->bf.uar->map = mdev->uar_map; in mlx4_en_create_tx_ring()
164 ring->bf_enabled = false; in mlx4_en_create_tx_ring()
166 ring->bf_enabled = true; in mlx4_en_create_tx_ring()
167 ring->queue_index = queue_idx; in mlx4_en_create_tx_ring()
173 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); in mlx4_en_create_tx_ring()
175 mlx4_en_unmap_buffer(&ring->wqres.buf); in mlx4_en_create_tx_ring()
177 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); in mlx4_en_create_tx_ring()
180 bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map); in mlx4_en_create_tx_ring()
182 vfree(ring->tx_info); in mlx4_en_create_tx_ring()
184 bus_dma_tag_destroy(ring->dma_tag); in mlx4_en_create_tx_ring()
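
The sizing lines above (ring->size = size; ring->size_mask = size - 1) only make sense when the ring size is a power of two: the producer and consumer counters are free-running 32-bit values, and a slot index is recovered with a mask rather than a modulo (see ring->prod & ring->size_mask further down). A minimal standalone sketch of that convention, with illustrative values rather than the driver's types:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t size = 1024;            /* must be a power of two */
	const uint32_t size_mask = size - 1;
	uint32_t prod = UINT32_C(0xfffffffe);  /* free-running producer counter */

	assert((size & (size - 1)) == 0);      /* power-of-two check */

	for (int i = 0; i < 4; i++, prod++) {
		/* the mask wraps the slot index correctly, even across 2^32 */
		printf("prod=%" PRIu32 " slot=%" PRIu32 "\n", prod, prod & size_mask);
	}
	return 0;
}
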
193 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_destroy_tx_ring()
196 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); in mlx4_en_destroy_tx_ring()
198 if (ring->bf_enabled) in mlx4_en_destroy_tx_ring()
199 mlx4_bf_free(mdev->dev, &ring->bf); in mlx4_en_destroy_tx_ring()
200 mlx4_qp_remove(mdev->dev, &ring->qp); in mlx4_en_destroy_tx_ring()
201 mlx4_qp_free(mdev->dev, &ring->qp); in mlx4_en_destroy_tx_ring()
202 mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); in mlx4_en_destroy_tx_ring()
203 mlx4_en_unmap_buffer(&ring->wqres.buf); in mlx4_en_destroy_tx_ring()
204 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); in mlx4_en_destroy_tx_ring()
205 for (x = 0; x != ring->size; x++) in mlx4_en_destroy_tx_ring()
206 bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map); in mlx4_en_destroy_tx_ring()
207 vfree(ring->tx_info); in mlx4_en_destroy_tx_ring()
208 mtx_destroy(&ring->tx_lock); in mlx4_en_destroy_tx_ring()
209 mtx_destroy(&ring->comp_lock); in mlx4_en_destroy_tx_ring()
210 bus_dma_tag_destroy(ring->dma_tag); in mlx4_en_destroy_tx_ring()
219 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_activate_tx_ring()
222 ring->cqn = cq; in mlx4_en_activate_tx_ring()
223 ring->prod = 0; in mlx4_en_activate_tx_ring()
224 ring->cons = 0xffffffff; in mlx4_en_activate_tx_ring()
225 ring->last_nr_txbb = 1; in mlx4_en_activate_tx_ring()
226 ring->poll_cnt = 0; in mlx4_en_activate_tx_ring()
227 memset(ring->buf, 0, ring->buf_size); in mlx4_en_activate_tx_ring()
228 ring->watchdog_time = 0; in mlx4_en_activate_tx_ring()
230 ring->qp_state = MLX4_QP_STATE_RST; in mlx4_en_activate_tx_ring()
231 ring->doorbell_qpn = ring->qp.qpn << 8; in mlx4_en_activate_tx_ring()
233 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, in mlx4_en_activate_tx_ring()
234 ring->cqn, user_prio, &ring->context); in mlx4_en_activate_tx_ring()
235 if (ring->bf_enabled) in mlx4_en_activate_tx_ring()
236 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); in mlx4_en_activate_tx_ring()
238 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, in mlx4_en_activate_tx_ring()
239 &ring->qp, &ring->qp_state); in mlx4_en_activate_tx_ring()
246 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_deactivate_tx_ring()
248 mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, in mlx4_en_deactivate_tx_ring()
249 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); in mlx4_en_deactivate_tx_ring()
254 struct mbuf *mb, int len, __be32 owner_bit) in mlx4_en_store_inline_lso_data() argument
259 m_copydata(mb, 0, len, inl + 4); in mlx4_en_store_inline_lso_data()
260 dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT); in mlx4_en_store_inline_lso_data()
266 int len, __be32 owner_bit) in mlx4_en_store_inline_lso_header() argument
274 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; in mlx4_en_stamp_wqe()
276 (ring->buf + (index * TXBB_SIZE)); in mlx4_en_stamp_wqe()
283 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { in mlx4_en_stamp_wqe()
296 tx_info = &ring->tx_info[index]; in mlx4_en_free_tx_desc()
297 mb = tx_info->mb; in mlx4_en_free_tx_desc()
302 bus_dmamap_sync(ring->dma_tag, tx_info->dma_map, in mlx4_en_free_tx_desc()
304 bus_dmamap_unload(ring->dma_tag, tx_info->dma_map); in mlx4_en_free_tx_desc()
308 return (tx_info->nr_txbb); in mlx4_en_free_tx_desc()
317 ring->cons += ring->last_nr_txbb; in mlx4_en_free_tx_buf()
318 en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", in mlx4_en_free_tx_buf()
319 ring->cons, ring->prod); in mlx4_en_free_tx_buf()
321 if ((u32) (ring->prod - ring->cons) > ring->size) { in mlx4_en_free_tx_buf()
326 while (ring->cons != ring->prod) { in mlx4_en_free_tx_buf()
327 ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, in mlx4_en_free_tx_buf()
328 ring->cons & ring->size_mask); in mlx4_en_free_tx_buf()
329 ring->cons += ring->last_nr_txbb; in mlx4_en_free_tx_buf()
343 wqs = ring->size - (ring->prod - ring->cons); in mlx4_en_tx_ring_is_full()
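
The fullness check above (wqs = ring->size - (ring->prod - ring->cons)) relies on the same free-running counters: the unsigned difference prod - cons is the number of WQE slots currently in flight even after either counter wraps, so subtracting it from the ring size gives the free space. A small illustrative check with made-up values:

#include <stdint.h>
#include <stdio.h>

static uint32_t free_slots(uint32_t size, uint32_t prod, uint32_t cons)
{
	/* unsigned subtraction is wrap-safe: prod - cons == slots in flight */
	return size - (prod - cons);
}

int main(void)
{
	/* both counters have passed 2^32, yet only 5 slots are outstanding */
	printf("%u\n", (unsigned)free_slots(1024, 3u, 0xfffffffeu));	/* prints 1019 */
	return 0;
}
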
351 struct mlx4_cq *mcq = &cq->mcq; in mlx4_en_process_tx_cq()
352 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; in mlx4_en_process_tx_cq()
358 u32 cons_index = mcq->cons_index; in mlx4_en_process_tx_cq()
359 int size = cq->size; in mlx4_en_process_tx_cq()
360 u32 size_mask = ring->size_mask; in mlx4_en_process_tx_cq()
361 struct mlx4_cqe *buf = cq->buf; in mlx4_en_process_tx_cq()
362 int factor = priv->cqe_factor; in mlx4_en_process_tx_cq()
364 if (!priv->port_up) in mlx4_en_process_tx_cq()
369 ring_index = ring->cons & size_mask; in mlx4_en_process_tx_cq()
373 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, in mlx4_en_process_tx_cq()
381 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == in mlx4_en_process_tx_cq()
383 en_err(priv, "CQE completed in error - vendor syndrom: 0x%x syndrom: 0x%x\n", in mlx4_en_process_tx_cq()
384 ((struct mlx4_err_cqe *)cqe)-> in mlx4_en_process_tx_cq()
386 ((struct mlx4_err_cqe *)cqe)->syndrome); in mlx4_en_process_tx_cq()
390 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; in mlx4_en_process_tx_cq()
393 txbbs_skipped += ring->last_nr_txbb; in mlx4_en_process_tx_cq()
394 ring_index = (ring_index + ring->last_nr_txbb) & size_mask; in mlx4_en_process_tx_cq()
396 ring->last_nr_txbb = mlx4_en_free_tx_desc( in mlx4_en_process_tx_cq()
399 !!((ring->cons + txbbs_stamp) & in mlx4_en_process_tx_cq()
400 ring->size)); in mlx4_en_process_tx_cq()
415 mcq->cons_index = cons_index; in mlx4_en_process_tx_cq()
418 ring->cons += txbbs_skipped; in mlx4_en_process_tx_cq()
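
The completion loop above polls CQEs with an XNOR ownership test: the hardware flips the CQE owner bit every time it wraps the CQ, so a CQE is valid for software when its owner bit agrees with the phase implied by the consumer index. The second operand of XNOR is truncated in this listing; the sketch below assumes it is the usual cons_index & size phase bit, and the mask value is an illustrative stand-in for MLX4_CQE_OWNER_MASK:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XNOR(a, b)	(!(a) == !(b))
#define CQE_OWNER_BIT	0x80u	/* illustrative stand-in for MLX4_CQE_OWNER_MASK */

/* A CQE belongs to software when its owner bit matches the consumer's phase. */
static bool cqe_is_ours(uint8_t owner_sr_opcode, uint32_t cons_index, uint32_t size)
{
	return XNOR(owner_sr_opcode & CQE_OWNER_BIT, cons_index & size);
}

int main(void)
{
	/* first pass through the CQ: owner bit clear, phase bit clear -> ours */
	printf("%d\n", cqe_is_ours(0x00, 5, 256));
	/* after one wrap the expected owner bit flips */
	printf("%d\n", cqe_is_ours(0x80, 256 + 5, 256));
	return 0;
}
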
426 struct mlx4_en_priv *priv = mlx4_netdev_priv(cq->dev); in mlx4_en_tx_irq()
427 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; in mlx4_en_tx_irq()
429 if (priv->port_up == 0 || !spin_trylock(&ring->comp_lock)) in mlx4_en_tx_irq()
431 mlx4_en_process_tx_cq(cq->dev, cq); in mlx4_en_tx_irq()
432 mod_timer(&cq->timer, jiffies + 1); in mlx4_en_tx_irq()
433 spin_unlock(&ring->comp_lock); in mlx4_en_tx_irq()
439 struct mlx4_en_priv *priv = mlx4_netdev_priv(cq->dev); in mlx4_en_poll_tx_cq()
440 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; in mlx4_en_poll_tx_cq()
443 INC_PERF_COUNTER(priv->pstats.tx_poll); in mlx4_en_poll_tx_cq()
445 if (priv->port_up == 0) in mlx4_en_poll_tx_cq()
447 if (!spin_trylock(&ring->comp_lock)) { in mlx4_en_poll_tx_cq()
448 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); in mlx4_en_poll_tx_cq()
451 mlx4_en_process_tx_cq(cq->dev, cq); in mlx4_en_poll_tx_cq()
452 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb); in mlx4_en_poll_tx_cq()
457 if (inflight && priv->port_up) in mlx4_en_poll_tx_cq()
458 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); in mlx4_en_poll_tx_cq()
460 spin_unlock(&ring->comp_lock); in mlx4_en_poll_tx_cq()
465 struct mlx4_en_cq *cq = priv->tx_cq[tx_ind]; in mlx4_en_xmit_poll()
466 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; in mlx4_en_xmit_poll()
468 if (priv->port_up == 0) in mlx4_en_xmit_poll()
473 if (!timer_pending(&cq->timer)) in mlx4_en_xmit_poll()
474 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); in mlx4_en_xmit_poll()
477 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) in mlx4_en_xmit_poll()
478 if (spin_trylock(&ring->comp_lock)) { in mlx4_en_xmit_poll()
479 mlx4_en_process_tx_cq(priv->dev, cq); in mlx4_en_xmit_poll()
480 spin_unlock(&ring->comp_lock); in mlx4_en_xmit_poll()
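
mlx4_en_xmit_poll() above only reaps completions on every MLX4_EN_TX_POLL_MODER-th call, using (++poll_cnt & (MODER - 1)) == 0, which again assumes a power-of-two moderation constant. A hedged sketch of that rate-limiting idiom, with an assumed moderation value of 16:

#include <stdint.h>
#include <stdio.h>

#define POLL_MODER	16u	/* assumed power-of-two moderation factor */

int main(void)
{
	uint32_t poll_cnt = 0;

	for (int call = 1; call <= 48; call++) {
		/* fires once every POLL_MODER calls, with no division */
		if ((++poll_cnt & (POLL_MODER - 1)) == 0)
			printf("call %d: process completion queue\n", call);
	}
	return 0;
}
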
490 retval = MIN(ring->inline_thold, mb->m_len); in mlx4_en_get_inline_hdr_size()
494 retval = MIN(ring->inline_thold, mb->m_pkthdr.len); in mlx4_en_get_inline_hdr_size()
510 if (mb->m_len < ETHER_HDR_LEN) in mlx4_en_get_header_size()
512 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { in mlx4_en_get_header_size()
513 eth_type = ntohs(eh->evl_proto); in mlx4_en_get_header_size()
516 eth_type = ntohs(eh->evl_encap_proto); in mlx4_en_get_header_size()
519 if (mb->m_len < eth_hdr_len) in mlx4_en_get_header_size()
523 ip = (struct ip *)(mb->m_data + eth_hdr_len); in mlx4_en_get_header_size()
524 if (mb->m_len < eth_hdr_len + sizeof(*ip)) in mlx4_en_get_header_size()
526 if (ip->ip_p != IPPROTO_TCP) in mlx4_en_get_header_size()
528 ip_hlen = ip->ip_hl << 2; in mlx4_en_get_header_size()
532 ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len); in mlx4_en_get_header_size()
533 if (mb->m_len < eth_hdr_len + sizeof(*ip6)) in mlx4_en_get_header_size()
535 if (ip6->ip6_nxt != IPPROTO_TCP) in mlx4_en_get_header_size()
542 if (mb->m_len < eth_hdr_len + sizeof(*th)) in mlx4_en_get_header_size()
544 th = (struct tcphdr *)(mb->m_data + eth_hdr_len); in mlx4_en_get_header_size()
545 tcp_hlen = th->th_off << 2; in mlx4_en_get_header_size()
547 if (mb->m_len < eth_hdr_len) in mlx4_en_get_header_size()
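
mlx4_en_get_header_size() walks the Ethernet, IP/IPv6, and TCP headers to size the LSO header; the IP header length (ip_hl) and the TCP data offset (th_off) are stored as counts of 32-bit words, hence the << 2 shifts. A standalone illustration of that arithmetic with made-up field values:

#include <stdio.h>

int main(void)
{
	unsigned eth_hdr_len = 14;	/* Ethernet header, no VLAN tag */
	unsigned ip_hl = 5;		/* IPv4 IHL, in 32-bit words */
	unsigned th_off = 8;		/* TCP data offset, in 32-bit words */

	unsigned ip_hlen  = ip_hl << 2;		/* 20 bytes */
	unsigned tcp_hlen = th_off << 2;	/* 32 bytes: 20 + 12 of options */

	printf("LSO header size: %u bytes\n", eth_hdr_len + ip_hlen + tcp_hlen);
	return 0;
}
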
554 struct mbuf *mb, int len, __be32 owner_bit) in mlx4_en_store_inline_data() argument
557 const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4; in mlx4_en_store_inline_data()
559 if (unlikely(len < MIN_PKT_LEN)) { in mlx4_en_store_inline_data()
560 m_copydata(mb, 0, len, inl + 4); in mlx4_en_store_inline_data()
561 memset(inl + 4 + len, 0, MIN_PKT_LEN - len); in mlx4_en_store_inline_data()
563 } else if (len <= spc) { in mlx4_en_store_inline_data()
564 m_copydata(mb, 0, len, inl + 4); in mlx4_en_store_inline_data()
565 dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT); in mlx4_en_store_inline_data()
568 m_copydata(mb, spc, len - spc, inl + 8 + spc); in mlx4_en_store_inline_data()
569 dseg += DIV_ROUND_UP(8 + len, DS_SIZE_ALIGNMENT); in mlx4_en_store_inline_data()
576 int len, __be32 owner_bit) in mlx4_en_store_inline_header() argument
579 const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4; in mlx4_en_store_inline_header()
581 if (unlikely(len < MIN_PKT_LEN)) { in mlx4_en_store_inline_header()
584 } else if (len <= spc) { in mlx4_en_store_inline_header()
586 SET_BYTE_COUNT((1U << 31) | len); in mlx4_en_store_inline_header()
589 SET_BYTE_COUNT((1U << 31) | (len - spc)); in mlx4_en_store_inline_header()
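
The inline paths above copy small packets (and LSO headers) straight into the WQE and describe them with SET_BYTE_COUNT((1U << 31) | len): the top bit of the byte-count word flags the segment as inline data rather than a scatter/gather pointer, and when the data does not fit before the alignment boundary (spc) it is split into two inline chunks of spc and len - spc bytes. The sketch below only shows the bit layout; the real macro presumably also converts the word to big-endian for the HCA:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define INLINE_SEG_BIT	(UINT32_C(1) << 31)	/* top bit marks an inline segment */

int main(void)
{
	uint32_t len = 46;	/* bytes copied inline into the WQE */
	uint32_t byte_count = INLINE_SEG_BIT | len;

	printf("byte_count=0x%08" PRIx32 " inline=%" PRIu32 " len=%" PRIu32 "\n",
	    byte_count, byte_count >> 31, byte_count & UINT32_C(0x7fffffff));
	return 0;
}
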
610 u32 rings_p_up = priv->num_tx_rings_p_up; in mlx4_en_select_queue()
616 if (mb->m_flags & M_VLANTAG) { in mlx4_en_select_queue()
617 u32 vlan_tag = mb->m_pkthdr.ether_vtag; in mlx4_en_select_queue()
642 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; in mlx4_en_xmit()
643 if_t ifp = priv->dev; in mlx4_en_xmit()
658 if (unlikely(!priv->port_up)) { in mlx4_en_xmit()
666 mlx4_en_arm_cq(priv, priv->tx_cq[tx_ind]); in mlx4_en_xmit()
671 KASSERT(((~ring->prod) & ring->size_mask) >= in mlx4_en_xmit()
672 (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring")); in mlx4_en_xmit()
675 AVG_PERF_COUNTER(priv->pstats.inflight_avg, in mlx4_en_xmit()
676 (u32) (ring->prod - ring->cons - 1)); in mlx4_en_xmit()
679 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len); in mlx4_en_xmit()
682 owner_bit = (ring->prod & ring->size) ? in mlx4_en_xmit()
684 index = ring->prod & ring->size_mask; in mlx4_en_xmit()
686 (ring->buf + index * TXBB_SIZE); in mlx4_en_xmit()
687 tx_info = &ring->tx_info[index]; in mlx4_en_xmit()
688 dseg = &tx_desc->data; in mlx4_en_xmit()
695 tx_desc->ctrl.srcrb_flags = CTRL_FLAGS; in mlx4_en_xmit()
697 if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) in mlx4_en_xmit()
698 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM); in mlx4_en_xmit()
700 if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | in mlx4_en_xmit()
702 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_TCP_UDP_CSUM); in mlx4_en_xmit()
705 if (likely(tx_desc->ctrl.srcrb_flags != CTRL_FLAGS)) { in mlx4_en_xmit()
706 priv->port_stats.tx_chksum_offload++; in mlx4_en_xmit()
707 ring->tx_csum++; in mlx4_en_xmit()
711 if (mb->m_flags & M_VLANTAG) { in mlx4_en_xmit()
712 tx_desc->ctrl.vlan_tag = cpu_to_be16(mb->m_pkthdr.ether_vtag); in mlx4_en_xmit()
713 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; in mlx4_en_xmit()
715 tx_desc->ctrl.vlan_tag = 0; in mlx4_en_xmit()
716 tx_desc->ctrl.ins_vlan = 0; in mlx4_en_xmit()
719 if (unlikely(mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)) { in mlx4_en_xmit()
725 m_copydata(mb, 0, 2, __DEVOLATILE(void *, &tx_desc->ctrl.srcrb_flags16[0])); in mlx4_en_xmit()
726 m_copydata(mb, 2, 4, __DEVOLATILE(void *, &tx_desc->ctrl.imm)); in mlx4_en_xmit()
729 tx_desc->ctrl.imm = 0; in mlx4_en_xmit()
733 if (mb->m_pkthdr.csum_flags & CSUM_TSO) { in mlx4_en_xmit()
735 u32 mss = mb->m_pkthdr.tso_segsz; in mlx4_en_xmit()
742 ring->oversized_packets++; in mlx4_en_xmit()
746 tx_desc->lso.mss_hdr_size = cpu_to_be32((mss << 16) | ihs); in mlx4_en_xmit()
747 payload_len = mb->m_pkthdr.len - ihs; in mlx4_en_xmit()
752 ring->bytes += payload_len + (num_pkts * ihs); in mlx4_en_xmit()
753 ring->packets += num_pkts; in mlx4_en_xmit()
754 ring->tso_packets++; in mlx4_en_xmit()
764 ring->bytes += max_t (unsigned int, in mlx4_en_xmit()
765 mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN); in mlx4_en_xmit()
766 ring->packets++; in mlx4_en_xmit()
775 err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map, in mlx4_en_xmit()
779 ring->defrag_attempts++; in mlx4_en_xmit()
782 ring->oversized_packets++; in mlx4_en_xmit()
787 err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map, in mlx4_en_xmit()
792 ring->oversized_packets++; in mlx4_en_xmit()
798 bus_dmamap_sync(ring->dma_tag, tx_info->dma_map, in mlx4_en_xmit()
802 bus_dmamap_unload(ring->dma_tag, tx_info->dma_map); in mlx4_en_xmit()
808 ds_cnt = (dseg - ((volatile struct mlx4_wqe_data_seg *)tx_desc)) + nr_segs; in mlx4_en_xmit()
815 pad = (~(ring->prod + pad)) & ring->size_mask; in mlx4_en_xmit()
817 if (unlikely(pad < (MLX4_EN_TX_WQE_MAX_WQEBBS - 1))) { in mlx4_en_xmit()
837 * ((MLX4_EN_TX_WQE_MAX_WQEBBS - 1) * DS_FACT) + in mlx4_en_xmit()
842 tx_desc->ctrl.fence_size = (ds_cnt & 0x3f); in mlx4_en_xmit()
845 tx_info->mb = mb; in mlx4_en_xmit()
846 tx_info->nr_txbb = DIV_ROUND_UP(ds_cnt, DS_FACT); in mlx4_en_xmit()
848 bf_prod = ring->prod; in mlx4_en_xmit()
854 while (pad--) { in mlx4_en_xmit()
855 dseg--; in mlx4_en_xmit()
856 dseg->addr = 0; in mlx4_en_xmit()
857 dseg->lkey = 0; in mlx4_en_xmit()
859 dseg->byte_count = SET_BYTE_COUNT((1U << 31)|0); in mlx4_en_xmit()
863 while (nr_segs--) { in mlx4_en_xmit()
865 dseg--; in mlx4_en_xmit()
866 dseg->addr = 0; in mlx4_en_xmit()
867 dseg->lkey = 0; in mlx4_en_xmit()
869 dseg->byte_count = SET_BYTE_COUNT((1U << 31)|0); in mlx4_en_xmit()
871 dseg--; in mlx4_en_xmit()
872 dseg->addr = cpu_to_be64((uint64_t)segs[nr_segs].ds_addr); in mlx4_en_xmit()
873 dseg->lkey = cpu_to_be32(priv->mdev->mr.key); in mlx4_en_xmit()
875 dseg->byte_count = SET_BYTE_COUNT((uint32_t)segs[nr_segs].ds_len); in mlx4_en_xmit()
888 ring->prod += tx_info->nr_txbb; in mlx4_en_xmit()
890 if (ring->bf_enabled && bf_size <= MAX_BF && in mlx4_en_xmit()
891 (tx_desc->ctrl.ins_vlan != MLX4_WQE_CTRL_INS_CVLAN)) { in mlx4_en_xmit()
894 *(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); in mlx4_en_xmit()
904 tx_desc->ctrl.owner_opcode = opcode; in mlx4_en_xmit()
906 mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset, in mlx4_en_xmit()
907 (volatile unsigned long *) &tx_desc->ctrl, bf_size); in mlx4_en_xmit()
909 ring->bf.offset ^= ring->bf.buf_size; in mlx4_en_xmit()
916 tx_desc->ctrl.owner_opcode = opcode; in mlx4_en_xmit()
918 writel(cpu_to_be32(ring->doorbell_qpn), in mlx4_en_xmit()
919 ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL); in mlx4_en_xmit()
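
In mlx4_en_xmit() the descriptor is completed back-to-front: the data segments and padding are written first, ctrl.owner_opcode is set last so the hardware never sees a half-built WQE, and the doorbell (or a BlueFlame copy of the control segment) is rung afterwards. The owner bit itself comes from (ring->prod & ring->size), which flips once per trip around the descriptor ring; a standalone illustration of that toggling, with an assumed ring size of 8:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t size = 8;	/* power-of-two ring size */
	uint32_t prod = 0;

	/*
	 * (prod & size) changes value once every `size` descriptors, i.e.
	 * once per pass over the ring, letting hardware distinguish a newly
	 * posted WQE from a stale one left from the previous pass.
	 */
	for (int i = 0; i < 20; i++, prod++) {
		unsigned owner = (prod & size) ? 1u : 0u;
		printf("prod=%2u owner=%u\n", (unsigned)prod, owner);
	}
	return 0;
}
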
933 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; in mlx4_en_transmit_locked()
937 READ_ONCE(priv->port_up) == 0)) { in mlx4_en_transmit_locked()
946 if (ring->watchdog_time == 0) in mlx4_en_transmit_locked()
947 ring->watchdog_time = ticks + MLX4_EN_WATCHDOG_TIMEOUT; in mlx4_en_transmit_locked()
949 ring->watchdog_time = 0; in mlx4_en_transmit_locked()
961 if (priv->port_up == 0) { in mlx4_en_transmit()
968 i = (m->m_pkthdr.flowid % 128) % priv->tx_ring_num; in mlx4_en_transmit()
974 ring = priv->tx_ring[i]; in mlx4_en_transmit()
976 spin_lock(&ring->tx_lock); in mlx4_en_transmit()
979 spin_unlock(&ring->tx_lock); in mlx4_en_transmit()
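
mlx4_en_transmit() above spreads work across rings by folding the mbuf's flow ID into a ring index (m->m_pkthdr.flowid % 128, then modulo the ring count), so packets of one flow keep hitting the same TX ring and stay ordered. A minimal sketch of that mapping with an assumed ring count:

#include <stdint.h>
#include <stdio.h>

static unsigned pick_tx_ring(uint32_t flowid, unsigned tx_ring_num)
{
	/* same flow id -> same ring, so per-flow ordering is preserved */
	return (flowid % 128) % tx_ring_num;
}

int main(void)
{
	unsigned rings = 4;	/* assumed number of TX rings */

	for (uint32_t flowid = 0; flowid < 8; flowid++)
		printf("flowid=%u -> ring %u\n", (unsigned)flowid,
		    pick_tx_ring(flowid, rings));
	return 0;
}
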
998 if (priv->port_up == 0) in mlx4_en_qflush()