/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_DP_TX_H
#define _SFC_DP_TX_H

#include <ethdev_driver.h>

#include "sfc_dp.h"
#include "sfc_debug.h"
#include "sfc_tso.h"
#include "sfc_nic_dma_dp.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Generic transmit queue information used on data path.
 * It must be kept as small as possible since it is built into
 * the structure used on the datapath.
 */
struct sfc_dp_txq {
	struct sfc_dp_queue	dpq;
};

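/*
 * Illustrative sketch (not part of the API, names are hypothetical):
 * a datapath implementation typically embeds struct sfc_dp_txq at the
 * start of its own queue structure and recovers the latter with
 * container_of(), similar to the lookup helpers further below.
 *
 *	struct sfc_xxx_txq {
 *		struct sfc_dp_txq	dp;
 *		unsigned int		added;
 *		unsigned int		completed;
 *	};
 *
 *	static struct sfc_xxx_txq *
 *	sfc_xxx_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
 *	{
 *		return container_of(dp_txq, struct sfc_xxx_txq, dp);
 *	}
 */
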
/** Datapath transmit queue descriptor number limitations */
struct sfc_dp_tx_hw_limits {
	/** Maximum number of Tx queue descriptors supported by HW */
	unsigned int txq_max_entries;
	/** Minimum number of Tx queue descriptors required by HW */
	unsigned int txq_min_entries;
};

/**
 * Datapath transmit queue creation information.
 *
 * The structure is used just to pass information from the control path
 * to the datapath. It could be just function arguments, but that would
 * be hardly readable.
 */
struct sfc_dp_tx_qcreate_info {
	/** Maximum number of pushed Tx descriptors */
	unsigned int		max_fill_level;
	/** Minimum number of unused Tx descriptors to do reap */
	unsigned int		free_thresh;
	/** Offloads enabled on the transmit queue */
	uint64_t		offloads;
	/** Tx queue size */
	unsigned int		txq_entries;
	/** Maximum size of data in the DMA descriptor */
	uint16_t		dma_desc_size_max;
	/** DMA-mapped Tx descriptors ring */
	void			*txq_hw_ring;
	/** Associated event queue size */
	unsigned int		evq_entries;
	/** Hardware event ring */
	void			*evq_hw_ring;
	/** The queue index in hardware (required to push right doorbell) */
	unsigned int		hw_index;
	/** Virtual address of the memory-mapped BAR to push Tx doorbell */
	volatile void		*mem_bar;
	/** VI window size shift */
	unsigned int		vi_window_shift;
	/**
	 * Maximum offset into the packet at which the TCP header may start
	 * for the hardware to be able to apply TSO packet edits.
	 */
	uint16_t		tso_tcp_header_offset_limit;
	/** Maximum number of header DMA descriptors per TSOv3 transaction */
	uint16_t		tso_max_nb_header_descs;
	/** Maximum header length acceptable by TSOv3 transaction */
	uint16_t		tso_max_header_len;
	/** Maximum number of payload DMA descriptors per TSOv3 transaction */
	uint16_t		tso_max_nb_payload_descs;
	/** Maximum payload length per TSOv3 transaction */
	uint32_t		tso_max_payload_len;
	/** Maximum number of frames to be generated per TSOv3 transaction */
	uint32_t		tso_max_nb_outgoing_frames;

	/** NIC's DMA mapping information */
	const struct sfc_nic_dma_info	*nic_dma_info;
};

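/*
 * Illustrative sketch (hypothetical, not the actual control path code):
 * the control path fills this structure and hands it to the chosen
 * datapath's qcreate callback. Variable and field values below are
 * made up for the example.
 *
 *	struct sfc_dp_tx_qcreate_info info;
 *	struct sfc_dp_txq *dp_txq;
 *	int rc;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.max_fill_level = max_fill_level;
 *	info.free_thresh = free_thresh;
 *	info.offloads = offloads;
 *	info.txq_entries = txq_entries;
 *	info.evq_entries = evq_entries;
 *
 *	rc = dp_tx->qcreate(port_id, queue_id, &pci_addr, socket_id,
 *			    &info, &dp_txq);
 */
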
/**
 * Get Tx datapath specific device info.
 *
 * @param dev_info		Device info to be adjusted
 */
typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);

/**
 * Get size of transmit and event queue rings by the number of Tx
 * descriptors.
 *
 * @param nb_tx_desc		Number of Tx descriptors
 * @param limits		Tx queue HW limits
 * @param txq_entries		Location for number of Tx ring entries
 * @param evq_entries		Location for number of event ring entries
 * @param txq_max_fill_level	Location for maximum Tx ring fill level
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_tx_qsize_up_rings_t)(uint16_t nb_tx_desc,
					 struct sfc_dp_tx_hw_limits *limits,
					 unsigned int *txq_entries,
					 unsigned int *evq_entries,
					 unsigned int *txq_max_fill_level);

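/*
 * Minimal sketch of a possible qsize_up_rings implementation. Datapaths
 * differ in how they size the event queue and cap the fill level; the
 * policy and function name below are only an illustration.
 *
 *	static int
 *	sfc_xxx_tx_qsize_up_rings(uint16_t nb_tx_desc,
 *				  struct sfc_dp_tx_hw_limits *limits,
 *				  unsigned int *txq_entries,
 *				  unsigned int *evq_entries,
 *				  unsigned int *txq_max_fill_level)
 *	{
 *		if (nb_tx_desc > limits->txq_max_entries)
 *			return EINVAL;
 *
 *		*txq_entries = RTE_MAX((unsigned int)nb_tx_desc,
 *				       limits->txq_min_entries);
 *		*evq_entries = *txq_entries;
 *		*txq_max_fill_level = *txq_entries - 1;
 *		return 0;
 *	}
 */
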
/**
 * Allocate and initialize datapath transmit queue.
 *
 * @param port_id	The port identifier
 * @param queue_id	The queue identifier
 * @param pci_addr	PCI function address
 * @param socket_id	Socket identifier to allocate memory
 * @param info		Tx queue details wrapped in structure
 * @param dp_txqp	Location for generic datapath transmit queue pointer
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_tx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
				  const struct rte_pci_addr *pci_addr,
				  int socket_id,
				  const struct sfc_dp_tx_qcreate_info *info,
				  struct sfc_dp_txq **dp_txqp);

/**
 * Free resources allocated for datapath transmit queue.
 */
typedef void (sfc_dp_tx_qdestroy_t)(struct sfc_dp_txq *dp_txq);

/**
 * Transmit queue start callback.
 *
 * It hands over the EvQ to the datapath.
 */
typedef int (sfc_dp_tx_qstart_t)(struct sfc_dp_txq *dp_txq,
				 unsigned int evq_read_ptr,
				 unsigned int txq_desc_index);

/**
 * Transmit queue stop function called before the queue flush.
 *
 * It returns the EvQ to the control path.
 */
typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq,
				 unsigned int *evq_read_ptr);

/**
 * Transmit event handler used during queue flush only.
 */
typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id);

/**
 * Transmit queue function called after the queue flush.
 */
typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq);

/**
 * Check Tx descriptor status
 */
typedef int (sfc_dp_tx_qdesc_status_t)(struct sfc_dp_txq *dp_txq,
				       uint16_t offset);

/** Transmit datapath definition */
struct sfc_dp_tx {
	struct sfc_dp			dp;

	unsigned int			features;
#define SFC_DP_TX_FEAT_MULTI_PROCESS	0x1
#define SFC_DP_TX_FEAT_STATS		0x2
	/**
	 * Tx offload capabilities supported by the datapath at the device
	 * level only if HW/FW supports them.
	 */
	uint64_t			dev_offload_capa;
	/**
	 * Tx offload capabilities supported by the datapath per queue
	 * if HW/FW supports them.
	 */
	uint64_t			queue_offload_capa;
	sfc_dp_tx_get_dev_info_t	*get_dev_info;
	sfc_dp_tx_qsize_up_rings_t	*qsize_up_rings;
	sfc_dp_tx_qcreate_t		*qcreate;
	sfc_dp_tx_qdestroy_t		*qdestroy;
	sfc_dp_tx_qstart_t		*qstart;
	sfc_dp_tx_qstop_t		*qstop;
	sfc_dp_tx_qtx_ev_t		*qtx_ev;
	sfc_dp_tx_qreap_t		*qreap;
	sfc_dp_tx_qdesc_status_t	*qdesc_status;
	eth_tx_prep_t			pkt_prepare;
	eth_tx_burst_t			pkt_burst;
};

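/*
 * Illustrative sketch (hypothetical datapath with made-up names and
 * offload flags): each Tx datapath describes itself with a struct
 * sfc_dp_tx instance which is registered on the datapath list and then
 * looked up by name or by capabilities (see the helpers below).
 *
 *	struct sfc_dp_tx sfc_xxx_tx = {
 *		.dp = {
 *			.name		= "xxx",
 *			.type		= SFC_DP_TX,
 *		},
 *		.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
 *		.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 *		.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
 *		.qsize_up_rings		= sfc_xxx_tx_qsize_up_rings,
 *		.qcreate		= sfc_xxx_tx_qcreate,
 *		.qdestroy		= sfc_xxx_tx_qdestroy,
 *		.qstart			= sfc_xxx_tx_qstart,
 *		.qstop			= sfc_xxx_tx_qstop,
 *		.qreap			= sfc_xxx_tx_qreap,
 *		.pkt_burst		= sfc_xxx_xmit_pkts,
 *	};
 */
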
static inline struct sfc_dp_tx *
sfc_dp_find_tx_by_name(struct sfc_dp_list *head, const char *name)
{
	struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_TX, name);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
}

static inline struct sfc_dp_tx *
sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
{
	struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_TX, avail_caps);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
}

/** Get Tx datapath ops by the datapath TxQ handle */
const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);

static inline uint64_t
sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx)
{
	return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa;
}

static inline unsigned int
sfc_dp_tx_pkt_extra_hdr_segs(struct rte_mbuf **m_seg,
			     unsigned int *header_len_remaining)
{
	unsigned int nb_extra_header_segs = 0;

	while (rte_pktmbuf_data_len(*m_seg) < *header_len_remaining) {
		*header_len_remaining -= rte_pktmbuf_data_len(*m_seg);
		*m_seg = (*m_seg)->next;
		++nb_extra_header_segs;
	}

	return nb_extra_header_segs;
}

static inline int
sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
			   unsigned int max_nb_header_segs,
			   unsigned int tso_bounce_buffer_len,
			   uint32_t tso_tcp_header_offset_limit,
			   unsigned int max_fill_level,
			   unsigned int nb_tso_descs,
			   unsigned int nb_vlan_descs)
{
	unsigned int descs_required = m->nb_segs;
	unsigned int tcph_off = ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
				 m->outer_l2_len + m->outer_l3_len : 0) +
				m->l2_len + m->l3_len;
	unsigned int header_len = tcph_off + m->l4_len;
	unsigned int header_len_remaining = header_len;
	unsigned int nb_header_segs = 1;
	struct rte_mbuf *m_seg = m;

#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
	int ret;

	ret = rte_validate_tx_offload(m);
	if (ret != 0) {
		/*
		 * A negative error code is returned by
		 * rte_validate_tx_offload(), but positive error codes
		 * are used inside net/sfc PMD.
		 */
		SFC_ASSERT(ret < 0);
		return -ret;
	}
#endif

	if (max_nb_header_segs != 0) {
		/* There is a limit on the number of header segments. */

		nb_header_segs +=
		    sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
						 &header_len_remaining);

		if (unlikely(nb_header_segs > max_nb_header_segs)) {
			/*
			 * The number of header segments is too large.
			 *
			 * If TSO is requested and if the datapath supports
			 * linearisation of TSO headers, allow the packet
			 * to proceed with additional checks below.
			 * Otherwise, throw an error.
			 */
			if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0 ||
			    tso_bounce_buffer_len == 0)
				return EINVAL;
		}
	}

	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
		switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
		case 0:
			break;
		case RTE_MBUF_F_TX_TUNNEL_VXLAN:
			/* FALLTHROUGH */
		case RTE_MBUF_F_TX_TUNNEL_GENEVE:
			if (!(m->ol_flags &
			      (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)))
				return EINVAL;
		}

		if (unlikely(tcph_off > tso_tcp_header_offset_limit))
			return EINVAL;

		descs_required += nb_tso_descs;

		/*
		 * If header segments have already been counted above,
		 * nothing is done here since the remaining header length
		 * is smaller than the current segment size.
		 */
		nb_header_segs +=
		    sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
						 &header_len_remaining);

		/*
		 * Extra descriptor which is required when (a part of) payload
		 * shares the same segment with (a part of) the header.
		 */
		if (rte_pktmbuf_data_len(m_seg) > header_len_remaining)
			descs_required++;

		if (tso_bounce_buffer_len != 0) {
			if (nb_header_segs > 1 &&
			    unlikely(header_len > tso_bounce_buffer_len)) {
				/*
				 * Header linearization is required and
				 * the header is too big to be linearized
				 */
				return EINVAL;
			}
		}
	}

	/*
	 * The number of VLAN descriptors is added regardless of the
	 * requested VLAN offload since VLAN is sticky and sending a packet
	 * without VLAN insertion may require a VLAN descriptor to reset
	 * the sticky state to 0.
	 */
	descs_required += nb_vlan_descs;

	/*
	 * Max fill level must be sufficient to hold all required descriptors
	 * to send the packet entirely.
	 */
	if (descs_required > max_fill_level)
		return ENOBUFS;

	return 0;
}

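/*
 * Illustrative sketch (hypothetical, made-up queue structure and field
 * names): a datapath's eth_tx_prep_t callback is expected to run
 * sfc_dp_tx_prepare_pkt() for each packet and report the first failure
 * via rte_errno.
 *
 *	static uint16_t
 *	sfc_xxx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 *			     uint16_t nb_pkts)
 *	{
 *		struct sfc_xxx_txq *txq = tx_queue;
 *		uint16_t i;
 *
 *		for (i = 0; i < nb_pkts; i++) {
 *			int err;
 *
 *			err = sfc_dp_tx_prepare_pkt(tx_pkts[i], 0, 0,
 *					txq->tso_tcp_header_offset_limit,
 *					txq->max_fill_level, 0, 0);
 *			if (unlikely(err != 0)) {
 *				rte_errno = err;
 *				break;
 *			}
 *		}
 *
 *		return i;
 *	}
 */
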
extern struct sfc_dp_tx sfc_efx_tx;
extern struct sfc_dp_tx sfc_ef10_tx;
extern struct sfc_dp_tx sfc_ef10_simple_tx;
extern struct sfc_dp_tx sfc_ef100_tx;

#ifdef __cplusplus
}
#endif
#endif /* _SFC_DP_TX_H */