/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include <rte_io.h>
#include <rte_net.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_regs_ef100.h"

#include "sfc_debug.h"
#include "sfc_dp_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef100.h"
#include "sfc_nic_dma_dp.h"


#define sfc_ef100_tx_err(_txq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, ERR, &(_txq)->dp.dpq, __VA_ARGS__)

#define sfc_ef100_tx_debug(_txq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, DEBUG, &(_txq)->dp.dpq, \
		   __VA_ARGS__)


/** Maximum length of the send descriptor data */
#define SFC_EF100_TX_SEND_DESC_LEN_MAX \
	((1u << ESF_GZ_TX_SEND_LEN_WIDTH) - 1)

/** Maximum length of the segment descriptor data */
#define SFC_EF100_TX_SEG_DESC_LEN_MAX \
	((1u << ESF_GZ_TX_SEG_LEN_WIDTH) - 1)

/**
 * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF100 native datapath uses an event queue of the same size as the
 * Tx queue. The maximum number of events on the datapath can be estimated
 * as the number of Tx queue entries (one event per Tx buffer in the worst
 * case) plus Tx error and flush events.
 */
#define SFC_EF100_TXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 1 /* Tx error */ - 1 /* flush */)
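/*
 * For example, with 512 Tx/event queue entries the datapath may have at
 * most 509 descriptors in flight: 512 - 1 (head/tail) - 1 (error) - 1 (flush).
 */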

struct sfc_ef100_tx_sw_desc {
	struct rte_mbuf *mbuf;
};

struct sfc_ef100_txq {
	unsigned int flags;
#define SFC_EF100_TXQ_STARTED		0x1
#define SFC_EF100_TXQ_NOT_RUNNING	0x2
#define SFC_EF100_TXQ_EXCEPTION		0x4
#define SFC_EF100_TXQ_NIC_DMA_MAP	0x8

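	/*
	 * Free-running ring pointers: 'added' counts descriptors queued to
	 * the NIC and 'completed' counts descriptors already reaped; both
	 * are masked by ptr_mask (ring size minus one) when used as indices.
	 */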
	unsigned int ptr_mask;
	unsigned int added;
	unsigned int completed;
	unsigned int max_fill_level;
	unsigned int free_thresh;
	struct sfc_ef100_tx_sw_desc *sw_ring;
	efx_oword_t *txq_hw_ring;
	volatile void *doorbell;

	/* Completion/reap */
	unsigned int evq_read_ptr;
	unsigned int evq_phase_bit_shift;
	volatile efx_qword_t *evq_hw_ring;

	uint16_t tso_tcp_header_offset_limit;
	uint16_t tso_max_nb_header_descs;
	uint16_t tso_max_header_len;
	uint16_t tso_max_nb_payload_descs;
	uint32_t tso_max_payload_len;
	uint32_t tso_max_nb_outgoing_frames;

	/* Datapath transmit queue anchor */
	struct sfc_dp_txq dp;

	const struct sfc_nic_dma_info *nic_dma_info;
};

static inline struct sfc_ef100_txq *
sfc_ef100_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
{
	return container_of(dp_txq, struct sfc_ef100_txq, dp);
}

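/*
 * Check the TSO-specific limits imposed on a packet: header length,
 * number of payload descriptors, total payload length and the
 * MSS-derived limit on the number of outgoing frames; a packet with
 * no payload at all is rejected as well.
 */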
static int
sfc_ef100_tx_prepare_pkt_tso(struct sfc_ef100_txq * const txq,
			     struct rte_mbuf *m)
{
	size_t header_len = ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
			     m->outer_l2_len + m->outer_l3_len : 0) +
			    m->l2_len + m->l3_len + m->l4_len;
	size_t payload_len = m->pkt_len - header_len;
	unsigned long mss_conformant_max_payload_len;
	unsigned int nb_payload_descs;

#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
	switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
	case 0:
		/* FALLTHROUGH */
	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
		/* FALLTHROUGH */
	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
		break;
	default:
		return ENOTSUP;
	}
#endif

	mss_conformant_max_payload_len =
		m->tso_segsz * txq->tso_max_nb_outgoing_frames;

	/*
	 * The exact number of payload segments is not really needed here.
	 * Just use the total number of segments as an upper limit. In
	 * practice the maximum number of payload segments is significantly
	 * bigger than the maximum number of header segments, so header
	 * segments may be neglected when the total number of segments is
	 * used to estimate the number of payload segments required.
	 */
	nb_payload_descs = m->nb_segs;

	/*
	 * Carry out multiple independent checks using bitwise OR
	 * to avoid unnecessary conditional branching.
	 */
	if (unlikely((header_len > txq->tso_max_header_len) |
		     (nb_payload_descs > txq->tso_max_nb_payload_descs) |
		     (payload_len > txq->tso_max_payload_len) |
		     (payload_len > mss_conformant_max_payload_len) |
		     (m->pkt_len == header_len)))
		return EINVAL;

	return 0;
}

static uint16_t
sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct sfc_ef100_txq * const txq = sfc_ef100_txq_by_dp_txq(tx_queue);
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *m = tx_pkts[i];
		unsigned int max_nb_header_segs = 0;
		bool calc_phdr_cksum = false;
		int ret;

		/*
		 * Partial checksum offload is used in the case of
		 * inner TCP/UDP checksum offload. It requires a
		 * pseudo-header checksum, which is calculated below,
		 * and contiguous packet headers.
		 */
		if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
		    (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)) {
			calc_phdr_cksum = true;
			max_nb_header_segs = 1;
		} else if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
			max_nb_header_segs = txq->tso_max_nb_header_descs;
		}

		ret = sfc_dp_tx_prepare_pkt(m, max_nb_header_segs, 0,
					    txq->tso_tcp_header_offset_limit,
					    txq->max_fill_level, 1, 0);
		if (unlikely(ret != 0)) {
			rte_errno = ret;
			break;
		}

		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
			ret = sfc_ef100_tx_prepare_pkt_tso(txq, m);
			if (unlikely(ret != 0)) {
				rte_errno = ret;
				break;
			}
		} else if (m->nb_segs > EFX_MASK32(ESF_GZ_TX_SEND_NUM_SEGS)) {
			rte_errno = EINVAL;
			break;
		}

		if (calc_phdr_cksum) {
			/*
			 * Full checksum offload does IPv4 header checksum
			 * and does not require any assistance.
			 */
			ret = rte_net_intel_cksum_flags_prepare(m,
					m->ol_flags & ~RTE_MBUF_F_TX_IP_CKSUM);
			if (unlikely(ret != 0)) {
				rte_errno = -ret;
				break;
			}
		}
	}

	return i;
}

static bool
sfc_ef100_tx_get_event(struct sfc_ef100_txq *txq, efx_qword_t *ev)
{
	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;

	/*
	 * The exception flag is set when reap is done.
	 * Reap is never done twice per packet burst get, and the absence
	 * of the flag is checked on burst get entry.
	 */
	SFC_ASSERT((txq->flags & SFC_EF100_TXQ_EXCEPTION) == 0);

	*ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];

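	/*
	 * The expected phase bit alternates on every event queue wrap:
	 * evq_phase_bit_shift is log2 of the event queue size, so the bit
	 * extracted from the free-running read pointer flips each time the
	 * pointer wraps around the ring.
	 */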
	if (!sfc_ef100_ev_present(ev,
			(txq->evq_read_ptr >> txq->evq_phase_bit_shift) & 1))
		return false;

	if (unlikely(!sfc_ef100_ev_type_is(ev,
					   ESE_GZ_EF100_EV_TX_COMPLETION))) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		txq->flags |= SFC_EF100_TXQ_EXCEPTION;
		sfc_ef100_tx_err(txq,
			"TxQ exception at EvQ ptr %u(%#x), event %08x:%08x",
			txq->evq_read_ptr, txq->evq_read_ptr & txq->ptr_mask,
			EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			EFX_QWORD_FIELD(*ev, EFX_DWORD_0));
		return false;
	}

	sfc_ef100_tx_debug(txq, "TxQ got event %08x:%08x at %u (%#x)",
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_0),
			   txq->evq_read_ptr,
			   txq->evq_read_ptr & txq->ptr_mask);

	txq->evq_read_ptr++;
	return true;
}

static unsigned int
sfc_ef100_tx_process_events(struct sfc_ef100_txq *txq)
{
	unsigned int num_descs = 0;
	efx_qword_t tx_ev;

	while (sfc_ef100_tx_get_event(txq, &tx_ev))
		num_descs += EFX_QWORD_FIELD(tx_ev, ESF_GZ_EV_TXCMPL_NUM_DESC);

	return num_descs;
}

static void
sfc_ef100_tx_reap_num_descs(struct sfc_ef100_txq *txq, unsigned int num_descs)
{
	if (num_descs > 0) {
		unsigned int completed = txq->completed;
		unsigned int pending = completed + num_descs;
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

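		/*
		 * Return mbufs to their mempool in bulks; flush the bulk
		 * whenever it is full or the next mbuf belongs to a
		 * different pool.
		 */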
		do {
			struct sfc_ef100_tx_sw_desc *txd;
			struct rte_mbuf *m;

			txd = &txq->sw_ring[completed & txq->ptr_mask];
			if (txd->mbuf == NULL)
				continue;

			m = rte_pktmbuf_prefree_seg(txd->mbuf);
			if (m == NULL)
				continue;

			txd->mbuf = NULL;

			if (nb == RTE_DIM(bulk) ||
			    (nb != 0 && m->pool != bulk[0]->pool)) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = m;
		} while (++completed != pending);

		if (nb != 0)
			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}
}

static void
sfc_ef100_tx_reap(struct sfc_ef100_txq *txq)
{
	sfc_ef100_tx_reap_num_descs(txq, sfc_ef100_tx_process_events(txq));
}

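/*
 * Build a Tx prefix (override) descriptor carrying the egress m-port taken
 * from the mbuf dynamic field. When the sfc_dp_mport_override flag is set,
 * this descriptor is pushed in front of the packet's send/TSO descriptors
 * (see sfc_ef100_xmit_pkts()).
 */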
static void
sfc_ef100_tx_qdesc_prefix_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
{
	efx_mport_id_t *mport_id =
		RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset, efx_mport_id_t *);

	EFX_POPULATE_OWORD_3(*tx_desc,
			ESF_GZ_TX_PREFIX_EGRESS_MPORT,
			mport_id->id,
			ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX);
}

static uint8_t
sfc_ef100_tx_qdesc_cso_inner_l3(uint64_t tx_tunnel)
{
	uint8_t inner_l3;

	switch (tx_tunnel) {
	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_VXLAN;
		break;
	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_GENEVE;
		break;
	default:
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_OFF;
		break;
	}
	return inner_l3;
}

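/*
 * Translate an mbuf segment IOVA into the address to be written to a Tx
 * descriptor. When SFC_EF100_TXQ_NIC_DMA_MAP is set, the address is looked
 * up in the NIC DMA regions via sfc_nic_dma_map(); otherwise the IOVA is
 * used as-is.
 */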
static int
sfc_ef100_tx_map(const struct sfc_ef100_txq *txq, rte_iova_t iova, size_t len,
		 rte_iova_t *dma_addr)
{
	if ((txq->flags & SFC_EF100_TXQ_NIC_DMA_MAP) == 0) {
		*dma_addr = iova;
	} else {
		*dma_addr = sfc_nic_dma_map(txq->nic_dma_info, iova, len);
		if (unlikely(*dma_addr == RTE_BAD_IOVA))
			sfc_ef100_tx_err(txq, "failed to map DMA address on Tx");
	}
	return 0;
}

static int
sfc_ef100_tx_qdesc_send_create(const struct sfc_ef100_txq *txq,
			       const struct rte_mbuf *m, efx_oword_t *tx_desc)
{
	bool outer_l3;
	bool outer_l4;
	uint8_t inner_l3;
	uint8_t partial_en;
	uint16_t part_cksum_w;
	uint16_t l4_offset_w;
	rte_iova_t dma_addr;
	int rc;

	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) == 0) {
		outer_l3 = (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
		outer_l4 = (m->ol_flags & RTE_MBUF_F_TX_L4_MASK);
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_OFF;
		partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF;
		part_cksum_w = 0;
		l4_offset_w = 0;
	} else {
		outer_l3 = (m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
		outer_l4 = (m->ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
		inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(m->ol_flags &
							   RTE_MBUF_F_TX_TUNNEL_MASK);

		switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
		case RTE_MBUF_F_TX_TCP_CKSUM:
			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP;
			part_cksum_w = offsetof(struct rte_tcp_hdr, cksum) >> 1;
			break;
		case RTE_MBUF_F_TX_UDP_CKSUM:
			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP;
			part_cksum_w = offsetof(struct rte_udp_hdr,
						dgram_cksum) >> 1;
			break;
		default:
			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF;
			part_cksum_w = 0;
			break;
		}
		l4_offset_w = (m->outer_l2_len + m->outer_l3_len +
			       m->l2_len + m->l3_len) >> 1;
	}

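	/*
	 * part_cksum_w is the checksum field offset within the L4 header
	 * and l4_offset_w is the L4 header offset within the packet, both
	 * expressed in 16-bit words (hence the shifts by one above).
	 */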
	rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m),
			      rte_pktmbuf_data_len(m), &dma_addr);
	if (unlikely(rc != 0))
		return rc;

	EFX_POPULATE_OWORD_10(*tx_desc,
			ESF_GZ_TX_SEND_ADDR, dma_addr,
			ESF_GZ_TX_SEND_LEN, rte_pktmbuf_data_len(m),
			ESF_GZ_TX_SEND_NUM_SEGS, m->nb_segs,
			ESF_GZ_TX_SEND_CSO_PARTIAL_START_W, l4_offset_w,
			ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W, part_cksum_w,
			ESF_GZ_TX_SEND_CSO_PARTIAL_EN, partial_en,
			ESF_GZ_TX_SEND_CSO_INNER_L3, inner_l3,
			ESF_GZ_TX_SEND_CSO_OUTER_L3, outer_l3,
			ESF_GZ_TX_SEND_CSO_OUTER_L4, outer_l4,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);

	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
		efx_oword_t tx_desc_extra_fields;

		EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
				ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1,
				ESF_GZ_TX_SEND_VLAN_INSERT_TCI, m->vlan_tci);

		EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
	}

	return 0;
}

static void
sfc_ef100_tx_qdesc_seg_create(rte_iova_t addr, uint16_t len,
			      efx_oword_t *tx_desc)
{
	EFX_POPULATE_OWORD_3(*tx_desc,
			ESF_GZ_TX_SEG_ADDR, addr,
			ESF_GZ_TX_SEG_LEN, len,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG);
}

static void
sfc_ef100_tx_qdesc_tso_create(const struct rte_mbuf *m,
			      uint16_t nb_header_descs,
			      uint16_t nb_payload_descs,
			      size_t header_len, size_t payload_len,
			      size_t outer_iph_off, size_t outer_udph_off,
			      size_t iph_off, size_t tcph_off,
			      efx_oword_t *tx_desc)
{
	efx_oword_t tx_desc_extra_fields;
	int ed_outer_udp_len = (outer_udph_off != 0) ? 1 : 0;
	int ed_outer_ip_len = (outer_iph_off != 0) ? 1 : 0;
	int ed_outer_ip_id = (outer_iph_off != 0) ?
		ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 : 0;
	/*
	 * If no tunnel encapsulation is present, then the ED_INNER
	 * fields should be used.
	 */
	int ed_inner_ip_id = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
	uint8_t inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(
					m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);

	EFX_POPULATE_OWORD_10(*tx_desc,
			ESF_GZ_TX_TSO_MSS, m->tso_segsz,
			ESF_GZ_TX_TSO_HDR_NUM_SEGS, nb_header_descs,
			ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, nb_payload_descs,
			ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, ed_outer_ip_id,
			ESF_GZ_TX_TSO_ED_INNER_IP4_ID, ed_inner_ip_id,
			ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, ed_outer_ip_len,
			ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
			ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, ed_outer_udp_len,
			ESF_GZ_TX_TSO_HDR_LEN_W, header_len >> 1,
			ESF_GZ_TX_TSO_PAYLOAD_LEN, payload_len);

	EFX_POPULATE_OWORD_9(tx_desc_extra_fields,
			/*
			 * Outer offsets are required for outer IPv4 ID
			 * and length edits in the case of tunnel TSO.
			 */
			ESF_GZ_TX_TSO_OUTER_L3_OFF_W, outer_iph_off >> 1,
			ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_udph_off >> 1,
			/*
			 * Inner offsets are required for inner IPv4 ID
			 * and IP length edits and partial checksum
			 * offload in the case of tunnel TSO.
			 */
			ESF_GZ_TX_TSO_INNER_L3_OFF_W, iph_off >> 1,
			ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcph_off >> 1,
			ESF_GZ_TX_TSO_CSO_INNER_L4,
			inner_l3 != ESE_GZ_TX_DESC_CS_INNER_L3_OFF,
			ESF_GZ_TX_TSO_CSO_INNER_L3, inner_l3,
			/*
			 * Use outer full checksum offloads which do
			 * not require any extra information.
			 */
			ESF_GZ_TX_TSO_CSO_OUTER_L3, 1,
			ESF_GZ_TX_TSO_CSO_OUTER_L4, 1,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO);

	EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);

	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
		EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
				ESF_GZ_TX_TSO_VLAN_INSERT_EN, 1,
				ESF_GZ_TX_TSO_VLAN_INSERT_TCI, m->vlan_tci);

		EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
	}
}

static inline void
sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added)
{
	efx_dword_t dword;

	EFX_POPULATE_DWORD_1(dword, ERF_GZ_TX_RING_PIDX, added & txq->ptr_mask);

	/* DMA sync to device is not required */

	/*
	 * rte_write32() has rte_io_wmb() which guarantees that the STORE
	 * operations (i.e. Tx descriptor updates) that precede the
	 * rte_io_wmb() call are visible to the NIC before the STORE
	 * operations that follow it (i.e. the doorbell write).
	 */
	rte_write32(dword.ed_u32[0], txq->doorbell);
	txq->dp.dpq.dbells++;

	sfc_ef100_tx_debug(txq, "TxQ pushed doorbell at pidx %u (added=%u)",
			   EFX_DWORD_FIELD(dword, ERF_GZ_TX_RING_PIDX),
			   added);
}

static unsigned int
sfc_ef100_tx_pkt_descs_max(const struct rte_mbuf *m)
{
	unsigned int extra_descs = 0;

/** Maximum length of an mbuf segment data */
#define SFC_MBUF_SEG_LEN_MAX	UINT16_MAX
	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);

	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
		/* Tx TSO descriptor */
		extra_descs++;
		/*
		 * An extra Tx segment descriptor may be required if the
		 * header ends in the middle of a segment.
		 */
		extra_descs++;
	} else {
		/*
		 * Since TSO is not used here, an mbuf segment cannot be
		 * bigger than the maximum segment length or the maximum
		 * packet length. Make sure that the first segment does
		 * not need fragmentation (split into many Tx descriptors).
		 */
		RTE_BUILD_BUG_ON(SFC_EF100_TX_SEND_DESC_LEN_MAX <
			RTE_MIN_T(EFX_MAC_PDU_MAX, SFC_MBUF_SEG_LEN_MAX, uint32_t));
	}

	if (m->ol_flags & sfc_dp_mport_override) {
		/* Tx override prefix descriptor will be used */
		extra_descs++;
	}

	/*
	 * No segment of a scattered packet can be bigger than the maximum
	 * segment length. Make sure that subsequent segments do not need
	 * fragmentation (split into many Tx descriptors).
	 */
	RTE_BUILD_BUG_ON(SFC_EF100_TX_SEG_DESC_LEN_MAX < SFC_MBUF_SEG_LEN_MAX);

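	/*
	 * Worst case: one descriptor per mbuf segment, plus a TSO option
	 * descriptor and a possible header/payload split for TSO packets,
	 * plus a prefix descriptor when the egress m-port is overridden.
	 */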
	return m->nb_segs + extra_descs;
}

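/*
 * Reserve the TSO option descriptor slot, then emit segment descriptors
 * covering the packet headers (splitting an mbuf segment in two when the
 * header ends in the middle of it), and finally fill in the TSO descriptor
 * once the header/payload descriptor counts are known. On return *m points
 * at the first mbuf segment not yet covered by a descriptor.
 */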
static int
sfc_ef100_xmit_tso_pkt(struct sfc_ef100_txq * const txq,
		       struct rte_mbuf **m, unsigned int *added)
{
	struct rte_mbuf *m_seg = *m;
	unsigned int nb_hdr_descs;
	unsigned int nb_pld_descs;
	unsigned int seg_split = 0;
	unsigned int tso_desc_id;
	unsigned int id;
	size_t outer_iph_off;
	size_t outer_udph_off;
	size_t iph_off;
	size_t tcph_off;
	size_t header_len;
	size_t remaining_hdr_len;
	rte_iova_t dma_addr;
	int rc;

	if (m_seg->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
		outer_iph_off = m_seg->outer_l2_len;
		outer_udph_off = outer_iph_off + m_seg->outer_l3_len;
	} else {
		outer_iph_off = 0;
		outer_udph_off = 0;
	}
	iph_off = outer_udph_off + m_seg->l2_len;
	tcph_off = iph_off + m_seg->l3_len;
	header_len = tcph_off + m_seg->l4_len;

	/*
	 * Remember the ID of the TX_TSO descriptor to be filled in.
	 * It cannot be filled in right now since the number of header
	 * and payload segments must be calculated first, and the chain
	 * should not be traversed twice here.
	 */
	tso_desc_id = (*added)++ & txq->ptr_mask;

	remaining_hdr_len = header_len;
	do {
		rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m_seg),
				      rte_pktmbuf_data_len(m_seg), &dma_addr);
		if (unlikely(rc != 0))
			return rc;

		id = (*added)++ & txq->ptr_mask;
		if (rte_pktmbuf_data_len(m_seg) <= remaining_hdr_len) {
			/* The segment is fully a header segment */
			sfc_ef100_tx_qdesc_seg_create(dma_addr,
				rte_pktmbuf_data_len(m_seg),
				&txq->txq_hw_ring[id]);
			remaining_hdr_len -= rte_pktmbuf_data_len(m_seg);
		} else {
			/*
			 * The segment must be split into header and
			 * payload segments
			 */
			sfc_ef100_tx_qdesc_seg_create(dma_addr,
				remaining_hdr_len, &txq->txq_hw_ring[id]);
			txq->sw_ring[id].mbuf = NULL;

			id = (*added)++ & txq->ptr_mask;
			sfc_ef100_tx_qdesc_seg_create(
				dma_addr + remaining_hdr_len,
				rte_pktmbuf_data_len(m_seg) - remaining_hdr_len,
				&txq->txq_hw_ring[id]);
			remaining_hdr_len = 0;
			seg_split = 1;
		}
		txq->sw_ring[id].mbuf = m_seg;
		m_seg = m_seg->next;
	} while (remaining_hdr_len > 0);

	/*
	 * If a segment is split into header and payload segments, the
	 * added pointer counts it twice, so correct for that here.
	 */
	nb_hdr_descs = ((id - tso_desc_id) & txq->ptr_mask) - seg_split;
	nb_pld_descs = (*m)->nb_segs - nb_hdr_descs + seg_split;

	sfc_ef100_tx_qdesc_tso_create(*m, nb_hdr_descs, nb_pld_descs, header_len,
				      rte_pktmbuf_pkt_len(*m) - header_len,
				      outer_iph_off, outer_udph_off,
				      iph_off, tcph_off,
				      &txq->txq_hw_ring[tso_desc_id]);

	*m = m_seg;
	return 0;
}

static uint16_t
sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef100_txq * const txq = sfc_ef100_txq_by_dp_txq(tx_queue);
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;
	rte_iova_t dma_addr;
	int rc;

	if (unlikely(txq->flags &
		     (SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION)))
		return 0;

	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

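	/*
	 * Reap eagerly before filling if the amount of free descriptor
	 * space has dropped below the free threshold; otherwise reap
	 * lazily, only when a packet does not fit (see the loop below).
	 */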
	reap_done = (dma_desc_space < txq->free_thresh);
	if (reap_done) {
		sfc_ef100_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
	     pktp != pktp_end;
	     ++pktp) {
		struct rte_mbuf *m_seg = *pktp;
		unsigned int pkt_start = added;
		unsigned int id;

		if (likely(pktp + 1 != pktp_end))
			rte_mbuf_prefetch_part1(pktp[1]);

		if (sfc_ef100_tx_pkt_descs_max(m_seg) > dma_desc_space) {
			if (reap_done)
				break;

			/* Push already prepared descriptors before polling */
			if (added != txq->added) {
				sfc_ef100_tx_qpush(txq, added);
				txq->added = added;
			}

			sfc_ef100_tx_reap(txq);
			reap_done = true;
			dma_desc_space = txq->max_fill_level -
				(added - txq->completed);
			if (sfc_ef100_tx_pkt_descs_max(m_seg) > dma_desc_space)
				break;
		}

		if (m_seg->ol_flags & sfc_dp_mport_override) {
			id = added++ & txq->ptr_mask;
			sfc_ef100_tx_qdesc_prefix_create(m_seg,
							 &txq->txq_hw_ring[id]);
			txq->sw_ring[id].mbuf = NULL;
		}

		if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
			rc = sfc_ef100_xmit_tso_pkt(txq, &m_seg, &added);
		} else {
			id = added++ & txq->ptr_mask;
			rc = sfc_ef100_tx_qdesc_send_create(txq, m_seg,
							    &txq->txq_hw_ring[id]);

			/*
			 * rte_pktmbuf_free() is commonly used in DPDK for
			 * recycling packets - the function checks every
			 * segment's reference counter and returns the
			 * buffer to its pool whenever possible.
			 * Nevertheless, freeing mbuf segments one by one
			 * may entail some performance decline. From this
			 * point, sfc_ef100_tx_reap() does the same job on
			 * its own and frees buffers in bulks (all mbufs
			 * within a bulk belong to the same pool). From this
			 * perspective, individual segment pointers must be
			 * associated with the corresponding SW descriptors
			 * independently so that only one loop is sufficient
			 * on reap to inspect all the buffers.
			 */
			txq->sw_ring[id].mbuf = m_seg;
			m_seg = m_seg->next;
		}

		while (likely(rc == 0) && m_seg != NULL) {
			RTE_BUILD_BUG_ON(SFC_MBUF_SEG_LEN_MAX >
					 SFC_EF100_TX_SEG_DESC_LEN_MAX);

			id = added++ & txq->ptr_mask;
			rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m_seg),
					      rte_pktmbuf_data_len(m_seg),
					      &dma_addr);
			sfc_ef100_tx_qdesc_seg_create(dma_addr,
					rte_pktmbuf_data_len(m_seg),
					&txq->txq_hw_ring[id]);
			txq->sw_ring[id].mbuf = m_seg;
			m_seg = m_seg->next;
		}

		if (likely(rc == 0)) {
			dma_desc_space -= (added - pkt_start);

			sfc_pkts_bytes_add(&txq->dp.dpq.stats, 1,
					   rte_pktmbuf_pkt_len(*pktp));
		} else {
			added = pkt_start;
		}
	}

	if (likely(added != txq->added)) {
		sfc_ef100_tx_qpush(txq, added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef100_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

static sfc_dp_tx_get_dev_info_t sfc_ef100_get_dev_info;
static void
sfc_ef100_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * The number of descriptors just defines the maximum number of
	 * pushed descriptors (fill level).
	 */
	dev_info->tx_desc_lim.nb_min = 1;
	dev_info->tx_desc_lim.nb_align = 1;
}

static sfc_dp_tx_qsize_up_rings_t sfc_ef100_tx_qsize_up_rings;
static int
sfc_ef100_tx_qsize_up_rings(uint16_t nb_tx_desc,
			    struct sfc_dp_tx_hw_limits *limits,
			    unsigned int *txq_entries,
			    unsigned int *evq_entries,
			    unsigned int *txq_max_fill_level)
{
	/*
	 * The rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_tx_desc <= limits->txq_min_entries)
		*txq_entries = limits->txq_min_entries;
	else
		*txq_entries = rte_align32pow2(nb_tx_desc);

	*evq_entries = *txq_entries;

	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
				      SFC_EF100_TXQ_LIMIT(*evq_entries));
	return 0;
}

static sfc_dp_tx_qcreate_t sfc_ef100_tx_qcreate;
static int
sfc_ef100_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		     const struct rte_pci_addr *pci_addr, int socket_id,
		     const struct sfc_dp_tx_qcreate_info *info,
		     struct sfc_dp_txq **dp_txqp)
{
	struct sfc_ef100_txq *txq;
	int rc;

	rc = EINVAL;
	if (info->txq_entries != info->evq_entries)
		goto fail_bad_args;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-ef100-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-ef100-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	txq->flags = SFC_EF100_TXQ_NOT_RUNNING;
	txq->ptr_mask = info->txq_entries - 1;
	txq->max_fill_level = info->max_fill_level;
	txq->free_thresh = info->free_thresh;
	txq->evq_phase_bit_shift = rte_bsf32(info->evq_entries);
	txq->txq_hw_ring = info->txq_hw_ring;
	txq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_GZ_TX_RING_DOORBELL_OFST +
			(info->hw_index << info->vi_window_shift);
	txq->evq_hw_ring = info->evq_hw_ring;

	txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;
	txq->tso_max_nb_header_descs = info->tso_max_nb_header_descs;
	txq->tso_max_header_len = info->tso_max_header_len;
	txq->tso_max_nb_payload_descs = info->tso_max_nb_payload_descs;
	txq->tso_max_payload_len = info->tso_max_payload_len;
	txq->tso_max_nb_outgoing_frames = info->tso_max_nb_outgoing_frames;

	txq->nic_dma_info = info->nic_dma_info;
	if (txq->nic_dma_info->nb_regions > 0)
		txq->flags |= SFC_EF100_TXQ_NIC_DMA_MAP;

	sfc_ef100_tx_debug(txq, "TxQ doorbell is %p", txq->doorbell);

	*dp_txqp = &txq->dp;
	return 0;

fail_sw_ring_alloc:
	rte_free(txq);

fail_txq_alloc:
fail_bad_args:
	return rc;
}

static sfc_dp_tx_qdestroy_t sfc_ef100_tx_qdestroy;
static void
sfc_ef100_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	rte_free(txq->sw_ring);
	rte_free(txq);
}

static sfc_dp_tx_qstart_t sfc_ef100_tx_qstart;
static int
sfc_ef100_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		    unsigned int txq_desc_index)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	txq->evq_read_ptr = evq_read_ptr;
	txq->added = txq->completed = txq_desc_index;

	txq->flags |= SFC_EF100_TXQ_STARTED;
	txq->flags &= ~(SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION);

	return 0;
}

static sfc_dp_tx_qstop_t sfc_ef100_tx_qstop;
static void
sfc_ef100_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	txq->flags |= SFC_EF100_TXQ_NOT_RUNNING;

	*evq_read_ptr = txq->evq_read_ptr;
}

static sfc_dp_tx_qtx_ev_t sfc_ef100_tx_qtx_ev;
static bool
sfc_ef100_tx_qtx_ev(struct sfc_dp_txq *dp_txq, unsigned int num_descs)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	SFC_ASSERT(txq->flags & SFC_EF100_TXQ_NOT_RUNNING);

	sfc_ef100_tx_reap_num_descs(txq, num_descs);

	return false;
}

static sfc_dp_tx_qreap_t sfc_ef100_tx_qreap;
static void
sfc_ef100_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
	unsigned int completed;

	for (completed = txq->completed; completed != txq->added; ++completed) {
		struct sfc_ef100_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];
		if (txd->mbuf != NULL) {
			rte_pktmbuf_free_seg(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EF100_TXQ_STARTED;
}

static unsigned int
sfc_ef100_tx_qdesc_npending(struct sfc_ef100_txq *txq)
{
	const unsigned int evq_old_read_ptr = txq->evq_read_ptr;
	unsigned int npending = 0;
	efx_qword_t tx_ev;

	if (unlikely(txq->flags &
		     (SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION)))
		return 0;

	while (sfc_ef100_tx_get_event(txq, &tx_ev))
		npending += EFX_QWORD_FIELD(tx_ev, ESF_GZ_EV_TXCMPL_NUM_DESC);

	/*
	 * The function does not process events, so restore the event queue
	 * read pointer to its original position to allow the events that
	 * were read to be processed later.
	 */
	txq->evq_read_ptr = evq_old_read_ptr;

	return npending;
}

static sfc_dp_tx_qdesc_status_t sfc_ef100_tx_qdesc_status;
static int
sfc_ef100_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
	unsigned int pushed = txq->added - txq->completed;

	if (unlikely(offset > txq->ptr_mask))
		return -EINVAL;

	if (unlikely(offset >= txq->max_fill_level))
		return RTE_ETH_TX_DESC_UNAVAIL;

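	/*
	 * Offsets at or beyond the number of pushed descriptors have not
	 * been filled yet and are reported as done; a pushed descriptor is
	 * reported as done once enough completion events are pending to
	 * cover its offset, and as full otherwise.
	 */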
	return (offset >= pushed ||
		offset < sfc_ef100_tx_qdesc_npending(txq)) ?
		RTE_ETH_TX_DESC_DONE : RTE_ETH_TX_DESC_FULL;
}

struct sfc_dp_tx sfc_ef100_tx = {
	.dp = {
		.name = SFC_KVARG_DATAPATH_EF100,
		.type = SFC_DP_TX,
		.hw_fw_caps = SFC_DP_HW_FW_CAP_EF100,
	},
	.features = SFC_DP_TX_FEAT_MULTI_PROCESS |
		    SFC_DP_TX_FEAT_STATS,
	.dev_offload_capa = 0,
	.queue_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
			      RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			      RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
			      RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
			      RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
			      RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
			      RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
			      RTE_ETH_TX_OFFLOAD_TCP_TSO |
			      RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
			      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
	.get_dev_info = sfc_ef100_get_dev_info,
	.qsize_up_rings = sfc_ef100_tx_qsize_up_rings,
	.qcreate = sfc_ef100_tx_qcreate,
	.qdestroy = sfc_ef100_tx_qdestroy,
	.qstart = sfc_ef100_tx_qstart,
	.qtx_ev = sfc_ef100_tx_qtx_ev,
	.qstop = sfc_ef100_tx_qstop,
	.qreap = sfc_ef100_tx_qreap,
	.qdesc_status = sfc_ef100_tx_qdesc_status,
	.pkt_prepare = sfc_ef100_tx_prepare_pkts,
	.pkt_burst = sfc_ef100_xmit_pkts,
};