/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_ip.h>
#include <rte_tcp.h>

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_tx.h"
#include "sfc_ev.h"
#include "sfc_tso.h"

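/*
 * Allocate one TSO header buffer per TxQ entry so that a packet header
 * which is split across mbufs can be linearized into it on transmit.
 */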
int
sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			    unsigned int txq_entries, unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < txq_entries; ++i) {
		sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
						    SFC_TSOH_STD_LEN,
						    RTE_CACHE_LINE_SIZE,
						    socket_id);
		if (sw_ring[i].tsoh == NULL)
			goto fail_alloc_tsoh_objs;
	}

	return 0;

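/* Unwind: free the buffers allocated before the failure. */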
fail_alloc_tsoh_objs:
	while (i > 0)
		rte_free(sw_ring[--i].tsoh);

	return ENOMEM;
}

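/*
 * Release all TSO header buffers attached to the TxQ software ring.
 */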
void
sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			   unsigned int txq_entries)
{
	unsigned int i;

	for (i = 0; i < txq_entries; ++i) {
		rte_free(sw_ring[i].tsoh);
		sw_ring[i].tsoh = NULL;
	}
}

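/*
 * Copy the first header_len bytes of the mbuf chain into the contiguous
 * buffer tsoh. On return, *in_seg and *in_off point just past the header
 * (i.e. at the start of the payload); the return value is the number of
 * segments consumed entirely by the header.
 */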
unsigned int
sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len,
		       struct rte_mbuf **in_seg, size_t *in_off)
{
	struct rte_mbuf *m = *in_seg;
	size_t bytes_to_copy = 0;
	size_t bytes_left = header_len;
	unsigned int segments_copied = 0;

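	/* Copy header bytes segment by segment until header_len is reached. */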
	do {
		bytes_to_copy = MIN(bytes_left, m->data_len);

		rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
			   bytes_to_copy);

		bytes_left -= bytes_to_copy;
		tsoh += bytes_to_copy;

		if (bytes_left > 0) {
			m = m->next;
			SFC_ASSERT(m != NULL);
			segments_copied++;
		}
	} while (bytes_left > 0);

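	/*
	 * If the header ends exactly on a segment boundary, the last segment
	 * is fully consumed as well and the payload starts in the next one.
	 */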
	if (bytes_to_copy == m->data_len) {
		*in_seg = m->next;
		*in_off = 0;
		segments_copied++;
	} else {
		*in_seg = m;
		*in_off = bytes_to_copy;
	}

	return segments_copied;
}

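/*
 * Emit the descriptors needed for one TSO packet: FATSOv2 option
 * descriptors carrying the IPID, TCP sequence number and MSS, followed
 * by a DMA descriptor covering the (possibly linearized) packet header.
 */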
int
sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
	       struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
	       unsigned int *pkt_descs, size_t *pkt_len)
{
	uint8_t *tsoh;
	const struct rte_tcp_hdr *th;
	efsys_dma_addr_t header_paddr;
	uint16_t packet_id = 0;
	uint32_t sent_seq;
	struct rte_mbuf *m = *in_seg;
	size_t nh_off = m->l2_len; /* IP header offset */
	size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
	size_t header_len = m->l2_len + m->l3_len + m->l4_len;

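	/*
	 * The header DMA descriptor follows the TSO option descriptors, so
	 * skip past them to reach the sw_ring entry that owns the TSOH
	 * buffer for this packet.
	 */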
	idx += SFC_EF10_TSO_OPT_DESCS_NUM;

	header_paddr = rte_pktmbuf_iova(m);

	/*
	 * Sometimes headers may be split across multiple mbufs. In such
	 * cases we need to glue those pieces together and store them in
	 * some temporary place. Also, packet headers must be contiguous in
	 * memory so that they can be referred to by a single DMA
	 * descriptor. EF10 places no restrictions on DMA descriptor data
	 * crossing address boundaries.
	 */
	if (m->data_len < header_len) {
		/*
		 * Discard the packet if header linearization is needed but
		 * the header is too big. This duplicates the Tx prepare
		 * check to avoid corrupting memory if Tx prepare is skipped.
		 */
		if (unlikely(header_len > SFC_TSOH_STD_LEN))
			return EMSGSIZE;

		tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
		sfc_tso_prepare_header(tsoh, header_len, in_seg, in_off);

		header_paddr = rte_malloc_virt2iova((void *)tsoh);
	} else {
		/* The whole header is contiguous in the first segment. */
		if (m->data_len == header_len) {
			*in_off = 0;
			*in_seg = m->next;
		} else {
			*in_off = header_len;
		}

		tsoh = rte_pktmbuf_mtod(m, uint8_t *);
	}

	/*
	 * 8000-series EF10 hardware requires that the innermost IP length
	 * be greater than or equal to the length each segment is supposed
	 * to have; otherwise, the TCP checksum will be incorrect.
	 */
	sfc_tso_innermost_ip_fix_len(m, tsoh, nh_off);

	/*
	 * Handle the IP header. Tx prepare has debug-only checks that the
	 * offload flags are correctly filled in for a TSO mbuf. Use a zero
	 * IPID if the IPv4 flag is not set; if the packet is in fact IPv4,
	 * the HW will simply start from IPID zero.
	 */
	if (m->ol_flags & RTE_MBUF_F_TX_IPV4)
		packet_id = sfc_tso_ip4_get_ipid(tsoh, nh_off);

	/* Handle the TCP header */
	th = (const struct rte_tcp_hdr *)(tsoh + tcph_off);

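	/*
	 * Read the TCP sequence number via rte_memcpy(): the header inside
	 * the buffer may not be suitably aligned for a direct 32-bit load.
	 */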
	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
	sent_seq = rte_be_to_cpu_32(sent_seq);

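	/* Build the FATSOv2 option descriptors from the parsed header fields. */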
	efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq,
				 m->tso_segsz,
				 *pend, EFX_TX_FATSOV2_OPT_NDESCS);

	*pend += EFX_TX_FATSOV2_OPT_NDESCS;
	*pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;

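	/*
	 * Add a DMA descriptor that covers the header alone; the payload
	 * descriptors follow, and *pkt_len is reduced accordingly.
	 */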
	efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
				B_FALSE, (*pend)++);
	(*pkt_descs)++;
	*pkt_len -= header_len;

	return 0;
}