/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>

#include <rte_ip_frag.h>
#include <rte_cycles.h>
#include <rte_log.h>

#include "rte_port_ras.h"

#include "port_log.h"

#ifndef RTE_PORT_RAS_N_BUCKETS
#define RTE_PORT_RAS_N_BUCKETS                                 4094
#endif

#ifndef RTE_PORT_RAS_N_ENTRIES_PER_BUCKET
#define RTE_PORT_RAS_N_ENTRIES_PER_BUCKET                      8
#endif

#ifndef RTE_PORT_RAS_N_ENTRIES
#define RTE_PORT_RAS_N_ENTRIES (RTE_PORT_RAS_N_BUCKETS * RTE_PORT_RAS_N_ENTRIES_PER_BUCKET)
#endif
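/* With the defaults above, the reassembly table tracks up to
 * 4094 buckets * 8 entries/bucket = 32752 fragmented packets at a time.
 */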

#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_ring_writer_ras;

typedef void (*ras_op)(
		struct rte_port_ring_writer_ras *p,
		struct rte_mbuf *pkt);

static void
process_ipv4(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt);
static void
process_ipv6(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt);

struct rte_port_ring_writer_ras {
	struct rte_port_out_stats stats;

	struct rte_mbuf *tx_buf[RTE_PORT_IN_BURST_SIZE_MAX];
	struct rte_ring *ring;
	uint32_t tx_burst_sz;
	uint32_t tx_buf_count;
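	/* frag_tbl holds fragments awaiting reassembly; death_row collects
	 * mbufs of dropped or expired fragments for deferred freeing.
	 */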
	struct rte_ip_frag_tbl *frag_tbl;
	struct rte_ip_frag_death_row death_row;

	ras_op f_ras;
};

static void *
rte_port_ring_writer_ras_create(void *params, int socket_id, int is_ipv4)
{
	struct rte_port_ring_writer_ras_params *conf =
			params;
	struct rte_port_ring_writer_ras *port;
	uint64_t frag_cycles;

	/* Check input parameters */
	if (conf == NULL) {
		PORT_LOG(ERR, "%s: Parameter conf is NULL", __func__);
		return NULL;
	}
	if (conf->ring == NULL) {
		PORT_LOG(ERR, "%s: Parameter ring is NULL", __func__);
		return NULL;
	}
	if ((conf->tx_burst_sz == 0) ||
	    (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
		PORT_LOG(ERR, "%s: Parameter tx_burst_sz is invalid",
			__func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		PORT_LOG(ERR, "%s: Failed to allocate port", __func__);
		return NULL;
	}

	/* Create fragmentation table */
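	/* frag_cycles is the TSC frequency rounded up to a whole multiple of
	 * MS_PER_S, then scaled by 100: incomplete fragments are kept for
	 * roughly 100 seconds before they expire.
	 */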
	frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S * MS_PER_S;
	frag_cycles *= 100;

	port->frag_tbl = rte_ip_frag_table_create(
		RTE_PORT_RAS_N_BUCKETS,
		RTE_PORT_RAS_N_ENTRIES_PER_BUCKET,
		RTE_PORT_RAS_N_ENTRIES,
		frag_cycles,
		socket_id);

	if (port->frag_tbl == NULL) {
		PORT_LOG(ERR, "%s: rte_ip_frag_table_create failed",
			__func__);
		rte_free(port);
		return NULL;
	}

	/* Initialization */
	port->ring = conf->ring;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;

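	/* Resolve the address-family handler once at creation time so the
	 * per-packet tx path does not branch on IPv4 vs. IPv6.
	 */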
	port->f_ras = (is_ipv4 == 1) ? process_ipv4 : process_ipv6;

	return port;
}

static void *
rte_port_ring_writer_ipv4_ras_create(void *params, int socket_id)
{
	return rte_port_ring_writer_ras_create(params, socket_id, 1);
}

static void *
rte_port_ring_writer_ipv6_ras_create(void *params, int socket_id)
{
	return rte_port_ring_writer_ras_create(params, socket_id, 0);
}

static inline void
send_burst(struct rte_port_ring_writer_ras *p)
{
	uint32_t nb_tx;

	nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
			p->tx_buf_count, NULL);

	RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
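	/* Packets the ring did not accept are counted as drops and freed */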
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}

static void
process_ipv4(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt)
{
	/* Assume there is no ethernet header */
	struct rte_ipv4_hdr *pkt_hdr =
		rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);

	/* Get "More fragments" flag and fragment offset */
	uint16_t frag_field = rte_be_to_cpu_16(pkt_hdr->fragment_offset);
	uint16_t frag_offset = (uint16_t)(frag_field & RTE_IPV4_HDR_OFFSET_MASK);
	uint16_t frag_flag = (uint16_t)(frag_field & RTE_IPV4_HDR_MF_FLAG);

	/* If it is a fragmented packet, then try to reassemble */
	if ((frag_flag == 0) && (frag_offset == 0))
		p->tx_buf[p->tx_buf_count++] = pkt;
	else {
		struct rte_mbuf *mo;
		struct rte_ip_frag_tbl *tbl = p->frag_tbl;
		struct rte_ip_frag_death_row *dr = &p->death_row;

		pkt->l3_len = sizeof(*pkt_hdr);

		/* Process this fragment */
		mo = rte_ipv4_frag_reassemble_packet(tbl, dr, pkt, rte_rdtsc(),
				pkt_hdr);
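		/* NULL means reassembly is still incomplete; the fragment is
		 * retained in the table until the remaining pieces arrive.
		 */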
		if (mo != NULL)
			p->tx_buf[p->tx_buf_count++] = mo;

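		/* Free mbufs queued on the death row (dropped or expired
		 * fragments); the second argument is a prefetch look-ahead.
		 */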
		rte_ip_frag_free_death_row(&p->death_row, 3);
	}
}

static void
process_ipv6(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt)
{
	/* Assume there is no ethernet header */
	struct rte_ipv6_hdr *pkt_hdr =
		rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);

	struct rte_ipv6_fragment_ext *frag_hdr;
	uint16_t frag_data = 0;
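	/* frag_data stays 0 (packet treated as unfragmented) when no
	 * fragment extension header is present.
	 */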
	frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(pkt_hdr);
	if (frag_hdr != NULL)
		frag_data = rte_be_to_cpu_16(frag_hdr->frag_data);

	/* If it is a fragmented packet, then try to reassemble */
	if ((frag_data & RTE_IPV6_FRAG_USED_MASK) == 0)
		p->tx_buf[p->tx_buf_count++] = pkt;
	else {
		struct rte_mbuf *mo;
		struct rte_ip_frag_tbl *tbl = p->frag_tbl;
		struct rte_ip_frag_death_row *dr = &p->death_row;

		pkt->l3_len = sizeof(*pkt_hdr) + sizeof(*frag_hdr);

		/* Process this fragment */
		mo = rte_ipv6_frag_reassemble_packet(tbl, dr, pkt, rte_rdtsc(), pkt_hdr,
				frag_hdr);
		if (mo != NULL)
			p->tx_buf[p->tx_buf_count++] = mo;

		rte_ip_frag_free_death_row(&p->death_row, 3);
	}
}

static int
rte_port_ring_writer_ras_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_ring_writer_ras *p =
			port;

	RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(p, 1);
	p->f_ras(p, pkt);
	if (p->tx_buf_count >= p->tx_burst_sz)
		send_burst(p);

	return 0;
}

static int
rte_port_ring_writer_ras_tx_bulk(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask)
{
	struct rte_port_ring_writer_ras *p =
			port;

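	/* A mask of the form 0...01...1 (contiguous ones from bit 0) allows a
	 * straight sequential scan; otherwise walk the individual set bits.
	 */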
	if ((pkts_mask & (pkts_mask + 1)) == 0) {
		uint64_t n_pkts = rte_popcount64(pkts_mask);
		uint32_t i;

		for (i = 0; i < n_pkts; i++) {
			struct rte_mbuf *pkt = pkts[i];

			RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(p, 1);
			p->f_ras(p, pkt);
			if (p->tx_buf_count >= p->tx_burst_sz)
				send_burst(p);
		}
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = rte_ctz64(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(p, 1);
			p->f_ras(p, pkt);
			if (p->tx_buf_count >= p->tx_burst_sz)
				send_burst(p);

			pkts_mask &= ~pkt_mask;
		}
	}

	return 0;
}

static int
rte_port_ring_writer_ras_flush(void *port)
{
	struct rte_port_ring_writer_ras *p =
			port;

	if (p->tx_buf_count > 0)
		send_burst(p);

	return 0;
}

static int
rte_port_ring_writer_ras_free(void *port)
{
	struct rte_port_ring_writer_ras *p =
			port;

	if (port == NULL) {
		PORT_LOG(ERR, "%s: Parameter port is NULL", __func__);
		return -1;
	}

	rte_port_ring_writer_ras_flush(port);
	rte_ip_frag_table_destroy(p->frag_tbl);
	rte_free(port);

	return 0;
}

static int
rte_port_ras_writer_stats_read(void *port,
		struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_ring_writer_ras *p =
		port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Summary of port operations
 */
struct rte_port_out_ops rte_port_ring_writer_ipv4_ras_ops = {
	.f_create = rte_port_ring_writer_ipv4_ras_create,
	.f_free = rte_port_ring_writer_ras_free,
	.f_tx = rte_port_ring_writer_ras_tx,
	.f_tx_bulk = rte_port_ring_writer_ras_tx_bulk,
	.f_flush = rte_port_ring_writer_ras_flush,
	.f_stats = rte_port_ras_writer_stats_read,
};

struct rte_port_out_ops rte_port_ring_writer_ipv6_ras_ops = {
	.f_create = rte_port_ring_writer_ipv6_ras_create,
	.f_free = rte_port_ring_writer_ras_free,
	.f_tx = rte_port_ring_writer_ras_tx,
	.f_tx_bulk = rte_port_ring_writer_ras_tx_bulk,
	.f_flush = rte_port_ring_writer_ras_flush,
	.f_stats = rte_port_ras_writer_stats_read,
};
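
/*
 * Usage sketch (illustrative only; the ring variable, burst size, and
 * socket id below are example values, not mandated by this file):
 *
 *	struct rte_port_ring_writer_ras_params conf = {
 *		.ring = ring,		// previously created rte_ring
 *		.tx_burst_sz = 32,
 *	};
 *	void *port = rte_port_ring_writer_ipv4_ras_ops.f_create(&conf, 0);
 *
 *	rte_port_ring_writer_ipv4_ras_ops.f_tx(port, pkt);
 *	rte_port_ring_writer_ipv4_ras_ops.f_flush(port);
 *	rte_port_ring_writer_ipv4_ras_ops.f_free(port);
 */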