/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Red Hat Corp.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_malloc.h>

#include "testpmd.h"
#include "5tswap.h"
#include "macfwd.h"
#if defined(RTE_ARCH_X86)
#include "macswap_sse.h"
#elif defined(__ARM_NEON)
#include "macswap_neon.h"
#else
#include "macswap.h"
#endif

#define NOISY_STRSIZE 256
#define NOISY_RING "noisy_ring_%d\n"

struct noisy_config {
	struct rte_ring *f;
	uint64_t prev_time;
	char *vnf_mem;
	bool do_buffering;
	bool do_flush;
	bool do_sim;
};

struct noisy_config *noisy_cfg[RTE_MAX_ETHPORTS];

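/*
 * Write a pseudo-random value to a pseudo-random location inside the
 * simulated VNF working memory.
 */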
static inline void
do_write(char *vnf_mem)
{
	uint64_t i = rte_rand();
	uint64_t w = rte_rand();

	vnf_mem[i % ((noisy_lkup_mem_sz * 1024 * 1024) /
			RTE_CACHE_LINE_SIZE)] = w;
}

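/*
 * Read from a pseudo-random location inside the simulated VNF working
 * memory; the value is discarded, only the memory access matters.
 */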
static inline void
do_read(char *vnf_mem)
{
	uint64_t r __rte_unused;
	uint64_t i = rte_rand();

	r = vnf_mem[i % ((noisy_lkup_mem_sz * 1024 * 1024) /
			RTE_CACHE_LINE_SIZE)];
	r++;
}

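/* Perform one read followed by one write on the simulated VNF memory. */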
static inline void
do_readwrite(char *vnf_mem)
{
	do_read(vnf_mem);
	do_write(vnf_mem);
}

/*
 * Simulate route lookups as defined by commandline parameters
 */
static void
sim_memory_lookups(struct noisy_config *ncf, uint16_t nb_pkts)
{
	uint16_t i, j;

	for (i = 0; i < nb_pkts; i++) {
		for (j = 0; j < noisy_lkup_num_writes; j++)
			do_write(ncf->vnf_mem);
		for (j = 0; j < noisy_lkup_num_reads; j++)
			do_read(ncf->vnf_mem);
		for (j = 0; j < noisy_lkup_num_reads_writes; j++)
			do_readwrite(ncf->vnf_mem);
	}
}

/*
 * Forwarding of packets in noisy VNF mode.  Forward packets but perform
 * memory operations first as specified on cmdline.
 *
 * Depending on which commandline parameters are specified we have
 * different cases to handle:
 *
 * 1. No FIFO size was given, so we don't do buffering of incoming
 *    packets.  This case is pretty much what iofwd does but in this case
 *    we also do simulation of memory accesses (depending on which
 *    parameters were specified for it).
 * 2. User wants to buffer packets in a FIFO and send out overflowing
 *    packets.
 * 3. User wants a FIFO and specifies a time in ms to flush all packets
 *    out of the FIFO.
 * 4. Cases 2 and 3 combined.
 */
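/*
 * Illustrative invocation for case 4 above (a sketch; it assumes the usual
 * testpmd options --forward-mode, --noisy-tx-sw-buffer-size and
 * --noisy-tx-sw-buffer-flushtime):
 *
 *   dpdk-testpmd -l 0-1 -- --forward-mode=noisy \
 *       --noisy-tx-sw-buffer-size=512 --noisy-tx-sw-buffer-flushtime=10
 */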
static uint16_t
noisy_eth_tx_burst(struct fwd_stream *fs, uint16_t nb_rx, struct rte_mbuf **pkts_burst)
{
	const uint64_t freq_khz = rte_get_timer_hz() / 1000;
	struct noisy_config *ncf = noisy_cfg[fs->rx_port];
	struct rte_mbuf *tmp_pkts[MAX_PKT_BURST];
	uint16_t nb_deqd = 0;
	uint16_t nb_tx = 0;
	uint16_t nb_enqd;
	unsigned int fifo_free;
	uint64_t delta_ms;
	bool needs_flush = false;
	uint64_t now;

	if (unlikely(nb_rx == 0)) {
		if (!ncf->do_buffering)
			goto end;
		else
			goto flush;
	}

	if (!ncf->do_buffering) {
		if (ncf->do_sim)
			sim_memory_lookups(ncf, nb_rx);
		nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
		goto end;
	}

	fifo_free = rte_ring_free_count(ncf->f);
	if (fifo_free >= nb_rx) {
		nb_enqd = rte_ring_enqueue_burst(ncf->f, (void **) pkts_burst, nb_rx, NULL);
		if (nb_enqd < nb_rx) {
			fs->fwd_dropped += nb_rx - nb_enqd;
			rte_pktmbuf_free_bulk(&pkts_burst[nb_enqd], nb_rx - nb_enqd);
		}
	} else {
		nb_deqd = rte_ring_dequeue_burst(ncf->f, (void **) tmp_pkts, nb_rx, NULL);
		nb_enqd = rte_ring_enqueue_burst(ncf->f, (void **) pkts_burst, nb_deqd, NULL);
		if (nb_deqd > 0)
			nb_tx = common_fwd_stream_transmit(fs, tmp_pkts, nb_deqd);
	}

	if (ncf->do_sim)
		sim_memory_lookups(ncf, nb_enqd);

flush:
	if (ncf->do_flush) {
		if (!ncf->prev_time)
			now = ncf->prev_time = rte_get_timer_cycles();
		else
			now = rte_get_timer_cycles();
		delta_ms = (now - ncf->prev_time) / freq_khz;
		needs_flush = delta_ms >= noisy_tx_sw_buf_flush_time &&
				noisy_tx_sw_buf_flush_time > 0 && !nb_tx;
	}
	while (needs_flush && !rte_ring_empty(ncf->f)) {
		nb_deqd = rte_ring_dequeue_burst(ncf->f, (void **)tmp_pkts,
				MAX_PKT_BURST, NULL);
		nb_tx += common_fwd_stream_transmit(fs, tmp_pkts, nb_deqd);
		ncf->prev_time = rte_get_timer_cycles();
	}
end:
	return nb_tx;
}

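/*
 * Noisy VNF forwarding in io mode: receive a burst and hand it to the noisy
 * TX path unmodified.
 */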
static bool
pkt_burst_io(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;
	uint16_t nb_tx;

	nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
	nb_tx = noisy_eth_tx_burst(fs, nb_rx, pkts_burst);

	return nb_rx > 0 || nb_tx > 0;
}

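/*
 * Noisy VNF forwarding in mac mode: rewrite source and destination MAC
 * addresses before handing the burst to the noisy TX path.
 */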
static bool
pkt_burst_mac(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;
	uint16_t nb_tx;

	nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
	if (likely(nb_rx != 0))
		do_macfwd(pkts_burst, nb_rx, fs);
	nb_tx = noisy_eth_tx_burst(fs, nb_rx, pkts_burst);

	return nb_rx > 0 || nb_tx > 0;
}

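/*
 * Noisy VNF forwarding in macswap mode: swap source and destination MAC
 * addresses before handing the burst to the noisy TX path.
 */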
static bool
pkt_burst_macswap(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;
	uint16_t nb_tx;

	nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
	if (likely(nb_rx != 0))
		do_macswap(pkts_burst, nb_rx, &ports[fs->tx_port]);
	nb_tx = noisy_eth_tx_burst(fs, nb_rx, pkts_burst);

	return nb_rx > 0 || nb_tx > 0;
}

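/*
 * Noisy VNF forwarding in 5tswap mode: swap source and destination of the
 * L2/L3/L4 headers (the 5-tuple) before handing the burst to the noisy
 * TX path.
 */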
static bool
pkt_burst_5tswap(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;
	uint16_t nb_tx;

	nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
	if (likely(nb_rx != 0))
		do_5tswap(pkts_burst, nb_rx, fs);
	nb_tx = noisy_eth_tx_burst(fs, nb_rx, pkts_burst);

	return nb_rx > 0 || nb_tx > 0;
}

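/*
 * Per-port teardown: release the FIFO ring, the simulated VNF memory and
 * the per-port configuration.
 */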
static void
noisy_fwd_end(portid_t pi)
{
	rte_ring_free(noisy_cfg[pi]->f);
	rte_free(noisy_cfg[pi]->vnf_mem);
	rte_free(noisy_cfg[pi]);
}

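/*
 * Per-port setup: allocate the per-port noisy_config, create the FIFO ring
 * and the simulated VNF memory when requested, and select the packet_fwd
 * callback that matches the configured noisy forward mode.
 */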
static int
noisy_fwd_begin(portid_t pi)
{
	struct noisy_config *n;
	char name[NOISY_STRSIZE];

	noisy_cfg[pi] = rte_zmalloc("testpmd noisy fifo and timers",
				sizeof(struct noisy_config),
				RTE_CACHE_LINE_SIZE);
	if (noisy_cfg[pi] == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d, struct noisy_config) failed\n",
			 (int) pi);
	}
	n = noisy_cfg[pi];
	n->do_buffering = noisy_tx_sw_bufsz > 0;
	n->do_sim = noisy_lkup_num_writes + noisy_lkup_num_reads +
		    noisy_lkup_num_reads_writes;
	n->do_flush = noisy_tx_sw_buf_flush_time > 0;

	if (n->do_buffering) {
		snprintf(name, NOISY_STRSIZE, NOISY_RING, pi);
		n->f = rte_ring_create(name, noisy_tx_sw_bufsz,
				rte_socket_id(), 0);
		if (!n->f)
			rte_exit(EXIT_FAILURE,
				 "rte_ring_create(%d, size %d) failed\n",
				 (int) pi,
				 noisy_tx_sw_bufsz);
	}
	if (noisy_lkup_mem_sz > 0) {
		n->vnf_mem = (char *) rte_zmalloc("vnf sim memory",
				 noisy_lkup_mem_sz * 1024 * 1024,
				 RTE_CACHE_LINE_SIZE);
		if (!n->vnf_mem)
			rte_exit(EXIT_FAILURE,
			   "rte_zmalloc(%" PRIu64 ") for vnf memory failed\n",
			   noisy_lkup_mem_sz);
	} else if (n->do_sim) {
		rte_exit(EXIT_FAILURE,
			 "--noisy-lkup-memory-size must be > 0\n");
	}

	if (noisy_fwd_mode == NOISY_FWD_MODE_IO)
		noisy_vnf_engine.packet_fwd = pkt_burst_io;
	else if (noisy_fwd_mode == NOISY_FWD_MODE_MAC)
		noisy_vnf_engine.packet_fwd = pkt_burst_mac;
	else if (noisy_fwd_mode == NOISY_FWD_MODE_MACSWAP)
		noisy_vnf_engine.packet_fwd = pkt_burst_macswap;
	else if (noisy_fwd_mode == NOISY_FWD_MODE_5TSWAP)
		noisy_vnf_engine.packet_fwd = pkt_burst_5tswap;
	else
		rte_exit(EXIT_FAILURE,
			 "Invalid noisy_fwd_mode specified\n");

	noisy_vnf_engine.status = noisy_fwd_mode_desc[noisy_fwd_mode];

	return 0;
}

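/* Forwarding engine descriptor registered with testpmd for "noisy" mode. */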
struct fwd_engine noisy_vnf_engine = {
	.fwd_mode_name  = "noisy",
	.port_fwd_begin = noisy_fwd_begin,
	.port_fwd_end   = noisy_fwd_end,
	.stream_init	= common_fwd_stream_init,
	.packet_fwd     = pkt_burst_io,
};