xref: /dpdk/examples/l2fwd-event/l2fwd_poll.c (revision 9a212dc06c7aaf09b146d9c3dcfd584d741634c1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4 
5 #include "l2fwd_poll.h"
6 
7 static inline void
l2fwd_poll_simple_forward(struct l2fwd_resources * rsrc,struct rte_mbuf * m,uint32_t portid)8 l2fwd_poll_simple_forward(struct l2fwd_resources *rsrc, struct rte_mbuf *m,
9 			  uint32_t portid)
10 {
11 	struct rte_eth_dev_tx_buffer *buffer;
12 	uint32_t dst_port;
13 	int sent;
14 
15 	dst_port = rsrc->dst_ports[portid];
16 
17 	if (rsrc->mac_updating)
18 		l2fwd_mac_updating(m, dst_port, &rsrc->eth_addr[dst_port]);
19 
20 	buffer = ((struct l2fwd_poll_resources *)rsrc->poll_rsrc)->tx_buffer[
21 								dst_port];
22 	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
23 	if (sent)
24 		rsrc->port_stats[dst_port].tx += sent;
25 }
26 
/* main poll mode processing loop */
static void
l2fwd_poll_main_loop(struct l2fwd_resources *rsrc)
{
	uint64_t prev_tsc, diff_tsc, cur_tsc, drain_tsc;
	struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_eth_dev_tx_buffer *buf;
	struct lcore_queue_conf *qconf;
	uint32_t i, j, port_id, nb_rx;
	struct rte_mbuf *m;
	uint32_t lcore_id;
	int32_t sent;

	/* TSC ticks equivalent to BURST_TX_DRAIN_US microseconds; rounded up
	 * so a sub-tick period still forces at least one tick between drains.
	 */
	drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
			BURST_TX_DRAIN_US;
	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &poll_rsrc->lcore_queue_conf[lcore_id];

	/* Lcores with no RX ports assigned exit immediately. */
	if (qconf->n_rx_port == 0) {
		printf("lcore %u has nothing to do\n", lcore_id);
		return;
	}

	printf("entering main loop on lcore %u\n", lcore_id);

	/* Log the RX ports this lcore will service. */
	for (i = 0; i < qconf->n_rx_port; i++) {

		port_id = qconf->rx_port_list[i];
		printf(" -- lcoreid=%u port_id=%u\n", lcore_id, port_id);

	}

	/* Poll until the application requests shutdown. */
	while (!rsrc->force_quit) {

		/* Draining TX queue in main loop. 8< */
		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			/* Periodically flush any packets still buffered for
			 * the destination port of each RX port we serve, so
			 * they are not stuck waiting for a full burst.
			 */
			for (i = 0; i < qconf->n_rx_port; i++) {
				/* Drain is keyed by the DESTINATION port. */
				port_id =
					rsrc->dst_ports[qconf->rx_port_list[i]];
				buf = poll_rsrc->tx_buffer[port_id];
				sent = rte_eth_tx_buffer_flush(port_id, 0, buf);
				if (sent)
					rsrc->port_stats[port_id].tx += sent;
			}

			prev_tsc = cur_tsc;
		}
		/* >8 End of draining TX queue in main loop. */

		/* Reading ingress packets. 8< */

		/* Read packet from RX queues */
		for (i = 0; i < qconf->n_rx_port; i++) {

			port_id = qconf->rx_port_list[i];
			nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst,
						 MAX_PKT_BURST);

			rsrc->port_stats[port_id].rx += nb_rx;

			for (j = 0; j < nb_rx; j++) {
				m = pkts_burst[j];
				/* Warm the cache line holding the packet
				 * header before the forwarding path reads it.
				 */
				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
				l2fwd_poll_simple_forward(rsrc, m, port_id);
			}
		}
		/* >8 End of reading ingress packets. */
	}
}
105 
106 static void
l2fwd_poll_lcore_config(struct l2fwd_resources * rsrc)107 l2fwd_poll_lcore_config(struct l2fwd_resources *rsrc)
108 {
109 	struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
110 	struct lcore_queue_conf *qconf = NULL;
111 	uint32_t rx_lcore_id = 0;
112 	uint16_t port_id;
113 
114 	/* Initialize the port/queue configuration of each logical core */
115 	RTE_ETH_FOREACH_DEV(port_id) {
116 		/* skip ports that are not enabled */
117 		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
118 			continue;
119 
120 		/* get the lcore_id for this port */
121 		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
122 		       rx_lcore_id == rte_get_main_lcore() ||
123 		       poll_rsrc->lcore_queue_conf[rx_lcore_id].n_rx_port ==
124 		       rsrc->rx_queue_per_lcore) {
125 			rx_lcore_id++;
126 			if (rx_lcore_id >= RTE_MAX_LCORE)
127 				rte_panic("Not enough cores\n");
128 		}
129 
130 		if (qconf != &poll_rsrc->lcore_queue_conf[rx_lcore_id]) {
131 			/* Assigned a new logical core in the loop above. */
132 			qconf = &poll_rsrc->lcore_queue_conf[rx_lcore_id];
133 		}
134 
135 		qconf->rx_port_list[qconf->n_rx_port] = port_id;
136 		qconf->n_rx_port++;
137 		printf("Lcore %u: RX port %u\n", rx_lcore_id, port_id);
138 	}
139 }
140 
141 static void
l2fwd_poll_init_tx_buffers(struct l2fwd_resources * rsrc)142 l2fwd_poll_init_tx_buffers(struct l2fwd_resources *rsrc)
143 {
144 	struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
145 	uint16_t port_id;
146 	int ret;
147 
148 	RTE_ETH_FOREACH_DEV(port_id) {
149 		/* Initialize TX buffers */
150 		poll_rsrc->tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
151 				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
152 				rte_eth_dev_socket_id(port_id));
153 		if (poll_rsrc->tx_buffer[port_id] == NULL)
154 			rte_panic("Cannot allocate buffer for tx on port %u\n",
155 				  port_id);
156 
157 		rte_eth_tx_buffer_init(poll_rsrc->tx_buffer[port_id],
158 				       MAX_PKT_BURST);
159 
160 		ret = rte_eth_tx_buffer_set_err_callback(
161 				poll_rsrc->tx_buffer[port_id],
162 				rte_eth_tx_buffer_count_callback,
163 				&rsrc->port_stats[port_id].dropped);
164 		if (ret < 0)
165 			rte_panic("Cannot set error callback for tx buffer on port %u\n",
166 				  port_id);
167 	}
168 }
169 
170 void
l2fwd_poll_resource_setup(struct l2fwd_resources * rsrc)171 l2fwd_poll_resource_setup(struct l2fwd_resources *rsrc)
172 {
173 	struct l2fwd_poll_resources *poll_rsrc;
174 
175 	poll_rsrc = rte_zmalloc("l2fwd_poll_rsrc",
176 				sizeof(struct l2fwd_poll_resources), 0);
177 	if (poll_rsrc == NULL)
178 		rte_panic("Failed to allocate resources for l2fwd poll mode\n");
179 
180 	rsrc->poll_rsrc = poll_rsrc;
181 	l2fwd_poll_lcore_config(rsrc);
182 	l2fwd_poll_init_tx_buffers(rsrc);
183 
184 	poll_rsrc->poll_main_loop = l2fwd_poll_main_loop;
185 }
186