xref: /dpdk/examples/l2fwd-event/l2fwd_poll.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "l2fwd_poll.h"

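/*
 * Forward a single packet: look up the destination port paired with the
 * RX port, optionally rewrite the Ethernet source/destination addresses,
 * and hand the mbuf to the destination port's TX buffer.
 */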
static inline void
l2fwd_poll_simple_forward(struct l2fwd_resources *rsrc, struct rte_mbuf *m,
			  uint32_t portid)
{
	struct rte_eth_dev_tx_buffer *buffer;
	uint32_t dst_port;
	int sent;

	dst_port = rsrc->dst_ports[portid];

	if (rsrc->mac_updating)
		l2fwd_mac_updating(m, dst_port, &rsrc->eth_addr[dst_port]);

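	/*
	 * rte_eth_tx_buffer() only buffers the mbuf; packets are actually
	 * transmitted once the per-port buffer fills (or when it is flushed
	 * from the main loop), in which case the call returns the number of
	 * packets sent.
	 */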
	buffer = ((struct l2fwd_poll_resources *)rsrc->poll_rsrc)->tx_buffer[
								dst_port];
	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
	if (sent)
		rsrc->port_stats[dst_port].tx += sent;
}

/* main poll mode processing loop */
static void
l2fwd_poll_main_loop(struct l2fwd_resources *rsrc)
{
	uint64_t prev_tsc, diff_tsc, cur_tsc, drain_tsc;
	struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_eth_dev_tx_buffer *buf;
	struct lcore_queue_conf *qconf;
	uint32_t i, j, port_id, nb_rx;
	struct rte_mbuf *m;
	uint32_t lcore_id;
	int32_t sent;

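	/*
	 * Convert the TX drain interval from microseconds to TSC cycles:
	 * cycles per microsecond (rounded up) times BURST_TX_DRAIN_US.
	 * For example, assuming BURST_TX_DRAIN_US is 100 us (the value used
	 * by the l2fwd samples) and a 2 GHz TSC, drain_tsc is 200,000 cycles.
	 */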
	drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
			BURST_TX_DRAIN_US;
	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &poll_rsrc->lcore_queue_conf[lcore_id];

	if (qconf->n_rx_port == 0) {
		printf("lcore %u has nothing to do\n", lcore_id);
		return;
	}

	printf("entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_port; i++) {

		port_id = qconf->rx_port_list[i];
		printf(" -- lcoreid=%u port_id=%u\n", lcore_id, port_id);

	}

	while (!rsrc->force_quit) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
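			/*
			 * Flush the TX buffer of each destination port this
			 * lcore forwards to; that is where packets received
			 * on rx_port_list are buffered by
			 * l2fwd_poll_simple_forward().
			 */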
			for (i = 0; i < qconf->n_rx_port; i++) {
				port_id =
					rsrc->dst_ports[qconf->rx_port_list[i]];
				buf = poll_rsrc->tx_buffer[port_id];
				sent = rte_eth_tx_buffer_flush(port_id, 0, buf);
				if (sent)
					rsrc->port_stats[port_id].tx += sent;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_port; i++) {

			port_id = qconf->rx_port_list[i];
			nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst,
						 MAX_PKT_BURST);

			rsrc->port_stats[port_id].rx += nb_rx;

			for (j = 0; j < nb_rx; j++) {
				m = pkts_burst[j];
				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
				l2fwd_poll_simple_forward(rsrc, m, port_id);
			}
		}
	}
}

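/*
 * Map each enabled port to a polling lcore: every port gets its single RX
 * queue assigned to one worker lcore, skipping the main lcore and packing
 * at most rx_queue_per_lcore ports onto each lcore.
 */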
static void
l2fwd_poll_lcore_config(struct l2fwd_resources *rsrc)
{
	struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
	struct lcore_queue_conf *qconf = NULL;
	uint32_t rx_lcore_id = 0;
	uint16_t port_id;

	/* Initialize the port/queue configuration of each logical core */
	RTE_ETH_FOREACH_DEV(port_id) {
		/* skip ports that are not enabled */
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       rx_lcore_id == rte_get_main_lcore() ||
		       poll_rsrc->lcore_queue_conf[rx_lcore_id].n_rx_port ==
		       rsrc->rx_queue_per_lcore) {
			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_panic("Not enough cores\n");
		}

		if (qconf != &poll_rsrc->lcore_queue_conf[rx_lcore_id]) {
			/* Assigned a new logical core in the loop above. */
			qconf = &poll_rsrc->lcore_queue_conf[rx_lcore_id];
		}

		qconf->rx_port_list[qconf->n_rx_port] = port_id;
		qconf->n_rx_port++;
		printf("Lcore %u: RX port %u\n", rx_lcore_id, port_id);
	}
}

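/*
 * Allocate one TX buffer per port on that port's NUMA socket and register
 * a drop-counting error callback, so packets that cannot be flushed are
 * freed and accounted in port_stats[].dropped.
 */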
static void
l2fwd_poll_init_tx_buffers(struct l2fwd_resources *rsrc)
{
	struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		/* Initialize TX buffers */
		poll_rsrc->tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
				rte_eth_dev_socket_id(port_id));
		if (poll_rsrc->tx_buffer[port_id] == NULL)
			rte_panic("Cannot allocate buffer for tx on port %u\n",
				  port_id);

		rte_eth_tx_buffer_init(poll_rsrc->tx_buffer[port_id],
				       MAX_PKT_BURST);

		ret = rte_eth_tx_buffer_set_err_callback(
				poll_rsrc->tx_buffer[port_id],
				rte_eth_tx_buffer_count_callback,
				&rsrc->port_stats[port_id].dropped);
		if (ret < 0)
			rte_panic("Cannot set error callback for tx buffer on port %u\n",
				  port_id);
	}
}

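/*
 * Poll-mode setup entry point, expected to be called by the l2fwd-event
 * application before launching the worker lcores: it allocates the poll
 * resources, assigns ports to lcores, prepares the TX buffers and exposes
 * the main loop through the poll_main_loop function pointer.
 */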
void
l2fwd_poll_resource_setup(struct l2fwd_resources *rsrc)
{
	struct l2fwd_poll_resources *poll_rsrc;

	poll_rsrc = rte_zmalloc("l2fwd_poll_rsrc",
				sizeof(struct l2fwd_poll_resources), 0);
	if (poll_rsrc == NULL)
		rte_panic("Failed to allocate resources for l2fwd poll mode\n");

	rsrc->poll_rsrc = poll_rsrc;
	l2fwd_poll_lcore_config(rsrc);
	l2fwd_poll_init_tx_buffers(rsrc);

	poll_rsrc->poll_main_loop = l2fwd_poll_main_loop;
}