xref: /dpdk/examples/qos_sched/init.c (revision 87d396163c005deb8d9f72ec0977f19e5edd8f47)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <memory.h>

#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_sched.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>
#include <rte_cfgfile.h>

#include "main.h"
#include "cfg_file.h"

uint32_t app_numa_mask = 0;
static uint32_t app_inited_port_mask = 0;

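/*
 * Maps each (subport, pipe) pair to a pipe profile index. Entries are
 * filled in when a configuration profile is loaded (see cfg_file.c);
 * any entry left at -1 is skipped when the pipes are configured in
 * app_init_sched_port().
 */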
int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES];

#define MAX_NAME_LEN 32

struct ring_conf ring_conf = {
	.rx_size   = APP_RX_DESC_DEFAULT,
	.ring_size = APP_RING_SIZE,
	.tx_size   = APP_TX_DESC_DEFAULT,
};

struct burst_conf burst_conf = {
	.rx_burst    = MAX_PKT_RX_BURST,
	.ring_burst  = PKT_ENQUEUE,
	.qos_dequeue = PKT_DEQUEUE,
	.tx_burst    = MAX_PKT_TX_BURST,
};

struct ring_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct ring_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

uint32_t nb_pfc;
const char *cfg_profile = NULL;
int mp_size = NB_MBUF;
struct flow_conf qos_conf[MAX_DATA_STREAMS];

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};

static int
app_init_port(uint16_t portid, struct rte_mempool *mp)
{
	int ret;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_txconf tx_conf;
	uint16_t rx_size;
	uint16_t tx_size;
	struct rte_eth_conf local_port_conf = port_conf;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	/* check if port already initialized (multistream configuration) */
	if (app_inited_port_mask & (1u << portid))
		return 0;

	rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
	rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
	rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
	rx_conf.rx_free_thresh = 32;
	rx_conf.rx_drop_en = 0;
	rx_conf.rx_deferred_start = 0;

	tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
	tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
	tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
	tx_conf.tx_free_thresh = 0;
	tx_conf.tx_rs_thresh = 0;
	tx_conf.tx_deferred_start = 0;

	/* init port */
	RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid);
	fflush(stdout);

	ret = rte_eth_dev_info_get(portid, &dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"Error during getting device (port %u) info: %s\n",
			portid, strerror(-ret));

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "Cannot configure device: err=%d, port=%u\n",
			 ret, portid);

	rx_size = ring_conf.rx_size;
	tx_size = ring_conf.tx_size;
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%u\n",
			 ret, portid);
	ring_conf.rx_size = rx_size;
	ring_conf.tx_size = tx_size;

	/* init one RX queue */
	fflush(stdout);
	rx_conf.offloads = local_port_conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
		rte_eth_dev_socket_id(portid), &rx_conf, mp);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_rx_queue_setup: err=%d, port=%u\n",
			 ret, portid);

	/* init one TX queue */
	fflush(stdout);
	tx_conf.offloads = local_port_conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(portid, 0,
		(uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_tx_queue_setup: err=%d, port=%u, queue=%d\n",
			 ret, portid, 0);

	/* Start device */
	ret = rte_eth_dev_start(portid);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_dev_start: err=%d, port=%u\n",
			 ret, portid);

	printf("done: ");
	/* get link status */
	ret = rte_eth_link_get(portid, &link);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_link_get: err=%d, port=%u: %s\n",
			 ret, portid, rte_strerror(-ret));

	rte_eth_link_to_str(link_status_text, sizeof(link_status_text), &link);
	printf("%s\n", link_status_text);

	ret = rte_eth_promiscuous_enable(portid);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_promiscuous_enable: err=%s, port=%u\n",
			rte_strerror(-ret), portid);

	/* mark port as initialized */
	app_inited_port_mask |= 1u << portid;

	return 0;
}

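/*
 * Default pipe profile. With the default subport rate of 1250000000 bytes/s
 * (10 Gb/s) shared by 4096 pipes, each pipe gets roughly
 * 1250000000 / 4096 ~= 305175 bytes/s of token-bucket rate, and the same
 * rate is assigned to each of the 13 traffic classes (12 strict-priority
 * classes plus the best-effort class, whose 4 WRR queue weights are given
 * by wrr_weights).
 */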
static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = {
	{ /* Profile #0 */
		.tb_rate = 305175,
		.tb_size = 1000000,

		.tc_rate = {305175, 305175, 305175, 305175, 305175, 305175,
			305175, 305175, 305175, 305175, 305175, 305175, 305175},
		.tc_period = 40,
		.tc_ov_weight = 1,

		.wrr_weights = {1, 1, 1, 1},
	},
};

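/*
 * Default subport profile: a token-bucket rate of 1250000000 bytes/s
 * corresponds to a 10 GbE line rate, and every traffic class is allowed
 * to use the full subport rate.
 */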
static struct rte_sched_subport_profile_params
		subport_profile[MAX_SCHED_SUBPORT_PROFILES] = {
	{
		.tb_rate = 1250000000,
		.tb_size = 1000000,
		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
			1250000000, 1250000000, 1250000000, 1250000000, 1250000000,
			1250000000, 1250000000, 1250000000, 1250000000},
		.tc_period = 10,
	},
};

struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
	{
		.n_pipes_per_subport_enabled = 4096,
		.qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64},
		.pipe_profiles = pipe_profiles,
		.n_pipe_profiles = sizeof(pipe_profiles) /
			sizeof(struct rte_sched_pipe_params),
		.n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
		.cman_params = NULL,
	},
};

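/*
 * Port-level scheduler parameters. The socket, rate and name fields are
 * overwritten at run time in app_init_sched_port(). The mtu value is the
 * largest frame the example expects to schedule: 6 + 6 bytes of MAC
 * addresses, 4 + 4 bytes for the double (QinQ) VLAN tags used for
 * classification, 2 bytes of EtherType and a 1500 byte payload.
 */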
struct rte_sched_port_params port_params = {
	.name = "port_scheduler_0",
	.socket = 0, /* computed */
	.rate = 0, /* computed */
	.mtu = 6 + 6 + 4 + 4 + 2 + 1500,
	.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
	.n_subports_per_port = 1,
	.n_subport_profiles = 1,
	.subport_profiles = subport_profile,
	.n_max_subport_profiles = MAX_SCHED_SUBPORT_PROFILES,
	.n_pipes_per_subport = MAX_SCHED_PIPES,
};

static struct rte_sched_port *
app_init_sched_port(uint32_t portid, uint32_t socketid)
{
	static char port_name[32]; /* static as referenced from global port_params */
	struct rte_eth_link link;
	struct rte_sched_port *port = NULL;
	uint32_t pipe, subport;
	int err;

	err = rte_eth_link_get(portid, &link);
	if (err < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_link_get: err=%d, port=%u: %s\n",
			 err, portid, rte_strerror(-err));

	port_params.socket = socketid;
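	/* link.link_speed is reported in Mb/s; convert it to bytes/s. */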
	port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
	snprintf(port_name, sizeof(port_name), "port_%d", portid);
	port_params.name = port_name;

	port = rte_sched_port_config(&port_params);
	if (port == NULL) {
		rte_exit(EXIT_FAILURE, "Unable to config sched port\n");
	}

	for (subport = 0; subport < port_params.n_subports_per_port; subport++) {
		err = rte_sched_subport_config(port, subport,
				&subport_params[subport],
				0);
		if (err) {
			rte_exit(EXIT_FAILURE, "Unable to config sched "
				 "subport %u, err=%d\n", subport, err);
		}

		uint32_t n_pipes_per_subport =
			subport_params[subport].n_pipes_per_subport_enabled;

		for (pipe = 0; pipe < n_pipes_per_subport; pipe++) {
			if (app_pipe_to_profile[subport][pipe] != -1) {
				err = rte_sched_pipe_config(port, subport, pipe,
						app_pipe_to_profile[subport][pipe]);
				if (err) {
					rte_exit(EXIT_FAILURE, "Unable to config sched pipe %u "
							"for profile %d, err=%d\n", pipe,
							app_pipe_to_profile[subport][pipe], err);
				}
			}
		}
	}

	return port;
}

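/*
 * The profile passed on the command line is expected to follow the format
 * of profile.cfg shipped with this example, i.e. a cfgfile with sections
 * describing the port, the subports, the subport profiles and the pipe
 * profiles; the parsing itself is done by the cfg_load_*() helpers in
 * cfg_file.c.
 */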
static int
app_load_cfg_profile(const char *profile)
{
	if (profile == NULL)
		return 0;
	struct rte_cfgfile *file = rte_cfgfile_load(profile, 0);
	if (file == NULL)
		rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n", profile);

	cfg_load_port(file, &port_params);
	cfg_load_subport(file, subport_params);
	cfg_load_subport_profile(file, subport_profile);
	cfg_load_pipe(file, pipe_profiles);

	rte_cfgfile_close(file);

	return 0;
}

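/*
 * Set up one RX ring, one TX ring, one mbuf pool, the RX/TX ports and the
 * hierarchical scheduler port for every active flow described on the
 * command line.
 */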
int app_init(void)
{
	uint32_t i;
	char ring_name[MAX_NAME_LEN];
	char pool_name[MAX_NAME_LEN];

	if (rte_eth_dev_count_avail() == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");

	/* load configuration profile */
	if (app_load_cfg_profile(cfg_profile) != 0)
		rte_exit(EXIT_FAILURE, "Invalid configuration profile\n");

	/* Initialize each active flow */
	for (i = 0; i < nb_pfc; i++) {
		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
		struct rte_ring *ring;

		snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
				socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].rx_ring = ring;

		snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
				socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].tx_ring = ring;

		/* create the mbuf pools for each RX Port */
		snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
		qos_conf[i].mbuf_pool = rte_pktmbuf_pool_create(pool_name,
			mp_size, burst_conf.rx_burst * 4, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_eth_dev_socket_id(qos_conf[i].rx_port));
		if (qos_conf[i].mbuf_pool == NULL)
			rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for flow %u\n", i);

		app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
		app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);

		qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
	}

	RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
			 rte_get_timer_hz());

	RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d, SW queue = %u, "
			 "NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size,
			 ring_conf.tx_size);

	RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
			 "             Worker read/QoS enqueue = %hu,\n"
			 "             QoS dequeue = %hu, Worker write = %hu\n",
		burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst,
		burst_conf.qos_dequeue, burst_conf.tx_burst);

	RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu), "
			 "TX (p = %hhu, h = %hhu, w = %hhu)\n",
		rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
		tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);

	return 0;
}