xref: /dpdk/examples/qos_sched/init.c (revision 1c839246f934340e8dfb8fd71bc436f81541a587)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <memory.h>
7 
8 #include <rte_log.h>
9 #include <rte_mbuf.h>
10 #include <rte_debug.h>
11 #include <rte_ethdev.h>
12 #include <rte_mempool.h>
13 #include <rte_sched.h>
14 #include <rte_cycles.h>
15 #include <rte_string_fns.h>
16 #include <rte_cfgfile.h>
17 
18 #include "main.h"
19 #include "cfg_file.h"
20 
/* NUMA mask selected for the application (0 until set elsewhere). */
uint32_t app_numa_mask = 0;
/* Bitmask of port ids already initialized by app_init_port(). */
static uint32_t app_inited_port_mask = 0;

/* Pipe -> pipe-profile id per subport; -1 marks an unconfigured pipe
 * (see the skip test in app_init_sched_port()). */
int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES];

#define MAX_NAME_LEN 32

/* Default NIC descriptor counts and software-ring size. */
struct ring_conf ring_conf = {
	.rx_size   = APP_RX_DESC_DEFAULT,
	.ring_size = APP_RING_SIZE,
	.tx_size   = APP_TX_DESC_DEFAULT,
};

/* Default per-stage burst sizes (RX read, ring enqueue, QoS dequeue, TX). */
struct burst_conf burst_conf = {
	.rx_burst    = MAX_PKT_RX_BURST,
	.ring_burst  = PKT_ENQUEUE,
	.qos_dequeue = PKT_DEQUEUE,
	.tx_burst    = MAX_PKT_TX_BURST,
};

/* NIC RX descriptor-ring thresholds (prefetch / host / write-back). */
struct ring_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

/* NIC TX descriptor-ring thresholds (prefetch / host / write-back). */
struct ring_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

/* Number of active entries in qos_conf[] (one per configured flow). */
uint32_t nb_pfc;
/* Optional scheduler profile file; NULL means use the built-in defaults. */
const char *cfg_profile = NULL;
/* Mbuf pool size used when creating each flow's pool. */
int mp_size = NB_MBUF;
struct flow_conf qos_conf[MAX_DATA_STREAMS];

/* Template port configuration, copied per port in app_init_port(). */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};
66 
67 static int
68 app_init_port(uint16_t portid, struct rte_mempool *mp)
69 {
70 	int ret;
71 	struct rte_eth_link link;
72 	struct rte_eth_dev_info dev_info;
73 	struct rte_eth_rxconf rx_conf;
74 	struct rte_eth_txconf tx_conf;
75 	uint16_t rx_size;
76 	uint16_t tx_size;
77 	struct rte_eth_conf local_port_conf = port_conf;
78 	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
79 
80 	/* check if port already initialized (multistream configuration) */
81 	if (app_inited_port_mask & (1u << portid))
82 		return 0;
83 
84 	rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
85 	rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
86 	rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
87 	rx_conf.rx_free_thresh = 32;
88 	rx_conf.rx_drop_en = 0;
89 	rx_conf.rx_deferred_start = 0;
90 
91 	tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
92 	tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
93 	tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
94 	tx_conf.tx_free_thresh = 0;
95 	tx_conf.tx_rs_thresh = 0;
96 	tx_conf.tx_deferred_start = 0;
97 
98 	/* init port */
99 	RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid);
100 	fflush(stdout);
101 
102 	ret = rte_eth_dev_info_get(portid, &dev_info);
103 	if (ret != 0)
104 		rte_exit(EXIT_FAILURE,
105 			"Error during getting device (port %u) info: %s\n",
106 			portid, strerror(-ret));
107 
108 	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
109 		local_port_conf.txmode.offloads |=
110 			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
111 	ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
112 	if (ret < 0)
113 		rte_exit(EXIT_FAILURE,
114 			 "Cannot configure device: err=%d, port=%u\n",
115 			 ret, portid);
116 
117 	rx_size = ring_conf.rx_size;
118 	tx_size = ring_conf.tx_size;
119 	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size);
120 	if (ret < 0)
121 		rte_exit(EXIT_FAILURE,
122 			 "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d,port=%u\n",
123 			 ret, portid);
124 	ring_conf.rx_size = rx_size;
125 	ring_conf.tx_size = tx_size;
126 
127 	/* init one RX queue */
128 	fflush(stdout);
129 	rx_conf.offloads = local_port_conf.rxmode.offloads;
130 	ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
131 		rte_eth_dev_socket_id(portid), &rx_conf, mp);
132 	if (ret < 0)
133 		rte_exit(EXIT_FAILURE,
134 			 "rte_eth_tx_queue_setup: err=%d, port=%u\n",
135 			 ret, portid);
136 
137 	/* init one TX queue */
138 	fflush(stdout);
139 	tx_conf.offloads = local_port_conf.txmode.offloads;
140 	ret = rte_eth_tx_queue_setup(portid, 0,
141 		(uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
142 	if (ret < 0)
143 		rte_exit(EXIT_FAILURE,
144 			 "rte_eth_tx_queue_setup: err=%d, port=%u queue=%d\n",
145 			 ret, portid, 0);
146 
147 	/* Start device */
148 	ret = rte_eth_dev_start(portid);
149 	if (ret < 0)
150 		rte_exit(EXIT_FAILURE,
151 			 "rte_pmd_port_start: err=%d, port=%u\n",
152 			 ret, portid);
153 
154 	printf("done: ");
155 
156 	/* get link status */
157 	ret = rte_eth_link_get(portid, &link);
158 	if (ret < 0)
159 		rte_exit(EXIT_FAILURE,
160 			 "rte_eth_link_get: err=%d, port=%u: %s\n",
161 			 ret, portid, rte_strerror(-ret));
162 
163 	rte_eth_link_to_str(link_status_text, sizeof(link_status_text), &link);
164 	printf("%s\n", link_status_text);
165 
166 	ret = rte_eth_promiscuous_enable(portid);
167 	if (ret != 0)
168 		rte_exit(EXIT_FAILURE,
169 			"rte_eth_promiscuous_enable: err=%s, port=%u\n",
170 			rte_strerror(-ret), portid);
171 
172 	/* mark port as initialized */
173 	app_inited_port_mask |= 1u << portid;
174 
175 	return 0;
176 }
177 
/* Default pipe profile used when no config file overrides it:
 * token-bucket rate/size, one rate per traffic class (13 entries),
 * TC period, oversubscription weight and best-effort WRR weights. */
static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = {
	{ /* Profile #0 */
		.tb_rate = 305175,
		.tb_size = 1000000,

		.tc_rate = {305175, 305175, 305175, 305175, 305175, 305175,
			305175, 305175, 305175, 305175, 305175, 305175, 305175},
		.tc_period = 40,
		.tc_ov_weight = 1,

		.wrr_weights = {1, 1, 1, 1},
	},
};
191 
/* Default subport profile: token bucket plus one rate per traffic
 * class (13 entries); overridable from the config file. */
static struct rte_sched_subport_profile_params
		subport_profile[MAX_SCHED_SUBPORT_PROFILES] = {
	{
		.tb_rate = 1250000000,
		.tb_size = 1000000,
		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
			1250000000, 1250000000, 1250000000, 1250000000, 1250000000,
			1250000000, 1250000000, 1250000000, 1250000000},
		.tc_period = 10,
	},
};
203 
#ifdef RTE_SCHED_CMAN
/* Congestion-management (RED) parameters, indexed [traffic class][color]
 * with colors 0/1/2 = green/yellow/red. Every class uses the same
 * thresholds; only min_th tightens as the color worsens. Referenced by
 * subport_params[] when RTE_SCHED_CMAN is compiled in. */
struct rte_sched_cman_params cman_params = {
	.cman_mode = RTE_SCHED_CMAN_RED,
	.red_params = {
		/* Traffic Class 0 Colors Green / Yellow / Red */
		[0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 1 - Colors Green / Yellow / Red */
		[1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 2 - Colors Green / Yellow / Red */
		[2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 3 - Colors Green / Yellow / Red */
		[3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 4 - Colors Green / Yellow / Red */
		[4][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[4][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[4][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 5 - Colors Green / Yellow / Red */
		[5][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[5][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[5][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 6 - Colors Green / Yellow / Red */
		[6][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[6][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[6][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 7 - Colors Green / Yellow / Red */
		[7][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[7][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[7][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 8 - Colors Green / Yellow / Red */
		[8][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[8][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[8][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 9 - Colors Green / Yellow / Red */
		[9][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[9][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[9][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 10 - Colors Green / Yellow / Red */
		[10][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[10][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[10][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 11 - Colors Green / Yellow / Red */
		[11][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[11][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[11][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 12 - Colors Green / Yellow / Red */
		[12][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[12][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[12][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
	},
};
#endif /* RTE_SCHED_CMAN */
275 
276 struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
277 	{
278 		.n_pipes_per_subport_enabled = 4096,
279 		.qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64},
280 		.pipe_profiles = pipe_profiles,
281 		.n_pipe_profiles = sizeof(pipe_profiles) /
282 			sizeof(struct rte_sched_pipe_params),
283 		.n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
284 #ifdef RTE_SCHED_CMAN
285 		.cman_params = &cman_params,
286 #endif /* RTE_SCHED_CMAN */
287 	},
288 };
289 
/* Scheduler port parameters; socket, rate and name are filled in at
 * runtime by app_init_sched_port(). */
struct rte_sched_port_params port_params = {
	.name = "port_scheduler_0",
	.socket = 0, /* computed */
	.rate = 0, /* computed */
	/* presumably dst MAC(6) + src MAC(6) + 2 VLAN tags(4+4) +
	 * ethertype(2) + 1500 payload — confirm against rte_sched docs */
	.mtu = 6 + 6 + 4 + 4 + 2 + 1500,
	.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
	.n_subports_per_port = 1,
	.n_subport_profiles = 1,
	.subport_profiles = subport_profile,
	.n_max_subport_profiles = MAX_SCHED_SUBPORT_PROFILES,
	.n_pipes_per_subport = MAX_SCHED_PIPES,
};
302 
303 static struct rte_sched_port *
304 app_init_sched_port(uint32_t portid, uint32_t socketid)
305 {
306 	static char port_name[32]; /* static as referenced from global port_params*/
307 	struct rte_eth_link link;
308 	struct rte_sched_port *port = NULL;
309 	uint32_t pipe, subport;
310 	int err;
311 
312 	err = rte_eth_link_get(portid, &link);
313 	if (err < 0)
314 		rte_exit(EXIT_FAILURE,
315 			 "rte_eth_link_get: err=%d, port=%u: %s\n",
316 			 err, portid, rte_strerror(-err));
317 
318 	port_params.socket = socketid;
319 	port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
320 	snprintf(port_name, sizeof(port_name), "port_%d", portid);
321 	port_params.name = port_name;
322 
323 	port = rte_sched_port_config(&port_params);
324 	if (port == NULL){
325 		rte_exit(EXIT_FAILURE, "Unable to config sched port\n");
326 	}
327 
328 	for (subport = 0; subport < port_params.n_subports_per_port; subport ++) {
329 		err = rte_sched_subport_config(port, subport,
330 				&subport_params[subport],
331 				0);
332 		if (err) {
333 			rte_exit(EXIT_FAILURE, "Unable to config sched "
334 				 "subport %u, err=%d\n", subport, err);
335 		}
336 
337 		uint32_t n_pipes_per_subport =
338 			subport_params[subport].n_pipes_per_subport_enabled;
339 
340 		for (pipe = 0; pipe < n_pipes_per_subport; pipe++) {
341 			if (app_pipe_to_profile[subport][pipe] != -1) {
342 				err = rte_sched_pipe_config(port, subport, pipe,
343 						app_pipe_to_profile[subport][pipe]);
344 				if (err) {
345 					rte_exit(EXIT_FAILURE, "Unable to config sched pipe %u "
346 							"for profile %d, err=%d\n", pipe,
347 							app_pipe_to_profile[subport][pipe], err);
348 				}
349 			}
350 		}
351 	}
352 
353 	return port;
354 }
355 
356 static int
357 app_load_cfg_profile(const char *profile)
358 {
359 	if (profile == NULL)
360 		return 0;
361 	struct rte_cfgfile *file = rte_cfgfile_load(profile, 0);
362 	if (file == NULL)
363 		rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n", profile);
364 
365 	cfg_load_port(file, &port_params);
366 	cfg_load_subport(file, subport_params);
367 	cfg_load_subport_profile(file, subport_profile);
368 	cfg_load_pipe(file, pipe_profiles);
369 
370 	rte_cfgfile_close(file);
371 
372 	return 0;
373 }
374 
375 int app_init(void)
376 {
377 	uint32_t i;
378 	char ring_name[MAX_NAME_LEN];
379 	char pool_name[MAX_NAME_LEN];
380 
381 	if (rte_eth_dev_count_avail() == 0)
382 		rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");
383 
384 	/* load configuration profile */
385 	if (app_load_cfg_profile(cfg_profile) != 0)
386 		rte_exit(EXIT_FAILURE, "Invalid configuration profile\n");
387 
388 	/* Initialize each active flow */
389 	for(i = 0; i < nb_pfc; i++) {
390 		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
391 		struct rte_ring *ring;
392 
393 		snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
394 		ring = rte_ring_lookup(ring_name);
395 		if (ring == NULL)
396 			qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
397 			 	socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
398 		else
399 			qos_conf[i].rx_ring = ring;
400 
401 		snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
402 		ring = rte_ring_lookup(ring_name);
403 		if (ring == NULL)
404 			qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
405 				socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
406 		else
407 			qos_conf[i].tx_ring = ring;
408 
409 
410 		/* create the mbuf pools for each RX Port */
411 		snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
412 		qos_conf[i].mbuf_pool = rte_pktmbuf_pool_create(pool_name,
413 			mp_size, burst_conf.rx_burst * 4, 0,
414 			RTE_MBUF_DEFAULT_BUF_SIZE,
415 			rte_eth_dev_socket_id(qos_conf[i].rx_port));
416 		if (qos_conf[i].mbuf_pool == NULL)
417 			rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for socket %u\n", i);
418 
419 		app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
420 		app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);
421 
422 		qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
423 	}
424 
425 	RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
426 			 rte_get_timer_hz());
427 
428 	RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d SW queue = %u,"
429 			 "NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size,
430 			 ring_conf.tx_size);
431 
432 	RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
433 						  "             Worker read/QoS enqueue = %hu,\n"
434 						  "             QoS dequeue = %hu, Worker write = %hu\n",
435 		burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst,
436 		burst_conf.qos_dequeue, burst_conf.tx_burst);
437 
438 	RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu),"
439 				 "TX (p = %hhu, h = %hhu, w = %hhu)\n",
440 		rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
441 		tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);
442 
443 	return 0;
444 }
445