/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <memory.h>

#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_sched.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>
#include <rte_cfgfile.h>

#include "main.h"
#include "cfg_file.h"

uint32_t app_numa_mask = 0;
static uint32_t app_inited_port_mask = 0;

int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES];

#define MAX_NAME_LEN 32

struct ring_conf ring_conf = {
	.rx_size   = APP_RX_DESC_DEFAULT,
	.ring_size = APP_RING_SIZE,
	.tx_size   = APP_TX_DESC_DEFAULT,
};

struct burst_conf burst_conf = {
	.rx_burst    = MAX_PKT_RX_BURST,
	.ring_burst  = PKT_ENQUEUE,
	.qos_dequeue = PKT_DEQUEUE,
	.tx_burst    = MAX_PKT_TX_BURST,
};

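/*
 * NIC descriptor ring thresholds (prefetch / host / write-back).
 * These values are copied into the rte_eth_rxconf/rte_eth_txconf structures
 * passed to the RX/TX queue setup calls in app_init_port() below.
 */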
struct ring_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct ring_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

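/*
 * Application-wide state: the number of active packet flow configurations,
 * the optional scheduler profile file, the mbuf pool size and the per-flow
 * configuration table, all filled in while parsing the command line.
 */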
uint32_t nb_pfc;
const char *cfg_profile = NULL;
int mp_size = NB_MBUF;
struct flow_conf qos_conf[MAX_DATA_STREAMS];

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = ETH_DCB_NONE,
	},
};

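/*
 * Bring up one Ethernet port: configure a single RX and a single TX queue
 * with the threshold settings above, start the port and put it in
 * promiscuous mode. A port shared by several flow configurations is only
 * initialized once (tracked in app_inited_port_mask).
 */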
static int
app_init_port(uint16_t portid, struct rte_mempool *mp)
{
	int ret;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_txconf tx_conf;
	uint16_t rx_size;
	uint16_t tx_size;
	struct rte_eth_conf local_port_conf = port_conf;

	/* check if port already initialized (multistream configuration) */
	if (app_inited_port_mask & (1u << portid))
		return 0;

	rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
	rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
	rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
	rx_conf.rx_free_thresh = 32;
	rx_conf.rx_drop_en = 0;
	rx_conf.rx_deferred_start = 0;

	tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
	tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
	tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
	tx_conf.tx_free_thresh = 0;
	tx_conf.tx_rs_thresh = 0;
	tx_conf.tx_deferred_start = 0;

	/* init port */
	RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid);
	fflush(stdout);
	rte_eth_dev_info_get(portid, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "Cannot configure device: err=%d, port=%u\n",
			 ret, portid);

	rx_size = ring_conf.rx_size;
	tx_size = ring_conf.tx_size;
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d,port=%u\n",
			 ret, portid);
	ring_conf.rx_size = rx_size;
	ring_conf.tx_size = tx_size;

	/* init one RX queue */
	fflush(stdout);
	rx_conf.offloads = local_port_conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
		rte_eth_dev_socket_id(portid), &rx_conf, mp);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_rx_queue_setup: err=%d, port=%u\n",
			 ret, portid);

	/* init one TX queue */
	fflush(stdout);
	tx_conf.offloads = local_port_conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(portid, 0,
		(uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_tx_queue_setup: err=%d, port=%u queue=%d\n",
			 ret, portid, 0);

	/* Start device */
	ret = rte_eth_dev_start(portid);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_dev_start: err=%d, port=%u\n",
			 ret, portid);

	printf("done: ");

	/* get link status */
	rte_eth_link_get(portid, &link);
	if (link.link_status) {
		printf(" Link Up - speed %u Mbps - %s\n",
			(uint32_t) link.link_speed,
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex"));
	} else {
		printf(" Link Down\n");
	}
	rte_eth_promiscuous_enable(portid);

	/* mark port as initialized */
	app_inited_port_mask |= 1u << portid;

	return 0;
}

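/*
 * Default subport profile, used when no profile file is given on the
 * command line. Scheduler rates are expressed in bytes per second, so a
 * token bucket rate of 1250000000 corresponds to a 10 Gbps line rate.
 */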
static struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
	{
		.tb_rate = 1250000000,
		.tb_size = 1000000,

		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
		.tc_period = 10,
	},
};

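/*
 * Default pipe profile: the 10 Gbps subport rate divided evenly across the
 * 4096 pipes configured below (1250000000 / 4096 ~= 305175 bytes/sec), with
 * equal WRR weights for all queues within each traffic class.
 */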
static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT] = {
	{ /* Profile #0 */
		.tb_rate = 305175,
		.tb_size = 1000000,

		.tc_rate = {305175, 305175, 305175, 305175},
		.tc_period = 40,
#ifdef RTE_SCHED_SUBPORT_TC_OV
		.tc_ov_weight = 1,
#endif

		.wrr_weights = {1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1},
	},
};

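/*
 * Port-level scheduler parameters. The socket and rate fields are
 * placeholders, recomputed in app_init_sched_port() from the TX port's
 * NUMA socket and negotiated link speed. Each of the 4096 pipes carries
 * 4 traffic classes with 64-packet queues.
 */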
struct rte_sched_port_params port_params = {
	.name = "port_scheduler_0",
	.socket = 0, /* computed */
	.rate = 0, /* computed */
	.mtu = 6 + 6 + 4 + 4 + 2 + 1500,
	.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
	.n_subports_per_port = 1,
	.n_pipes_per_subport = 4096,
	.qsize = {64, 64, 64, 64},
	.pipe_profiles = pipe_profiles,
	.n_pipe_profiles = sizeof(pipe_profiles) / sizeof(struct rte_sched_pipe_params),

#ifdef RTE_SCHED_RED
	.red_params = {
		/* Traffic Class 0 - Colors Green / Yellow / Red */
		[0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 1 - Colors Green / Yellow / Red */
		[1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 2 - Colors Green / Yellow / Red */
		[2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 3 - Colors Green / Yellow / Red */
		[3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}
	}
#endif /* RTE_SCHED_RED */
};

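/*
 * Create the hierarchical scheduler attached to a TX port: the port rate is
 * derived from the negotiated link speed (Mbps converted to bytes/sec), then
 * every subport is configured along with every pipe that has a profile
 * assigned in app_pipe_to_profile[].
 */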
static struct rte_sched_port *
app_init_sched_port(uint32_t portid, uint32_t socketid)
{
	static char port_name[32]; /* static as referenced from global port_params */
	struct rte_eth_link link;
	struct rte_sched_port *port = NULL;
	uint32_t pipe, subport;
	int err;

	rte_eth_link_get(portid, &link);

	port_params.socket = socketid;
	port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
	snprintf(port_name, sizeof(port_name), "port_%d", portid);
	port_params.name = port_name;

	port = rte_sched_port_config(&port_params);
	if (port == NULL) {
		rte_exit(EXIT_FAILURE, "Unable to config sched port\n");
	}

	for (subport = 0; subport < port_params.n_subports_per_port; subport++) {
		err = rte_sched_subport_config(port, subport, &subport_params[subport]);
		if (err) {
			rte_exit(EXIT_FAILURE, "Unable to config sched subport %u, err=%d\n",
					subport, err);
		}

		for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe++) {
			if (app_pipe_to_profile[subport][pipe] != -1) {
				err = rte_sched_pipe_config(port, subport, pipe,
						app_pipe_to_profile[subport][pipe]);
				if (err) {
					rte_exit(EXIT_FAILURE, "Unable to config sched pipe %u "
							"for profile %d, err=%d\n", pipe,
							app_pipe_to_profile[subport][pipe], err);
				}
			}
		}
	}

	return port;
}

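/*
 * Optionally override the built-in port/subport/pipe defaults with values
 * parsed from a text configuration file (cfg_profile, set on the command
 * line). A NULL profile name keeps the defaults above.
 */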
static int
app_load_cfg_profile(const char *profile)
{
	if (profile == NULL)
		return 0;
	struct rte_cfgfile *file = rte_cfgfile_load(profile, 0);
	if (file == NULL)
		rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n", profile);

	cfg_load_port(file, &port_params);
	cfg_load_subport(file, subport_params);
	cfg_load_pipe(file, pipe_profiles);

	rte_cfgfile_close(file);

	return 0;
}

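/*
 * Global application init: load the optional profile, then for every packet
 * flow configuration create (or reuse) the RX and TX software rings, create
 * a per-flow mbuf pool on the RX port's socket, initialize the RX and TX
 * Ethernet ports and attach a scheduler instance to the TX port.
 */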
int app_init(void)
{
	uint32_t i;
	char ring_name[MAX_NAME_LEN];
	char pool_name[MAX_NAME_LEN];

	if (rte_eth_dev_count_avail() == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");

	/* load configuration profile */
	if (app_load_cfg_profile(cfg_profile) != 0)
		rte_exit(EXIT_FAILURE, "Invalid configuration profile\n");

	/* Initialize each active flow */
	for (i = 0; i < nb_pfc; i++) {
		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
		struct rte_ring *ring;

		snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
				socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].rx_ring = ring;

		snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
				socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].tx_ring = ring;

		/* create the mbuf pools for each RX Port */
		snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
		qos_conf[i].mbuf_pool = rte_pktmbuf_pool_create(pool_name,
			mp_size, burst_conf.rx_burst * 4, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_eth_dev_socket_id(qos_conf[i].rx_port));
		if (qos_conf[i].mbuf_pool == NULL)
			rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for flow %u\n", i);

		app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
		app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);

		qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
	}

	RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
			 rte_get_timer_hz());

	RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d, SW queue = %u, "
			 "NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size,
			 ring_conf.tx_size);

	RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
						  "             Worker read/QoS enqueue = %hu,\n"
						  "             QoS dequeue = %hu, Worker write = %hu\n",
		burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst,
		burst_conf.qos_dequeue, burst_conf.tx_burst);

	RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu), "
				 "TX (p = %hhu, h = %hhu, w = %hhu)\n",
		rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
		tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);

	return 0;
}