xref: /dpdk/examples/qos_sched/init.c (revision 1c1d4d7a923d4804f1926fc5264f9ecdd8977b04)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <memory.h>

#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_sched.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>

#include "main.h"
#include "cfg_file.h"

uint32_t app_numa_mask = 0;
static uint32_t app_inited_port_mask = 0;

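/*
 * Maps each (subport, pipe) to a pipe profile index; a value of -1 leaves
 * that pipe unconfigured (see app_init_sched_port()).
 */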
int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES];

#define MAX_NAME_LEN 32

struct ring_conf ring_conf = {
	.rx_size   = APP_RX_DESC_DEFAULT,
	.ring_size = APP_RING_SIZE,
	.tx_size   = APP_TX_DESC_DEFAULT,
};

struct burst_conf burst_conf = {
	.rx_burst    = MAX_PKT_RX_BURST,
	.ring_burst  = PKT_ENQUEUE,
	.qos_dequeue = PKT_DEQUEUE,
	.tx_burst    = MAX_PKT_TX_BURST,
};

struct ring_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct ring_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};

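/* Active packet flow configurations and mempool size; consumed by app_init(). */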
uint32_t nb_pfc;
const char *cfg_profile = NULL;
int mp_size = NB_MBUF;
struct flow_conf qos_conf[MAX_DATA_STREAMS];

static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.txmode = {
		.mq_mode = ETH_DCB_NONE,
	},
};

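/*
 * Bring up one Ethernet port with a single RX and a single TX queue and
 * enable promiscuous mode. Ports shared by several flows are initialized
 * only once (tracked in app_inited_port_mask).
 */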
static int
app_init_port(uint8_t portid, struct rte_mempool *mp)
{
	int ret;
	struct rte_eth_link link;
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_txconf tx_conf;

	/* check if port already initialized (multistream configuration) */
	if (app_inited_port_mask & (1u << portid))
		return 0;

	rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
	rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
	rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
	rx_conf.rx_free_thresh = 32;
	rx_conf.rx_drop_en = 0;

	tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
	tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
	tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
	tx_conf.tx_free_thresh = 0;
	tx_conf.tx_rs_thresh = 0;
	tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;

	/* init port */
	RTE_LOG(INFO, APP, "Initializing port %hu... ", portid);
	fflush(stdout);
	ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%hu\n",
			ret, portid);

	/* init one RX queue */
	fflush(stdout);
	ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
		rte_eth_dev_socket_id(portid), &rx_conf, mp);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%hu\n",
			ret, portid);

	/* init one TX queue */
	fflush(stdout);
	ret = rte_eth_tx_queue_setup(portid, 0,
		(uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
			"port=%hu queue=%d\n",
			ret, portid, 0);

	/* Start device */
	ret = rte_eth_dev_start(portid);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%hu\n",
			ret, portid);

	printf("done: ");

	/* get link status */
	rte_eth_link_get(portid, &link);
	if (link.link_status) {
		printf(" Link Up - speed %u Mbps - %s\n",
			(uint32_t) link.link_speed,
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex"));
	} else {
		printf(" Link Down\n");
	}
	rte_eth_promiscuous_enable(portid);

	/* mark port as initialized */
	app_inited_port_mask |= 1u << portid;

	return 0;
}

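/*
 * Default scheduler hierarchy parameters (token bucket rates are in bytes
 * per second). These values can be overridden by a configuration profile
 * loaded through app_load_cfg_profile().
 */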
static struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
	{
		.tb_rate = 1250000000,
		.tb_size = 1000000,

		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
		.tc_period = 10,
	},
};

static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT] = {
	{ /* Profile #0 */
		.tb_rate = 305175,
		.tb_size = 1000000,

		.tc_rate = {305175, 305175, 305175, 305175},
		.tc_period = 40,
#ifdef RTE_SCHED_SUBPORT_TC_OV
		.tc_ov_weight = 1,
#endif

		.wrr_weights = {1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1},
	},
};

struct rte_sched_port_params port_params = {
	.name = "port_scheduler_0",
	.socket = 0, /* computed */
	.rate = 0, /* computed */
	.mtu = 6 + 6 + 4 + 4 + 2 + 1500,
	.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
	.n_subports_per_port = 1,
	.n_pipes_per_subport = 4096,
	.qsize = {64, 64, 64, 64},
	.pipe_profiles = pipe_profiles,
	.n_pipe_profiles = sizeof(pipe_profiles) / sizeof(struct rte_sched_pipe_params),

#ifdef RTE_SCHED_RED
	.red_params = {
		/* Traffic Class 0 - Colors Green / Yellow / Red */
		[0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 1 - Colors Green / Yellow / Red */
		[1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 2 - Colors Green / Yellow / Red */
		[2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 3 - Colors Green / Yellow / Red */
		[3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}
	}
#endif /* RTE_SCHED_RED */
};

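/*
 * Create and configure the hierarchical scheduler for a port: the port
 * rate is derived from the negotiated link speed, then every subport and
 * every pipe with an assigned profile is configured.
 */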
static struct rte_sched_port *
app_init_sched_port(uint32_t portid, uint32_t socketid)
{
	static char port_name[32]; /* static as referenced from global port_params */
	struct rte_eth_link link;
	struct rte_sched_port *port = NULL;
	uint32_t pipe, subport;
	int err;

	rte_eth_link_get((uint8_t)portid, &link);

	port_params.socket = socketid;
	port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
	rte_snprintf(port_name, sizeof(port_name), "port_%d", portid);
	port_params.name = port_name;

	port = rte_sched_port_config(&port_params);
	if (port == NULL) {
		rte_exit(EXIT_FAILURE, "Unable to config sched port\n");
	}

	for (subport = 0; subport < port_params.n_subports_per_port; subport++) {
		err = rte_sched_subport_config(port, subport, &subport_params[subport]);
		if (err) {
			rte_exit(EXIT_FAILURE, "Unable to config sched subport %u, err=%d\n",
					subport, err);
		}

		for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe++) {
			if (app_pipe_to_profile[subport][pipe] != -1) {
				err = rte_sched_pipe_config(port, subport, pipe,
						app_pipe_to_profile[subport][pipe]);
				if (err) {
					rte_exit(EXIT_FAILURE, "Unable to config sched pipe %u "
							"for profile %d, err=%d\n", pipe,
							app_pipe_to_profile[subport][pipe], err);
				}
			}
		}
	}

	return port;
}

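/*
 * Optionally override the default port/subport/pipe parameters from a
 * configuration profile file.
 */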
static int
app_load_cfg_profile(const char *profile)
{
	if (profile == NULL)
		return 0;

	struct cfg_file *cfg_file = cfg_load(profile, 0);
	if (cfg_file == NULL)
		rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n", profile);

	cfg_load_port(cfg_file, &port_params);
	cfg_load_subport(cfg_file, subport_params);
	cfg_load_pipe(cfg_file, pipe_profiles);

	cfg_close(cfg_file);

	return 0;
}

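/*
 * One-time application initialization: probe the NICs, load the optional
 * configuration profile, then create the rings, mbuf pools, ports and
 * scheduler instance used by each packet flow configuration.
 */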
int app_init(void)
{
	uint32_t i;
	char ring_name[MAX_NAME_LEN];
	char pool_name[MAX_NAME_LEN];

	/* init driver(s) */
	if (rte_pmd_init_all() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init PMD\n");

	if (rte_eal_pci_probe() < 0)
		rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");

	if (rte_eth_dev_count() == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");

	/* load configuration profile */
	if (app_load_cfg_profile(cfg_profile) != 0)
		rte_exit(EXIT_FAILURE, "Invalid configuration profile\n");

	/* Initialize each active flow */
	for (i = 0; i < nb_pfc; i++) {
		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
		struct rte_ring *ring;

		rte_snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
				socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].rx_ring = ring;

		rte_snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
				socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].tx_ring = ring;

		/* create the mbuf pools for each RX Port */
		rte_snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
		qos_conf[i].mbuf_pool = rte_mempool_create(pool_name, mp_size, MBUF_SIZE,
						burst_conf.rx_burst * 4,
						sizeof(struct rte_pktmbuf_pool_private),
						rte_pktmbuf_pool_init, NULL,
						rte_pktmbuf_init, NULL,
						rte_eth_dev_socket_id(qos_conf[i].rx_port),
						0);
		if (qos_conf[i].mbuf_pool == NULL)
			rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for flow %u\n", i);

		app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
		app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);

		qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].rx_port, socket);
	}

	RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
			rte_get_timer_hz());

	RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d, SW queue = %u, "
			"NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size,
			ring_conf.tx_size);

	RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
			"             Worker read/QoS enqueue = %hu,\n"
			"             QoS dequeue = %hu, Worker write = %hu\n",
		burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst,
		burst_conf.qos_dequeue, burst_conf.tx_burst);

	RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu), "
			"TX (p = %hhu, h = %hhu, w = %hhu)\n",
		rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
		tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);

	return 0;
}