xref: /dpdk/examples/qos_sched/app_thread.c (revision cf9b3c36e5a297200c169dbbf9d6e655d8096948)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>

#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_sched.h>

#include "main.h"

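/*
 * This example splits packet processing across threads: app_rx_thread()
 * classifies received packets and hands them to a worker over a ring,
 * app_worker_thread() runs the rte_sched enqueue/dequeue stages,
 * app_tx_thread() transmits the scheduled packets, and app_mixed_thread()
 * combines the worker and TX roles on a single lcore.
 */
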
/*
 * QoS parameters are encoded as follows:
 *		Outer VLAN ID defines subport
 *		Inner VLAN ID defines pipe
 *		Destination IP host (0.0.0.XXX) defines queue
 * Values below are offsets (in uint16_t units) of each field
 * from the start of the frame
 */
#define SUBPORT_OFFSET	7
#define PIPE_OFFSET		9
#define QUEUE_OFFSET	20
#define COLOR_OFFSET	19

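/* Decode the subport, pipe, traffic class, queue and color for a packet
 * from the frame fields described above.
 */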
static inline int
get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
			uint32_t *traffic_class, uint32_t *queue, uint32_t *color)
{
	uint16_t *pdata = rte_pktmbuf_mtod(m, uint16_t *);
	uint16_t pipe_queue;

	/* Outer VLAN ID */
	*subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
		(port_params.n_subports_per_port - 1);

	/* Inner VLAN ID */
	*pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
		(subport_params[*subport].n_pipes_per_subport_enabled - 1);

	pipe_queue = active_queues[(pdata[QUEUE_OFFSET] >> 8) % n_active_queues];

	/* Traffic class (Destination IP) */
	*traffic_class = pipe_queue > RTE_SCHED_TRAFFIC_CLASS_BE ?
			RTE_SCHED_TRAFFIC_CLASS_BE : pipe_queue;

	/* Traffic class queue (Destination IP) */
	*queue = pipe_queue - *traffic_class;

	/* Color (Destination IP) */
	*color = pdata[COLOR_OFFSET] & 0x03;

	return 0;
}

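/* RX thread: poll the RX port/queue of each thread_conf in round-robin,
 * classify each packet with get_pkt_sched(), write the scheduler metadata
 * into the mbuf and enqueue the burst onto the worker ring; if the ring
 * is full the whole burst is dropped and counted.
 */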
void
app_rx_thread(struct thread_conf **confs)
{
	uint32_t i, nb_rx;
	struct rte_mbuf *rx_mbufs[burst_conf.rx_burst] __rte_cache_aligned;
	struct thread_conf *conf;
	int conf_idx = 0;

	uint32_t subport;
	uint32_t pipe;
	uint32_t traffic_class;
	uint32_t queue;
	uint32_t color;

	while ((conf = confs[conf_idx])) {
		nb_rx = rte_eth_rx_burst(conf->rx_port, conf->rx_queue, rx_mbufs,
				burst_conf.rx_burst);

		if (likely(nb_rx != 0)) {
			APP_STATS_ADD(conf->stat.nb_rx, nb_rx);

			for (i = 0; i < nb_rx; i++) {
				get_pkt_sched(rx_mbufs[i],
						&subport, &pipe, &traffic_class, &queue, &color);
				rte_sched_port_pkt_write(conf->sched_port,
						rx_mbufs[i],
						subport, pipe,
						traffic_class, queue,
						(enum rte_color) color);
			}

			if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
					(void **)rx_mbufs, nb_rx, NULL) == 0)) {
				for (i = 0; i < nb_rx; i++) {
					rte_pktmbuf_free(rx_mbufs[i]);

					APP_STATS_ADD(conf->stat.nb_drop, 1);
				}
			}
		}
		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}


/* Send the buffered packets to the output interface.
 * Packets cannot be dropped at this point, so the function keeps calling
 * rte_eth_tx_burst() until the whole burst has been transmitted.
 */
static inline void
app_send_burst(struct thread_conf *qconf)
{
	struct rte_mbuf **mbufs;
	uint32_t n, ret;

	mbufs = (struct rte_mbuf **)qconf->m_table;
	n = qconf->n_mbufs;

	do {
		ret = rte_eth_tx_burst(qconf->tx_port, qconf->tx_queue, mbufs, (uint16_t)n);
		/* we cannot drop the packets, so re-send */
		/* update number of packets to be sent */
		n -= ret;
		mbufs = (struct rte_mbuf **)&mbufs[ret];
	} while (n);
}


/* Buffer packets in the per-thread m_table and flush a full TX burst
 * whenever tx_burst packets have accumulated.
 */
static void
app_send_packets(struct thread_conf *qconf, struct rte_mbuf **mbufs, uint32_t nb_pkt)
{
	uint32_t i, len;

	len = qconf->n_mbufs;
	for (i = 0; i < nb_pkt; i++) {
		qconf->m_table[len] = mbufs[i];
		len++;
		/* enough pkts to be sent */
		if (unlikely(len == burst_conf.tx_burst)) {
			qconf->n_mbufs = len;
			app_send_burst(qconf);
			len = 0;
		}
	}

	qconf->n_mbufs = len;
}

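/* TX thread: dequeue bursts of scheduled packets from the TX ring and
 * transmit them, periodically draining any packets left buffered in
 * m_table when no full burst has been read for some time.
 */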
void
app_tx_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.qos_dequeue];
	struct thread_conf *conf;
	int conf_idx = 0;
	int retval;
	/* number of TSC cycles in BURST_TX_DRAIN_US microseconds */
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	while ((conf = confs[conf_idx])) {
		retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
					burst_conf.qos_dequeue, NULL);
		if (likely(retval != 0)) {
			app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

			conf->counter = 0; /* reset empty read loop counter */
		}

		conf->counter++;

		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* check whether any packets are left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);

				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}


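/* Worker thread: move packets from the RX ring into the QoS scheduler,
 * then dequeue scheduled packets and push them onto the TX ring,
 * retrying until the TX ring accepts the burst.
 */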
void
app_worker_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;

		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
					burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}

		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
					burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0))
			while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
					(void **)mbufs, nb_pkt, NULL) == 0)
				; /* empty body */

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}


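/* Mixed thread: combine the worker and TX roles on one lcore, feeding the
 * QoS scheduler from the RX ring and transmitting the scheduled packets
 * directly, with the same periodic drain logic as app_tx_thread().
 */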
void
app_mixed_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;
	/* number of TSC cycles in BURST_TX_DRAIN_US microseconds */
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;

		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
					burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}

		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
					burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0)) {
			app_send_packets(conf, mbufs, nb_pkt);

			conf->counter = 0; /* reset empty read loop counter */
		}

		conf->counter++;

		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* check whether any packets are left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);

				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}