xref: /dpdk/examples/qos_sched/app_thread.c (revision be1e533238c0293c1f15f1b14e613b0bba643222)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>

#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_sched.h>

#include "main.h"

/*
 * QoS parameters are encoded in the packet as follows:
 *		Outer VLAN ID defines the subport
 *		Inner VLAN ID defines the pipe
 *		Destination IP host byte (0.0.0.XXX) defines the queue
 *		Destination IP also carries the packet color
 * The values below are the offsets of these fields, counted in
 * 16-bit words from the start of the frame.
 */
#define SUBPORT_OFFSET	7
#define PIPE_OFFSET		9
#define QUEUE_OFFSET	20
#define COLOR_OFFSET	19
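
/*
 * For orientation (assuming the QinQ/double-tagged Ethernet frame with
 * an option-less IPv4 header that this encoding implies): word 7 is the
 * outer VLAN TCI (bytes 14-15), word 9 the inner VLAN TCI (bytes 18-19),
 * and words 19-20 cover the destination IP address (bytes 38-41).
 */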

static inline int
get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
			uint32_t *traffic_class, uint32_t *queue, uint32_t *color)
{
	uint16_t *pdata = rte_pktmbuf_mtod(m, uint16_t *);
	uint16_t pipe_queue;

	*subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
			(port_params.n_subports_per_port - 1); /* Outer VLAN ID */
	*pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
			(port_params.n_pipes_per_subport - 1); /* Inner VLAN ID */
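	/*
	 * Pick a pipe queue from the destination IP host byte. Traffic
	 * classes up to RTE_SCHED_TRAFFIC_CLASS_BE are strict priority
	 * with a single queue each; any higher value falls into the
	 * best-effort class, where the remainder selects the queue.
	 */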
	pipe_queue = active_queues[(pdata[QUEUE_OFFSET] >> 8) % n_active_queues];
	*traffic_class = pipe_queue > RTE_SCHED_TRAFFIC_CLASS_BE ?
			RTE_SCHED_TRAFFIC_CLASS_BE : pipe_queue; /* Destination IP */
	*queue = pipe_queue - *traffic_class; /* Destination IP */
	*color = pdata[COLOR_OFFSET] & 0x03; /* Destination IP */

	return 0;
}

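/*
 * RX thread: poll each configured RX queue in turn, stamp every mbuf
 * with the scheduler hierarchy fields decoded above, and pass the
 * burst to the worker over the RX ring (freeing the whole burst if
 * the ring is full).
 */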
void
app_rx_thread(struct thread_conf **confs)
{
	uint32_t i, nb_rx;
	struct rte_mbuf *rx_mbufs[burst_conf.rx_burst] __rte_cache_aligned;
	struct thread_conf *conf;
	int conf_idx = 0;

	uint32_t subport;
	uint32_t pipe;
	uint32_t traffic_class;
	uint32_t queue;
	uint32_t color;

	while ((conf = confs[conf_idx])) {
		nb_rx = rte_eth_rx_burst(conf->rx_port, conf->rx_queue, rx_mbufs,
				burst_conf.rx_burst);

		if (likely(nb_rx != 0)) {
			APP_STATS_ADD(conf->stat.nb_rx, nb_rx);

			for (i = 0; i < nb_rx; i++) {
				get_pkt_sched(rx_mbufs[i],
						&subport, &pipe, &traffic_class, &queue, &color);
				rte_sched_port_pkt_write(conf->sched_port,
						rx_mbufs[i],
						subport, pipe,
						traffic_class, queue,
						(enum rte_color) color);
			}

			/* The bulk enqueue is all-or-nothing: on failure, free
			 * the whole burst and count it as dropped.
			 */
			if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
					(void **)rx_mbufs, nb_rx, NULL) == 0)) {
				for (i = 0; i < nb_rx; i++) {
					rte_pktmbuf_free(rx_mbufs[i]);

					APP_STATS_ADD(conf->stat.nb_drop, 1);
				}
			}
		}
		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}


/* Send a burst of packets on the output interface. Nothing is dropped
 * here: transmission is retried until every packet in the table has
 * been sent.
 */

static inline void
app_send_burst(struct thread_conf *qconf)
{
	struct rte_mbuf **mbufs;
	uint32_t n, ret;

	mbufs = (struct rte_mbuf **)qconf->m_table;
	n = qconf->n_mbufs;

	do {
		ret = rte_eth_tx_burst(qconf->tx_port, qconf->tx_queue, mbufs, (uint16_t)n);
		/* We cannot drop the packets, so re-send: update the count of
		 * packets still to be sent and advance past those accepted.
		 */
		n -= ret;
		mbufs = (struct rte_mbuf **)&mbufs[ret];
	} while (n);
}


/* Buffer packets for an output interface, flushing a full burst to the
 * NIC whenever burst_conf.tx_burst packets have accumulated.
 */
static void
app_send_packets(struct thread_conf *qconf, struct rte_mbuf **mbufs, uint32_t nb_pkt)
{
	uint32_t i, len;

	len = qconf->n_mbufs;
	for (i = 0; i < nb_pkt; i++) {
		qconf->m_table[len] = mbufs[i];
		len++;
		/* enough pkts to be sent */
		if (unlikely(len == burst_conf.tx_burst)) {
			qconf->n_mbufs = len;
			app_send_burst(qconf);
			len = 0;
		}
	}

	qconf->n_mbufs = len;
}

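/*
 * TX thread: pull fixed-size bursts from the worker's TX ring and
 * transmit them, periodically draining any partially filled TX table
 * so buffered packets do not linger.
 */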
void
app_tx_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.qos_dequeue];
	struct thread_conf *conf;
	int conf_idx = 0;
	int retval;
	/* number of TSC cycles in BURST_TX_DRAIN_US microseconds */
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	while ((conf = confs[conf_idx])) {
		retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
					burst_conf.qos_dequeue, NULL);
		if (likely(retval != 0)) {
			app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

			conf->counter = 0; /* reset empty read loop counter */
		}

		conf->counter++;

		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* check if there are any packets left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);

				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}


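/*
 * Worker thread: feed packets from the RX ring into the QoS scheduler
 * and move the scheduler's output into the TX ring.
 */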
void
app_worker_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;

		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
					burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}

		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
					burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0))
			/* The bulk enqueue is all-or-nothing, so spin until the
			 * TX ring has room for the whole burst.
			 */
			while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
					(void **)mbufs, nb_pkt, NULL) == 0)
				; /* empty body */

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}


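/*
 * Mixed thread: combine the worker and TX roles in a single loop,
 * feeding the QoS scheduler from the RX ring and transmitting its
 * output directly, with the same periodic drain as the TX thread.
 */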
void
app_mixed_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;
	/* number of TSC cycles in BURST_TX_DRAIN_US microseconds */
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;

		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
					burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}

		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
					burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0)) {
			app_send_packets(conf, mbufs, nb_pkt);

			conf->counter = 0; /* reset empty read loop counter */
		}

		conf->counter++;

		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {

			/* check if there are any packets left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);

				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}