/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>

#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_sched.h>

#include "main.h"

/*
 * QoS parameters are encoded as follows:
 *		Outer VLAN ID defines subport
 *		Inner VLAN ID defines pipe
 *		Destination IP host (0.0.0.XXX) defines queue
 * Values below define offset to each field from start of frame
 */
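/*
 * Note: the offsets below are expressed in 16-bit words, since get_pkt_sched()
 * walks the frame through a uint16_t pointer (e.g. SUBPORT_OFFSET 7 is byte
 * offset 14, the outer VLAN TCI of a QinQ Ethernet frame).
 */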
#define SUBPORT_OFFSET	7
#define PIPE_OFFSET		9
#define QUEUE_OFFSET	20
#define COLOR_OFFSET	19

static inline int
get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
			uint32_t *traffic_class, uint32_t *queue, uint32_t *color)
{
	uint16_t *pdata = rte_pktmbuf_mtod(m, uint16_t *);
	uint16_t pipe_queue;

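	/*
	 * Masking with (count - 1) acts as a cheap modulo only when the
	 * configured subport/pipe counts are powers of two.
	 */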
	/* Outer VLAN ID */
	*subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
		(port_params.n_subports_per_port - 1);

	/* Inner VLAN ID */
	*pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
		(subport_params[*subport].n_pipes_per_subport_enabled - 1);

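	/*
	 * One byte of the destination IP address indexes the table of enabled
	 * pipe queues to select the target queue within the pipe.
	 */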
	pipe_queue = active_queues[(pdata[QUEUE_OFFSET] >> 8) % n_active_queues];

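	/*
	 * Pipe queue indices below RTE_SCHED_TRAFFIC_CLASS_BE map one-to-one to
	 * the single-queue traffic classes; indices from RTE_SCHED_TRAFFIC_CLASS_BE
	 * upward fall into the best-effort class, with the remainder selecting
	 * the queue within it.
	 */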
	/* Traffic class (Destination IP) */
	*traffic_class = pipe_queue > RTE_SCHED_TRAFFIC_CLASS_BE ?
			RTE_SCHED_TRAFFIC_CLASS_BE : pipe_queue;

	/* Traffic class queue (Destination IP) */
	*queue = pipe_queue - *traffic_class;

	/* Color (Destination IP) */
	*color = pdata[COLOR_OFFSET] & 0x03;

	return 0;
}

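/*
 * RX thread: poll the port, classify each packet into
 * (subport, pipe, traffic class, queue, color), store the result in the mbuf
 * sched field and pass the burst to the worker thread over a ring.
 */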
void
app_rx_thread(struct thread_conf **confs)
{
	uint32_t i, nb_rx;
	alignas(RTE_CACHE_LINE_SIZE) struct rte_mbuf *rx_mbufs[burst_conf.rx_burst];
	struct thread_conf *conf;
	int conf_idx = 0;

	uint32_t subport;
	uint32_t pipe;
	uint32_t traffic_class;
	uint32_t queue;
	uint32_t color;

	while ((conf = confs[conf_idx])) {
		nb_rx = rte_eth_rx_burst(conf->rx_port, conf->rx_queue, rx_mbufs,
				burst_conf.rx_burst);

		if (likely(nb_rx != 0)) {
			APP_STATS_ADD(conf->stat.nb_rx, nb_rx);

			for (i = 0; i < nb_rx; i++) {
				get_pkt_sched(rx_mbufs[i],
						&subport, &pipe, &traffic_class, &queue, &color);
				rte_sched_port_pkt_write(conf->sched_port,
						rx_mbufs[i],
						subport, pipe,
						traffic_class, queue,
						(enum rte_color) color);
			}

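			/*
			 * Bulk enqueue is all-or-nothing: if the worker ring cannot
			 * take the whole burst, drop and free every packet in it.
			 */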
			if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
					(void **)rx_mbufs, nb_rx, NULL) == 0)) {
				for (i = 0; i < nb_rx; i++) {
					rte_pktmbuf_free(rx_mbufs[i]);

					APP_STATS_ADD(conf->stat.nb_drop, 1);
				}
			}
		}
		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}

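/*
 * TX thread: drain the worker's TX ring and transmit the packets on the port;
 * mbufs the NIC does not accept in this burst are freed.
 */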
void
app_tx_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.qos_dequeue];
	struct thread_conf *conf;
	int conf_idx = 0;
	int nb_pkts;

	while ((conf = confs[conf_idx])) {
		nb_pkts = rte_ring_sc_dequeue_burst(conf->tx_ring, (void **)mbufs,
					burst_conf.qos_dequeue, NULL);
		if (likely(nb_pkts != 0)) {
			uint16_t nb_tx = rte_eth_tx_burst(conf->tx_port, 0, mbufs, nb_pkts);
			if (nb_pkts != nb_tx)
				rte_pktmbuf_free_bulk(&mbufs[nb_tx], nb_pkts - nb_tx);
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}


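/*
 * Worker thread: move packets from the RX ring into the QoS scheduler and
 * from the scheduler into the TX ring.
 */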
void
app_worker_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;

		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
					burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}

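		/*
		 * Dequeue up to qos_dequeue packets from the scheduler and push
		 * them to the TX ring, retrying until the whole burst fits
		 * (bulk enqueue is all-or-nothing).
		 */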
		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
					burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0))
			while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
					(void **)mbufs, nb_pkt, NULL) == 0)
				; /* empty body */

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}


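/*
 * Mixed thread: combines the worker and TX roles, feeding the scheduler from
 * the RX ring and transmitting the scheduler output directly on the port.
 */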
void
app_mixed_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;

		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
					burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}


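		/*
		 * Dequeue from the scheduler and transmit directly; free any
		 * packets the NIC did not accept in this burst.
		 */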
		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
					burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0)) {
			uint16_t nb_tx = rte_eth_tx_burst(conf->tx_port, 0, mbufs, nb_pkt);
			if (nb_tx != nb_pkt)
				rte_pktmbuf_free_bulk(&mbufs[nb_tx], nb_pkt - nb_tx);
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}