/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>

#include <rte_malloc.h>

#include "rte_port_sched.h"

/*
 * Reader
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SCHED_READER_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_SCHED_READER_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_SCHED_READER_PKTS_IN_ADD(port, val)
#define RTE_PORT_SCHED_READER_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_sched_reader {
	struct rte_port_in_stats stats;

	struct rte_sched_port *sched;
};

static void *
rte_port_sched_reader_create(void *params, int socket_id)
{
	struct rte_port_sched_reader_params *conf = params;
	struct rte_port_sched_reader *port;

	/* Check input parameters */
	if ((conf == NULL) ||
	    (conf->sched == NULL)) {
		RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->sched = conf->sched;

	return port;
}

static int
rte_port_sched_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_port_sched_reader *p = port;
	uint32_t nb_rx;

	nb_rx = rte_sched_port_dequeue(p->sched, pkts, n_pkts);
	RTE_PORT_SCHED_READER_PKTS_IN_ADD(p, nb_rx);

	return nb_rx;
}

static int
rte_port_sched_reader_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(port);

	return 0;
}

static int
rte_port_sched_reader_stats_read(void *port,
		struct rte_port_in_stats *stats, int clear)
{
	struct rte_port_sched_reader *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

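/*
 * Usage sketch (illustrative only, not part of this library): an
 * application typically drives this port through the generic
 * rte_port_in_ops interface defined at the bottom of this file. The
 * "sched" handle is assumed to be an already configured
 * struct rte_sched_port and "socket_id" the NUMA node to allocate on.
 *
 *	struct rte_port_sched_reader_params params = { .sched = sched };
 *	struct rte_mbuf *pkts[RTE_PORT_IN_BURST_SIZE_MAX];
 *	void *port;
 *	int n;
 *
 *	port = rte_port_sched_reader_ops.f_create(&params, socket_id);
 *	if (port == NULL)
 *		return -1;
 *
 *	n = rte_port_sched_reader_ops.f_rx(port, pkts,
 *		RTE_PORT_IN_BURST_SIZE_MAX);
 *	... n packets dequeued from the scheduler are now in pkts[] ...
 */
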
/*
 * Writer
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_sched_writer {
	struct rte_port_out_stats stats;

	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	struct rte_sched_port *sched;
	uint32_t tx_burst_sz;
	uint32_t tx_buf_count;
	uint64_t bsz_mask;
};

static void *
rte_port_sched_writer_create(void *params, int socket_id)
{
	struct rte_port_sched_writer_params *conf = params;
	struct rte_port_sched_writer *port;

	/* Check input parameters */
	if ((conf == NULL) ||
	    (conf->sched == NULL) ||
	    (conf->tx_burst_sz == 0) ||
	    (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
	    (!rte_is_power_of_2(conf->tx_burst_sz))) {
		RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->sched = conf->sched;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;

	/* bsz_mask has only bit (tx_burst_sz - 1) set; tx_bulk() uses it to
	 * detect whether pkts_mask holds at least one full burst.
	 */
	port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);

	return port;
}

static int
rte_port_sched_writer_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;

	p->tx_buf[p->tx_buf_count++] = pkt;
	RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(p, 1);

	/* Enqueue the buffered packets into the scheduler once a full burst
	 * has accumulated.
	 */
	if (p->tx_buf_count >= p->tx_burst_sz) {
		__rte_unused uint32_t nb_tx;

		nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
		RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
		p->tx_buf_count = 0;
	}

	return 0;
}

static int
rte_port_sched_writer_tx_bulk(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask)
{
	struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;

	/* expr is zero if and only if pkts_mask is a contiguous run of bits
	 * starting at bit 0 (no holes) that covers at least tx_burst_sz
	 * packets. In that case the burst can bypass the tx buffer and be
	 * enqueued into the scheduler directly.
	 */
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		__rte_unused uint32_t nb_tx;
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);

		/* Drain any previously buffered packets first to preserve
		 * packet order.
		 */
		if (tx_buf_count) {
			nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf,
				tx_buf_count);
			RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, tx_buf_count - nb_tx);
			p->tx_buf_count = 0;
		}

		nb_tx = rte_sched_port_enqueue(p->sched, pkts, n_pkts);
		RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - nb_tx);
	} else {
		/* Not a full contiguous burst: copy the selected packets into
		 * the tx buffer and flush once a full burst has accumulated.
		 */
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}
		p->tx_buf_count = tx_buf_count;

		if (tx_buf_count >= p->tx_burst_sz) {
			__rte_unused uint32_t nb_tx;

			nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf,
				tx_buf_count);
			RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, tx_buf_count - nb_tx);
			p->tx_buf_count = 0;
		}
	}

	return 0;
}

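/*
 * Worked example of the fast-path test in rte_port_sched_writer_tx_bulk()
 * above, assuming tx_burst_sz = 32 (so bsz_mask = 1 << 31):
 *
 *	pkts_mask = 0xFFFFFFFF: (pkts_mask & (pkts_mask + 1)) = 0 and
 *	((pkts_mask & bsz_mask) ^ bsz_mask) = 0, so expr = 0 and the whole
 *	burst is enqueued directly.
 *
 *	pkts_mask = 0x0000FFFF: bit 31 is clear, so the second term is
 *	non-zero and the packets are buffered instead.
 *
 *	pkts_mask = 0xFFFFFFFD: the hole at bit 1 makes the first term
 *	non-zero, so the packets are also buffered.
 */
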
static int
rte_port_sched_writer_flush(void *port)
{
	struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;

	if (p->tx_buf_count) {
		__rte_unused uint32_t nb_tx;

		nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
		RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
		p->tx_buf_count = 0;
	}

	return 0;
}

static int
rte_port_sched_writer_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_port_sched_writer_flush(port);
	rte_free(port);

	return 0;
}

static int
rte_port_sched_writer_stats_read(void *port,
		struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_sched_writer *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Summary of port operations
 */
struct rte_port_in_ops rte_port_sched_reader_ops = {
	.f_create = rte_port_sched_reader_create,
	.f_free = rte_port_sched_reader_free,
	.f_rx = rte_port_sched_reader_rx,
	.f_stats = rte_port_sched_reader_stats_read,
};

struct rte_port_out_ops rte_port_sched_writer_ops = {
	.f_create = rte_port_sched_writer_create,
	.f_free = rte_port_sched_writer_free,
	.f_tx = rte_port_sched_writer_tx,
	.f_tx_bulk = rte_port_sched_writer_tx_bulk,
	.f_flush = rte_port_sched_writer_flush,
	.f_stats = rte_port_sched_writer_stats_read,
};
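
/*
 * Usage sketch (illustrative only, not part of this library): the writer
 * port is driven through the generic rte_port_out_ops interface above.
 * The "sched" handle is assumed to be an already configured
 * struct rte_sched_port, "pkt" a struct rte_mbuf * to send and
 * "socket_id" the NUMA node to allocate on.
 *
 *	struct rte_port_sched_writer_params params = {
 *		.sched = sched,
 *		.tx_burst_sz = 32,	(power of 2, <= RTE_PORT_IN_BURST_SIZE_MAX)
 *	};
 *	void *port;
 *
 *	port = rte_port_sched_writer_ops.f_create(&params, socket_id);
 *	if (port == NULL)
 *		return -1;
 *
 *	rte_port_sched_writer_ops.f_tx(port, pkt);
 *	rte_port_sched_writer_ops.f_flush(port);
 */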