/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <math.h>

#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include <rte_log.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_metrics.h>
#include <rte_memzone.h>
#include <rte_lcore.h>

#include "rte_latencystats.h"

/** Nanoseconds per second */
#define NS_PER_SEC 1E9

/**
 * Clock cycles per nanosecond. Returned as a double so that timers
 * slower than 1 GHz do not truncate to zero and non-integral
 * frequencies keep their sub-cycle precision.
 */
static double
latencystat_cycles_per_ns(void)
{
	return rte_get_timer_hz() / NS_PER_SEC;
}
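
/*
 * Worked example (illustrative numbers, not from the original source):
 * on a machine whose timer runs at 2.4 GHz, rte_get_timer_hz() returns
 * 2400000000, so this helper yields 2.4 cycles per nanosecond, and a
 * measured delta of 2400 cycles converts to 2400 / 2.4 = 1000 ns.
 */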

RTE_LOG_REGISTER_DEFAULT(latencystat_logtype, INFO);
#define RTE_LOGTYPE_LATENCY_STATS latencystat_logtype
#define LATENCY_STATS_LOG(level, ...) \
	RTE_LOG_LINE(level, LATENCY_STATS, "" __VA_ARGS__)

static uint64_t timestamp_dynflag;
static int timestamp_dynfield_offset = -1;

static inline rte_mbuf_timestamp_t *
timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
			timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}

static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
static int latency_stats_index;
static uint64_t samp_intvl; /**< Sampling interval in timer cycles */
static uint64_t timer_tsc; /**< Cycles accumulated since the last sampled packet */
static uint64_t prev_tsc; /**< Timer value seen by the previous Rx burst iteration */

struct rte_latency_stats {
	float min_latency; /**< Minimum latency, in timer cycles */
	float avg_latency; /**< Average latency, in timer cycles */
	float max_latency; /**< Maximum latency, in timer cycles */
	float jitter; /**< Latency variation, in timer cycles */
	rte_spinlock_t lock; /**< Latency calculation lock */
};

static struct rte_latency_stats *glob_stats;

struct rxtx_cbs {
	const struct rte_eth_rxtx_callback *cb;
};

static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct rxtx_cbs tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];

struct latency_stats_nameoff {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct latency_stats_nameoff lat_stats_strings[] = {
	{"min_latency_ns", offsetof(struct rte_latency_stats, min_latency)},
	{"avg_latency_ns", offsetof(struct rte_latency_stats, avg_latency)},
	{"max_latency_ns", offsetof(struct rte_latency_stats, max_latency)},
	{"jitter_ns", offsetof(struct rte_latency_stats, jitter)},
};

#define NUM_LATENCY_STATS RTE_DIM(lat_stats_strings)

int32_t
rte_latencystats_update(void)
{
	unsigned int i;
	float *stats_ptr = NULL;
	uint64_t values[NUM_LATENCY_STATS] = {0};
	int ret;

	/* Convert each stat from timer cycles to nanoseconds */
	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i] = (uint64_t)floor((*stats_ptr)/
				latencystat_cycles_per_ns());
	}

	ret = rte_metrics_update_values(RTE_METRICS_GLOBAL,
					latency_stats_index,
					values, NUM_LATENCY_STATS);
	if (ret < 0)
		LATENCY_STATS_LOG(INFO,
			"Failed to push latency stats to metrics library");

	return ret;
}
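
/*
 * Read-back sketch (an assumption about typical usage, not part of
 * this file): values pushed above can be fetched back through the
 * metrics library, e.g.:
 *
 *	struct rte_metric_value vals[NUM_LATENCY_STATS];
 *	rte_metrics_get_values(RTE_METRICS_GLOBAL, vals, NUM_LATENCY_STATS);
 */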

static void
rte_latencystats_fill_values(struct rte_metric_value *values)
{
	unsigned int i;
	float *stats_ptr = NULL;

	/* Convert each stat from timer cycles to nanoseconds */
	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i].key = i;
		values[i].value = (uint64_t)floor((*stats_ptr)/
						latencystat_cycles_per_ns());
	}
}

static uint16_t
add_time_stamps(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_cb __rte_unused)
{
	unsigned int i;
	uint64_t diff_tsc, now;

	/*
	 * Once every sample interval,
	 * a timestamp is marked on one received packet.
	 */
	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		diff_tsc = now - prev_tsc;
		timer_tsc += diff_tsc;

		if ((pkts[i]->ol_flags & timestamp_dynflag) == 0
				&& (timer_tsc >= samp_intvl)) {
			*timestamp_dynfield(pkts[i]) = now;
			pkts[i]->ol_flags |= timestamp_dynflag;
			timer_tsc = 0;
		}
		prev_tsc = now;
		now = rte_rdtsc();
	}

	return nb_pkts;
}
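
/*
 * Example (illustrative, assuming a typical configuration): with
 * rte_latencystats_init(1000000, NULL), samp_intvl corresponds to
 * 1 ms worth of timer cycles, so roughly one received packet per
 * millisecond is stamped here and later measured in calc_latency().
 */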

static uint16_t
calc_latency(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *_ __rte_unused)
{
	unsigned int i, cnt = 0;
	uint64_t now;
	float latency[nb_pkts];
	static float prev_latency;
	/*
	 * Alpha represents the degree of weighting decrease in EWMA,
	 * a constant smoothing factor between 0 and 1. The value
	 * is used below for measuring average latency.
	 */
	const float alpha = 0.2;

	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & timestamp_dynflag)
			latency[cnt++] = now - *timestamp_dynfield(pkts[i]);
	}

	rte_spinlock_lock(&glob_stats->lock);
	for (i = 0; i < cnt; i++) {
		/*
		 * The jitter is calculated as the statistical mean of
		 * interpacket delay variation. The "jitter estimate" is
		 * computed by taking the absolute values of the ipdv
		 * sequence and applying an exponential filter with
		 * parameter 1/16 to generate the estimate, i.e.
		 * J = J + (|D(i-1,i)| - J)/16, where J is the jitter and
		 * D(i-1,i) is the difference in latency of the two
		 * consecutive packets i-1 and i.
		 * Reference: RFC 5481 sec. 4.1, RFC 3393 sec. 4.5 and
		 * RFC 1889.
		 */
		glob_stats->jitter += (fabsf(prev_latency - latency[i])
					- glob_stats->jitter)/16;
		if (glob_stats->min_latency == 0 ||
				latency[i] < glob_stats->min_latency)
			glob_stats->min_latency = latency[i];
		if (latency[i] > glob_stats->max_latency)
			glob_stats->max_latency = latency[i];
		/*
		 * The average latency is measured using an exponentially
		 * weighted moving average (EWMA):
		 * https://en.wikipedia.org/wiki/Moving_average
		 */
		glob_stats->avg_latency +=
			alpha * (latency[i] - glob_stats->avg_latency);
		prev_latency = latency[i];
	}
	rte_spinlock_unlock(&glob_stats->lock);

	return nb_pkts;
}
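
/*
 * Worked example (illustrative numbers, not from the original source):
 * with alpha = 0.2 and avg_latency starting at 0, a first sample of
 * 1000 cycles moves the average to 200, a second sample of 1000 moves
 * it to 360, converging toward 1000. The jitter estimate after those
 * two samples is |0 - 1000|/16 = 62.5, then
 * 62.5 + (|1000 - 1000| - 62.5)/16 ~= 58.6.
 */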

int
rte_latencystats_init(uint64_t app_samp_intvl,
		rte_latency_stats_flow_type_fn user_cb)
{
	unsigned int i;
	uint16_t pid;
	uint16_t qid;
	struct rxtx_cbs *cbs = NULL;
	const char *ptr_strings[NUM_LATENCY_STATS] = {0};
	const struct rte_memzone *mz = NULL;
	const unsigned int flags = 0;
	int ret;

	if (rte_memzone_lookup(MZ_RTE_LATENCY_STATS))
		return -EEXIST;

	/* Allocate stats in shared memory for multi-process support */
	mz = rte_memzone_reserve(MZ_RTE_LATENCY_STATS, sizeof(*glob_stats),
					rte_socket_id(), flags);
	if (mz == NULL) {
		LATENCY_STATS_LOG(ERR, "Cannot reserve memory: %s:%d",
			__func__, __LINE__);
		return -ENOMEM;
	}

	glob_stats = mz->addr;
	rte_spinlock_init(&glob_stats->lock);
	samp_intvl = app_samp_intvl * latencystat_cycles_per_ns();

	/* Register latency stats with the metrics library */
	for (i = 0; i < NUM_LATENCY_STATS; i++)
		ptr_strings[i] = lat_stats_strings[i].name;

	latency_stats_index = rte_metrics_reg_names(ptr_strings,
							NUM_LATENCY_STATS);
	if (latency_stats_index < 0) {
		LATENCY_STATS_LOG(DEBUG,
			"Failed to register latency stats names");
		rte_memzone_free(mz);
		return -1;
	}

	/* Register mbuf field and flag for Rx timestamp */
	ret = rte_mbuf_dyn_rx_timestamp_register(&timestamp_dynfield_offset,
			&timestamp_dynflag);
	if (ret != 0) {
		LATENCY_STATS_LOG(ERR,
			"Cannot register mbuf field/flag for timestamp");
		rte_memzone_free(mz);
		return -rte_errno;
	}

	/* Register Rx/Tx callbacks */
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = rte_eth_dev_info_get(pid, &dev_info);
		if (ret != 0) {
			LATENCY_STATS_LOG(INFO,
				"Failed to get device (port %u) info: %s",
				pid, strerror(-ret));
			continue;
		}

		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			cbs->cb = rte_eth_add_first_rx_callback(pid, qid,
					add_time_stamps, user_cb);
			if (!cbs->cb)
				LATENCY_STATS_LOG(INFO,
					"Failed to register Rx callback for pid=%d, qid=%d",
					pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			cbs->cb = rte_eth_add_tx_callback(pid, qid,
					calc_latency, user_cb);
			if (!cbs->cb)
				LATENCY_STATS_LOG(INFO,
					"Failed to register Tx callback for pid=%d, qid=%d",
					pid, qid);
		}
	}
	return 0;
}
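
/*
 * Usage sketch (illustrative, assuming a typical polling application;
 * the 1 ms sample interval and 1 s refresh period are arbitrary
 * examples): initialize after the ports are started, push the stats
 * periodically from a control thread, and clean up at exit.
 *
 *	rte_latencystats_init(1000000, NULL);
 *	while (running) {
 *		rte_latencystats_update();
 *		rte_delay_ms(1000);
 *	}
 *	rte_latencystats_uninit();
 */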

int
rte_latencystats_uninit(void)
{
	uint16_t pid;
	uint16_t qid;
	int ret = 0;
	struct rxtx_cbs *cbs = NULL;
	const struct rte_memzone *mz = NULL;

	/* Deregister Rx/Tx callbacks */
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = rte_eth_dev_info_get(pid, &dev_info);
		if (ret != 0) {
			LATENCY_STATS_LOG(INFO,
				"Failed to get device (port %u) info: %s",
				pid, strerror(-ret));
			continue;
		}

		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			ret = rte_eth_remove_rx_callback(pid, qid, cbs->cb);
			if (ret)
				LATENCY_STATS_LOG(INFO,
					"Failed to remove Rx callback for pid=%d, qid=%d",
					pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			ret = rte_eth_remove_tx_callback(pid, qid, cbs->cb);
			if (ret)
				LATENCY_STATS_LOG(INFO,
					"Failed to remove Tx callback for pid=%d, qid=%d",
					pid, qid);
		}
	}

	/* Free the memzone */
	mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
	if (mz)
		rte_memzone_free(mz);

	return 0;
}

int
rte_latencystats_get_names(struct rte_metric_name *names, uint16_t size)
{
	unsigned int i;

	if (names == NULL || size < NUM_LATENCY_STATS)
		return NUM_LATENCY_STATS;

	for (i = 0; i < NUM_LATENCY_STATS; i++)
		strlcpy(names[i].name, lat_stats_strings[i].name,
			sizeof(names[i].name));

	return NUM_LATENCY_STATS;
}

int
rte_latencystats_get(struct rte_metric_value *values, uint16_t size)
{
	if (size < NUM_LATENCY_STATS || values == NULL)
		return NUM_LATENCY_STATS;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const struct rte_memzone *mz;

		mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
		if (mz == NULL) {
			LATENCY_STATS_LOG(ERR,
				"Latency stats memzone not found");
			return -ENOMEM;
		}
		glob_stats = mz->addr;
	}

	/* Retrieve latency stats */
	rte_latencystats_fill_values(values);

	return NUM_LATENCY_STATS;
}
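
/*
 * Retrieval sketch (illustrative, not part of this file): callers are
 * expected to size their arrays first, since both getters return the
 * required count when passed a NULL or too-small buffer:
 *
 *	int n = rte_latencystats_get_names(NULL, 0);
 *	struct rte_metric_name names[n];
 *	struct rte_metric_value values[n];
 *	rte_latencystats_get_names(names, n);
 *	rte_latencystats_get(values, n);
 */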