/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <math.h>
#include <string.h>

#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include <rte_log.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_metrics.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_errno.h>
#include <rte_spinlock.h>

#include "rte_latencystats.h"
/** Nanoseconds per second */
#define NS_PER_SEC 1E9

/** Clock cycles per nanosecond */
static uint64_t
latencystat_cycles_per_ns(void)
{
	return rte_get_timer_hz() / NS_PER_SEC;
}
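
/*
 * Note: the conversion above truncates toward zero; e.g. a 2.0 GHz
 * timer yields exactly 2 cycles/ns, while a 2.3 GHz timer also yields
 * 2, so nanosecond conversions on such clocks carry a proportional
 * rounding error.
 */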

RTE_LOG_REGISTER_DEFAULT(latencystat_logtype, INFO);
#define RTE_LOGTYPE_LATENCY_STATS latencystat_logtype
#define LATENCY_STATS_LOG(level, ...) \
	RTE_LOG_LINE(level, LATENCY_STATS, "" __VA_ARGS__)

static uint64_t timestamp_dynflag;
static int timestamp_dynfield_offset = -1;

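/* Helper to access the mbuf's dynamic timestamp field registered at init. */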
static inline rte_mbuf_timestamp_t *
timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
			timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}

static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
static int latency_stats_index;
static uint64_t samp_intvl;
static uint64_t timer_tsc;
static uint64_t prev_tsc;

struct rte_latency_stats {
	float min_latency; /**< Minimum latency in nanoseconds */
	float avg_latency; /**< Average latency in nanoseconds */
	float max_latency; /**< Maximum latency in nanoseconds */
	float jitter; /**< Latency variation */
	rte_spinlock_t lock; /**< Latency calculation lock */
};

static struct rte_latency_stats *glob_stats;

struct rxtx_cbs {
	const struct rte_eth_rxtx_callback *cb;
};

static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct rxtx_cbs tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];

struct latency_stats_nameoff {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct latency_stats_nameoff lat_stats_strings[] = {
	{"min_latency_ns", offsetof(struct rte_latency_stats, min_latency)},
	{"avg_latency_ns", offsetof(struct rte_latency_stats, avg_latency)},
	{"max_latency_ns", offsetof(struct rte_latency_stats, max_latency)},
	{"jitter_ns", offsetof(struct rte_latency_stats, jitter)},
};

#define NUM_LATENCY_STATS (sizeof(lat_stats_strings) / \
			sizeof(lat_stats_strings[0]))

int32_t
rte_latencystats_update(void)
{
	unsigned int i;
	float *stats_ptr = NULL;
	uint64_t values[NUM_LATENCY_STATS] = {0};
	int ret;

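	/* Stats are accumulated in TSC cycles; convert to nanoseconds here. */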
	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i] = (uint64_t)floor((*stats_ptr)/
				latencystat_cycles_per_ns());
	}

	ret = rte_metrics_update_values(RTE_METRICS_GLOBAL,
					latency_stats_index,
					values, NUM_LATENCY_STATS);
	if (ret < 0)
		LATENCY_STATS_LOG(INFO, "Failed to push the stats");

	return ret;
}

static void
rte_latencystats_fill_values(struct rte_metric_value *values)
{
	unsigned int i;
	float *stats_ptr = NULL;

	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i].key = i;
		values[i].value = (uint64_t)floor((*stats_ptr)/
				latencystat_cycles_per_ns());
	}
}

static uint16_t
add_time_stamps(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_cb __rte_unused)
{
	unsigned int i;
	uint64_t diff_tsc, now;

	/*
	 * For every sample interval,
	 * a time stamp is marked on one received packet.
	 */
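	/*
	 * Illustrative (hypothetical numbers): with app_samp_intvl = 1000000
	 * passed to rte_latencystats_init(), samp_intvl is roughly 1 ms worth
	 * of TSC cycles, so about one packet per millisecond gets stamped.
	 */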
	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		diff_tsc = now - prev_tsc;
		timer_tsc += diff_tsc;

		if ((pkts[i]->ol_flags & timestamp_dynflag) == 0
				&& (timer_tsc >= samp_intvl)) {
			*timestamp_dynfield(pkts[i]) = now;
			pkts[i]->ol_flags |= timestamp_dynflag;
			timer_tsc = 0;
		}
		prev_tsc = now;
		now = rte_rdtsc();
	}

	return nb_pkts;
}

static uint16_t
calc_latency(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *_ __rte_unused)
{
	unsigned int i;
	uint64_t now;
	float latency;
	static float prev_latency;
	/*
	 * Alpha represents the degree of weighting decrease in the EWMA,
	 * a constant smoothing factor between 0 and 1. The value
	 * is used below for measuring the average latency.
	 */
	const float alpha = 0.2f;

	now = rte_rdtsc();

	rte_spinlock_lock(&glob_stats->lock);
	for (i = 0; i < nb_pkts; i++) {
		if (!(pkts[i]->ol_flags & timestamp_dynflag))
			continue;

		latency = now - *timestamp_dynfield(pkts[i]);

		/*
		 * The jitter is calculated as the statistical mean of
		 * inter-packet delay variation. The "jitter estimate" is
		 * computed by taking the absolute values of the ipdv
		 * sequence and applying an exponential filter with
		 * parameter 1/16 to generate the estimate, i.e.
		 * J = J + (|D(i-1,i)| - J)/16, where J is the jitter and
		 * D(i-1,i) is the difference in latency of two consecutive
		 * packets i-1 and i.
		 * Reference: RFC 5481 sec. 4.1, RFC 3393 sec. 4.5, RFC 1889.
		 */
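		/*
		 * Worked example with illustrative numbers: a running
		 * estimate J = 100 ns and |D(i-1,i)| = 260 ns update to
		 * J = 100 + (260 - 100)/16 = 110 ns.
		 */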
		glob_stats->jitter += (fabsf(prev_latency - latency)
			- glob_stats->jitter)/16;
		if (glob_stats->min_latency == 0)
			glob_stats->min_latency = latency;
		else if (latency < glob_stats->min_latency)
			glob_stats->min_latency = latency;
		else if (latency > glob_stats->max_latency)
			glob_stats->max_latency = latency;
		/*
		 * The average latency is measured using an exponentially
		 * weighted moving average (EWMA):
		 * https://en.wikipedia.org/wiki/Moving_average
		 */
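		/*
		 * Worked example with illustrative numbers: alpha = 0.2,
		 * a current average of 1000 ns and a new sample of 1500 ns
		 * give 1000 + 0.2 * (1500 - 1000) = 1100 ns.
		 */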
		glob_stats->avg_latency +=
			alpha * (latency - glob_stats->avg_latency);
		prev_latency = latency;
	}
	rte_spinlock_unlock(&glob_stats->lock);

	return nb_pkts;
}

int
rte_latencystats_init(uint64_t app_samp_intvl,
		rte_latency_stats_flow_type_fn user_cb)
{
	unsigned int i;
	uint16_t pid;
	uint16_t qid;
	struct rxtx_cbs *cbs = NULL;
	const char *ptr_strings[NUM_LATENCY_STATS] = {0};
	const struct rte_memzone *mz = NULL;
	const unsigned int flags = 0;
	int ret;

	if (rte_memzone_lookup(MZ_RTE_LATENCY_STATS))
		return -EEXIST;

	/* Allocate stats in shared memory for multi-process support */
	mz = rte_memzone_reserve(MZ_RTE_LATENCY_STATS, sizeof(*glob_stats),
					rte_socket_id(), flags);
	if (mz == NULL) {
		LATENCY_STATS_LOG(ERR, "Cannot reserve memory: %s:%d",
			__func__, __LINE__);
		return -ENOMEM;
	}

	glob_stats = mz->addr;
	rte_spinlock_init(&glob_stats->lock);
	samp_intvl = app_samp_intvl * latencystat_cycles_per_ns();
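	/* samp_intvl is held in TSC cycles (app_samp_intvl is in ns). */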

	/* Register latency stats with stats library */
	for (i = 0; i < NUM_LATENCY_STATS; i++)
		ptr_strings[i] = lat_stats_strings[i].name;

	latency_stats_index = rte_metrics_reg_names(ptr_strings,
						NUM_LATENCY_STATS);
	if (latency_stats_index < 0) {
		LATENCY_STATS_LOG(DEBUG,
			"Failed to register latency stats names");
		return -1;
	}

	/* Register mbuf field and flag for Rx timestamp */
	ret = rte_mbuf_dyn_rx_timestamp_register(&timestamp_dynfield_offset,
			&timestamp_dynflag);
	if (ret != 0) {
		LATENCY_STATS_LOG(ERR,
			"Cannot register mbuf field/flag for timestamp");
		return -rte_errno;
	}

	/* Register Rx/Tx callbacks */
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = rte_eth_dev_info_get(pid, &dev_info);
		if (ret != 0) {
			LATENCY_STATS_LOG(INFO,
				"Failed to get device info for port %u: %s",
				pid, strerror(-ret));
			continue;
		}

		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			cbs->cb = rte_eth_add_first_rx_callback(pid, qid,
					add_time_stamps, user_cb);
			if (cbs->cb == NULL)
				LATENCY_STATS_LOG(INFO,
					"Failed to register Rx callback for pid=%d, qid=%d",
					pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			cbs->cb = rte_eth_add_tx_callback(pid, qid,
					calc_latency, user_cb);
			if (cbs->cb == NULL)
				LATENCY_STATS_LOG(INFO,
					"Failed to register Tx callback for pid=%d, qid=%d",
					pid, qid);
		}
	}
	return 0;
}
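
/*
 * Typical usage (illustrative sketch; the 1 ns sample interval and the
 * error handling below are assumptions, not requirements of this
 * library): initialize once after ports are configured, then poll
 * rte_latencystats_update() from the application's main loop, e.g.:
 *
 *	if (rte_latencystats_init(1, NULL) != 0)
 *		rte_exit(EXIT_FAILURE, "latencystats init failed\n");
 *	...
 *	rte_latencystats_update();
 */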

int
rte_latencystats_uninit(void)
{
	uint16_t pid;
	uint16_t qid;
	int ret = 0;
	struct rxtx_cbs *cbs = NULL;
	const struct rte_memzone *mz = NULL;

	/* Deregister Rx/Tx callbacks */
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = rte_eth_dev_info_get(pid, &dev_info);
		if (ret != 0) {
			LATENCY_STATS_LOG(INFO,
				"Failed to get device info for port %u: %s",
				pid, strerror(-ret));
			continue;
		}

		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			ret = rte_eth_remove_rx_callback(pid, qid, cbs->cb);
			if (ret)
				LATENCY_STATS_LOG(INFO,
					"Failed to remove Rx callback for pid=%d, qid=%d",
					pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			ret = rte_eth_remove_tx_callback(pid, qid, cbs->cb);
			if (ret)
				LATENCY_STATS_LOG(INFO,
					"Failed to remove Tx callback for pid=%d, qid=%d",
					pid, qid);
		}
	}

	/* Free up the memzone */
	mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
	rte_memzone_free(mz);

	return 0;
}

int
rte_latencystats_get_names(struct rte_metric_name *names, uint16_t size)
{
	unsigned int i;

	if (names == NULL || size < NUM_LATENCY_STATS)
		return NUM_LATENCY_STATS;

	for (i = 0; i < NUM_LATENCY_STATS; i++)
		strlcpy(names[i].name, lat_stats_strings[i].name,
			sizeof(names[i].name));

	return NUM_LATENCY_STATS;
}
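
/*
 * Illustrative calling convention (inferred from the size check above):
 * call once with names == NULL to learn the required count, then call
 * again with a large-enough array, e.g.:
 *
 *	int n = rte_latencystats_get_names(NULL, 0);
 *	struct rte_metric_name *names = calloc(n, sizeof(*names));
 *	if (names != NULL)
 *		rte_latencystats_get_names(names, n);
 */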

int
rte_latencystats_get(struct rte_metric_value *values, uint16_t size)
{
	if (size < NUM_LATENCY_STATS || values == NULL)
		return NUM_LATENCY_STATS;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const struct rte_memzone *mz;

		mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
		if (mz == NULL) {
			LATENCY_STATS_LOG(ERR,
				"Latency stats memzone not found");
			return -ENOMEM;
		}
		glob_stats = mz->addr;
	}

	/* Retrieve latency stats */
	rte_latencystats_fill_values(values);

	return NUM_LATENCY_STATS;
}