xref: /dpdk/drivers/event/octeontx/timvf_evdev.c (revision e977e4199a8d6bab72cf94e154adcad1fb964e5e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "timvf_evdev.h"

int otx_logtype_timvf;

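/* Register the octeontx timer (TIM) PMD log type at constructor time. */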
RTE_INIT(otx_timvf_init_log);
static void
otx_timvf_init_log(void)
{
	otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
	if (otx_logtype_timvf >= 0)
		rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
}

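/*
 * Device information returned by the TIM PF over the mailbox: which rings
 * are currently active and the clock frequency used to convert the tick
 * period from nanoseconds to clock cycles.
 */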
struct __rte_packed timvf_mbox_dev_info {
	uint64_t ring_active[4];
	uint64_t clk_freq;
};

/* Response messages */
enum {
	MBOX_RET_SUCCESS,
	MBOX_RET_INVALID,
	MBOX_RET_INTERNAL_ERR,
};

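/* Query the TIM PF for device level info; vfid 0 addresses the TIM device. */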
static int
timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_mbox_dev_info);

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_GET_DEV_INFO;
	hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

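/* Event timer adapter get_info callback: report ring limits and config. */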
static void
timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
		struct rte_event_timer_adapter_info *adptr_info)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	adptr_info->max_tmo_ns = timr->max_tout;
	adptr_info->min_resolution_ns = timr->tck_nsec;
	rte_memcpy(&adptr_info->conf, &adptr->data->conf,
			sizeof(struct rte_event_timer_adapter_conf));
}

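/* Program the ring control registers through the PF mailbox. */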
static int
timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_ctrl_reg);
	int ret;

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_SET_RING_INFO;
	hdr.vfid = ring_id;

	ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
	if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
		return -EACCES;
	return 0;
}

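/* Read back the cycle count at which the PF started this ring. */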
static int
timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_RING_START_CYC_GET;
	hdr.vfid = ring_id;
	*now = 0;
	return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
}

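/*
 * Try to round the bucket count to the nearest power of two so that the
 * target bucket can be found with a mask (bkt_and) instead of a modulo.
 * The tick period is stretched accordingly; a candidate is rejected if the
 * resulting tick would fall below 1000 ns or exceed TIM_MAX_BUCKETS.
 */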
static int
optimize_bucket_parameters(struct timvf_ring *timr)
{
	uint32_t hbkts;
	uint32_t lbkts;
	uint64_t tck_nsec;

	hbkts = rte_align32pow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);

	if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
		hbkts = 0;

	lbkts = rte_align32prevpow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);

	if ((tck_nsec < 1000 || lbkts > TIM_MAX_BUCKETS))
		lbkts = 0;

	if (!hbkts && !lbkts)
		return 0;

	if (!hbkts) {
		timr->nb_bkts = lbkts;
		goto end;
	} else if (!lbkts) {
		timr->nb_bkts = hbkts;
		goto end;
	}

	timr->nb_bkts = (hbkts - timr->nb_bkts) <
		(timr->nb_bkts - lbkts) ? hbkts : lbkts;
end:
	timr->get_target_bkt = bkt_and;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
				(timr->nb_bkts - 1)), 10);
	return 1;
}

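/*
 * Adapter start callback: convert the tick period into clock cycles for the
 * configured clock source, program the ring control registers (tick
 * interval, bucket count, chunk size, buffer handling), and cache the ring
 * start cycle plus the tick period in TSC cycles for the fast path.
 */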
static int
timvf_ring_start(const struct rte_event_timer_adapter *adptr)
{
	int ret;
	uint8_t use_fpa = 0;
	uint64_t interval;
	uintptr_t pool;
	struct timvf_ctrl_reg rctrl;
	struct timvf_mbox_dev_info dinfo;
	struct timvf_ring *timr = adptr->data->adapter_priv;

	ret = timvf_mbox_dev_info_get(&dinfo);
	if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
		return -EINVAL;

	/* Calculate the interval cycles according to clock source. */
	switch (timr->clk_src) {
	case TIM_CLK_SRC_SCLK:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_GPIO:
		/* GPIO doesn't work on tck_nsec. */
		interval = 0;
		break;
	case TIM_CLK_SRC_GTI:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_PTP:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	default:
		timvf_log_err("Unsupported clock source configured %d",
				timr->clk_src);
		return -EINVAL;
	}

	if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
		use_fpa = 1;

	/* CTRL0 register. */
	rctrl.rctrl0 = interval;

	/* CTRL1 register. */
	rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
		1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
		1ull << 47 /* ENA */ |
		1ull << 44 /* ENA_LDWB */ |
		(timr->nb_bkts - 1);

	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;

	if (use_fpa) {
		pool = (uintptr_t)((struct rte_mempool *)
				timr->chunk_pool)->pool_id;
		ret = octeontx_fpa_bufpool_gpool(pool);
		if (ret < 0) {
			timvf_log_dbg("Unable to get gaura id");
			ret = -ENOMEM;
			goto error;
		}
		timvf_write64((uint64_t)ret,
				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
	} else {
		rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
	}

	timvf_write64((uintptr_t)timr->bkt,
			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_set_chunk_refill(timr, use_fpa);
	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
		ret = -EACCES;
		goto error;
	}

	if (timvf_get_start_cyc(&timr->ring_start_cyc,
				timr->tim_ring_id) < 0) {
		ret = -EACCES;
		goto error;
	}
	timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
	timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
	timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
			" maxtmo %"PRIu64"\n",
			timr->nb_bkts, timr->tck_nsec, interval,
			timr->max_tout);

	return 0;
error:
	rte_free(timr->bkt);
	rte_mempool_free(timr->chunk_pool);
	return ret;
}

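/*
 * Adapter stop callback: clear the ring ENA bit while preserving the rest
 * of the control register state.
 */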
static int
timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	struct timvf_ctrl_reg rctrl = {0};
	rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
	rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
	rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
	rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);

	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
		return -EACCES;
	return 0;
}

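/*
 * Adapter init callback: validate the requested resolution, size the bucket
 * array and chunk mempool from the adapter config, and initialize the ring
 * base and TIM_VF_NRSPERR interrupt registers.
 */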
static int
timvf_ring_create(struct rte_event_timer_adapter *adptr)
{
	char pool_name[25];
	int ret;
	uint64_t nb_timers;
	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
	struct timvf_ring *timr;
	struct timvf_info tinfo;
	const char *mempool_ops;
	unsigned int mp_flags = 0;

	if (timvf_info(&tinfo) < 0)
		return -ENODEV;

	if (adptr->data->id >= tinfo.total_timvfs)
		return -ENODEV;

	timr = rte_zmalloc("octeontx_timvf_priv",
			sizeof(struct timvf_ring), 0);
	if (timr == NULL)
		return -ENOMEM;

	adptr->data->adapter_priv = timr;
	/* Check config parameters. */
	if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
			(!rcfg->timer_tick_ns ||
			 rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
		timvf_log_err("Timer tick interval is too low");
		goto cfg_err;
	}

	timr->clk_src = (int) rcfg->clk_src;
	timr->tim_ring_id = adptr->data->id;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
	timr->max_tout = rcfg->max_tmo_ns;
	timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
	timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
	timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
	nb_timers = rcfg->nb_timers;
	timr->get_target_bkt = bkt_mod;

	timr->nb_chunks = nb_timers / nb_chunk_slots;

	/* Try to optimize the bucket parameters. */
	if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
			&& !rte_is_power_of_2(timr->nb_bkts)) {
		if (optimize_bucket_parameters(timr)) {
			timvf_log_info("Optimized configured values");
			timvf_log_dbg("nb_bkts  : %"PRIu32"", timr->nb_bkts);
			timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
		} else
			timvf_log_info("Failed to optimize configured values");
	}

	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
		timvf_log_info("Using single producer mode");
	}

	timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
			(timr->nb_bkts) * sizeof(struct tim_mem_bucket),
			0);
	if (timr->bkt == NULL)
		goto mem_err;

	snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
			timr->tim_ring_id);
	timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
			timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
			mp_flags);

	if (!timr->chunk_pool) {
		timvf_log_err("Unable to create chunkpool.");
		goto mem_err;
	}

	mempool_ops = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(timr->chunk_pool,
			mempool_ops, NULL);

	if (ret != 0) {
		timvf_log_err("Unable to set chunkpool ops.");
		goto mem_err;
	}

	ret = rte_mempool_populate_default(timr->chunk_pool);
	if (ret < 0) {
		timvf_log_err("Unable to populate chunkpool.");
		goto mem_err;
	}
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);

	return 0;
mem_err:
	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	rte_free(timr);
	return -ENOMEM;
cfg_err:
	rte_free(timr);
	return -EINVAL;
}

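/* Adapter uninit callback: release the chunk pool, bucket array and ring. */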
static int
timvf_ring_free(struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	rte_free(adptr->data->adapter_priv);
	return 0;
}

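/*
 * Derive the adapter tick count from the cycles elapsed since ring start,
 * using the precomputed reciprocal to avoid a 64-bit division.
 */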
static int
timvf_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;
	uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;

	stats->evtim_exp_count = timr->tim_arm_cnt;
	stats->ev_enq_count = timr->tim_arm_cnt;
	stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
				&timr->fast_div);
	return 0;
}

static int
timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;

	timr->tim_arm_cnt = 0;
	return 0;
}

static struct rte_event_timer_adapter_ops timvf_ops = {
	.init		= timvf_ring_create,
	.uninit		= timvf_ring_free,
	.start		= timvf_ring_start,
	.stop		= timvf_ring_stop,
	.get_info	= timvf_ring_info_get,
};

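/*
 * Advertise the internal port capability and hand back the ops structure,
 * selecting single- or multi-producer arm handlers (with or without stats)
 * based on the adapter flags.
 */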
int
timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
		uint8_t enable_stats)
{
	RTE_SET_USED(dev);

	if (enable_stats) {
		timvf_ops.stats_get   = timvf_stats_get;
		timvf_ops.stats_reset = timvf_stats_reset;
	}

	if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_sp_stats :
			timvf_timer_arm_burst_sp;
	else
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_mp_stats :
			timvf_timer_arm_burst_mp;

	timvf_ops.arm_tmo_tick_burst = enable_stats ?
		timvf_timer_arm_tmo_brst_stats :
		timvf_timer_arm_tmo_brst;
	timvf_ops.cancel_burst = timvf_timer_cancel_burst;
	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
	*ops = &timvf_ops;
	return 0;
}