xref: /dpdk/drivers/event/octeontx/timvf_evdev.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "ssovf_evdev.h"
#include "timvf_evdev.h"

RTE_LOG_REGISTER_SUFFIX(otx_logtype_timvf, timer, NOTICE);

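/*
 * Eventdev that TIM adapters are attached to; set through
 * timvf_set_eventdevice() before any adapter is created.
 */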
static struct rte_eventdev *event_dev;

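/* Device info returned by the TIM_GET_DEV_INFO mailbox request. */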
struct __rte_packed timvf_mbox_dev_info {
	uint64_t ring_active[4];
	uint64_t clk_freq;
};

/* Response messages */
enum {
	MBOX_RET_SUCCESS,
	MBOX_RET_INVALID,
	MBOX_RET_INTERNAL_ERR,
};

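/* Fetch TIM device info (clock frequency, active rings) over the mailbox. */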
static int
timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_mbox_dev_info);

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_GET_DEV_INFO;
	hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

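/* Adapter get_info() callback: report max timeout, resolution and config. */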
static void
timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
		struct rte_event_timer_adapter_info *adptr_info)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;

	adptr_info->max_tmo_ns = timr->max_tout;
	adptr_info->min_resolution_ns = timr->tck_nsec;
	rte_memcpy(&adptr_info->conf, &adptr->data->conf,
			sizeof(struct rte_event_timer_adapter_conf));
}

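/*
 * Program the ring control registers (CTRL0/1/2) for the given ring through
 * the mailbox. Returns -EACCES if the mailbox call fails or is rejected.
 */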
static int
timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_ctrl_reg);
	int ret;

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_SET_RING_INFO;
	hdr.vfid = ring_id;

	ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
	if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
		return -EACCES;
	return 0;
}

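/* Read the cycle count at which the ring was started, via the mailbox. */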
static int
timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_RING_START_CYC_GET;
	hdr.vfid = ring_id;
	*now = 0;
	return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
}

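/*
 * Round the bucket count to a power of two so that the target bucket can be
 * found with a mask (bkt_and) instead of a modulo (bkt_mod). Both the next
 * higher and the next lower power of two are tried; a candidate is dropped
 * when its recomputed tick (max_tout / (bkts - 1), rounded up to a multiple
 * of 10 ns) would fall below 1000 ns or the bucket count would exceed
 * TIM_MAX_BUCKETS. Of the remaining candidates the one closest to the
 * requested bucket count wins. Returns 1 when the parameters were adjusted,
 * 0 when neither candidate is usable.
 */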
static int
optimize_bucket_parameters(struct timvf_ring *timr)
{
	uint32_t hbkts;
	uint32_t lbkts;
	uint64_t tck_nsec;

	hbkts = rte_align32pow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);

	if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
		hbkts = 0;

	lbkts = rte_align32prevpow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);

	if ((tck_nsec < 1000 || lbkts > TIM_MAX_BUCKETS))
		lbkts = 0;

	if (!hbkts && !lbkts)
		return 0;

	if (!hbkts) {
		timr->nb_bkts = lbkts;
		goto end;
	} else if (!lbkts) {
		timr->nb_bkts = hbkts;
		goto end;
	}

	timr->nb_bkts = (hbkts - timr->nb_bkts) <
		(timr->nb_bkts - lbkts) ? hbkts : lbkts;
end:
	timr->get_target_bkt = bkt_and;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
				(timr->nb_bkts - 1)), 10);
	return 1;
}

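/*
 * Adapter start() callback: translate the tick period into coprocessor clock
 * cycles for the configured clock source, program CTRL0/1/2 (tick interval,
 * bucket count, chunk size, enable bits), point the VF at the bucket array
 * and, when the chunk pool is backed by octeontx_fpavf, at the FPA aura.
 * Also caches the ring start cycle and a reciprocal of the tick interval
 * for the stats callback.
 */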
static int
timvf_ring_start(const struct rte_event_timer_adapter *adptr)
{
	int ret;
	uint8_t use_fpa = 0;
	uint64_t interval;
	uintptr_t pool;
	struct timvf_ctrl_reg rctrl;
	struct timvf_mbox_dev_info dinfo;
	struct timvf_ring *timr = adptr->data->adapter_priv;

	ret = timvf_mbox_dev_info_get(&dinfo);
	if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
		return -EINVAL;

	/* Calculate the interval cycles according to clock source. */
	switch (timr->clk_src) {
	case TIM_CLK_SRC_SCLK:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_GPIO:
		/* GPIO doesn't work on tck_nsec. */
		interval = 0;
		break;
	case TIM_CLK_SRC_GTI:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_PTP:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	default:
		timvf_log_err("Unsupported clock source configured %d",
				timr->clk_src);
		return -EINVAL;
	}

	if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
		use_fpa = 1;

	/* CTRL0 register. */
	rctrl.rctrl0 = interval;

	/* CTRL1 register. */
	rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
		1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
		1ull << 47 /* ENA */ |
		1ull << 44 /* ENA_LDWB */ |
		(timr->nb_bkts - 1);

	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;

	if (use_fpa) {
		pool = (uintptr_t)((struct rte_mempool *)
				timr->chunk_pool)->pool_id;
		ret = octeontx_fpa_bufpool_gaura(pool);
		if (ret < 0) {
			timvf_log_dbg("Unable to get gaura id");
			ret = -ENOMEM;
			goto error;
		}
		timvf_write64((uint64_t)ret,
				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
	} else {
		rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
	}

	timvf_write64((uintptr_t)timr->bkt,
			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_set_chunk_refill(timr, use_fpa);
	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
		ret = -EACCES;
		goto error;
	}

	if (timvf_get_start_cyc(&timr->ring_start_cyc,
				timr->tim_ring_id) < 0) {
		ret = -EACCES;
		goto error;
	}
	timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
	timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
	timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
			" maxtmo %"PRIu64"\n",
			timr->nb_bkts, timr->tck_nsec, interval,
			timr->max_tout);

	return 0;
error:
	rte_free(timr->bkt);
	rte_mempool_free(timr->chunk_pool);
	return ret;
}

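/*
 * Adapter stop() callback: read back CTRL0-2 and rewrite them with the ENA
 * bit (bit 47 of CTRL1) cleared to halt the ring.
 */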
static int
timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	struct timvf_ctrl_reg rctrl = {0};

	rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
	rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
	rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
	rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);

	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
		return -EACCES;
	return 0;
}

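/*
 * Adapter init() callback: reserve a TIM ring, validate the adapter
 * configuration, derive tick/bucket parameters, account the requested
 * timers against the events available on the event device, and allocate
 * the bucket array and the chunk mempool backing the timer entries.
 */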
static int
timvf_ring_create(struct rte_event_timer_adapter *adptr)
{
	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
	uint16_t free_idx = UINT16_MAX;
	unsigned int mp_flags = 0;
	struct ssovf_evdev *edev;
	struct timvf_ring *timr;
	const char *mempool_ops;
	uint8_t tim_ring_id;
	char pool_name[25];
	int i, ret;

	tim_ring_id = timvf_get_ring();
	if (tim_ring_id == UINT8_MAX)
		return -ENODEV;

	edev = ssovf_pmd_priv(event_dev);
	timr = rte_zmalloc("octeontx_timvf_priv",
			sizeof(struct timvf_ring), 0);
	if (timr == NULL)
		return -ENOMEM;

	adptr->data->adapter_priv = timr;
	/* Check config parameters. */
	if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
			(!rcfg->timer_tick_ns ||
			 rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
		timvf_log_err("Timer tick interval is too low");
		goto cfg_err;
	}

	timr->clk_src = (int) rcfg->clk_src;
	timr->tim_ring_id = tim_ring_id;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
	timr->max_tout = rcfg->max_tmo_ns;
	timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
	timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
	timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
	timr->nb_timers = rcfg->nb_timers;
	timr->get_target_bkt = bkt_mod;

	if (edev->available_events < timr->nb_timers) {
		timvf_log_err(
			"Max available events %"PRIu32" requested timer events %"PRIu64"",
			edev->available_events, timr->nb_timers);
		goto mem_err;
	}

	for (i = 0; i < edev->tim_ring_cnt; i++) {
		if (edev->tim_ring_ids[i] == UINT16_MAX)
			free_idx = i;
	}

	if (free_idx == UINT16_MAX) {
		void *old_ptr;

		edev->tim_ring_cnt++;
		old_ptr = edev->tim_ring_ids;
		edev->tim_ring_ids =
			rte_realloc(edev->tim_ring_ids,
				    sizeof(uint16_t) * edev->tim_ring_cnt, 0);
		if (edev->tim_ring_ids == NULL) {
			edev->tim_ring_ids = old_ptr;
			edev->tim_ring_cnt--;
			goto mem_err;
		}

		/* Record the ring id in the newly grown slot. */
		edev->tim_ring_ids[edev->tim_ring_cnt - 1] = tim_ring_id;
		edev->available_events -= timr->nb_timers;
	} else {
		edev->tim_ring_ids[free_idx] = tim_ring_id;
		edev->available_events -= timr->nb_timers;
	}

	timr->nb_chunks = timr->nb_timers / nb_chunk_slots;

	/* Try to optimize the bucket parameters. */
	if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
			&& !rte_is_power_of_2(timr->nb_bkts)) {
		if (optimize_bucket_parameters(timr)) {
			timvf_log_info("Optimized configured values");
			timvf_log_dbg("nb_bkts  : %"PRIu32"", timr->nb_bkts);
			timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
		} else
			timvf_log_info("Failed to optimize configured values");
	}

	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
		mp_flags = RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET;
		timvf_log_info("Using single producer mode");
	}

	timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
			(timr->nb_bkts) * sizeof(struct tim_mem_bucket),
			0);
	if (timr->bkt == NULL)
		goto mem_err;

	snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
			timr->tim_ring_id);
	timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
			timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
			mp_flags);

	if (!timr->chunk_pool) {
		timvf_log_err("Unable to create chunkpool.");
		goto mem_err;
	}

	mempool_ops = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(timr->chunk_pool,
			mempool_ops, NULL);

	if (ret != 0) {
		timvf_log_err("Unable to set chunkpool ops.");
		goto mem_err;
	}

	ret = rte_mempool_populate_default(timr->chunk_pool);
	if (ret < 0) {
		timvf_log_err("Unable to populate chunkpool.");
		goto mem_err;
	}
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);

	return 0;
mem_err:
	/* rte_mempool_free() and rte_free() are no-ops on NULL pointers. */
	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	rte_free(timr);
	return -ENOMEM;
cfg_err:
	rte_free(timr);
	return -EINVAL;
}

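/*
 * Adapter uninit() callback: return the reserved events to the event device,
 * release the ring slot and free the bucket array, chunk pool and private
 * data.
 */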
static int
timvf_ring_free(struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	struct ssovf_evdev *edev;
	int i;

	edev = ssovf_pmd_priv(event_dev);
	for (i = 0; i < edev->tim_ring_cnt; i++) {
		if (edev->tim_ring_ids[i] == timr->tim_ring_id) {
			edev->available_events += timr->nb_timers;
			edev->tim_ring_ids[i] = UINT16_MAX;
			break;
		}
	}

	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	timvf_release_ring(timr->tim_ring_id);
	rte_free(adptr->data->adapter_priv);
	return 0;
}

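/*
 * Adapter stats: the tick count is computed as the cycle delta since ring
 * start divided by the tick interval, using the precomputed reciprocal.
 */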
static int
timvf_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;
	uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;

	stats->evtim_exp_count = timr->tim_arm_cnt;
	stats->ev_enq_count = timr->tim_arm_cnt;
	stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
				&timr->fast_div);
	return 0;
}

static int
timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;

	timr->tim_arm_cnt = 0;
	return 0;
}

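/*
 * Adapter ops template; the arm/cancel and stats callbacks are filled in by
 * timvf_timer_adapter_caps_get() based on the adapter flags.
 */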
static struct event_timer_adapter_ops timvf_ops = {
	.init = timvf_ring_create,
	.uninit = timvf_ring_free,
	.start = timvf_ring_start,
	.stop = timvf_ring_stop,
	.get_info = timvf_ring_info_get,
};

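/*
 * Event timer adapter capability query: pick single- or multi-producer arm
 * routines (with or without stats accounting) based on the adapter flags
 * and report that expired timers are delivered through an internal port.
 */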
int
timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
			     uint32_t *caps,
			     const struct event_timer_adapter_ops **ops,
			     uint8_t enable_stats)
{
	RTE_SET_USED(dev);

	if (enable_stats) {
		timvf_ops.stats_get   = timvf_stats_get;
		timvf_ops.stats_reset = timvf_stats_reset;
	}

	if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_sp_stats :
			timvf_timer_arm_burst_sp;
	else
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_mp_stats :
			timvf_timer_arm_burst_mp;

	timvf_ops.arm_tmo_tick_burst = enable_stats ?
		timvf_timer_arm_tmo_brst_stats :
		timvf_timer_arm_tmo_brst;
	timvf_ops.cancel_burst = timvf_timer_cancel_burst;
	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
	*ops = &timvf_ops;
	return 0;
}

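/* Record the eventdev that subsequent TIM adapters will be associated with. */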
void
timvf_set_eventdevice(struct rte_eventdev *dev)
{
	event_dev = dev;
}