/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "timvf_evdev.h"

int otx_logtype_timvf;

RTE_INIT(otx_timvf_init_log)
{
	otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
	if (otx_logtype_timvf >= 0)
		rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
}

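/*
 * Device information returned by the TIM mailbox: per-ring activity state
 * and the device clock frequency used below to convert the tick period
 * from nanoseconds to clock cycles.
 */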
struct __rte_packed timvf_mbox_dev_info {
	uint64_t ring_active[4];
	uint64_t clk_freq;
};

/* Response messages */
enum {
	MBOX_RET_SUCCESS,
	MBOX_RET_INVALID,
	MBOX_RET_INTERNAL_ERR,
};

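/* Query the TIM coprocessor over the octeontx mailbox for device-wide info. */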
static int
timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_mbox_dev_info);

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_GET_DEV_INFO;
	hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

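/*
 * Fill the adapter info reported to the application: maximum timeout, tick
 * resolution and a copy of the configuration the adapter was created with.
 */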
static void
timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
		struct rte_event_timer_adapter_info *adptr_info)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;

	adptr_info->max_tmo_ns = timr->max_tout;
	adptr_info->min_resolution_ns = timr->tck_nsec;
	rte_memcpy(&adptr_info->conf, &adptr->data->conf,
			sizeof(struct rte_event_timer_adapter_conf));
}

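/*
 * Program the ring control registers via the TIM_SET_RING_INFO mailbox
 * message; the mailbox response code decides whether the request was
 * accepted.
 */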
static int
timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_ctrl_reg);
	int ret;

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_SET_RING_INFO;
	hdr.vfid = ring_id;

	ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
	if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
		return -EACCES;
	return 0;
}

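/*
 * Fetch the cycle count at which the ring was started; it is used later as
 * the time base for tick accounting (see timvf_stats_get()).
 */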
static int
timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_RING_START_CYC_GET;
	hdr.vfid = ring_id;
	*now = 0;
	return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
}

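/*
 * Round the requested bucket count to a power of two so the target bucket
 * can be selected with a bitwise AND instead of a modulo. Both the next
 * higher (hbkts) and next lower (lbkts) powers of two are tried; a
 * candidate is discarded if it pushes the tick below 1000 ns or exceeds
 * TIM_MAX_BUCKETS, otherwise the one closest to the original request wins.
 * Returns 1 when the parameters were adjusted, 0 when no candidate fits.
 */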
static int
optimize_bucket_parameters(struct timvf_ring *timr)
{
	uint32_t hbkts;
	uint32_t lbkts;
	uint64_t tck_nsec;

	hbkts = rte_align32pow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);

	if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
		hbkts = 0;

	lbkts = rte_align32prevpow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);

	if ((tck_nsec < 1000 || lbkts > TIM_MAX_BUCKETS))
		lbkts = 0;

	if (!hbkts && !lbkts)
		return 0;

	if (!hbkts) {
		timr->nb_bkts = lbkts;
		goto end;
	} else if (!lbkts) {
		timr->nb_bkts = hbkts;
		goto end;
	}

	timr->nb_bkts = (hbkts - timr->nb_bkts) <
		(timr->nb_bkts - lbkts) ? hbkts : lbkts;
end:
	timr->get_target_bkt = bkt_and;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
				(timr->nb_bkts - 1)), 10);
	return 1;
}

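/*
 * Start the ring: convert the tick period into device clock cycles, build
 * the CTRL0-2 register values, point the hardware at the bucket array and
 * the chunk pool (FPA aura when the octeontx_fpavf mempool ops are in use,
 * software chunk refill otherwise) and record the ring start cycle.
 */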
static int
timvf_ring_start(const struct rte_event_timer_adapter *adptr)
{
	int ret;
	uint8_t use_fpa = 0;
	uint64_t interval;
	uintptr_t pool;
	struct timvf_ctrl_reg rctrl;
	struct timvf_mbox_dev_info dinfo;
	struct timvf_ring *timr = adptr->data->adapter_priv;

	ret = timvf_mbox_dev_info_get(&dinfo);
	if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
		return -EINVAL;

	/* Calculate the interval cycles according to clock source. */
	switch (timr->clk_src) {
	case TIM_CLK_SRC_SCLK:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_GPIO:
		/* GPIO doesn't work on tck_nsec. */
		interval = 0;
		break;
	case TIM_CLK_SRC_GTI:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_PTP:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	default:
		timvf_log_err("Unsupported clock source configured %d",
				timr->clk_src);
		return -EINVAL;
	}

	if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
		use_fpa = 1;

	/* CTRL0 register. */
	rctrl.rctrl0 = interval;

	/* CTRL1 register. */
	rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
		1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
		1ull << 47 /* ENA */ |
		1ull << 44 /* ENA_LDWB */ |
		(timr->nb_bkts - 1);

	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;

	if (use_fpa) {
		pool = (uintptr_t)((struct rte_mempool *)
				timr->chunk_pool)->pool_id;
		ret = octeontx_fpa_bufpool_gaura(pool);
		if (ret < 0) {
			timvf_log_dbg("Unable to get gaura id");
			ret = -ENOMEM;
			goto error;
		}
		timvf_write64((uint64_t)ret,
				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
	} else {
		rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
	}

	timvf_write64((uintptr_t)timr->bkt,
			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_set_chunk_refill(timr, use_fpa);
	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
		ret = -EACCES;
		goto error;
	}

	if (timvf_get_start_cyc(&timr->ring_start_cyc,
				timr->tim_ring_id) < 0) {
		ret = -EACCES;
		goto error;
	}
	timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
	timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
	timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
			" maxtmo %"PRIu64"\n",
			timr->nb_bkts, timr->tck_nsec, interval,
			timr->max_tout);

	return 0;
error:
	rte_free(timr->bkt);
	rte_mempool_free(timr->chunk_pool);
	return ret;
}

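/*
 * Stop the ring by clearing the ENA bit in CTL1 while preserving the rest
 * of the control register state.
 */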
static int
timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	struct timvf_ctrl_reg rctrl = {0};

	rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
	rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
	rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
	rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);

	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
		return -EACCES;
	return 0;
}

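/*
 * Adapter init: reserve a TIM ring, derive the bucket count from the
 * maximum timeout and tick period (optionally rounding it to a power of
 * two), allocate the bucket array and the chunk mempool and reset the
 * ring base and non-response error interrupt registers.
 */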
static int
timvf_ring_create(struct rte_event_timer_adapter *adptr)
{
	char pool_name[25];
	int ret;
	uint8_t tim_ring_id;
	uint64_t nb_timers;
	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
	struct timvf_ring *timr;
	const char *mempool_ops;
	unsigned int mp_flags = 0;

	tim_ring_id = timvf_get_ring();
	if (tim_ring_id == UINT8_MAX)
		return -ENODEV;

	timr = rte_zmalloc("octeontx_timvf_priv",
			sizeof(struct timvf_ring), 0);
	if (timr == NULL)
		return -ENOMEM;

	adptr->data->adapter_priv = timr;
	/* Check config parameters. */
	if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
			(!rcfg->timer_tick_ns ||
			 rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
		timvf_log_err("Timer tick interval too low");
		goto cfg_err;
	}

	timr->clk_src = (int) rcfg->clk_src;
	timr->tim_ring_id = tim_ring_id;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
	timr->max_tout = rcfg->max_tmo_ns;
	timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
	timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
	timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
	nb_timers = rcfg->nb_timers;
	timr->get_target_bkt = bkt_mod;

	timr->nb_chunks = nb_timers / nb_chunk_slots;

	/* Try to optimize the bucket parameters. */
	if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
			&& !rte_is_power_of_2(timr->nb_bkts)) {
		if (optimize_bucket_parameters(timr)) {
			timvf_log_info("Optimized configured values");
			timvf_log_dbg("nb_bkts  : %"PRIu32"", timr->nb_bkts);
			timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
		} else
			timvf_log_info("Failed to optimize configured values");
	}

	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
		timvf_log_info("Using single producer mode");
	}

	timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
			(timr->nb_bkts) * sizeof(struct tim_mem_bucket),
			0);
	if (timr->bkt == NULL)
		goto mem_err;

	snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
			timr->tim_ring_id);
	timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
			timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
			mp_flags);

	if (!timr->chunk_pool) {
		rte_free(timr->bkt);
		timvf_log_err("Unable to create chunkpool.");
		return -ENOMEM;
	}

	mempool_ops = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(timr->chunk_pool,
			mempool_ops, NULL);

	if (ret != 0) {
		timvf_log_err("Unable to set chunkpool ops.");
		goto mem_err;
	}

	ret = rte_mempool_populate_default(timr->chunk_pool);
	if (ret < 0) {
		timvf_log_err("Unable to populate chunkpool.");
		goto mem_err;
	}
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);

	return 0;
mem_err:
	rte_free(timr);
	return -ENOMEM;
cfg_err:
	rte_free(timr);
	return -EINVAL;
}

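/* Adapter uninit: release the chunk pool, bucket array, TIM ring and
 * private data.
 */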
static int
timvf_ring_free(struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;

	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	timvf_release_ring(timr->tim_ring_id);
	rte_free(adptr->data->adapter_priv);
	return 0;
}

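/*
 * Statistics are approximate: expiry and enqueue counts both report the
 * number of armed timers, and the tick count is derived from the TSC delta
 * since ring start using a precomputed reciprocal to avoid a 64-bit divide.
 */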
static int
timvf_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;
	uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;

	stats->evtim_exp_count = timr->tim_arm_cnt;
	stats->ev_enq_count = timr->tim_arm_cnt;
	stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
				&timr->fast_div);
	return 0;
}

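/* Reset the arm counter that backs the expiry and enqueue statistics. */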
static int
timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;

	timr->tim_arm_cnt = 0;
	return 0;
}

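/*
 * Ops table handed to the event timer adapter layer; the arm/cancel burst
 * and stats callbacks are filled in by timvf_timer_adapter_caps_get() based
 * on the adapter flags.
 */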
static struct rte_event_timer_adapter_ops timvf_ops = {
	.init		= timvf_ring_create,
	.uninit		= timvf_ring_free,
	.start		= timvf_ring_start,
	.stop		= timvf_ring_stop,
	.get_info	= timvf_ring_info_get,
};

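/*
 * Advertise the internal port capability and select the burst handlers:
 * single- vs multi-producer arm paths, with or without statistics, as
 * requested by the application.
 */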
int
timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
		uint8_t enable_stats)
{
	RTE_SET_USED(dev);

	if (enable_stats) {
		timvf_ops.stats_get   = timvf_stats_get;
		timvf_ops.stats_reset = timvf_stats_reset;
	}

	if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_sp_stats :
			timvf_timer_arm_burst_sp;
	else
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_mp_stats :
			timvf_timer_arm_burst_mp;

	timvf_ops.arm_tmo_tick_burst = enable_stats ?
		timvf_timer_arm_tmo_brst_stats :
		timvf_timer_arm_tmo_brst;
	timvf_ops.cancel_burst = timvf_timer_cancel_burst;
	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
	*ops = &timvf_ops;
	return 0;
}