/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "timvf_evdev.h"

RTE_LOG_REGISTER(otx_logtype_timvf, pmd.event.octeontx.timer, NOTICE);

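/*
 * Shape of the device-info response from the TIM PF: the ring_active words
 * presumably track which rings are currently in use, and clk_freq is the
 * coprocessor clock frequency used below by NSEC2CLK() to convert tick
 * periods from nanoseconds into clock cycles.
 */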
struct __rte_packed timvf_mbox_dev_info {
	uint64_t ring_active[4];
	uint64_t clk_freq;
};

/* Response messages */
enum {
	MBOX_RET_SUCCESS,
	MBOX_RET_INVALID,
	MBOX_RET_INTERNAL_ERR,
};

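/*
 * Query device-wide information from the PF. The TIM device itself is always
 * addressed with vfid 0; the per-ring requests below use the ring id instead.
 */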
static int
timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_mbox_dev_info);

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_GET_DEV_INFO;
	hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

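/*
 * Info callback: report the maximum timeout and tick resolution this ring
 * was sized for, along with a copy of the configuration it was created with.
 */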
static void
timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
		struct rte_event_timer_adapter_info *adptr_info)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	adptr_info->max_tmo_ns = timr->max_tout;
	adptr_info->min_resolution_ns = timr->tck_nsec;
	rte_memcpy(&adptr_info->conf, &adptr->data->conf,
			sizeof(struct rte_event_timer_adapter_conf));
}

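/*
 * Push the CTRL0/1/2 register values to the PF, which programs them into the
 * ring identified by ring_id. Any mailbox failure is reported as -EACCES.
 */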
static int
timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_ctrl_reg);
	int ret;

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_SET_RING_INFO;
	hdr.vfid = ring_id;

	ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
	if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
		return -EACCES;
	return 0;
}

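/*
 * Read back the cycle count latched when the ring was started. The stats
 * path uses it as the reference point when converting the TSC delta into
 * adapter ticks.
 */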
static int
timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_RING_START_CYC_GET;
	hdr.vfid = ring_id;
	*now = 0;
	return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
}

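/*
 * Try to round the bucket count to a power of two so that the target bucket
 * can be found with bkt_and (a mask) rather than bkt_mod (a modulo). Both
 * the next higher and the next lower power of two are considered; a
 * candidate is rejected if it exceeds TIM_MAX_BUCKETS or if the resulting
 * tick period would drop below 1000 ns. Of the surviving candidates the one
 * closest to the requested bucket count wins, and tck_nsec is recomputed as
 * max_tout / (nb_bkts - 1) rounded up to a multiple of 10 ns.
 *
 * Illustrative numbers only: max_tout = 10 ms with a 1 us tick gives
 * nb_bkts = 10000; 16384 buckets would need a 610 ns tick and is rejected
 * by the 1000 ns floor, so the ring is resized to 8192 buckets with a
 * 1220 ns tick.
 *
 * Returns 1 when the parameters were adjusted, 0 when no power-of-two
 * configuration fits and the original values are kept.
 */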
static int
optimize_bucket_parameters(struct timvf_ring *timr)
{
	uint32_t hbkts;
	uint32_t lbkts;
	uint64_t tck_nsec;

	hbkts = rte_align32pow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);

	if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
		hbkts = 0;

	lbkts = rte_align32prevpow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);

	if ((tck_nsec < 1000 || lbkts > TIM_MAX_BUCKETS))
		lbkts = 0;

	if (!hbkts && !lbkts)
		return 0;

	if (!hbkts) {
		timr->nb_bkts = lbkts;
		goto end;
	} else if (!lbkts) {
		timr->nb_bkts = hbkts;
		goto end;
	}

	timr->nb_bkts = (hbkts - timr->nb_bkts) <
		(timr->nb_bkts - lbkts) ? hbkts : lbkts;
end:
	timr->get_target_bkt = bkt_and;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
				(timr->nb_bkts - 1)), 10);
	return 1;
}

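/*
 * Start callback: fetch the device clock frequency, convert the tick period
 * into clock cycles for the selected clock source, program the CTRL0/1/2
 * registers through the PF, point the ring at the bucket array and, when the
 * chunk pool is backed by octeontx_fpavf, at the FPA aura that supplies the
 * chunks. Finally latch the ring start timestamp used by the stats path.
 */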
static int
timvf_ring_start(const struct rte_event_timer_adapter *adptr)
{
	int ret;
	uint8_t use_fpa = 0;
	uint64_t interval;
	uintptr_t pool;
	struct timvf_ctrl_reg rctrl;
	struct timvf_mbox_dev_info dinfo;
	struct timvf_ring *timr = adptr->data->adapter_priv;

	ret = timvf_mbox_dev_info_get(&dinfo);
	if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
		return -EINVAL;

	/* Calculate the interval cycles according to clock source. */
	switch (timr->clk_src) {
	case TIM_CLK_SRC_SCLK:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_GPIO:
		/* GPIO doesn't work on tck_nsec. */
		interval = 0;
		break;
	case TIM_CLK_SRC_GTI:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_PTP:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	default:
		timvf_log_err("Unsupported clock source configured %d",
				timr->clk_src);
		return -EINVAL;
	}

	if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
		use_fpa = 1;

	/* CTRL0 register. */
	rctrl.rctrl0 = interval;

	/* CTRL1 register. */
	rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
		1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
		1ull << 47 /* ENA */ |
		1ull << 44 /* ENA_LDWB */ |
		(timr->nb_bkts - 1);

	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;

	if (use_fpa) {
		pool = (uintptr_t)((struct rte_mempool *)
				timr->chunk_pool)->pool_id;
		ret = octeontx_fpa_bufpool_gaura(pool);
		if (ret < 0) {
			timvf_log_dbg("Unable to get gaura id");
			ret = -ENOMEM;
			goto error;
		}
		timvf_write64((uint64_t)ret,
				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
	} else {
		rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
	}

	timvf_write64((uintptr_t)timr->bkt,
			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_set_chunk_refill(timr, use_fpa);
	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
		ret = -EACCES;
		goto error;
	}

	if (timvf_get_start_cyc(&timr->ring_start_cyc,
				timr->tim_ring_id) < 0) {
		ret = -EACCES;
		goto error;
	}
	timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
	timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
	timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
			" maxtmo %"PRIu64"\n",
			timr->nb_bkts, timr->tck_nsec, interval,
			timr->max_tout);

	return 0;
error:
	rte_free(timr->bkt);
	rte_mempool_free(timr->chunk_pool);
	return ret;
}

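/*
 * Stop callback: clear the ENA bit (bit 47) in CTRL1 and write the ring
 * configuration back, leaving the remaining fields untouched.
 */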
static int
timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	struct timvf_ctrl_reg rctrl = {0};
	rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
	rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
	rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
	rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);

	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
		return -EACCES;
	return 0;
}

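/*
 * Adapter init callback: reserve a TIM ring, size it from the requested tick
 * period and maximum timeout (nb_bkts = max_tmo_ns / timer_tick_ns,
 * nb_chunks = nb_timers / nb_chunk_slots), allocate the bucket array and the
 * chunk mempool, and reset the ring base and NRSPERR interrupt registers.
 */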
static int
timvf_ring_create(struct rte_event_timer_adapter *adptr)
{
	char pool_name[25];
	int ret;
	uint8_t tim_ring_id;
	uint64_t nb_timers;
	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
	struct timvf_ring *timr;
	const char *mempool_ops;
	unsigned int mp_flags = 0;

	tim_ring_id = timvf_get_ring();
	if (tim_ring_id == UINT8_MAX)
		return -ENODEV;

	timr = rte_zmalloc("octeontx_timvf_priv",
			sizeof(struct timvf_ring), 0);
	if (timr == NULL)
		return -ENOMEM;

	adptr->data->adapter_priv = timr;
	/* Check config parameters. */
	if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
			(!rcfg->timer_tick_ns ||
			 rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
		timvf_log_err("Timer tick interval is too low");
		goto cfg_err;
	}

	timr->clk_src = (int) rcfg->clk_src;
	timr->tim_ring_id = tim_ring_id;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
	timr->max_tout = rcfg->max_tmo_ns;
	timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
	timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
	timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
	nb_timers = rcfg->nb_timers;
	timr->get_target_bkt = bkt_mod;

	timr->nb_chunks = nb_timers / nb_chunk_slots;

	/* Try to optimize the bucket parameters. */
	if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
			&& !rte_is_power_of_2(timr->nb_bkts)) {
		if (optimize_bucket_parameters(timr)) {
			timvf_log_info("Optimized configured values");
			timvf_log_dbg("nb_bkts  : %"PRIu32"", timr->nb_bkts);
			timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
		} else
			timvf_log_info("Failed to optimize configured values");
	}

	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
		timvf_log_info("Using single producer mode");
	}

	timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
			(timr->nb_bkts) * sizeof(struct tim_mem_bucket),
			0);
	if (timr->bkt == NULL)
		goto mem_err;

	snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
			timr->tim_ring_id);
	timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
			timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
			mp_flags);

	if (!timr->chunk_pool) {
		rte_free(timr->bkt);
		timvf_log_err("Unable to create chunkpool.");
		return -ENOMEM;
	}

	mempool_ops = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(timr->chunk_pool,
			mempool_ops, NULL);

	if (ret != 0) {
		timvf_log_err("Unable to set chunkpool ops.");
		goto mem_err;
	}

	ret = rte_mempool_populate_default(timr->chunk_pool);
	if (ret < 0) {
		timvf_log_err("Unable to populate chunkpool.");
		goto mem_err;
	}
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);

	return 0;
mem_err:
	/* Safe even when chunk_pool/bkt were never allocated (both NULL). */
	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	rte_free(timr);
	return -ENOMEM;
cfg_err:
	rte_free(timr);
	return -EINVAL;
}

static int
timvf_ring_free(struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;

	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	timvf_release_ring(timr->tim_ring_id);
	rte_free(adptr->data->adapter_priv);
	return 0;
}

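/*
 * Statistics: the armed-timer counters are maintained by the arm/cancel
 * burst routines, and the adapter tick count is derived from the TSC delta
 * since ring start divided by the tick period, using the precomputed
 * reciprocal to avoid a 64-bit division on this path.
 */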
static int
timvf_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;
	uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;

	stats->evtim_exp_count = timr->tim_arm_cnt;
	stats->ev_enq_count = timr->tim_arm_cnt;
	stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
				&timr->fast_div);
	return 0;
}

static int
timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;

	timr->tim_arm_cnt = 0;
	return 0;
}

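/*
 * Base ops; the arm/cancel burst handlers and optional stats callbacks are
 * filled in by timvf_timer_adapter_caps_get() based on the adapter flags.
 */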
static struct rte_event_timer_adapter_ops timvf_ops = {
	.init		= timvf_ring_create,
	.uninit		= timvf_ring_free,
	.start		= timvf_ring_start,
	.stop		= timvf_ring_stop,
	.get_info	= timvf_ring_info_get,
};

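/*
 * Advertise the adapter capabilities and hand back the ops. The arm_burst
 * routine is chosen to match the producer mode (single vs. multi producer)
 * and, together with arm_tmo_tick_burst, gets a stats-counting variant when
 * the application enables adapter statistics.
 */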
int
timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
		uint8_t enable_stats)
{
	RTE_SET_USED(dev);

	if (enable_stats) {
		timvf_ops.stats_get   = timvf_stats_get;
		timvf_ops.stats_reset = timvf_stats_reset;
	}

	if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_sp_stats :
			timvf_timer_arm_burst_sp;
	else
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_mp_stats :
			timvf_timer_arm_burst_mp;

	timvf_ops.arm_tmo_tick_burst = enable_stats ?
		timvf_timer_arm_tmo_brst_stats :
		timvf_timer_arm_tmo_brst;
	timvf_ops.cancel_burst = timvf_timer_cancel_burst;
	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
	*ops = &timvf_ops;
	return 0;
}
399