/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "timvf_evdev.h"

int otx_logtype_timvf;

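/* Register the driver's log type at constructor time; default level NOTICE. */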
RTE_INIT(otx_timvf_init_log);
static void
otx_timvf_init_log(void)
{
	otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
	if (otx_logtype_timvf >= 0)
		rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
}

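/*
 * Device info returned by the TIM PF over the mailbox: ring_active[] is
 * presumably a per-ring active bitmap, and clk_freq is the coprocessor
 * clock frequency used below for ns-to-cycle conversion.
 */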
struct __rte_packed timvf_mbox_dev_info {
	uint64_t ring_active[4];
	uint64_t clk_freq;
};

/* Response messages */
enum {
	MBOX_RET_SUCCESS,
	MBOX_RET_INVALID,
	MBOX_RET_INTERNAL_ERR,
};

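/*
 * Query device info from the PF; the mailbox layer returns the response
 * length on success, so callers compare it against sizeof(dev_info).
 */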
static int
timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_mbox_dev_info);

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_GET_DEV_INFO;
	hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

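/* Report adapter limits and the configuration it was created with. */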
static void
timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
		struct rte_event_timer_adapter_info *adptr_info)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;

	adptr_info->max_tmo_ns = timr->max_tout;
	adptr_info->min_resolution_ns = timr->tck_nsec;
	rte_memcpy(&adptr_info->conf, &adptr->data->conf,
			sizeof(struct rte_event_timer_adapter_conf));
}

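/* Push the ring control registers to the PF over the mailbox. */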
static int
timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_ctrl_reg);
	int ret;

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_SET_RING_INFO;
	hdr.vfid = ring_id;

	ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
	if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
		return -EACCES;
	return 0;
}

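/* Fetch the cycle count at which the ring was started. */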
static int
timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_RING_START_CYC_GET;
	hdr.vfid = ring_id;
	*now = 0;
	return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
}

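/*
 * Start the ring: convert the tick period to coprocessor cycles, program
 * CTRL0-CTRL2 (tick interval, clock source, enable bits, bucket count,
 * chunk size), point the VF at the bucket array, then latch the start
 * cycle used later to derive the current bucket.
 */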
static int
timvf_ring_start(const struct rte_event_timer_adapter *adptr)
{
	int ret;
	uint64_t interval;
	struct timvf_ctrl_reg rctrl;
	struct timvf_mbox_dev_info dinfo;
	struct timvf_ring *timr = adptr->data->adapter_priv;

	ret = timvf_mbox_dev_info_get(&dinfo);
	if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
		return -EINVAL;

	/* Calculate the interval cycles according to clock source. */
	switch (timr->clk_src) {
	case TIM_CLK_SRC_SCLK:
	case TIM_CLK_SRC_GTI:
	case TIM_CLK_SRC_PTP:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_GPIO:
		/* GPIO ticks are driven externally; tck_nsec does not apply. */
		interval = 0;
		break;
	default:
		timvf_log_err("Unsupported clock source configured %d",
				timr->clk_src);
		return -EINVAL;
	}

	/* CTRL0 register: tick interval in coprocessor cycles. */
	rctrl.rctrl0 = interval;

	/* CTRL1 register: clock source, enable bits and bucket count. */
	rctrl.rctrl1 =	(uint64_t)(timr->clk_src) << 51 |
		1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
		1ull << 47 /* ENA */ |
		1ull << 44 /* ENA_LDWB */ |
		(timr->nb_bkts - 1);

	/* CTRL2 register: chunk size in 16-byte units. */
	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;

	timvf_write64((uintptr_t)timr->bkt,
			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
		ret = -EACCES;
		goto error;
	}

	if (timvf_get_start_cyc(&timr->ring_start_cyc,
				timr->tim_ring_id) < 0) {
		ret = -EACCES;
		goto error;
	}
	timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
	timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
	timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64
			" maxtmo %"PRIu64"\n",
			timr->nb_bkts, timr->tck_nsec, interval,
			timr->max_tout);

	return 0;
error:
	rte_free(timr->bkt);
	rte_mempool_free(timr->chunk_pool);
	return ret;
}

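/* Stop the ring by clearing the ENA bit in CTRL1 via the mailbox. */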
static int
timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	struct timvf_ctrl_reg rctrl = {0};

	rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
	rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
	rctrl.rctrl1 &= ~(1ull << 47); /* Clear ENA to disable the ring. */
	rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);

	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
		return -EACCES;
	return 0;
}

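/*
 * Allocate and initialize the ring private data: validate the requested
 * tick against TIM_MIN_INTERVAL, size the bucket array from max_tmo_ns /
 * timer_tick_ns, create and populate the chunk mempool, then clear the
 * VF ring-base and interrupt registers.
 */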
static int
timvf_ring_create(struct rte_event_timer_adapter *adptr)
{
	char pool_name[25];
	int ret;
	uint64_t nb_timers;
	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
	struct timvf_ring *timr;
	struct timvf_info tinfo;
	const char *mempool_ops;

	if (timvf_info(&tinfo) < 0)
		return -ENODEV;

	if (adptr->data->id >= tinfo.total_timvfs)
		return -ENODEV;

	timr = rte_zmalloc("octeontx_timvf_priv",
			sizeof(struct timvf_ring), 0);
	if (timr == NULL)
		return -ENOMEM;

	adptr->data->adapter_priv = timr;
	/* Check config parameters. */
	if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
			(!rcfg->timer_tick_ns ||
			 rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
		timvf_log_err("Timer tick interval too low");
		goto cfg_err;
	}

	timr->clk_src = (int) rcfg->clk_src;
	timr->tim_ring_id = adptr->data->id;
	timr->tck_nsec = rcfg->timer_tick_ns;
	timr->max_tout = rcfg->max_tmo_ns;
	timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
	timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
	timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
	nb_timers = rcfg->nb_timers;
	timr->get_target_bkt = bkt_mod;

	timr->nb_chunks = nb_timers / nb_chunk_slots;

	timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
			(timr->nb_bkts) * sizeof(struct tim_mem_bucket),
			0);
	if (timr->bkt == NULL)
		goto mem_err;

	snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
			timr->tim_ring_id);
	timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
			timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
			0);

	if (!timr->chunk_pool) {
		timvf_log_err("Unable to create chunkpool.");
		goto mem_err;
	}

	mempool_ops = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(timr->chunk_pool,
			mempool_ops, NULL);

	if (ret != 0) {
		timvf_log_err("Unable to set chunkpool ops.");
		goto mem_err;
	}

	ret = rte_mempool_populate_default(timr->chunk_pool);
	if (ret < 0) {
		timvf_log_err("Unable to populate chunkpool.");
		goto mem_err;
	}
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);

	return 0;
mem_err:
	/* rte_mempool_free() and rte_free() both tolerate NULL. */
	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	rte_free(timr);
	return -ENOMEM;
cfg_err:
	rte_free(timr);
	return -EINVAL;
}

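/* Release the chunk mempool, the bucket array and the private data. */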
static int
timvf_ring_free(struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;

	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	rte_free(timr);
	return 0;
}

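/*
 * Report statistics. The PMD only tracks the armed-timer count, so it is
 * reported for both the expiry and enqueue counters; the tick count is
 * derived from elapsed TSC cycles via the precomputed reciprocal divisor.
 */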
static int
timvf_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;
	uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;

	stats->evtim_exp_count = timr->tim_arm_cnt;
	stats->ev_enq_count = timr->tim_arm_cnt;
	stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
				&timr->fast_div);
	return 0;
}

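/* Reset the armed-timer counter, the only statistic the PMD maintains. */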
static int
timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;

	timr->tim_arm_cnt = 0;
	return 0;
}

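/*
 * Adapter ops handed back to the eventdev layer; stats_get/stats_reset are
 * filled in by timvf_timer_adapter_caps_get() only when stats are enabled.
 */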
static struct rte_event_timer_adapter_ops timvf_ops = {
	.init		= timvf_ring_create,
	.uninit		= timvf_ring_free,
	.start		= timvf_ring_start,
	.stop		= timvf_ring_stop,
	.get_info	= timvf_ring_info_get,
};

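/*
 * Expose adapter capabilities and the ops table. INTERNAL_PORT tells the
 * adapter layer that this PMD delivers expiry events through the hardware
 * directly, without a service core.
 */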
int
timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
		uint8_t enable_stats)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(flags);

	if (enable_stats) {
		timvf_ops.stats_get = timvf_stats_get;
		timvf_ops.stats_reset = timvf_stats_reset;
	}

	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
	*ops = &timvf_ops;
	return 0;
}
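
/*
 * Usage sketch (application side), assuming an octeontx eventdev already
 * configured as device 0; all conf values below are illustrative only:
 *
 *	struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = 0,
 *		.timer_adapter_id = 0,
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 100 * 1000,
 *		.max_tmo_ns = 100 * 1000 * 1000,
 *		.nb_timers = 1024,
 *	};
 *	struct rte_event_timer_adapter *adptr =
 *		rte_event_timer_adapter_create(&conf);
 *
 * The adapter layer then invokes timvf_ring_create()/timvf_ring_start()
 * above through the ops table returned by timvf_timer_adapter_caps_get().
 */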
319