xref: /dpdk/lib/eventdev/rte_event_dma_adapter.c (revision cfa81500ac39f473e4026d51e989dfc356b61856)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2023 Marvell.
3  */
4 
5 #include <eventdev_pmd.h>
6 #include <rte_service_component.h>
7 
8 #include "rte_event_dma_adapter.h"
9 
10 #define DMA_BATCH_SIZE 32
11 #define DMA_DEFAULT_MAX_NB 128
12 #define DMA_ADAPTER_NAME_LEN 32
13 #define DMA_ADAPTER_BUFFER_SIZE 1024
14 
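/* Sizing note (inferred from the buffer checks below): each per-vchan op buffer
 * holds two batches, so one full batch can stay parked on dma_dev backpressure
 * while edma_circular_buffer_space_for_batch() still reports room for the next.
 */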
15 #define DMA_ADAPTER_OPS_BUFFER_SIZE (DMA_BATCH_SIZE + DMA_BATCH_SIZE)
16 
17 #define DMA_ADAPTER_ARRAY "event_dma_adapter_array"
18 
19 /* Macros to check for valid adapter */
20 #define EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \
21 	do { \
22 		if (!edma_adapter_valid_id(id)) { \
23 			RTE_EDEV_LOG_ERR("Invalid DMA adapter id = %d", id); \
24 			return retval; \
25 		} \
26 	} while (0)
27 
28 /* DMA ops circular buffer */
29 struct __rte_cache_aligned dma_ops_circular_buffer {
30 	/* Index of head element */
31 	uint16_t head;
32 
33 	/* Index of tail element */
34 	uint16_t tail;
35 
36 	/* Number of elements in buffer */
37 	uint16_t count;
38 
39 	/* Size of circular buffer */
40 	uint16_t size;
41 
42 	/* Pointer to hold rte_event_dma_adapter_op for processing */
43 	struct rte_event_dma_adapter_op **op_buffer;
44 };
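/* Ops are written at tail and consumed from head, both advancing modulo size,
 * while count tracks the number of valid entries. head and tail are reset to 0
 * whenever count drops to zero, which keeps subsequent flushes contiguous.
 */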
45 
46 /* Vchan information */
47 struct __rte_cache_aligned dma_vchan_info {
48 	/* Set to indicate vchan queue is enabled */
49 	bool vq_enabled;
50 
51 	/* Circular buffer for batching DMA ops to dma_dev */
52 	struct dma_ops_circular_buffer dma_buf;
53 };
54 
55 /* DMA device information */
56 struct __rte_cache_aligned dma_device_info {
57 	/* Pointer to vchan queue info */
58 	struct dma_vchan_info *vchanq;
59 
60 	/* Pointer to per-vchan transaction queue info.
61 	 * This holds ops passed by the application until
62 	 * DMA completion is done.
63 	 */
64 	struct dma_vchan_info *tqmap;
65 
66 	/* If num_vchanq > 0, the start callback will
67 	 * be invoked if not already invoked
68 	 */
69 	uint16_t num_vchanq;
70 
71 	/* Number of vchans configured for a DMA device. */
72 	uint16_t num_dma_dev_vchan;
73 
74 	/* Next queue pair to be processed */
75 	uint16_t next_vchan_id;
76 
77 	/* Set to indicate processing has been started */
78 	uint8_t dev_started;
79 
80 	/* Set to indicate dmadev->eventdev packet
81 	 * transfer uses a hardware mechanism
82 	 */
83 	uint8_t internal_event_port;
84 };
85 
86 struct __rte_cache_aligned event_dma_adapter {
87 	/* Event device identifier */
88 	uint8_t eventdev_id;
89 
90 	/* Event port identifier */
91 	uint8_t event_port_id;
92 
93 	/* Adapter mode */
94 	enum rte_event_dma_adapter_mode mode;
95 
96 	/* Memory allocation name */
97 	char mem_name[DMA_ADAPTER_NAME_LEN];
98 
99 	/* Socket identifier cached from eventdev */
100 	int socket_id;
101 
102 	/* Lock to serialize config updates with service function */
103 	rte_spinlock_t lock;
104 
105 	/* Next dma device to be processed */
106 	uint16_t next_dmadev_id;
107 
108 	/* DMA device structure array */
109 	struct dma_device_info *dma_devs;
110 
111 	/* Circular buffer for processing DMA ops to eventdev */
112 	struct dma_ops_circular_buffer ebuf;
113 
114 	/* Configuration callback for rte_service configuration */
115 	rte_event_dma_adapter_conf_cb conf_cb;
116 
117 	/* Configuration callback argument */
118 	void *conf_arg;
119 
120 	/* Set if default_cb is being used */
121 	int default_cb_arg;
122 
123 	/* Number of vchan queues configured */
124 	uint16_t nb_vchanq;
125 
126 	/* Per adapter EAL service ID */
127 	uint32_t service_id;
128 
129 	/* Service initialization state */
130 	uint8_t service_initialized;
131 
132 	/* Max DMA ops processed in any service function invocation */
133 	uint32_t max_nb;
134 
135 	/* Store event port's implicit release capability */
136 	uint8_t implicit_release_disabled;
137 
138 	/* Flag to indicate backpressure at dma_dev
139 	 * Stop further dequeuing events from eventdev
140 	 */
141 	bool stop_enq_to_dma_dev;
142 
143 	/* Loop counter to flush dma ops */
144 	uint16_t transmit_loop_count;
145 
146 	/* Per instance stats structure */
147 	struct rte_event_dma_adapter_stats dma_stats;
148 };
149 
150 static struct event_dma_adapter **event_dma_adapter;
151 
152 static inline int
153 edma_adapter_valid_id(uint8_t id)
154 {
155 	return id < RTE_EVENT_DMA_ADAPTER_MAX_INSTANCE;
156 }
157 
158 static inline struct event_dma_adapter *
159 edma_id_to_adapter(uint8_t id)
160 {
161 	return event_dma_adapter ? event_dma_adapter[id] : NULL;
162 }
163 
164 static int
165 edma_array_init(void)
166 {
167 	const struct rte_memzone *mz;
168 	uint32_t sz;
169 
170 	mz = rte_memzone_lookup(DMA_ADAPTER_ARRAY);
171 	if (mz == NULL) {
172 		sz = sizeof(struct event_dma_adapter *) * RTE_EVENT_DMA_ADAPTER_MAX_INSTANCE;
173 		sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
174 
175 		mz = rte_memzone_reserve_aligned(DMA_ADAPTER_ARRAY, sz, rte_socket_id(), 0,
176 						 RTE_CACHE_LINE_SIZE);
177 		if (mz == NULL) {
178 			RTE_EDEV_LOG_ERR("Failed to reserve memzone : %s, err = %d",
179 					 DMA_ADAPTER_ARRAY, rte_errno);
180 			return -rte_errno;
181 		}
182 	}
183 
184 	event_dma_adapter = mz->addr;
185 
186 	return 0;
187 }
188 
189 static inline bool
190 edma_circular_buffer_batch_ready(struct dma_ops_circular_buffer *bufp)
191 {
192 	return bufp->count >= DMA_BATCH_SIZE;
193 }
194 
195 static inline bool
196 edma_circular_buffer_space_for_batch(struct dma_ops_circular_buffer *bufp)
197 {
198 	return (bufp->size - bufp->count) >= DMA_BATCH_SIZE;
199 }
200 
201 static inline int
202 edma_circular_buffer_init(const char *name, struct dma_ops_circular_buffer *buf, uint16_t sz)
203 {
204 	buf->op_buffer = rte_zmalloc(name, sizeof(struct rte_event_dma_adapter_op *) * sz, 0);
205 	if (buf->op_buffer == NULL)
206 		return -ENOMEM;
207 
208 	buf->size = sz;
209 
210 	return 0;
211 }
212 
213 static inline void
214 edma_circular_buffer_free(struct dma_ops_circular_buffer *buf)
215 {
216 	rte_free(buf->op_buffer);
217 }
218 
219 static inline int
220 edma_circular_buffer_add(struct dma_ops_circular_buffer *bufp, struct rte_event_dma_adapter_op *op)
221 {
222 	uint16_t *tail = &bufp->tail;
223 
224 	bufp->op_buffer[*tail] = op;
225 
226 	/* circular buffer, go round */
227 	*tail = (*tail + 1) % bufp->size;
228 	bufp->count++;
229 
230 	return 0;
231 }
232 
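/* Flush the contiguous span starting at head ([head, tail) or [head, size) when
 * the data wraps) to the dma_dev; any wrapped remainder is picked up by a later
 * call. Ops accepted by rte_dma_copy()/rte_dma_copy_sg() are parked in the
 * per-vchan transaction queue (tqmap) until completion. Returns 0 when the whole
 * span was flushed, -1 otherwise.
 */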
233 static inline int
234 edma_circular_buffer_flush_to_dma_dev(struct event_dma_adapter *adapter,
235 				      struct dma_ops_circular_buffer *bufp, uint8_t dma_dev_id,
236 				      uint16_t vchan, uint16_t *nb_ops_flushed)
237 {
238 	struct rte_event_dma_adapter_op *op;
239 	uint16_t *head = &bufp->head;
240 	uint16_t *tail = &bufp->tail;
241 	struct dma_vchan_info *tq;
242 	uint16_t n;
243 	uint16_t i;
244 	int ret;
245 
246 	if (*tail > *head)
247 		n = *tail - *head;
248 	else if (*tail < *head)
249 		n = bufp->size - *head;
250 	else {
251 		*nb_ops_flushed = 0;
252 		return 0; /* buffer empty */
253 	}
254 
255 	tq = &adapter->dma_devs[dma_dev_id].tqmap[vchan];
256 
257 	for (i = 0; i < n; i++)	{
258 		op = bufp->op_buffer[*head];
259 		if (op->nb_src == 1 && op->nb_dst == 1)
260 			ret = rte_dma_copy(dma_dev_id, vchan, op->src_dst_seg[0].addr,
261 					   op->src_dst_seg[1].addr, op->src_dst_seg[0].length,
262 					   op->flags);
263 		else
264 			ret = rte_dma_copy_sg(dma_dev_id, vchan, &op->src_dst_seg[0],
265 					      &op->src_dst_seg[op->nb_src], op->nb_src, op->nb_dst,
266 					      op->flags);
267 		if (ret < 0)
268 			break;
269 
270 		/* Enqueue in transaction queue. */
271 		edma_circular_buffer_add(&tq->dma_buf, op);
272 
273 		*head = (*head + 1) % bufp->size;
274 	}
275 
276 	*nb_ops_flushed = i;
277 	bufp->count -= *nb_ops_flushed;
278 	if (!bufp->count) {
279 		*head = 0;
280 		*tail = 0;
281 	}
282 
283 	return *nb_ops_flushed == n ? 0 : -1;
284 }
285 
286 static int
287 edma_default_config_cb(uint8_t id, uint8_t evdev_id, struct rte_event_dma_adapter_conf *conf,
288 		       void *arg)
289 {
290 	struct rte_event_port_conf *port_conf;
291 	struct rte_event_dev_config dev_conf;
292 	struct event_dma_adapter *adapter;
293 	struct rte_eventdev *dev;
294 	uint8_t port_id;
295 	int started;
296 	int ret;
297 
298 	adapter = edma_id_to_adapter(id);
299 	if (adapter == NULL)
300 		return -EINVAL;
301 
302 	dev = &rte_eventdevs[adapter->eventdev_id];
303 	dev_conf = dev->data->dev_conf;
304 
305 	started = dev->data->dev_started;
306 	if (started)
307 		rte_event_dev_stop(evdev_id);
308 
309 	port_id = dev_conf.nb_event_ports;
310 	dev_conf.nb_event_ports += 1;
311 
312 	port_conf = arg;
313 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
314 		dev_conf.nb_single_link_event_port_queues += 1;
315 
316 	ret = rte_event_dev_configure(evdev_id, &dev_conf);
317 	if (ret) {
318 		RTE_EDEV_LOG_ERR("Failed to configure event dev %u", evdev_id);
319 		if (started) {
320 			if (rte_event_dev_start(evdev_id))
321 				return -EIO;
322 		}
323 		return ret;
324 	}
325 
326 	ret = rte_event_port_setup(evdev_id, port_id, port_conf);
327 	if (ret) {
328 		RTE_EDEV_LOG_ERR("Failed to setup event port %u", port_id);
329 		return ret;
330 	}
331 
332 	conf->event_port_id = port_id;
333 	conf->max_nb = DMA_DEFAULT_MAX_NB;
334 	if (started)
335 		ret = rte_event_dev_start(evdev_id);
336 
337 	adapter->default_cb_arg = 1;
338 	adapter->event_port_id = conf->event_port_id;
339 
340 	return ret;
341 }
342 
343 int
344 rte_event_dma_adapter_create_ext(uint8_t id, uint8_t evdev_id,
345 				 rte_event_dma_adapter_conf_cb conf_cb,
346 				 enum rte_event_dma_adapter_mode mode, void *conf_arg)
347 {
348 	struct rte_event_dev_info dev_info;
349 	struct event_dma_adapter *adapter;
350 	char name[DMA_ADAPTER_NAME_LEN];
351 	struct rte_dma_info info;
352 	uint16_t num_dma_dev;
353 	int socket_id;
354 	uint8_t i;
355 	int ret;
356 
357 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
358 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(evdev_id, -EINVAL);
359 
360 	if (conf_cb == NULL)
361 		return -EINVAL;
362 
363 	if (event_dma_adapter == NULL) {
364 		ret = edma_array_init();
365 		if (ret)
366 			return ret;
367 	}
368 
369 	adapter = edma_id_to_adapter(id);
370 	if (adapter != NULL) {
371 		RTE_EDEV_LOG_ERR("ML adapter ID %d already exists!", id);
372 		return -EEXIST;
373 	}
374 
375 	socket_id = rte_event_dev_socket_id(evdev_id);
376 	snprintf(name, DMA_ADAPTER_NAME_LEN, "rte_event_dma_adapter_%d", id);
377 	adapter = rte_zmalloc_socket(name, sizeof(struct event_dma_adapter), RTE_CACHE_LINE_SIZE,
378 				     socket_id);
379 	if (adapter == NULL) {
380 		RTE_EDEV_LOG_ERR("Failed to get mem for event ML adapter!");
381 		return -ENOMEM;
382 	}
383 
384 	if (edma_circular_buffer_init("edma_circular_buffer", &adapter->ebuf,
385 				      DMA_ADAPTER_BUFFER_SIZE)) {
386 		RTE_EDEV_LOG_ERR("Failed to get memory for event adapter circular buffer");
387 		rte_free(adapter);
388 		return -ENOMEM;
389 	}
390 
391 	ret = rte_event_dev_info_get(evdev_id, &dev_info);
392 	if (ret < 0) {
393 		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s", evdev_id,
394 				 dev_info.driver_name);
395 		edma_circular_buffer_free(&adapter->ebuf);
396 		rte_free(adapter);
397 		return ret;
398 	}
399 
400 	num_dma_dev = rte_dma_count_avail();
401 
402 	adapter->eventdev_id = evdev_id;
403 	adapter->mode = mode;
404 	rte_strscpy(adapter->mem_name, name, DMA_ADAPTER_NAME_LEN);
405 	adapter->socket_id = socket_id;
406 	adapter->conf_cb = conf_cb;
407 	adapter->conf_arg = conf_arg;
408 	adapter->dma_devs = rte_zmalloc_socket(adapter->mem_name,
409 					       num_dma_dev * sizeof(struct dma_device_info), 0,
410 					       socket_id);
411 	if (adapter->dma_devs == NULL) {
412 		RTE_EDEV_LOG_ERR("Failed to get memory for DMA devices");
413 		edma_circular_buffer_free(&adapter->ebuf);
414 		rte_free(adapter);
415 		return -ENOMEM;
416 	}
417 
418 	rte_spinlock_init(&adapter->lock);
419 	for (i = 0; i < num_dma_dev; i++) {
420 		ret = rte_dma_info_get(i, &info);
421 		if (ret) {
422 			RTE_EDEV_LOG_ERR("Failed to get dma device info");
423 			edma_circular_buffer_free(&adapter->ebuf);
424 			rte_free(adapter);
425 			return ret;
426 		}
427 
428 		adapter->dma_devs[i].num_dma_dev_vchan = info.nb_vchans;
429 	}
430 
431 	event_dma_adapter[id] = adapter;
432 
433 	return 0;
434 }
435 
436 int
437 rte_event_dma_adapter_create(uint8_t id, uint8_t evdev_id, struct rte_event_port_conf *port_config,
438 			    enum rte_event_dma_adapter_mode mode)
439 {
440 	struct rte_event_port_conf *pc;
441 	int ret;
442 
443 	if (port_config == NULL)
444 		return -EINVAL;
445 
446 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
447 
448 	pc = rte_malloc(NULL, sizeof(struct rte_event_port_conf), 0);
449 	if (pc == NULL)
450 		return -ENOMEM;
451 
452 	rte_memcpy(pc, port_config, sizeof(struct rte_event_port_conf));
453 	ret = rte_event_dma_adapter_create_ext(id, evdev_id, edma_default_config_cb, mode, pc);
454 	if (ret != 0)
455 		rte_free(pc);
456 
457 	return ret;
458 }
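/* Illustrative usage sketch (not part of this file): creating an adapter with the
 * default configuration callback. adapter_id, evdev_id and the port_conf values
 * are placeholders chosen for the example.
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *	uint8_t adapter_id = 0, evdev_id = 0;
 *	int ret;
 *
 *	ret = rte_event_dma_adapter_create(adapter_id, evdev_id, &port_conf,
 *					   RTE_EVENT_DMA_ADAPTER_OP_FORWARD);
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "DMA adapter creation failed");
 */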
459 
460 int
461 rte_event_dma_adapter_free(uint8_t id)
462 {
463 	struct event_dma_adapter *adapter;
464 
465 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
466 
467 	adapter = edma_id_to_adapter(id);
468 	if (adapter == NULL)
469 		return -EINVAL;
470 
471 	rte_free(adapter->conf_arg);
472 	rte_free(adapter->dma_devs);
473 	edma_circular_buffer_free(&adapter->ebuf);
474 	rte_free(adapter);
475 	event_dma_adapter[id] = NULL;
476 
477 	return 0;
478 }
479 
480 int
481 rte_event_dma_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
482 {
483 	struct event_dma_adapter *adapter;
484 
485 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
486 
487 	adapter = edma_id_to_adapter(id);
488 	if (adapter == NULL || event_port_id == NULL)
489 		return -EINVAL;
490 
491 	*event_port_id = adapter->event_port_id;
492 
493 	return 0;
494 }
495 
496 static inline unsigned int
497 edma_enq_to_dma_dev(struct event_dma_adapter *adapter, struct rte_event *ev, unsigned int cnt)
498 {
499 	struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
500 	struct dma_vchan_info *vchan_qinfo = NULL;
501 	struct rte_event_dma_adapter_op *dma_op;
502 	uint16_t vchan, nb_enqueued = 0;
503 	int16_t dma_dev_id;
504 	unsigned int i, n;
505 	int ret;
506 
507 	ret = 0;
508 	n = 0;
509 	stats->event_deq_count += cnt;
510 
511 	for (i = 0; i < cnt; i++) {
512 		dma_op = ev[i].event_ptr;
513 		if (dma_op == NULL)
514 			continue;
515 
516 		dma_op->impl_opaque[0] = ev[i].event;
517 		dma_dev_id = dma_op->dma_dev_id;
518 		vchan = dma_op->vchan;
519 		vchan_qinfo = &adapter->dma_devs[dma_dev_id].vchanq[vchan];
520 		if (!vchan_qinfo->vq_enabled) {
521 			if (dma_op != NULL && dma_op->op_mp != NULL)
522 				rte_mempool_put(dma_op->op_mp, dma_op);
523 			continue;
524 		}
525 		edma_circular_buffer_add(&vchan_qinfo->dma_buf, dma_op);
526 
527 		if (edma_circular_buffer_batch_ready(&vchan_qinfo->dma_buf)) {
528 			ret = edma_circular_buffer_flush_to_dma_dev(adapter, &vchan_qinfo->dma_buf,
529 								    dma_dev_id, vchan,
530 								    &nb_enqueued);
531 			stats->dma_enq_count += nb_enqueued;
532 			n += nb_enqueued;
533 
534 			/**
535 			 * If some dma ops failed to flush to dma_dev and
536 			 * space for another batch is not available, stop
537 			 * dequeue from eventdev momentarily
538 			 */
539 			if (unlikely(ret < 0 &&
540 				     !edma_circular_buffer_space_for_batch(&vchan_qinfo->dma_buf)))
541 				adapter->stop_enq_to_dma_dev = true;
542 		}
543 	}
544 
545 	return n;
546 }
547 
548 static unsigned int
549 edma_adapter_dev_flush(struct event_dma_adapter *adapter, int16_t dma_dev_id,
550 		       uint16_t *nb_ops_flushed)
551 {
552 	struct dma_vchan_info *vchan_info;
553 	struct dma_device_info *dev_info;
554 	uint16_t nb = 0, nb_enqueued = 0;
555 	uint16_t vchan, nb_vchans;
556 
557 	dev_info = &adapter->dma_devs[dma_dev_id];
558 	nb_vchans = dev_info->num_vchanq;
559 
560 	for (vchan = 0; vchan < nb_vchans; vchan++) {
561 
562 		vchan_info = &dev_info->vchanq[vchan];
563 		if (unlikely(vchan_info == NULL || !vchan_info->vq_enabled))
564 			continue;
565 
566 		edma_circular_buffer_flush_to_dma_dev(adapter, &vchan_info->dma_buf, dma_dev_id,
567 						      vchan, &nb_enqueued);
568 		*nb_ops_flushed += vchan_info->dma_buf.count;
569 		nb += nb_enqueued;
570 	}
571 
572 	return nb;
573 }
574 
575 static unsigned int
576 edma_adapter_enq_flush(struct event_dma_adapter *adapter)
577 {
578 	struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
579 	int16_t dma_dev_id;
580 	uint16_t nb_enqueued = 0;
581 	uint16_t nb_ops_flushed = 0;
582 	uint16_t num_dma_dev = rte_dma_count_avail();
583 
584 	for (dma_dev_id = 0; dma_dev_id < num_dma_dev; dma_dev_id++)
585 		nb_enqueued += edma_adapter_dev_flush(adapter, dma_dev_id, &nb_ops_flushed);
586 	/**
587 	 * Enable dequeue from eventdev if all ops from the circular
588 	 * buffer have been flushed to dma_dev
589 	 */
590 	if (!nb_ops_flushed)
591 		adapter->stop_enq_to_dma_dev = false;
592 
593 	stats->dma_enq_count += nb_enqueued;
594 
595 	return nb_enqueued;
596 }
597 
598 /* Flush an instance's enqueue buffers every DMA_ENQ_FLUSH_THRESHOLD
599  * iterations of edma_adapter_enq_run()
600  */
601 #define DMA_ENQ_FLUSH_THRESHOLD 1024
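/* Note: the threshold must remain a power of two; edma_adapter_enq_run() tests it
 * with (transmit_loop_count & (DMA_ENQ_FLUSH_THRESHOLD - 1)).
 */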
602 
603 static int
604 edma_adapter_enq_run(struct event_dma_adapter *adapter, unsigned int max_enq)
605 {
606 	struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
607 	uint8_t event_port_id = adapter->event_port_id;
608 	uint8_t event_dev_id = adapter->eventdev_id;
609 	struct rte_event ev[DMA_BATCH_SIZE];
610 	unsigned int nb_enq, nb_enqueued;
611 	uint16_t n;
612 
613 	if (adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)
614 		return 0;
615 
616 	nb_enqueued = 0;
617 	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
618 
619 		if (unlikely(adapter->stop_enq_to_dma_dev)) {
620 			nb_enqueued += edma_adapter_enq_flush(adapter);
621 
622 			if (unlikely(adapter->stop_enq_to_dma_dev))
623 				break;
624 		}
625 
626 		stats->event_poll_count++;
627 		n = rte_event_dequeue_burst(event_dev_id, event_port_id, ev, DMA_BATCH_SIZE, 0);
628 
629 		if (!n)
630 			break;
631 
632 		nb_enqueued += edma_enq_to_dma_dev(adapter, ev, n);
633 	}
634 
635 	if ((++adapter->transmit_loop_count & (DMA_ENQ_FLUSH_THRESHOLD - 1)) == 0)
636 		nb_enqueued += edma_adapter_enq_flush(adapter);
637 
638 	return nb_enqueued;
639 }
640 
641 #define DMA_ADAPTER_MAX_EV_ENQ_RETRIES 100
642 
643 static inline uint16_t
644 edma_ops_enqueue_burst(struct event_dma_adapter *adapter, struct rte_event_dma_adapter_op **ops,
645 		       uint16_t num)
646 {
647 	struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
648 	uint8_t event_port_id = adapter->event_port_id;
649 	uint8_t event_dev_id = adapter->eventdev_id;
650 	struct rte_event events[DMA_BATCH_SIZE];
651 	uint16_t nb_enqueued, nb_ev;
652 	uint8_t retry;
653 	uint8_t i;
654 
655 	nb_ev = 0;
656 	retry = 0;
657 	nb_enqueued = 0;
658 	num = RTE_MIN(num, DMA_BATCH_SIZE);
659 	for (i = 0; i < num; i++) {
660 		struct rte_event *ev = &events[nb_ev++];
661 
662 		ev->event = ops[i]->impl_opaque[0];
663 		ev->event_ptr = ops[i];
664 		ev->event_type = RTE_EVENT_TYPE_DMADEV;
665 		if (adapter->implicit_release_disabled)
666 			ev->op = RTE_EVENT_OP_FORWARD;
667 		else
668 			ev->op = RTE_EVENT_OP_NEW;
669 		ev->event = ops[i]->event_meta;
670 	}
671 
672 	do {
673 		nb_enqueued += rte_event_enqueue_burst(event_dev_id, event_port_id,
674 						       &events[nb_enqueued], nb_ev - nb_enqueued);
675 
676 	} while (retry++ < DMA_ADAPTER_MAX_EV_ENQ_RETRIES && nb_enqueued < nb_ev);
677 
678 	stats->event_enq_fail_count += nb_ev - nb_enqueued;
679 	stats->event_enq_count += nb_enqueued;
680 	stats->event_enq_retry_count += retry - 1;
681 
682 	return nb_enqueued;
683 }
684 
685 static int
686 edma_circular_buffer_flush_to_evdev(struct event_dma_adapter *adapter,
687 				    struct dma_ops_circular_buffer *bufp,
688 				    uint16_t *enqueue_count)
689 {
690 	struct rte_event_dma_adapter_op **ops = bufp->op_buffer;
691 	uint16_t n = 0, nb_ops_flushed;
692 	uint16_t *head = &bufp->head;
693 	uint16_t *tail = &bufp->tail;
694 
695 	if (*tail > *head)
696 		n = *tail - *head;
697 	else if (*tail < *head)
698 		n = bufp->size - *head;
699 	else {
700 		if (enqueue_count)
701 			*enqueue_count = 0;
702 		return 0; /* buffer empty */
703 	}
704 
705 	if (enqueue_count && n > *enqueue_count)
706 		n = *enqueue_count;
707 
708 	nb_ops_flushed = edma_ops_enqueue_burst(adapter, &ops[*head], n);
709 	if (enqueue_count)
710 		*enqueue_count = nb_ops_flushed;
711 
712 	bufp->count -= nb_ops_flushed;
713 	if (!bufp->count) {
714 		*head = 0;
715 		*tail = 0;
716 		return 0; /* buffer empty */
717 	}
718 
719 	*head = (*head + nb_ops_flushed) % bufp->size;
720 	return 1;
721 }
722 
723 static void
724 edma_ops_buffer_flush(struct event_dma_adapter *adapter)
725 {
726 	if (likely(adapter->ebuf.count == 0))
727 		return;
728 
729 	while (edma_circular_buffer_flush_to_evdev(adapter, &adapter->ebuf, NULL))
730 		;
731 }
732 
733 static inline unsigned int
734 edma_adapter_deq_run(struct event_dma_adapter *adapter, unsigned int max_deq)
735 {
736 	struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
737 	struct dma_vchan_info *vchan_info;
738 	struct dma_ops_circular_buffer *tq_buf;
739 	struct rte_event_dma_adapter_op *ops;
740 	uint16_t n, nb_deq, nb_enqueued, i;
741 	struct dma_device_info *dev_info;
742 	uint16_t vchan, num_vchan;
743 	uint16_t num_dma_dev;
744 	int16_t dma_dev_id;
745 	uint16_t index;
746 	bool done;
747 	bool err;
748 
749 	nb_deq = 0;
750 	edma_ops_buffer_flush(adapter);
751 
752 	num_dma_dev = rte_dma_count_avail();
753 	do {
754 		done = true;
755 
756 		for (dma_dev_id = adapter->next_dmadev_id; dma_dev_id < num_dma_dev; dma_dev_id++) {
757 			uint16_t queues = 0;
758 			dev_info = &adapter->dma_devs[dma_dev_id];
759 			num_vchan = dev_info->num_vchanq;
760 
761 			for (vchan = dev_info->next_vchan_id; queues < num_vchan;
762 			     vchan = (vchan + 1) % num_vchan, queues++) {
763 
764 				vchan_info = &dev_info->vchanq[vchan];
765 				if (unlikely(vchan_info == NULL || !vchan_info->vq_enabled))
766 					continue;
767 
768 				n = rte_dma_completed(dma_dev_id, vchan, DMA_BATCH_SIZE,
769 						&index, &err);
770 				if (!n)
771 					continue;
772 
773 				done = false;
774 				stats->dma_deq_count += n;
775 
776 				tq_buf = &dev_info->tqmap[vchan].dma_buf;
777 
778 				nb_enqueued = n;
779 				if (unlikely(!adapter->ebuf.count))
780 					edma_circular_buffer_flush_to_evdev(adapter, tq_buf,
781 									    &nb_enqueued);
782 
783 				if (likely(nb_enqueued == n))
784 					goto check;
785 
786 				/* Failed to enqueue events case */
787 				for (i = nb_enqueued; i < n; i++) {
788 					ops = tq_buf->op_buffer[tq_buf->head];
789 					edma_circular_buffer_add(&adapter->ebuf, ops);
790 					tq_buf->head = (tq_buf->head + 1) % tq_buf->size;
791 				}
792 
793 check:
794 				nb_deq += n;
795 				if (nb_deq >= max_deq) {
796 					if ((vchan + 1) == num_vchan)
797 						adapter->next_dmadev_id =
798 								(dma_dev_id + 1) % num_dma_dev;
799 
800 					dev_info->next_vchan_id = (vchan + 1) % num_vchan;
801 
802 					return nb_deq;
803 				}
804 			}
805 		}
806 		adapter->next_dmadev_id = 0;
807 
808 	} while (done == false);
809 
810 	return nb_deq;
811 }
812 
813 static int
814 edma_adapter_run(struct event_dma_adapter *adapter, unsigned int max_ops)
815 {
816 	unsigned int ops_left = max_ops;
817 
818 	while (ops_left > 0) {
819 		unsigned int e_cnt, d_cnt;
820 
821 		e_cnt = edma_adapter_deq_run(adapter, ops_left);
822 		ops_left -= RTE_MIN(ops_left, e_cnt);
823 
824 		d_cnt = edma_adapter_enq_run(adapter, ops_left);
825 		ops_left -= RTE_MIN(ops_left, d_cnt);
826 
827 		if (e_cnt == 0 && d_cnt == 0)
828 			break;
829 	}
830 
831 	if (ops_left == max_ops) {
832 		rte_event_maintain(adapter->eventdev_id, adapter->event_port_id, 0);
833 		return -EAGAIN;
834 	} else
835 		return 0;
836 }
837 
838 static int
839 edma_service_func(void *args)
840 {
841 	struct event_dma_adapter *adapter = args;
842 	int ret;
843 
844 	if (rte_spinlock_trylock(&adapter->lock) == 0)
845 		return 0;
846 	ret = edma_adapter_run(adapter, adapter->max_nb);
847 	rte_spinlock_unlock(&adapter->lock);
848 
849 	return ret;
850 }
851 
852 static int
853 edma_init_service(struct event_dma_adapter *adapter, uint8_t id)
854 {
855 	struct rte_event_dma_adapter_conf adapter_conf;
856 	struct rte_service_spec service;
857 	uint32_t impl_rel;
858 	int ret;
859 
860 	if (adapter->service_initialized)
861 		return 0;
862 
863 	memset(&service, 0, sizeof(service));
864 	snprintf(service.name, DMA_ADAPTER_NAME_LEN, "rte_event_dma_adapter_%d", id);
865 	service.socket_id = adapter->socket_id;
866 	service.callback = edma_service_func;
867 	service.callback_userdata = adapter;
868 
869 	/* Service function handles locking for queue add/del updates */
870 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
871 	ret = rte_service_component_register(&service, &adapter->service_id);
872 	if (ret) {
873 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32, service.name, ret);
874 		return ret;
875 	}
876 
877 	ret = adapter->conf_cb(id, adapter->eventdev_id, &adapter_conf, adapter->conf_arg);
878 	if (ret) {
879 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32, ret);
880 		return ret;
881 	}
882 
883 	adapter->max_nb = adapter_conf.max_nb;
884 	adapter->event_port_id = adapter_conf.event_port_id;
885 
886 	if (rte_event_port_attr_get(adapter->eventdev_id, adapter->event_port_id,
887 				    RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE, &impl_rel)) {
888 		RTE_EDEV_LOG_ERR("Failed to get port info for eventdev %" PRId32,
889 				 adapter->eventdev_id);
890 		edma_circular_buffer_free(&adapter->ebuf);
891 		rte_free(adapter);
892 		return -EINVAL;
893 	}
894 
895 	adapter->implicit_release_disabled = (uint8_t)impl_rel;
896 	adapter->service_initialized = 1;
897 
898 	return ret;
899 }
900 
901 static void
902 edma_update_vchanq_info(struct event_dma_adapter *adapter, struct dma_device_info *dev_info,
903 			uint16_t vchan, uint8_t add)
904 {
905 	struct dma_vchan_info *vchan_info;
906 	struct dma_vchan_info *tqmap_info;
907 	int enabled;
908 	uint16_t i;
909 
910 	if (dev_info->vchanq == NULL)
911 		return;
912 
913 	if (vchan == RTE_DMA_ALL_VCHAN) {
914 		for (i = 0; i < dev_info->num_dma_dev_vchan; i++)
915 			edma_update_vchanq_info(adapter, dev_info, i, add);
916 	} else {
917 		tqmap_info = &dev_info->tqmap[vchan];
918 		vchan_info = &dev_info->vchanq[vchan];
919 		enabled = vchan_info->vq_enabled;
920 		if (add) {
921 			adapter->nb_vchanq += !enabled;
922 			dev_info->num_vchanq += !enabled;
923 		} else {
924 			adapter->nb_vchanq -= enabled;
925 			dev_info->num_vchanq -= enabled;
926 		}
927 		vchan_info->vq_enabled = !!add;
928 		tqmap_info->vq_enabled = !!add;
929 	}
930 }
931 
932 static int
933 edma_add_vchan(struct event_dma_adapter *adapter, int16_t dma_dev_id, uint16_t vchan)
934 {
935 	struct dma_device_info *dev_info = &adapter->dma_devs[dma_dev_id];
936 	struct dma_vchan_info *vchanq;
937 	struct dma_vchan_info *tqmap;
938 	uint16_t nb_vchans;
939 	uint32_t i;
940 
941 	if (dev_info->vchanq == NULL) {
942 		nb_vchans = dev_info->num_dma_dev_vchan;
943 
944 		dev_info->vchanq = rte_zmalloc_socket(adapter->mem_name,
945 				nb_vchans * sizeof(struct dma_vchan_info),
946 				0, adapter->socket_id);
947 		if (dev_info->vchanq == NULL)
948 			return -ENOMEM;
949 
950 		dev_info->tqmap = rte_zmalloc_socket(adapter->mem_name,
951 				nb_vchans * sizeof(struct dma_vchan_info),
952 				0, adapter->socket_id);
953 		if (dev_info->tqmap == NULL)
954 			return -ENOMEM;
955 
956 		for (i = 0; i < nb_vchans; i++) {
957 			vchanq = &dev_info->vchanq[i];
958 
959 			if (edma_circular_buffer_init("dma_dev_circular_buffer", &vchanq->dma_buf,
960 						DMA_ADAPTER_OPS_BUFFER_SIZE)) {
961 				RTE_EDEV_LOG_ERR("Failed to get memory for dma_dev buffer");
962 				rte_free(vchanq);
963 				return -ENOMEM;
964 			}
965 
966 			tqmap = &dev_info->tqmap[i];
967 			if (edma_circular_buffer_init("dma_dev_circular_trans_buf", &tqmap->dma_buf,
968 						DMA_ADAPTER_OPS_BUFFER_SIZE)) {
969 				RTE_EDEV_LOG_ERR(
970 					"Failed to get memory for dma_dev transaction buffer");
971 				rte_free(tqmap);
972 				return -ENOMEM;
973 			}
974 		}
975 	}
976 
977 	if (vchan == RTE_DMA_ALL_VCHAN) {
978 		for (i = 0; i < dev_info->num_dma_dev_vchan; i++)
979 			edma_update_vchanq_info(adapter, dev_info, i, 1);
980 	} else
981 		edma_update_vchanq_info(adapter, dev_info, vchan, 1);
982 
983 	return 0;
984 }
985 
986 int
987 rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dma_dev_id, uint16_t vchan,
988 				const struct rte_event *event)
989 {
990 	struct event_dma_adapter *adapter;
991 	struct dma_device_info *dev_info;
992 	struct rte_eventdev *dev;
993 	uint32_t cap;
994 	int ret;
995 
996 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
997 
998 	if (!rte_dma_is_valid(dma_dev_id)) {
999 		RTE_EDEV_LOG_ERR("Invalid dma_dev_id = %" PRIu8, dma_dev_id);
1000 		return -EINVAL;
1001 	}
1002 
1003 	adapter = edma_id_to_adapter(id);
1004 	if (adapter == NULL)
1005 		return -EINVAL;
1006 
1007 	dev = &rte_eventdevs[adapter->eventdev_id];
1008 	ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, dma_dev_id, &cap);
1009 	if (ret) {
1010 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %u dma_dev %u", id, dma_dev_id);
1011 		return ret;
1012 	}
1013 
1014 	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) && (event == NULL)) {
1015 		RTE_EDEV_LOG_ERR("Event can not be NULL for dma_dev_id = %u", dma_dev_id);
1016 		return -EINVAL;
1017 	}
1018 
1019 	dev_info = &adapter->dma_devs[dma_dev_id];
1020 	if (vchan != RTE_DMA_ALL_VCHAN && vchan >= dev_info->num_dma_dev_vchan) {
1021 		RTE_EDEV_LOG_ERR("Invalid vhcan %u", vchan);
1022 		return -EINVAL;
1023 	}
1024 
1025 	/* In case the HW cap is RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD, there is
1026 	 * no need of a service core as the HW supports event forward capability.
1027 	 */
1028 	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1029 	    (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND &&
1030 	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW) ||
1031 	    (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1032 	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)) {
1033 		if (*dev->dev_ops->dma_adapter_vchan_add == NULL)
1034 			return -ENOTSUP;
1035 		if (dev_info->vchanq == NULL) {
1036 			dev_info->vchanq = rte_zmalloc_socket(adapter->mem_name,
1037 							dev_info->num_dma_dev_vchan *
1038 							sizeof(struct dma_vchan_info),
1039 							0, adapter->socket_id);
1040 			if (dev_info->vchanq == NULL) {
1041 				RTE_EDEV_LOG_ERR("Queue pair add not supported");
1042 				return -ENOMEM;
1043 			}
1044 		}
1045 
1046 		if (dev_info->tqmap == NULL) {
1047 			dev_info->tqmap = rte_zmalloc_socket(adapter->mem_name,
1048 						dev_info->num_dma_dev_vchan *
1049 						sizeof(struct dma_vchan_info),
1050 						0, adapter->socket_id);
1051 			if (dev_info->tqmap == NULL) {
1052 				RTE_EDEV_LOG_ERR("tq pair add not supported");
1053 				return -ENOMEM;
1054 			}
1055 		}
1056 
1057 		ret = (*dev->dev_ops->dma_adapter_vchan_add)(dev, dma_dev_id, vchan, event);
1058 		if (ret)
1059 			return ret;
1060 
1061 		else
1062 			edma_update_vchanq_info(adapter, &adapter->dma_devs[dma_dev_id], vchan, 1);
1063 	}
1064 
1065 	/* In case the HW cap is RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW, or for a SW adapter,
1066 	 * initiate the service so the application can choose whichever way it wants to use the adapter.
1067 	 *
1068 	 * Case 1: RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW. The application may want to use one
1069 	 * of the two modes below:
1070 	 *
1071 	 * a. OP_FORWARD mode -> HW Dequeue + SW enqueue
1072 	 * b. OP_NEW mode -> HW Dequeue
1073 	 *
1074 	 * Case 2: No HW caps, use SW adapter
1075 	 *
1076 	 * a. OP_FORWARD mode -> SW enqueue & dequeue
1077 	 * b. OP_NEW mode -> SW Dequeue
1078 	 */
1079 	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1080 	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1081 	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) ||
1082 	    (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1083 	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1084 	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))) {
1085 		rte_spinlock_lock(&adapter->lock);
1086 		ret = edma_init_service(adapter, id);
1087 		if (ret == 0)
1088 			ret = edma_add_vchan(adapter, dma_dev_id, vchan);
1089 		rte_spinlock_unlock(&adapter->lock);
1090 
1091 		if (ret)
1092 			return ret;
1093 
1094 		rte_service_component_runstate_set(adapter->service_id, 1);
1095 	}
1096 
1097 	return 0;
1098 }
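/* Illustrative usage sketch (not part of this file): binding vchan 0 of dma_dev 0
 * to the adapter and starting it. The response event argument is only required
 * when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND is set, so NULL is
 * passed here; adapter_id is a placeholder.
 *
 *	ret = rte_event_dma_adapter_vchan_add(adapter_id, 0, 0, NULL);
 *	if (ret == 0)
 *		ret = rte_event_dma_adapter_start(adapter_id);
 */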
1099 
1100 int
1101 rte_event_dma_adapter_vchan_del(uint8_t id, int16_t dma_dev_id, uint16_t vchan)
1102 {
1103 	struct event_dma_adapter *adapter;
1104 	struct dma_device_info *dev_info;
1105 	struct rte_eventdev *dev;
1106 	uint32_t cap;
1107 	int ret;
1108 
1109 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1110 
1111 	if (!rte_dma_is_valid(dma_dev_id)) {
1112 		RTE_EDEV_LOG_ERR("Invalid dma_dev_id = %" PRIu8, dma_dev_id);
1113 		return -EINVAL;
1114 	}
1115 
1116 	adapter = edma_id_to_adapter(id);
1117 	if (adapter == NULL)
1118 		return -EINVAL;
1119 
1120 	dev = &rte_eventdevs[adapter->eventdev_id];
1121 	ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, dma_dev_id, &cap);
1122 	if (ret)
1123 		return ret;
1124 
1125 	dev_info = &adapter->dma_devs[dma_dev_id];
1126 
1127 	if (vchan != RTE_DMA_ALL_VCHAN && vchan >= dev_info->num_dma_dev_vchan) {
1128 		RTE_EDEV_LOG_ERR("Invalid vhcan %" PRIu16, vchan);
1129 		return -EINVAL;
1130 	}
1131 
1132 	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1133 	    (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1134 	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)) {
1135 		if (*dev->dev_ops->dma_adapter_vchan_del == NULL)
1136 			return -ENOTSUP;
1137 		ret = (*dev->dev_ops->dma_adapter_vchan_del)(dev, dma_dev_id, vchan);
1138 		if (ret == 0) {
1139 			edma_update_vchanq_info(adapter, dev_info, vchan, 0);
1140 			if (dev_info->num_vchanq == 0) {
1141 				rte_free(dev_info->vchanq);
1142 				dev_info->vchanq = NULL;
1143 			}
1144 		}
1145 	} else {
1146 		if (adapter->nb_vchanq == 0)
1147 			return 0;
1148 
1149 		rte_spinlock_lock(&adapter->lock);
1150 		edma_update_vchanq_info(adapter, dev_info, vchan, 0);
1151 
1152 		if (dev_info->num_vchanq == 0) {
1153 			rte_free(dev_info->vchanq);
1154 			rte_free(dev_info->tqmap);
1155 			dev_info->vchanq = NULL;
1156 			dev_info->tqmap = NULL;
1157 		}
1158 
1159 		rte_spinlock_unlock(&adapter->lock);
1160 		rte_service_component_runstate_set(adapter->service_id, adapter->nb_vchanq);
1161 	}
1162 
1163 	return ret;
1164 }
1165 
1166 int
1167 rte_event_dma_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1168 {
1169 	struct event_dma_adapter *adapter;
1170 
1171 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1172 
1173 	adapter = edma_id_to_adapter(id);
1174 	if (adapter == NULL || service_id == NULL)
1175 		return -EINVAL;
1176 
1177 	if (adapter->service_initialized)
1178 		*service_id = adapter->service_id;
1179 
1180 	return adapter->service_initialized ? 0 : -ESRCH;
1181 }
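/* Illustrative usage sketch (not part of this file): when the adapter runs as a
 * software service (no internal port), the application must map the returned
 * service ID to a service lcore so the enqueue/dequeue loop actually runs.
 * adapter_id and lcore_id are placeholders.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dma_adapter_service_id_get(adapter_id, &service_id) == 0) {
 *		rte_service_lcore_add(lcore_id);
 *		rte_service_map_lcore_set(service_id, lcore_id, 1);
 *		rte_service_lcore_start(lcore_id);
 *	}
 */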
1182 
1183 static int
1184 edma_adapter_ctrl(uint8_t id, int start)
1185 {
1186 	struct event_dma_adapter *adapter;
1187 	struct dma_device_info *dev_info;
1188 	struct rte_eventdev *dev;
1189 	uint16_t num_dma_dev;
1190 	int stop = !start;
1191 	int use_service;
1192 	uint32_t i;
1193 
1194 	use_service = 0;
1195 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1196 	adapter = edma_id_to_adapter(id);
1197 	if (adapter == NULL)
1198 		return -EINVAL;
1199 
1200 	num_dma_dev = rte_dma_count_avail();
1201 	dev = &rte_eventdevs[adapter->eventdev_id];
1202 
1203 	for (i = 0; i < num_dma_dev; i++) {
1204 		dev_info = &adapter->dma_devs[i];
1205 		/* start check for num queue pairs */
1206 		if (start && !dev_info->num_vchanq)
1207 			continue;
1208 		/* stop check if dev has been started */
1209 		if (stop && !dev_info->dev_started)
1210 			continue;
1211 		use_service |= !dev_info->internal_event_port;
1212 		dev_info->dev_started = start;
1213 		if (dev_info->internal_event_port == 0)
1214 			continue;
1215 		start ? (*dev->dev_ops->dma_adapter_start)(dev, i) :
1216 			(*dev->dev_ops->dma_adapter_stop)(dev, i);
1217 	}
1218 
1219 	if (use_service)
1220 		rte_service_runstate_set(adapter->service_id, start);
1221 
1222 	return 0;
1223 }
1224 
1225 int
1226 rte_event_dma_adapter_start(uint8_t id)
1227 {
1228 	struct event_dma_adapter *adapter;
1229 
1230 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1231 
1232 	adapter = edma_id_to_adapter(id);
1233 	if (adapter == NULL)
1234 		return -EINVAL;
1235 
1236 	return edma_adapter_ctrl(id, 1);
1237 }
1238 
1239 int
1240 rte_event_dma_adapter_stop(uint8_t id)
1241 {
1242 	return edma_adapter_ctrl(id, 0);
1243 }
1244 
1245 #define DEFAULT_MAX_NB 128
1246 
1247 int
1248 rte_event_dma_adapter_runtime_params_init(struct rte_event_dma_adapter_runtime_params *params)
1249 {
1250 	if (params == NULL)
1251 		return -EINVAL;
1252 
1253 	memset(params, 0, sizeof(*params));
1254 	params->max_nb = DEFAULT_MAX_NB;
1255 
1256 	return 0;
1257 }
1258 
1259 static int
1260 dma_adapter_cap_check(struct event_dma_adapter *adapter)
1261 {
1262 	uint32_t caps;
1263 	int ret;
1264 
1265 	if (!adapter->nb_vchanq)
1266 		return -EINVAL;
1267 
1268 	ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, adapter->next_dmadev_id, &caps);
1269 	if (ret) {
1270 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8 " cdev %" PRIu8,
1271 				 adapter->eventdev_id, adapter->next_dmadev_id);
1272 		return ret;
1273 	}
1274 
1275 	if ((caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1276 	    (caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
1277 		return -ENOTSUP;
1278 
1279 	return 0;
1280 }
1281 
1282 int
1283 rte_event_dma_adapter_runtime_params_set(uint8_t id,
1284 					 struct rte_event_dma_adapter_runtime_params *params)
1285 {
1286 	struct event_dma_adapter *adapter;
1287 	int ret;
1288 
1289 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1290 
1291 	if (params == NULL) {
1292 		RTE_EDEV_LOG_ERR("params pointer is NULL");
1293 		return -EINVAL;
1294 	}
1295 
1296 	adapter = edma_id_to_adapter(id);
1297 	if (adapter == NULL)
1298 		return -EINVAL;
1299 
1300 	ret = dma_adapter_cap_check(adapter);
1301 	if (ret)
1302 		return ret;
1303 
1304 	rte_spinlock_lock(&adapter->lock);
1305 	adapter->max_nb = params->max_nb;
1306 	rte_spinlock_unlock(&adapter->lock);
1307 
1308 	return 0;
1309 }
1310 
1311 int
1312 rte_event_dma_adapter_runtime_params_get(uint8_t id,
1313 					 struct rte_event_dma_adapter_runtime_params *params)
1314 {
1315 	struct event_dma_adapter *adapter;
1316 	int ret;
1317 
1318 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1319 
1320 	if (params == NULL) {
1321 		RTE_EDEV_LOG_ERR("params pointer is NULL");
1322 		return -EINVAL;
1323 	}
1324 
1325 	adapter = edma_id_to_adapter(id);
1326 	if (adapter == NULL)
1327 		return -EINVAL;
1328 
1329 	ret = dma_adapter_cap_check(adapter);
1330 	if (ret)
1331 		return ret;
1332 
1333 	params->max_nb = adapter->max_nb;
1334 
1335 	return 0;
1336 }
1337 
1338 int
1339 rte_event_dma_adapter_stats_get(uint8_t id, struct rte_event_dma_adapter_stats *stats)
1340 {
1341 	struct rte_event_dma_adapter_stats dev_stats_sum = {0};
1342 	struct rte_event_dma_adapter_stats dev_stats;
1343 	struct event_dma_adapter *adapter;
1344 	struct dma_device_info *dev_info;
1345 	struct rte_eventdev *dev;
1346 	uint16_t num_dma_dev;
1347 	uint32_t i;
1348 	int ret;
1349 
1350 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1351 
1352 	adapter = edma_id_to_adapter(id);
1353 	if (adapter == NULL || stats == NULL)
1354 		return -EINVAL;
1355 
1356 	num_dma_dev = rte_dma_count_avail();
1357 	dev = &rte_eventdevs[adapter->eventdev_id];
1358 	memset(stats, 0, sizeof(*stats));
1359 	for (i = 0; i < num_dma_dev; i++) {
1360 		dev_info = &adapter->dma_devs[i];
1361 
1362 		if (dev_info->internal_event_port == 0 ||
1363 		    dev->dev_ops->dma_adapter_stats_get == NULL)
1364 			continue;
1365 
1366 		ret = (*dev->dev_ops->dma_adapter_stats_get)(dev, i, &dev_stats);
1367 		if (ret)
1368 			continue;
1369 
1370 		dev_stats_sum.dma_deq_count += dev_stats.dma_deq_count;
1371 		dev_stats_sum.event_enq_count += dev_stats.event_enq_count;
1372 	}
1373 
1374 	if (adapter->service_initialized)
1375 		*stats = adapter->dma_stats;
1376 
1377 	stats->dma_deq_count += dev_stats_sum.dma_deq_count;
1378 	stats->event_enq_count += dev_stats_sum.event_enq_count;
1379 
1380 	return 0;
1381 }
1382 
1383 int
1384 rte_event_dma_adapter_stats_reset(uint8_t id)
1385 {
1386 	struct event_dma_adapter *adapter;
1387 	struct dma_device_info *dev_info;
1388 	struct rte_eventdev *dev;
1389 	uint16_t num_dma_dev;
1390 	uint32_t i;
1391 
1392 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1393 
1394 	adapter = edma_id_to_adapter(id);
1395 	if (adapter == NULL)
1396 		return -EINVAL;
1397 
1398 	num_dma_dev = rte_dma_count_avail();
1399 	dev = &rte_eventdevs[adapter->eventdev_id];
1400 	for (i = 0; i < num_dma_dev; i++) {
1401 		dev_info = &adapter->dma_devs[i];
1402 
1403 		if (dev_info->internal_event_port == 0 ||
1404 		    dev->dev_ops->dma_adapter_stats_reset == NULL)
1405 			continue;
1406 
1407 		(*dev->dev_ops->dma_adapter_stats_reset)(dev, i);
1408 	}
1409 
1410 	memset(&adapter->dma_stats, 0, sizeof(adapter->dma_stats));
1411 
1412 	return 0;
1413 }
1414 
1415 uint16_t
1416 rte_event_dma_adapter_enqueue(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
1417 			      uint16_t nb_events)
1418 {
1419 	const struct rte_event_fp_ops *fp_ops;
1420 	void *port;
1421 
1422 	fp_ops = &rte_event_fp_ops[dev_id];
1423 	port = fp_ops->data[port_id];
1424 
1425 	return fp_ops->dma_enqueue(port, ev, nb_events);
1426 }
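/* Illustrative usage sketch (not part of this file): submitting a single-segment
 * copy through the adapter enqueue path, available when the eventdev/dmadev pair
 * reports RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD. 'op' is assumed to come
 * from an application-created mempool of struct rte_event_dma_adapter_op elements;
 * src_iova, dst_iova, len, dma_dev_id, vchan, op_pool, evdev_id and app_port_id
 * are placeholders.
 *
 *	op->src_dst_seg[0].addr = src_iova;
 *	op->src_dst_seg[0].length = len;
 *	op->src_dst_seg[1].addr = dst_iova;
 *	op->nb_src = 1;
 *	op->nb_dst = 1;
 *	op->flags = RTE_DMA_OP_FLAG_SUBMIT;
 *	op->dma_dev_id = dma_dev_id;
 *	op->vchan = vchan;
 *	op->op_mp = op_pool;
 *	op->event_meta = response_ev.event;	// response event metadata
 *
 *	ev.event_ptr = op;
 *	rte_event_dma_adapter_enqueue(evdev_id, app_port_id, &ev, 1);
 */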
1427