1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2023 Marvell.
3  */
4 
5 #include <eventdev_pmd.h>
6 #include <rte_service_component.h>
7 
8 #include "rte_event_dma_adapter.h"
9 
10 #define DMA_BATCH_SIZE 32
11 #define DMA_DEFAULT_MAX_NB 128
12 #define DMA_ADAPTER_NAME_LEN 32
13 #define DMA_ADAPTER_BUFFER_SIZE 1024
14 
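/* The per-vchan op buffer holds two batches; presumably so that a fresh batch
 * can be accumulated while a previously flushed one is still pending.
 */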
15 #define DMA_ADAPTER_OPS_BUFFER_SIZE (DMA_BATCH_SIZE + DMA_BATCH_SIZE)
16 
17 #define DMA_ADAPTER_ARRAY "event_dma_adapter_array"
18 
19 /* Macros to check for valid adapter */
20 #define EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \
21 	do { \
22 		if (!edma_adapter_valid_id(id)) { \
23 			RTE_EDEV_LOG_ERR("Invalid DMA adapter id = %d", id); \
24 			return retval; \
25 		} \
26 	} while (0)
27 
28 /* DMA ops circular buffer */
29 struct dma_ops_circular_buffer {
30 	/* Index of head element */
31 	uint16_t head;
32 
33 	/* Index of tail element */
34 	uint16_t tail;
35 
36 	/* Number of elements in buffer */
37 	uint16_t count;
38 
39 	/* Size of circular buffer */
40 	uint16_t size;
41 
42 	/* Pointer to hold rte_event_dma_adapter_op for processing */
43 	struct rte_event_dma_adapter_op **op_buffer;
44 } __rte_cache_aligned;
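
/* Ring bookkeeping used throughout this file: 'tail' is the next write slot,
 * 'head' is the next read slot and 'count' tracks occupancy, all wrapping
 * modulo 'size'. Illustration: with size = 4, three adds followed by one
 * flush leave head = 1, tail = 3, count = 2.
 */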
45 
46 /* Vchan information */
47 struct dma_vchan_info {
48 	/* Set to indicate vchan queue is enabled */
49 	bool vq_enabled;
50 
51 	/* Circular buffer for batching DMA ops to dma_dev */
52 	struct dma_ops_circular_buffer dma_buf;
53 } __rte_cache_aligned;
54 
55 /* DMA device information */
56 struct dma_device_info {
57 	/* Pointer to vchan queue info */
58 	struct dma_vchan_info *vchanq;
59 
60 	/* Pointer to vchan transaction queue info.
61 	 * This holds ops passed by the application until
62 	 * the DMA completion is processed.
63 	 */
64 	struct dma_vchan_info *tqmap;
65 
66 	/* Number of vchan queues enabled on this device.
67 	 * If num_vchanq > 0, the device is started when the adapter starts.
68 	 */
69 	uint16_t num_vchanq;
70 
71 	/* Number of vchans configured for a DMA device. */
72 	uint16_t num_dma_dev_vchan;
73 
74 	/* Next vchan to be processed */
75 	uint16_t next_vchan_id;
76 
77 	/* Set to indicate processing has been started */
78 	uint8_t dev_started;
79 
80 	/* Set to indicate dmadev->eventdev packet
81 	 * transfer uses a hardware mechanism
82 	 */
83 	uint8_t internal_event_port;
84 } __rte_cache_aligned;
85 
86 struct event_dma_adapter {
87 	/* Event device identifier */
88 	uint8_t eventdev_id;
89 
90 	/* Event port identifier */
91 	uint8_t event_port_id;
92 
93 	/* Adapter mode */
94 	enum rte_event_dma_adapter_mode mode;
95 
96 	/* Memory allocation name */
97 	char mem_name[DMA_ADAPTER_NAME_LEN];
98 
99 	/* Socket identifier cached from eventdev */
100 	int socket_id;
101 
102 	/* Lock to serialize config updates with service function */
103 	rte_spinlock_t lock;
104 
105 	/* Next dma device to be processed */
106 	uint16_t next_dmadev_id;
107 
108 	/* DMA device structure array */
109 	struct dma_device_info *dma_devs;
110 
111 	/* Circular buffer for processing DMA ops to eventdev */
112 	struct dma_ops_circular_buffer ebuf;
113 
114 	/* Configuration callback for rte_service configuration */
115 	rte_event_dma_adapter_conf_cb conf_cb;
116 
117 	/* Configuration callback argument */
118 	void *conf_arg;
119 
120 	/* Set if default_cb is being used */
121 	int default_cb_arg;
122 
123 	/* Number of vchan queues configured */
124 	uint16_t nb_vchanq;
125 
126 	/* Per adapter EAL service ID */
127 	uint32_t service_id;
128 
129 	/* Service initialization state */
130 	uint8_t service_initialized;
131 
132 	/* Max DMA ops processed in any service function invocation */
133 	uint32_t max_nb;
134 
135 	/* Store event port's implicit release capability */
136 	uint8_t implicit_release_disabled;
137 
138 	/* Flag to indicate backpressure at dma_dev;
139 	 * stop further dequeuing of events from the eventdev.
140 	 */
141 	bool stop_enq_to_dma_dev;
142 
143 	/* Loop counter to flush dma ops */
144 	uint16_t transmit_loop_count;
145 
146 	/* Per instance stats structure */
147 	struct rte_event_dma_adapter_stats dma_stats;
148 } __rte_cache_aligned;
149 
150 static struct event_dma_adapter **event_dma_adapter;
151 
152 static inline int
153 edma_adapter_valid_id(uint8_t id)
154 {
155 	return id < RTE_EVENT_DMA_ADAPTER_MAX_INSTANCE;
156 }
157 
158 static inline struct event_dma_adapter *
159 edma_id_to_adapter(uint8_t id)
160 {
161 	return event_dma_adapter ? event_dma_adapter[id] : NULL;
162 }
163 
164 static int
165 edma_array_init(void)
166 {
167 	const struct rte_memzone *mz;
168 	uint32_t sz;
169 
170 	mz = rte_memzone_lookup(DMA_ADAPTER_ARRAY);
171 	if (mz == NULL) {
172 		sz = sizeof(struct event_dma_adapter *) * RTE_EVENT_DMA_ADAPTER_MAX_INSTANCE;
173 		sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
174 
175 		mz = rte_memzone_reserve_aligned(DMA_ADAPTER_ARRAY, sz, rte_socket_id(), 0,
176 						 RTE_CACHE_LINE_SIZE);
177 		if (mz == NULL) {
178 			RTE_EDEV_LOG_ERR("Failed to reserve memzone : %s, err = %d",
179 					 DMA_ADAPTER_ARRAY, rte_errno);
180 			return -rte_errno;
181 		}
182 	}
183 
184 	event_dma_adapter = mz->addr;
185 
186 	return 0;
187 }
188 
189 static inline bool
190 edma_circular_buffer_batch_ready(struct dma_ops_circular_buffer *bufp)
191 {
192 	return bufp->count >= DMA_BATCH_SIZE;
193 }
194 
195 static inline bool
196 edma_circular_buffer_space_for_batch(struct dma_ops_circular_buffer *bufp)
197 {
198 	return (bufp->size - bufp->count) >= DMA_BATCH_SIZE;
199 }
200 
201 static inline int
202 edma_circular_buffer_init(const char *name, struct dma_ops_circular_buffer *buf, uint16_t sz)
203 {
204 	buf->op_buffer = rte_zmalloc(name, sizeof(struct rte_event_dma_adapter_op *) * sz, 0);
205 	if (buf->op_buffer == NULL)
206 		return -ENOMEM;
207 
208 	buf->size = sz;
209 
210 	return 0;
211 }
212 
213 static inline void
214 edma_circular_buffer_free(struct dma_ops_circular_buffer *buf)
215 {
216 	rte_free(buf->op_buffer);
217 }
218 
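/* Append one op at 'tail'. There is no capacity check here; callers are
 * expected to ensure there is room, e.g. via
 * edma_circular_buffer_space_for_batch().
 */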
219 static inline int
220 edma_circular_buffer_add(struct dma_ops_circular_buffer *bufp, struct rte_event_dma_adapter_op *op)
221 {
222 	uint16_t *tail = &bufp->tail;
223 
224 	bufp->op_buffer[*tail] = op;
225 
226 	/* circular buffer, go round */
227 	*tail = (*tail + 1) % bufp->size;
228 	bufp->count++;
229 
230 	return 0;
231 }
232 
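/* Flush the contiguous run starting at 'head' (up to the wrap point) to the
 * dmadev via rte_dma_copy()/rte_dma_copy_sg(), moving each accepted op to the
 * per-vchan transaction queue until its completion is processed. Returns 0
 * when the whole run was accepted, -1 if the dmadev stopped accepting ops
 * part-way; '*nb_ops_flushed' reports how many ops were handed over.
 */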
233 static inline int
234 edma_circular_buffer_flush_to_dma_dev(struct event_dma_adapter *adapter,
235 				      struct dma_ops_circular_buffer *bufp, uint8_t dma_dev_id,
236 				      uint16_t vchan, uint16_t *nb_ops_flushed)
237 {
238 	struct rte_event_dma_adapter_op *op;
239 	struct dma_vchan_info *tq;
240 	uint16_t *head = &bufp->head;
241 	uint16_t *tail = &bufp->tail;
242 	uint16_t n;
243 	uint16_t i;
244 	int ret;
245 
246 	if (*tail > *head)
247 		n = *tail - *head;
248 	else if (*tail < *head)
249 		n = bufp->size - *head;
250 	else {
251 		*nb_ops_flushed = 0;
252 		return 0; /* buffer empty */
253 	}
254 
255 	tq = &adapter->dma_devs[dma_dev_id].tqmap[vchan];
256 
257 	for (i = 0; i < n; i++)	{
258 		op = bufp->op_buffer[*head];
259 		if (op->nb_src == 1 && op->nb_dst == 1)
260 			ret = rte_dma_copy(dma_dev_id, vchan, op->src_seg->addr, op->dst_seg->addr,
261 					   op->src_seg->length, op->flags);
262 		else
263 			ret = rte_dma_copy_sg(dma_dev_id, vchan, op->src_seg, op->dst_seg,
264 					      op->nb_src, op->nb_dst, op->flags);
265 		if (ret < 0)
266 			break;
267 
268 		/* Enqueue in transaction queue. */
269 		edma_circular_buffer_add(&tq->dma_buf, op);
270 
271 		*head = (*head + 1) % bufp->size;
272 	}
273 
274 	*nb_ops_flushed = i;
275 	bufp->count -= *nb_ops_flushed;
276 	if (!bufp->count) {
277 		*head = 0;
278 		*tail = 0;
279 	}
280 
281 	return *nb_ops_flushed == n ? 0 : -1;
282 }
283 
284 static int
285 edma_default_config_cb(uint8_t id, uint8_t evdev_id, struct rte_event_dma_adapter_conf *conf,
286 		       void *arg)
287 {
288 	struct rte_event_port_conf *port_conf;
289 	struct rte_event_dev_config dev_conf;
290 	struct event_dma_adapter *adapter;
291 	struct rte_eventdev *dev;
292 	uint8_t port_id;
293 	int started;
294 	int ret;
295 
296 	adapter = edma_id_to_adapter(id);
297 	if (adapter == NULL)
298 		return -EINVAL;
299 
300 	dev = &rte_eventdevs[adapter->eventdev_id];
301 	dev_conf = dev->data->dev_conf;
302 
303 	started = dev->data->dev_started;
304 	if (started)
305 		rte_event_dev_stop(evdev_id);
306 
307 	port_id = dev_conf.nb_event_ports;
308 	dev_conf.nb_event_ports += 1;
309 
310 	port_conf = arg;
311 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
312 		dev_conf.nb_single_link_event_port_queues += 1;
313 
314 	ret = rte_event_dev_configure(evdev_id, &dev_conf);
315 	if (ret) {
316 		RTE_EDEV_LOG_ERR("Failed to configure event dev %u", evdev_id);
317 		if (started) {
318 			if (rte_event_dev_start(evdev_id))
319 				return -EIO;
320 		}
321 		return ret;
322 	}
323 
324 	ret = rte_event_port_setup(evdev_id, port_id, port_conf);
325 	if (ret) {
326 		RTE_EDEV_LOG_ERR("Failed to setup event port %u", port_id);
327 		return ret;
328 	}
329 
330 	conf->event_port_id = port_id;
331 	conf->max_nb = DMA_DEFAULT_MAX_NB;
332 	if (started)
333 		ret = rte_event_dev_start(evdev_id);
334 
335 	adapter->default_cb_arg = 1;
336 	adapter->event_port_id = conf->event_port_id;
337 
338 	return ret;
339 }
340 
341 int
342 rte_event_dma_adapter_create_ext(uint8_t id, uint8_t evdev_id,
343 				 rte_event_dma_adapter_conf_cb conf_cb,
344 				 enum rte_event_dma_adapter_mode mode, void *conf_arg)
345 {
346 	struct rte_event_dev_info dev_info;
347 	struct event_dma_adapter *adapter;
348 	char name[DMA_ADAPTER_NAME_LEN];
349 	struct rte_dma_info info;
350 	uint16_t num_dma_dev;
351 	int socket_id;
352 	uint8_t i;
353 	int ret;
354 
355 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
356 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(evdev_id, -EINVAL);
357 
358 	if (conf_cb == NULL)
359 		return -EINVAL;
360 
361 	if (event_dma_adapter == NULL) {
362 		ret = edma_array_init();
363 		if (ret)
364 			return ret;
365 	}
366 
367 	adapter = edma_id_to_adapter(id);
368 	if (adapter != NULL) {
369 		RTE_EDEV_LOG_ERR("DMA adapter ID %d already exists!", id);
370 		return -EEXIST;
371 	}
372 
373 	socket_id = rte_event_dev_socket_id(evdev_id);
374 	snprintf(name, DMA_ADAPTER_NAME_LEN, "rte_event_dma_adapter_%d", id);
375 	adapter = rte_zmalloc_socket(name, sizeof(struct event_dma_adapter), RTE_CACHE_LINE_SIZE,
376 				     socket_id);
377 	if (adapter == NULL) {
378 		RTE_EDEV_LOG_ERR("Failed to get mem for event DMA adapter!");
379 		return -ENOMEM;
380 	}
381 
382 	if (edma_circular_buffer_init("edma_circular_buffer", &adapter->ebuf,
383 				      DMA_ADAPTER_BUFFER_SIZE)) {
384 		RTE_EDEV_LOG_ERR("Failed to get memory for event adapter circular buffer");
385 		rte_free(adapter);
386 		return -ENOMEM;
387 	}
388 
389 	ret = rte_event_dev_info_get(evdev_id, &dev_info);
390 	if (ret < 0) {
391 		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d", evdev_id);
393 		edma_circular_buffer_free(&adapter->ebuf);
394 		rte_free(adapter);
395 		return ret;
396 	}
397 
398 	num_dma_dev = rte_dma_count_avail();
399 
400 	adapter->eventdev_id = evdev_id;
401 	adapter->mode = mode;
402 	rte_strscpy(adapter->mem_name, name, DMA_ADAPTER_NAME_LEN);
403 	adapter->socket_id = socket_id;
404 	adapter->conf_cb = conf_cb;
405 	adapter->conf_arg = conf_arg;
406 	adapter->dma_devs = rte_zmalloc_socket(adapter->mem_name,
407 					       num_dma_dev * sizeof(struct dma_device_info), 0,
408 					       socket_id);
409 	if (adapter->dma_devs == NULL) {
410 		RTE_EDEV_LOG_ERR("Failed to get memory for DMA devices");
411 		edma_circular_buffer_free(&adapter->ebuf);
412 		rte_free(adapter);
413 		return -ENOMEM;
414 	}
415 
416 	rte_spinlock_init(&adapter->lock);
417 	for (i = 0; i < num_dma_dev; i++) {
418 		ret = rte_dma_info_get(i, &info);
419 		if (ret) {
420 			RTE_EDEV_LOG_ERR("Failed to get dma device info");
421 			edma_circular_buffer_free(&adapter->ebuf);
422 			rte_free(adapter);
423 			return ret;
424 		}
425 
426 		adapter->dma_devs[i].num_dma_dev_vchan = info.nb_vchans;
427 	}
428 
429 	event_dma_adapter[id] = adapter;
430 
431 	return 0;
432 }
433 
434 int
435 rte_event_dma_adapter_create(uint8_t id, uint8_t evdev_id, struct rte_event_port_conf *port_config,
436 			    enum rte_event_dma_adapter_mode mode)
437 {
438 	struct rte_event_port_conf *pc;
439 	int ret;
440 
441 	if (port_config == NULL)
442 		return -EINVAL;
443 
444 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
445 
446 	pc = rte_malloc(NULL, sizeof(struct rte_event_port_conf), 0);
447 	if (pc == NULL)
448 		return -ENOMEM;
449 
450 	rte_memcpy(pc, port_config, sizeof(struct rte_event_port_conf));
451 	ret = rte_event_dma_adapter_create_ext(id, evdev_id, edma_default_config_cb, mode, pc);
452 	if (ret != 0)
453 		rte_free(pc);
454 
455 	return ret;
456 }
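
/* Usage sketch (illustrative only; the port configuration values below are
 * hypothetical):
 *
 *	struct rte_event_port_conf pc = { .new_event_threshold = 4096,
 *					  .dequeue_depth = 16, .enqueue_depth = 16 };
 *	ret = rte_event_dma_adapter_create(0, evdev_id, &pc,
 *					   RTE_EVENT_DMA_ADAPTER_OP_FORWARD);
 */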
457 
458 int
459 rte_event_dma_adapter_free(uint8_t id)
460 {
461 	struct event_dma_adapter *adapter;
462 
463 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
464 
465 	adapter = edma_id_to_adapter(id);
466 	if (adapter == NULL)
467 		return -EINVAL;
468 
469 	rte_free(adapter->conf_arg);
470 	rte_free(adapter->dma_devs);
471 	edma_circular_buffer_free(&adapter->ebuf);
472 	rte_free(adapter);
473 	event_dma_adapter[id] = NULL;
474 
475 	return 0;
476 }
477 
478 int
479 rte_event_dma_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
480 {
481 	struct event_dma_adapter *adapter;
482 
483 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
484 
485 	adapter = edma_id_to_adapter(id);
486 	if (adapter == NULL || event_port_id == NULL)
487 		return -EINVAL;
488 
489 	*event_port_id = adapter->event_port_id;
490 
491 	return 0;
492 }
493 
494 static inline unsigned int
495 edma_enq_to_dma_dev(struct event_dma_adapter *adapter, struct rte_event *ev, unsigned int cnt)
496 {
497 	struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
498 	struct dma_vchan_info *vchan_qinfo = NULL;
499 	struct rte_event_dma_adapter_op *dma_op;
500 	uint16_t vchan, nb_enqueued = 0;
501 	int16_t dma_dev_id;
502 	unsigned int i, n;
503 	int ret;
504 
505 	ret = 0;
506 	n = 0;
507 	stats->event_deq_count += cnt;
508 
509 	for (i = 0; i < cnt; i++) {
510 		dma_op = ev[i].event_ptr;
511 		if (dma_op == NULL)
512 			continue;
513 
514 		/* Expected to have response info appended to dma_op. */
515 
516 		dma_dev_id = dma_op->dma_dev_id;
517 		vchan = dma_op->vchan;
518 		vchan_qinfo = &adapter->dma_devs[dma_dev_id].vchanq[vchan];
519 		if (!vchan_qinfo->vq_enabled) {
520 			if (dma_op != NULL && dma_op->op_mp != NULL)
521 				rte_mempool_put(dma_op->op_mp, dma_op);
522 			continue;
523 		}
524 		edma_circular_buffer_add(&vchan_qinfo->dma_buf, dma_op);
525 
526 		if (edma_circular_buffer_batch_ready(&vchan_qinfo->dma_buf)) {
527 			ret = edma_circular_buffer_flush_to_dma_dev(adapter, &vchan_qinfo->dma_buf,
528 								    dma_dev_id, vchan,
529 								    &nb_enqueued);
530 			stats->dma_enq_count += nb_enqueued;
531 			n += nb_enqueued;
532 
533 			/**
534 			 * If some dma ops failed to flush to dma_dev and
535 			 * space for another batch is not available, stop
536 			 * dequeue from eventdev momentarily
537 			 */
538 			if (unlikely(ret < 0 &&
539 				     !edma_circular_buffer_space_for_batch(&vchan_qinfo->dma_buf)))
540 				adapter->stop_enq_to_dma_dev = true;
541 		}
542 	}
543 
544 	return n;
545 }
546 
547 static unsigned int
548 edma_adapter_dev_flush(struct event_dma_adapter *adapter, int16_t dma_dev_id,
549 		       uint16_t *nb_ops_flushed)
550 {
551 	struct dma_vchan_info *vchan_info;
552 	struct dma_device_info *dev_info;
553 	uint16_t nb = 0, nb_enqueued = 0;
554 	uint16_t vchan, nb_vchans;
555 
556 	dev_info = &adapter->dma_devs[dma_dev_id];
557 	nb_vchans = dev_info->num_vchanq;
558 
559 	for (vchan = 0; vchan < nb_vchans; vchan++) {
560 
561 		vchan_info = &dev_info->vchanq[vchan];
562 		if (unlikely(vchan_info == NULL || !vchan_info->vq_enabled))
563 			continue;
564 
565 		edma_circular_buffer_flush_to_dma_dev(adapter, &vchan_info->dma_buf, dma_dev_id,
566 						      vchan, &nb_enqueued);
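		/* Note: despite the name, this accumulates ops still pending in the
		 * circular buffer; the caller re-enables eventdev dequeue only when
		 * this total is zero.
		 */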
567 		*nb_ops_flushed += vchan_info->dma_buf.count;
568 		nb += nb_enqueued;
569 	}
570 
571 	return nb;
572 }
573 
574 static unsigned int
575 edma_adapter_enq_flush(struct event_dma_adapter *adapter)
576 {
577 	struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
578 	int16_t dma_dev_id;
579 	uint16_t nb_enqueued = 0;
580 	uint16_t nb_ops_flushed = 0;
581 	uint16_t num_dma_dev = rte_dma_count_avail();
582 
583 	for (dma_dev_id = 0; dma_dev_id < num_dma_dev; dma_dev_id++)
584 		nb_enqueued += edma_adapter_dev_flush(adapter, dma_dev_id, &nb_ops_flushed);
585 	/**
586 	 * Enable dequeue from eventdev if all ops from the circular
587 	 * buffer are flushed to dma_dev
588 	 */
589 	if (!nb_ops_flushed)
590 		adapter->stop_enq_to_dma_dev = false;
591 
592 	stats->dma_enq_count += nb_enqueued;
593 
594 	return nb_enqueued;
595 }
596 
597 /* Flush an instance's enqueue buffers every DMA_ENQ_FLUSH_THRESHOLD
598  * iterations of edma_adapter_enq_run()
599  */
600 #define DMA_ENQ_FLUSH_THRESHOLD 1024
601 
602 static int
603 edma_adapter_enq_run(struct event_dma_adapter *adapter, unsigned int max_enq)
604 {
605 	struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
606 	uint8_t event_port_id = adapter->event_port_id;
607 	uint8_t event_dev_id = adapter->eventdev_id;
608 	struct rte_event ev[DMA_BATCH_SIZE];
609 	unsigned int nb_enq, nb_enqueued;
610 	uint16_t n;
611 
612 	if (adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)
613 		return 0;
614 
615 	nb_enqueued = 0;
616 	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
617 
618 		if (unlikely(adapter->stop_enq_to_dma_dev)) {
619 			nb_enqueued += edma_adapter_enq_flush(adapter);
620 
621 			if (unlikely(adapter->stop_enq_to_dma_dev))
622 				break;
623 		}
624 
625 		stats->event_poll_count++;
626 		n = rte_event_dequeue_burst(event_dev_id, event_port_id, ev, DMA_BATCH_SIZE, 0);
627 
628 		if (!n)
629 			break;
630 
631 		nb_enqueued += edma_enq_to_dma_dev(adapter, ev, n);
632 	}
633 
634 	if ((++adapter->transmit_loop_count & (DMA_ENQ_FLUSH_THRESHOLD - 1)) == 0)
635 		nb_enqueued += edma_adapter_enq_flush(adapter);
636 
637 	return nb_enqueued;
638 }
639 
640 #define DMA_ADAPTER_MAX_EV_ENQ_RETRIES 100
641 
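/* Convert completed ops into events and enqueue them to the adapter's event
 * port. The response 'struct rte_event' is expected to be appended directly
 * after each 'struct rte_event_dma_adapter_op'; the event op is FORWARD when
 * implicit release is disabled on the port, NEW otherwise.
 */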
642 static inline uint16_t
643 edma_ops_enqueue_burst(struct event_dma_adapter *adapter, struct rte_event_dma_adapter_op **ops,
644 		       uint16_t num)
645 {
646 	struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
647 	uint8_t event_port_id = adapter->event_port_id;
648 	uint8_t event_dev_id = adapter->eventdev_id;
649 	struct rte_event events[DMA_BATCH_SIZE];
650 	struct rte_event *response_info;
651 	uint16_t nb_enqueued, nb_ev;
652 	uint8_t retry;
653 	uint8_t i;
654 
655 	nb_ev = 0;
656 	retry = 0;
657 	nb_enqueued = 0;
658 	num = RTE_MIN(num, DMA_BATCH_SIZE);
659 	for (i = 0; i < num; i++) {
660 		struct rte_event *ev = &events[nb_ev++];
661 
662 		/* Expected to have response info appended to dma_op. */
663 		response_info = (struct rte_event *)((uint8_t *)ops[i] +
664 							  sizeof(struct rte_event_dma_adapter_op));
665 		if (unlikely(response_info == NULL)) {
666 			if (ops[i] != NULL && ops[i]->op_mp != NULL)
667 				rte_mempool_put(ops[i]->op_mp, ops[i]);
668 			continue;
669 		}
670 
671 		rte_memcpy(ev, response_info, sizeof(struct rte_event));
672 		ev->event_ptr = ops[i];
673 		ev->event_type = RTE_EVENT_TYPE_DMADEV;
674 		if (adapter->implicit_release_disabled)
675 			ev->op = RTE_EVENT_OP_FORWARD;
676 		else
677 			ev->op = RTE_EVENT_OP_NEW;
678 	}
679 
680 	do {
681 		nb_enqueued += rte_event_enqueue_burst(event_dev_id, event_port_id,
682 						       &events[nb_enqueued], nb_ev - nb_enqueued);
683 
684 	} while (retry++ < DMA_ADAPTER_MAX_EV_ENQ_RETRIES && nb_enqueued < nb_ev);
685 
686 	stats->event_enq_fail_count += nb_ev - nb_enqueued;
687 	stats->event_enq_count += nb_enqueued;
688 	stats->event_enq_retry_count += retry - 1;
689 
690 	return nb_enqueued;
691 }
692 
693 static int
694 edma_circular_buffer_flush_to_evdev(struct event_dma_adapter *adapter,
695 				    struct dma_ops_circular_buffer *bufp,
696 				    uint16_t *enqueue_count)
697 {
698 	struct rte_event_dma_adapter_op **ops = bufp->op_buffer;
699 	uint16_t n = 0, nb_ops_flushed;
700 	uint16_t *head = &bufp->head;
701 	uint16_t *tail = &bufp->tail;
702 
703 	if (*tail > *head)
704 		n = *tail - *head;
705 	else if (*tail < *head)
706 		n = bufp->size - *head;
707 	else {
708 		if (enqueue_count)
709 			*enqueue_count = 0;
710 		return 0; /* buffer empty */
711 	}
712 
713 	if (enqueue_count && n > *enqueue_count)
714 		n = *enqueue_count;
715 
716 	nb_ops_flushed = edma_ops_enqueue_burst(adapter, &ops[*head], n);
717 	if (enqueue_count)
718 		*enqueue_count = nb_ops_flushed;
719 
720 	bufp->count -= nb_ops_flushed;
721 	if (!bufp->count) {
722 		*head = 0;
723 		*tail = 0;
724 		return 0; /* buffer empty */
725 	}
726 
727 	*head = (*head + nb_ops_flushed) % bufp->size;
728 	return 1;
729 }
730 
731 static void
732 edma_ops_buffer_flush(struct event_dma_adapter *adapter)
733 {
734 	if (likely(adapter->ebuf.count == 0))
735 		return;
736 
737 	while (edma_circular_buffer_flush_to_evdev(adapter, &adapter->ebuf, NULL))
738 		;
739 }
740 
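/* Dequeue completions from dmadevs and forward them to the eventdev. Any
 * backlog in the adapter's event buffer is flushed first; dmadevs and vchans
 * are then polled round-robin with rte_dma_completed(), and ops that cannot
 * be enqueued to the eventdev are parked in the event buffer. The next
 * dmadev/vchan indices are saved so a later invocation resumes fairly.
 */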
741 static inline unsigned int
742 edma_adapter_deq_run(struct event_dma_adapter *adapter, unsigned int max_deq)
743 {
744 	struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
745 	struct dma_vchan_info *vchan_info;
746 	struct dma_ops_circular_buffer *tq_buf;
747 	struct rte_event_dma_adapter_op *ops;
748 	uint16_t n, nb_deq, nb_enqueued, i;
749 	struct dma_device_info *dev_info;
750 	uint16_t vchan, num_vchan;
751 	uint16_t num_dma_dev;
752 	int16_t dma_dev_id;
753 	uint16_t index;
754 	bool done;
755 	bool err;
756 
757 	nb_deq = 0;
758 	edma_ops_buffer_flush(adapter);
759 
760 	num_dma_dev = rte_dma_count_avail();
761 	do {
762 		done = true;
763 
764 		for (dma_dev_id = adapter->next_dmadev_id; dma_dev_id < num_dma_dev; dma_dev_id++) {
765 			uint16_t queues = 0;
766 			dev_info = &adapter->dma_devs[dma_dev_id];
767 			num_vchan = dev_info->num_vchanq;
768 
769 			for (vchan = dev_info->next_vchan_id; queues < num_vchan;
770 			     vchan = (vchan + 1) % num_vchan, queues++) {
771 
772 				vchan_info = &dev_info->vchanq[vchan];
773 				if (unlikely(vchan_info == NULL || !vchan_info->vq_enabled))
774 					continue;
775 
776 				n = rte_dma_completed(dma_dev_id, vchan, DMA_BATCH_SIZE,
777 						&index, &err);
778 				if (!n)
779 					continue;
780 
781 				done = false;
782 				stats->dma_deq_count += n;
783 
784 				tq_buf = &dev_info->tqmap[vchan].dma_buf;
785 
786 				nb_enqueued = n;
787 				if (unlikely(!adapter->ebuf.count))
788 					edma_circular_buffer_flush_to_evdev(adapter, tq_buf,
789 									    &nb_enqueued);
790 
791 				if (likely(nb_enqueued == n))
792 					goto check;
793 
794 				/* Failed to enqueue events case */
795 				for (i = nb_enqueued; i < n; i++) {
796 					ops = tq_buf->op_buffer[tq_buf->head];
797 					edma_circular_buffer_add(&adapter->ebuf, ops);
798 					tq_buf->head = (tq_buf->head + 1) % tq_buf->size;
799 				}
800 
801 check:
802 				nb_deq += n;
803 				if (nb_deq >= max_deq) {
804 					if ((vchan + 1) == num_vchan)
805 						adapter->next_dmadev_id =
806 								(dma_dev_id + 1) % num_dma_dev;
807 
808 					dev_info->next_vchan_id = (vchan + 1) % num_vchan;
809 
810 					return nb_deq;
811 				}
812 			}
813 		}
814 		adapter->next_dmadev_id = 0;
815 
816 	} while (done == false);
817 
818 	return nb_deq;
819 }
820 
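/* Alternate dequeue (dmadev -> eventdev) and enqueue (eventdev -> dmadev)
 * processing until 'max_ops' ops are handled or no further progress is made.
 * Returns -EAGAIN when no work was done, after calling rte_event_maintain().
 */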
821 static int
822 edma_adapter_run(struct event_dma_adapter *adapter, unsigned int max_ops)
823 {
824 	unsigned int ops_left = max_ops;
825 
826 	while (ops_left > 0) {
827 		unsigned int e_cnt, d_cnt;
828 
829 		e_cnt = edma_adapter_deq_run(adapter, ops_left);
830 		ops_left -= RTE_MIN(ops_left, e_cnt);
831 
832 		d_cnt = edma_adapter_enq_run(adapter, ops_left);
833 		ops_left -= RTE_MIN(ops_left, d_cnt);
834 
835 		if (e_cnt == 0 && d_cnt == 0)
836 			break;
837 	}
838 
839 	if (ops_left == max_ops) {
840 		rte_event_maintain(adapter->eventdev_id, adapter->event_port_id, 0);
841 		return -EAGAIN;
842 	} else
843 		return 0;
844 }
845 
846 static int
847 edma_service_func(void *args)
848 {
849 	struct event_dma_adapter *adapter = args;
850 	int ret;
851 
852 	if (rte_spinlock_trylock(&adapter->lock) == 0)
853 		return 0;
854 	ret = edma_adapter_run(adapter, adapter->max_nb);
855 	rte_spinlock_unlock(&adapter->lock);
856 
857 	return ret;
858 }
859 
860 static int
861 edma_init_service(struct event_dma_adapter *adapter, uint8_t id)
862 {
863 	struct rte_event_dma_adapter_conf adapter_conf;
864 	struct rte_service_spec service;
865 	uint32_t impl_rel;
866 	int ret;
867 
868 	if (adapter->service_initialized)
869 		return 0;
870 
871 	memset(&service, 0, sizeof(service));
872 	snprintf(service.name, DMA_ADAPTER_NAME_LEN, "rte_event_dma_adapter_%d", id);
873 	service.socket_id = adapter->socket_id;
874 	service.callback = edma_service_func;
875 	service.callback_userdata = adapter;
876 
877 	/* Service function handles locking for queue add/del updates */
878 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
879 	ret = rte_service_component_register(&service, &adapter->service_id);
880 	if (ret) {
881 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32, service.name, ret);
882 		return ret;
883 	}
884 
885 	ret = adapter->conf_cb(id, adapter->eventdev_id, &adapter_conf, adapter->conf_arg);
886 	if (ret) {
887 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32, ret);
888 		return ret;
889 	}
890 
891 	adapter->max_nb = adapter_conf.max_nb;
892 	adapter->event_port_id = adapter_conf.event_port_id;
893 
894 	if (rte_event_port_attr_get(adapter->eventdev_id, adapter->event_port_id,
895 				    RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE, &impl_rel)) {
896 		RTE_EDEV_LOG_ERR("Failed to get port info for eventdev %" PRId32,
897 				 adapter->eventdev_id);
898 		edma_circular_buffer_free(&adapter->ebuf);
899 		rte_free(adapter);
900 		return -EINVAL;
901 	}
902 
903 	adapter->implicit_release_disabled = (uint8_t)impl_rel;
904 	adapter->service_initialized = 1;
905 
906 	return ret;
907 }
908 
909 static void
910 edma_update_vchanq_info(struct event_dma_adapter *adapter, struct dma_device_info *dev_info,
911 			uint16_t vchan, uint8_t add)
912 {
913 	struct dma_vchan_info *vchan_info;
914 	struct dma_vchan_info *tqmap_info;
915 	int enabled;
916 	uint16_t i;
917 
918 	if (dev_info->vchanq == NULL)
919 		return;
920 
921 	if (vchan == RTE_DMA_ALL_VCHAN) {
922 		for (i = 0; i < dev_info->num_dma_dev_vchan; i++)
923 			edma_update_vchanq_info(adapter, dev_info, i, add);
924 	} else {
925 		tqmap_info = &dev_info->tqmap[vchan];
926 		vchan_info = &dev_info->vchanq[vchan];
927 		enabled = vchan_info->vq_enabled;
928 		if (add) {
929 			adapter->nb_vchanq += !enabled;
930 			dev_info->num_vchanq += !enabled;
931 		} else {
932 			adapter->nb_vchanq -= enabled;
933 			dev_info->num_vchanq -= enabled;
934 		}
935 		vchan_info->vq_enabled = !!add;
936 		tqmap_info->vq_enabled = !!add;
937 	}
938 }
939 
940 static int
941 edma_add_vchan(struct event_dma_adapter *adapter, int16_t dma_dev_id, uint16_t vchan)
942 {
943 	struct dma_device_info *dev_info = &adapter->dma_devs[dma_dev_id];
944 	struct dma_vchan_info *vchanq;
945 	struct dma_vchan_info *tqmap;
946 	uint16_t nb_vchans;
947 	uint32_t i;
948 
949 	if (dev_info->vchanq == NULL) {
950 		nb_vchans = dev_info->num_dma_dev_vchan;
951 
952 		dev_info->vchanq = rte_zmalloc_socket(adapter->mem_name,
953 				nb_vchans * sizeof(struct dma_vchan_info),
954 				0, adapter->socket_id);
955 		if (dev_info->vchanq == NULL)
956 			return -ENOMEM;
957 
958 		dev_info->tqmap = rte_zmalloc_socket(adapter->mem_name,
959 				nb_vchans * sizeof(struct dma_vchan_info),
960 				0, adapter->socket_id);
961 		if (dev_info->tqmap == NULL)
962 			return -ENOMEM;
963 
964 		for (i = 0; i < nb_vchans; i++) {
965 			vchanq = &dev_info->vchanq[i];
966 
967 			if (edma_circular_buffer_init("dma_dev_circular_buffer", &vchanq->dma_buf,
968 						DMA_ADAPTER_OPS_BUFFER_SIZE)) {
969 				RTE_EDEV_LOG_ERR("Failed to get memory for dma_dev buffer");
970 				rte_free(vchanq);
971 				return -ENOMEM;
972 			}
973 
974 			tqmap = &dev_info->tqmap[i];
975 			if (edma_circular_buffer_init("dma_dev_circular_trans_buf", &tqmap->dma_buf,
976 						DMA_ADAPTER_OPS_BUFFER_SIZE)) {
977 				RTE_EDEV_LOG_ERR(
978 					"Failed to get memory for dma_dev transaction buffer");
979 				rte_free(tqmap);
980 				return -ENOMEM;
981 			}
982 		}
983 	}
984 
985 	if (vchan == RTE_DMA_ALL_VCHAN) {
986 		for (i = 0; i < dev_info->num_dma_dev_vchan; i++)
987 			edma_update_vchanq_info(adapter, dev_info, i, 1);
988 	} else
989 		edma_update_vchanq_info(adapter, dev_info, vchan, 1);
990 
991 	return 0;
992 }
993 
994 int
995 rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dma_dev_id, uint16_t vchan,
996 				const struct rte_event *event)
997 {
998 	struct event_dma_adapter *adapter;
999 	struct dma_device_info *dev_info;
1000 	struct rte_eventdev *dev;
1001 	uint32_t cap;
1002 	int ret;
1003 
1004 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1005 
1006 	if (!rte_dma_is_valid(dma_dev_id)) {
1007 		RTE_EDEV_LOG_ERR("Invalid dma_dev_id = %" PRId16, dma_dev_id);
1008 		return -EINVAL;
1009 	}
1010 
1011 	adapter = edma_id_to_adapter(id);
1012 	if (adapter == NULL)
1013 		return -EINVAL;
1014 
1015 	dev = &rte_eventdevs[adapter->eventdev_id];
1016 	ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, dma_dev_id, &cap);
1017 	if (ret) {
1018 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %u dma_dev %u", id, dma_dev_id);
1019 		return ret;
1020 	}
1021 
1022 	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) && (event == NULL)) {
1023 		RTE_EDEV_LOG_ERR("Event can not be NULL for dma_dev_id = %u", dma_dev_id);
1024 		return -EINVAL;
1025 	}
1026 
1027 	dev_info = &adapter->dma_devs[dma_dev_id];
1028 	if (vchan != RTE_DMA_ALL_VCHAN && vchan >= dev_info->num_dma_dev_vchan) {
1029 		RTE_EDEV_LOG_ERR("Invalid vchan %u", vchan);
1030 		return -EINVAL;
1031 	}
1032 
1033 	/* In case HW cap is RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD, no
1034 	 * need of service core as HW supports event forward capability.
1035 	 */
1036 	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1037 	    (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND &&
1038 	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW) ||
1039 	    (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1040 	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)) {
1041 		if (*dev->dev_ops->dma_adapter_vchan_add == NULL)
1042 			return -ENOTSUP;
1043 		if (dev_info->vchanq == NULL) {
1044 			dev_info->vchanq = rte_zmalloc_socket(adapter->mem_name,
1045 							dev_info->num_dma_dev_vchan *
1046 							sizeof(struct dma_vchan_info),
1047 							0, adapter->socket_id);
1048 			if (dev_info->vchanq == NULL) {
1049 				RTE_EDEV_LOG_ERR("Failed to allocate vchan queue memory");
1050 				return -ENOMEM;
1051 			}
1052 		}
1053 
1054 		if (dev_info->tqmap == NULL) {
1055 			dev_info->tqmap = rte_zmalloc_socket(adapter->mem_name,
1056 						dev_info->num_dma_dev_vchan *
1057 						sizeof(struct dma_vchan_info),
1058 						0, adapter->socket_id);
1059 			if (dev_info->tqmap == NULL) {
1060 				RTE_EDEV_LOG_ERR("Failed to allocate tqmap memory");
1061 				return -ENOMEM;
1062 			}
1063 		}
1064 
1065 		ret = (*dev->dev_ops->dma_adapter_vchan_add)(dev, dma_dev_id, vchan, event);
1066 		if (ret)
1067 			return ret;
1068 
1069 		edma_update_vchanq_info(adapter, &adapter->dma_devs[dma_dev_id], vchan, 1);
1071 	}
1072 
1073 	/* In case HW cap is RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW, or for a SW adapter,
1074 	 * initiate the service so the application can use whichever mode it wants.
1075 	 *
1076 	 * Case 1: RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW. The application may want to use
1077 	 * one of the two modes below:
1078 	 *
1079 	 * a. OP_FORWARD mode -> HW Dequeue + SW enqueue
1080 	 * b. OP_NEW mode -> HW Dequeue
1081 	 *
1082 	 * Case 2: No HW caps, use SW adapter
1083 	 *
1084 	 * a. OP_FORWARD mode -> SW enqueue & dequeue
1085 	 * b. OP_NEW mode -> SW Dequeue
1086 	 */
1087 	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1088 	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1089 	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) ||
1090 	    (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1091 	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1092 	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))) {
1093 		rte_spinlock_lock(&adapter->lock);
1094 		ret = edma_init_service(adapter, id);
1095 		if (ret == 0)
1096 			ret = edma_add_vchan(adapter, dma_dev_id, vchan);
1097 		rte_spinlock_unlock(&adapter->lock);
1098 
1099 		if (ret)
1100 			return ret;
1101 
1102 		rte_service_component_runstate_set(adapter->service_id, 1);
1103 	}
1104 
1105 	return 0;
1106 }
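
/* Usage sketch (illustrative only): the response event is required only for
 * devices with the INTERNAL_PORT_VCHAN_EV_BIND capability, and the field
 * values below are hypothetical.
 *
 *	struct rte_event resp = { .queue_id = 0, .sched_type = RTE_SCHED_TYPE_ATOMIC,
 *				  .priority = RTE_EVENT_DEV_PRIORITY_NORMAL };
 *	ret = rte_event_dma_adapter_vchan_add(id, dma_dev_id, RTE_DMA_ALL_VCHAN, &resp);
 */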
1107 
1108 int
1109 rte_event_dma_adapter_vchan_del(uint8_t id, int16_t dma_dev_id, uint16_t vchan)
1110 {
1111 	struct event_dma_adapter *adapter;
1112 	struct dma_device_info *dev_info;
1113 	struct rte_eventdev *dev;
1114 	uint32_t cap;
1115 	int ret;
1116 
1117 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1118 
1119 	if (!rte_dma_is_valid(dma_dev_id)) {
1120 		RTE_EDEV_LOG_ERR("Invalid dma_dev_id = %" PRId16, dma_dev_id);
1121 		return -EINVAL;
1122 	}
1123 
1124 	adapter = edma_id_to_adapter(id);
1125 	if (adapter == NULL)
1126 		return -EINVAL;
1127 
1128 	dev = &rte_eventdevs[adapter->eventdev_id];
1129 	ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, dma_dev_id, &cap);
1130 	if (ret)
1131 		return ret;
1132 
1133 	dev_info = &adapter->dma_devs[dma_dev_id];
1134 
1135 	if (vchan != RTE_DMA_ALL_VCHAN && vchan >= dev_info->num_dma_dev_vchan) {
1136 		RTE_EDEV_LOG_ERR("Invalid vchan %" PRIu16, vchan);
1137 		return -EINVAL;
1138 	}
1139 
1140 	if ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1141 	    (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1142 	     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)) {
1143 		if (*dev->dev_ops->dma_adapter_vchan_del == NULL)
1144 			return -ENOTSUP;
1145 		ret = (*dev->dev_ops->dma_adapter_vchan_del)(dev, dma_dev_id, vchan);
1146 		if (ret == 0) {
1147 			edma_update_vchanq_info(adapter, dev_info, vchan, 0);
1148 			if (dev_info->num_vchanq == 0) {
1149 				rte_free(dev_info->vchanq);
1150 				dev_info->vchanq = NULL;
1151 			}
1152 		}
1153 	} else {
1154 		if (adapter->nb_vchanq == 0)
1155 			return 0;
1156 
1157 		rte_spinlock_lock(&adapter->lock);
1158 		edma_update_vchanq_info(adapter, dev_info, vchan, 0);
1159 
1160 		if (dev_info->num_vchanq == 0) {
1161 			rte_free(dev_info->vchanq);
1162 			rte_free(dev_info->tqmap);
1163 			dev_info->vchanq = NULL;
1164 			dev_info->tqmap = NULL;
1165 		}
1166 
1167 		rte_spinlock_unlock(&adapter->lock);
1168 		rte_service_component_runstate_set(adapter->service_id, adapter->nb_vchanq);
1169 	}
1170 
1171 	return ret;
1172 }
1173 
1174 int
1175 rte_event_dma_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1176 {
1177 	struct event_dma_adapter *adapter;
1178 
1179 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1180 
1181 	adapter = edma_id_to_adapter(id);
1182 	if (adapter == NULL || service_id == NULL)
1183 		return -EINVAL;
1184 
1185 	if (adapter->service_initialized)
1186 		*service_id = adapter->service_id;
1187 
1188 	return adapter->service_initialized ? 0 : -ESRCH;
1189 }
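
/* Usage sketch (illustrative only; 'lcore_id' is assumed to already be a
 * service lcore):
 *
 *	uint32_t service_id;
 *	if (rte_event_dma_adapter_service_id_get(id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, lcore_id, 1);
 *		rte_service_lcore_start(lcore_id);
 *	}
 */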
1190 
1191 static int
1192 edma_adapter_ctrl(uint8_t id, int start)
1193 {
1194 	struct event_dma_adapter *adapter;
1195 	struct dma_device_info *dev_info;
1196 	struct rte_eventdev *dev;
1197 	uint16_t num_dma_dev;
1198 	int stop = !start;
1199 	int use_service;
1200 	uint32_t i;
1201 
1202 	use_service = 0;
1203 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1204 	adapter = edma_id_to_adapter(id);
1205 	if (adapter == NULL)
1206 		return -EINVAL;
1207 
1208 	num_dma_dev = rte_dma_count_avail();
1209 	dev = &rte_eventdevs[adapter->eventdev_id];
1210 
1211 	for (i = 0; i < num_dma_dev; i++) {
1212 		dev_info = &adapter->dma_devs[i];
1213 		/* start check for number of configured vchan queues */
1214 		if (start && !dev_info->num_vchanq)
1215 			continue;
1216 		/* stop check if dev has been started */
1217 		if (stop && !dev_info->dev_started)
1218 			continue;
1219 		use_service |= !dev_info->internal_event_port;
1220 		dev_info->dev_started = start;
1221 		if (dev_info->internal_event_port == 0)
1222 			continue;
1223 		start ? (*dev->dev_ops->dma_adapter_start)(dev, i) :
1224 			(*dev->dev_ops->dma_adapter_stop)(dev, i);
1225 	}
1226 
1227 	if (use_service)
1228 		rte_service_runstate_set(adapter->service_id, start);
1229 
1230 	return 0;
1231 }
1232 
1233 int
1234 rte_event_dma_adapter_start(uint8_t id)
1235 {
1236 	struct event_dma_adapter *adapter;
1237 
1238 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1239 
1240 	adapter = edma_id_to_adapter(id);
1241 	if (adapter == NULL)
1242 		return -EINVAL;
1243 
1244 	return edma_adapter_ctrl(id, 1);
1245 }
1246 
1247 int
1248 rte_event_dma_adapter_stop(uint8_t id)
1249 {
1250 	return edma_adapter_ctrl(id, 0);
1251 }
1252 
1253 #define DEFAULT_MAX_NB 128
1254 
1255 int
1256 rte_event_dma_adapter_runtime_params_init(struct rte_event_dma_adapter_runtime_params *params)
1257 {
1258 	if (params == NULL)
1259 		return -EINVAL;
1260 
1261 	memset(params, 0, sizeof(*params));
1262 	params->max_nb = DEFAULT_MAX_NB;
1263 
1264 	return 0;
1265 }
1266 
1267 static int
1268 dma_adapter_cap_check(struct event_dma_adapter *adapter)
1269 {
1270 	uint32_t caps;
1271 	int ret;
1272 
1273 	if (!adapter->nb_vchanq)
1274 		return -EINVAL;
1275 
1276 	ret = rte_event_dma_adapter_caps_get(adapter->eventdev_id, adapter->next_dmadev_id, &caps);
1277 	if (ret) {
1278 		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8 " dma_dev %" PRIu16,
1279 				 adapter->eventdev_id, adapter->next_dmadev_id);
1280 		return ret;
1281 	}
1282 
1283 	if ((caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1284 	    (caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
1285 		return -ENOTSUP;
1286 
1287 	return 0;
1288 }
1289 
1290 int
1291 rte_event_dma_adapter_runtime_params_set(uint8_t id,
1292 					 struct rte_event_dma_adapter_runtime_params *params)
1293 {
1294 	struct event_dma_adapter *adapter;
1295 	int ret;
1296 
1297 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1298 
1299 	if (params == NULL) {
1300 		RTE_EDEV_LOG_ERR("params pointer is NULL");
1301 		return -EINVAL;
1302 	}
1303 
1304 	adapter = edma_id_to_adapter(id);
1305 	if (adapter == NULL)
1306 		return -EINVAL;
1307 
1308 	ret = dma_adapter_cap_check(adapter);
1309 	if (ret)
1310 		return ret;
1311 
1312 	rte_spinlock_lock(&adapter->lock);
1313 	adapter->max_nb = params->max_nb;
1314 	rte_spinlock_unlock(&adapter->lock);
1315 
1316 	return 0;
1317 }
1318 
1319 int
1320 rte_event_dma_adapter_runtime_params_get(uint8_t id,
1321 					 struct rte_event_dma_adapter_runtime_params *params)
1322 {
1323 	struct event_dma_adapter *adapter;
1324 	int ret;
1325 
1326 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1327 
1328 	if (params == NULL) {
1329 		RTE_EDEV_LOG_ERR("params pointer is NULL");
1330 		return -EINVAL;
1331 	}
1332 
1333 	adapter = edma_id_to_adapter(id);
1334 	if (adapter == NULL)
1335 		return -EINVAL;
1336 
1337 	ret = dma_adapter_cap_check(adapter);
1338 	if (ret)
1339 		return ret;
1340 
1341 	params->max_nb = adapter->max_nb;
1342 
1343 	return 0;
1344 }
1345 
1346 int
1347 rte_event_dma_adapter_stats_get(uint8_t id, struct rte_event_dma_adapter_stats *stats)
1348 {
1349 	struct rte_event_dma_adapter_stats dev_stats_sum = {0};
1350 	struct rte_event_dma_adapter_stats dev_stats;
1351 	struct event_dma_adapter *adapter;
1352 	struct dma_device_info *dev_info;
1353 	struct rte_eventdev *dev;
1354 	uint16_t num_dma_dev;
1355 	uint32_t i;
1356 	int ret;
1357 
1358 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1359 
1360 	adapter = edma_id_to_adapter(id);
1361 	if (adapter == NULL || stats == NULL)
1362 		return -EINVAL;
1363 
1364 	num_dma_dev = rte_dma_count_avail();
1365 	dev = &rte_eventdevs[adapter->eventdev_id];
1366 	memset(stats, 0, sizeof(*stats));
1367 	for (i = 0; i < num_dma_dev; i++) {
1368 		dev_info = &adapter->dma_devs[i];
1369 
1370 		if (dev_info->internal_event_port == 0 ||
1371 		    dev->dev_ops->dma_adapter_stats_get == NULL)
1372 			continue;
1373 
1374 		ret = (*dev->dev_ops->dma_adapter_stats_get)(dev, i, &dev_stats);
1375 		if (ret)
1376 			continue;
1377 
1378 		dev_stats_sum.dma_deq_count += dev_stats.dma_deq_count;
1379 		dev_stats_sum.event_enq_count += dev_stats.event_enq_count;
1380 	}
1381 
1382 	if (adapter->service_initialized)
1383 		*stats = adapter->dma_stats;
1384 
1385 	stats->dma_deq_count += dev_stats_sum.dma_deq_count;
1386 	stats->event_enq_count += dev_stats_sum.event_enq_count;
1387 
1388 	return 0;
1389 }
1390 
1391 int
1392 rte_event_dma_adapter_stats_reset(uint8_t id)
1393 {
1394 	struct event_dma_adapter *adapter;
1395 	struct dma_device_info *dev_info;
1396 	struct rte_eventdev *dev;
1397 	uint16_t num_dma_dev;
1398 	uint32_t i;
1399 
1400 	EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1401 
1402 	adapter = edma_id_to_adapter(id);
1403 	if (adapter == NULL)
1404 		return -EINVAL;
1405 
1406 	num_dma_dev = rte_dma_count_avail();
1407 	dev = &rte_eventdevs[adapter->eventdev_id];
1408 	for (i = 0; i < num_dma_dev; i++) {
1409 		dev_info = &adapter->dma_devs[i];
1410 
1411 		if (dev_info->internal_event_port == 0 ||
1412 		    dev->dev_ops->dma_adapter_stats_reset == NULL)
1413 			continue;
1414 
1415 		(*dev->dev_ops->dma_adapter_stats_reset)(dev, i);
1416 	}
1417 
1418 	memset(&adapter->dma_stats, 0, sizeof(adapter->dma_stats));
1419 
1420 	return 0;
1421 }
1422 
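/* Fast-path enqueue: dispatch directly to the event device's dma_enqueue op
 * for the given port, without any parameter validation.
 */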
1423 uint16_t
1424 rte_event_dma_adapter_enqueue(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
1425 			      uint16_t nb_events)
1426 {
1427 	const struct rte_event_fp_ops *fp_ops;
1428 	void *port;
1429 
1430 	fp_ops = &rte_event_fp_ops[dev_id];
1431 	port = fp_ops->data[port_id];
1432 
1433 	return fp_ops->dma_enqueue(port, ev, nb_events);
1434 }
1435