/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#ifndef _RTE_EVENT_CRYPTO_ADAPTER_
#define _RTE_EVENT_CRYPTO_ADAPTER_

/**
 * @file
 *
 * RTE Event crypto adapter
 *
 * The eventdev library provides a couple of adapters to bridge between
 * various components and provide new event sources. The event crypto adapter
 * is one such adapter; it is intended to bridge between event devices and
 * crypto devices.
 *
 * The crypto adapter adds support to enqueue/dequeue crypto operations to/
 * from the event device. The packet flow between the crypto device and the
 * event device can be accomplished using either SW or HW based transfer
 * mechanisms. The adapter uses an EAL service core function for SW based
 * packet transfer and uses the eventdev PMD functions to configure HW based
 * packet transfer between the crypto device and the event device.
 *
 * The application can choose to submit a crypto operation directly to the
 * crypto device or send it to the crypto adapter via eventdev based on the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
 * The first mode is known as the event new (RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
 * mode and the second as the event forward
 * (RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) mode. The mode is specified when
 * creating the adapter. In the former mode, it is the application's
 * responsibility to enable ingress packet ordering; in the latter mode, it is
 * the adapter's responsibility.
 *
 *
 * Working model of RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode:
 *
 *                +--------------+         +--------------+
 *                |              |         | Crypto stage |
 *                | Application  |---[2]-->| + enqueue to |
 *                |              |         |   cryptodev  |
 *                +--------------+         +--------------+
 *                    ^   ^                       |
 *                    |   |                      [3]
 *                   [6] [1]                      |
 *                    |   |                       |
 *                +--------------+                |
 *                |              |                |
 *                | Event device |                |
 *                |              |                |
 *                +--------------+                |
 *                       ^                        |
 *                       |                        |
 *                      [5]                       |
 *                       |                        v
 *                +--------------+         +--------------+
 *                |              |         |              |
 *                |Crypto adapter|<--[4]---|  Cryptodev   |
 *                |              |         |              |
 *                +--------------+         +--------------+
 *
 *
 *         [1] Application dequeues events from the previous stage.
 *         [2] Application prepares the crypto operations.
 *         [3] Crypto operations are submitted to cryptodev by application.
 *         [4] Crypto adapter dequeues crypto completions from cryptodev.
 *         [5] Crypto adapter enqueues events to the eventdev.
 *         [6] Application dequeues from eventdev and prepares for further
 *             processing.
 *
 * In the RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode, the application submits crypto
 * operations directly to the crypto device. The adapter then dequeues crypto
 * completions from the crypto device and enqueues events to the event device.
 * This mode does not ensure ingress ordering if the application enqueues to
 * the cryptodev directly without going through a crypto/atomic stage, i.e.
 * when items [1] and [2] are removed.
 * Events dequeued from the adapter will be treated as new events.
 * In this mode, the application needs to specify the event information
 * (response information) needed to enqueue an event after the crypto
 * operation is completed.
 *
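 * A minimal sketch of this flow is shown below. It assumes a session-based
 * symmetric operation whose response information has already been associated
 * with the session (see the private data discussion further down); BURST_SIZE,
 * evdev_id, ev_port, cdev_id, qp_id and prepare_crypto_op() are
 * application-defined placeholders.
 *
 * @code{.c}
 *	struct rte_event ev[BURST_SIZE];
 *	struct rte_crypto_op *op;
 *	uint16_t nb, i;
 *
 *	// [1]/[2] dequeue events from the previous stage and build crypto ops
 *	nb = rte_event_dequeue_burst(evdev_id, ev_port, ev, BURST_SIZE, 0);
 *	for (i = 0; i < nb; i++) {
 *		op = prepare_crypto_op(ev[i].mbuf);
 *		// [3] submit the operation directly to the cryptodev
 *		rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1);
 *	}
 *
 *	// [4]-[6] the adapter turns completions into RTE_EVENT_OP_NEW events,
 *	// which the application dequeues from its event port
 *	nb = rte_event_dequeue_burst(evdev_id, ev_port, ev, BURST_SIZE, 0);
 * @endcode
 *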
 *
 * Working model of RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode:
 *
 *                +--------------+         +--------------+
 *        --[1]-->|              |---[2]-->|  Application |
 *                | Event device |         |      in      |
 *        <--[8]--|              |<--[3]---| Ordered stage|
 *                +--------------+         +--------------+
 *                    ^      |
 *                    |     [4]
 *                   [7]     |
 *                    |      v
 *               +----------------+       +--------------+
 *               |                |--[5]->|              |
 *               | Crypto adapter |       |   Cryptodev  |
 *               |                |<-[6]--|              |
 *               +----------------+       +--------------+
 *
 *
 *         [1] Events from the previous stage.
 *         [2] Application in ordered stage dequeues events from eventdev.
 *         [3] Application enqueues crypto operations as events to eventdev.
 *         [4] Crypto adapter dequeues event from eventdev.
 *         [5] Crypto adapter submits crypto operations to cryptodev
 *             (Atomic stage).
 *         [6] Crypto adapter dequeues crypto completions from cryptodev.
 *         [7] Crypto adapter enqueues events to the eventdev.
 *         [8] Events to the next stage.
 *
 * In the RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, if the HW supports the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability, the
 * application can submit the crypto operations directly to the cryptodev.
 * If not, the application retrieves the crypto adapter's event port using the
 * rte_event_crypto_adapter_event_port_get() API, links its event queue to
 * this port and starts enqueuing crypto operations as events to the eventdev.
 * The adapter then dequeues the events and submits the crypto operations to
 * the cryptodev. After the crypto operations complete, the adapter enqueues
 * events to the event device.
 * The application can use this mode when ingress packet ordering is needed.
 * Events dequeued from the adapter will be treated as forwarded events.
 * In this mode, the application needs to specify the cryptodev ID
 * and queue pair ID (request information) needed to enqueue a crypto
 * operation in addition to the event information (response information)
 * needed to enqueue an event after the crypto operation has completed.
 *
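 * A minimal sketch of this flow, for the case where the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability is not
 * available, is shown below. It assumes the request/response information has
 * already been set as described further down; adapter_id, evdev_id,
 * app_port_id, app_queue_id and op are application-defined placeholders.
 *
 * @code{.c}
 *	uint8_t adapter_port_id;
 *	struct rte_event ev = {0};
 *
 *	// link the application's event queue to the adapter's event port
 *	rte_event_crypto_adapter_event_port_get(adapter_id, &adapter_port_id);
 *	rte_event_port_link(evdev_id, adapter_port_id, &app_queue_id, NULL, 1);
 *
 *	// [3] wrap the crypto op in an event and forward it to the adapter
 *	ev.queue_id = app_queue_id;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.op = RTE_EVENT_OP_FORWARD;
 *	ev.event_ptr = op;
 *	rte_event_enqueue_burst(evdev_id, app_port_id, &ev, 1);
 * @endcode
 *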
 *
 * The event crypto adapter provides common APIs to configure the packet flow
 * from the crypto device to event devices for both SW and HW based transfers.
 * The event crypto adapter's functions are:
 *  - rte_event_crypto_adapter_create_ext()
 *  - rte_event_crypto_adapter_create()
 *  - rte_event_crypto_adapter_free()
 *  - rte_event_crypto_adapter_queue_pair_add()
 *  - rte_event_crypto_adapter_queue_pair_del()
 *  - rte_event_crypto_adapter_start()
 *  - rte_event_crypto_adapter_stop()
 *  - rte_event_crypto_adapter_stats_get()
 *  - rte_event_crypto_adapter_stats_reset()
 *
 * The application creates an instance using rte_event_crypto_adapter_create()
 * or rte_event_crypto_adapter_create_ext().
 *
 * Cryptodev queue pair addition/deletion is done using the
 * rte_event_crypto_adapter_queue_pair_xxx() APIs. If the HW supports the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability, event
 * information must be passed to the add API. A typical setup sequence is
 * shown below.
 *
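 * The sketch below illustrates that sequence, assuming the event device and
 * crypto device are already configured. adapter_id, evdev_id, cdev_id and
 * crypto_ev_queue_id are application-defined placeholders, and the capability
 * query uses rte_event_crypto_adapter_caps_get() declared in rte_eventdev.h.
 *
 * @code{.c}
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *	struct rte_event_crypto_adapter_queue_conf qp_conf = {0};
 *	uint32_t caps = 0;
 *
 *	rte_event_crypto_adapter_create(adapter_id, evdev_id, &port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
 *
 *	rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &caps);
 *	if (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
 *		// event information is mandatory for this capability
 *		qp_conf.ev.queue_id = crypto_ev_queue_id;
 *		qp_conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	}
 *	// queue_pair_id of -1 adds all pre-configured queue pairs of cdev_id
 *	rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id, -1,
 *						&qp_conf);
 *	rte_event_crypto_adapter_start(adapter_id);
 * @endcode
 *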
 * The SW adapter or HW PMD uses rte_crypto_op::sess_type to decide whether
 * request/response (private) data is located in the crypto/security session
 * or at an offset in the rte_crypto_op.
 *
 * For session-based operations, the set and get APIs provide a mechanism for
 * an application to store and retrieve the request/response information
 * along with the crypto session.
 * The RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA capability indicates
 * whether HW or SW supports this feature.
 *
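 * For example, the event metadata could be attached to a symmetric session
 * as sketched below. This assumes the rte_cryptodev_session_event_mdata_set()
 * helper from rte_cryptodev.h is available; sess, cdev_id, qp_id and
 * crypto_ev_queue_id are application-defined placeholders.
 *
 * @code{.c}
 *	union rte_event_crypto_metadata m = {0};
 *
 *	m.response_info.queue_id = crypto_ev_queue_id;
 *	m.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	// request information is only needed in OP_FORWARD mode
 *	m.request_info.cdev_id = cdev_id;
 *	m.request_info.queue_pair_id = qp_id;
 *
 *	rte_cryptodev_session_event_mdata_set(cdev_id, sess,
 *					      RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *					      RTE_CRYPTO_OP_WITH_SESSION,
 *					      &m, sizeof(m));
 * @endcode
 *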
 * For session-less operations, the adapter gets the private data information
 * placed along with the ``struct rte_crypto_op``.
 * The rte_crypto_op::private_data_offset field provides the offset at which
 * the request/response information is located in the rte_crypto_op. This
 * offset is counted from the start of the rte_crypto_op, including the
 * initialization vector (IV).
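 *
 * For session-less operations, the metadata could be written at that offset as
 * sketched below. The op is assumed to come from a crypto op pool created with
 * enough private data space to hold the union, and iv_len, cdev_id, qp_id and
 * crypto_ev_queue_id are application-defined placeholders.
 *
 * @code{.c}
 *	union rte_event_crypto_metadata *m;
 *
 *	op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
 *	op->private_data_offset = sizeof(struct rte_crypto_op) +
 *				  sizeof(struct rte_crypto_sym_op) + iv_len;
 *	m = (union rte_event_crypto_metadata *)((uint8_t *)op +
 *						op->private_data_offset);
 *	m->response_info.queue_id = crypto_ev_queue_id;
 *	m->response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	// the first 8 bytes of request_info overlap response_info
 *	m->request_info.cdev_id = cdev_id;
 *	m->request_info.queue_pair_id = qp_id;
 * @endcode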
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

#include "rte_eventdev.h"

/**
 * Crypto event adapter mode
 */
enum rte_event_crypto_adapter_mode {
	RTE_EVENT_CRYPTO_ADAPTER_OP_NEW,
	/**< Start the crypto adapter in event new mode.
	 * @see RTE_EVENT_OP_NEW.
	 * Application submits crypto operations to the cryptodev.
	 * Adapter only dequeues the crypto completions from cryptodev
	 * and enqueues events to the eventdev.
	 */
	RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD,
	/**< Start the crypto adapter in event forward mode.
	 * @see RTE_EVENT_OP_FORWARD.
	 * Application submits crypto requests as events to the crypto
	 * adapter or crypto device based on the
	 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
	 * Crypto completions are enqueued back to the eventdev by the
	 * crypto adapter.
	 */
};

/**
 * Crypto event request structure filled in by the application to provide
 * event request information to the adapter.
 */
struct rte_event_crypto_request {
	uint8_t resv[8];
	/**< Overlaps with the first 8 bytes of struct rte_event
	 * that encode the response event information. The application
	 * is expected to fill in struct rte_event response_info.
	 */
	uint16_t cdev_id;
	/**< cryptodev ID to be used */
	uint16_t queue_pair_id;
	/**< cryptodev queue pair ID to be used */
	uint32_t resv1;
	/**< Reserved bits */
};

/**
 * Crypto event metadata structure filled in by the application
 * to provide crypto request and event response information.
 *
 * If crypto events are enqueued using a HW mechanism, the cryptodev
 * PMD will use the event response information to set up the event
 * that is enqueued back to eventdev after completion of the crypto
 * operation. If the transfer is done by SW, the event response information
 * will be used by the adapter.
 */
union rte_event_crypto_metadata {
	struct rte_event_crypto_request request_info;
	/**< Request information to be filled in by the application
	 * for RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
	 * The first 8 bytes of request_info are reserved for response_info.
	 */
	struct rte_event response_info;
	/**< Response information to be filled in by the application
	 * for RTE_EVENT_CRYPTO_ADAPTER_OP_NEW and
	 * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD modes.
	 */
};

/**
 * Adapter configuration structure that the adapter configuration callback
 * function is expected to fill out.
 * @see rte_event_crypto_adapter_conf_cb
 */
struct rte_event_crypto_adapter_conf {
	uint8_t event_port_id;
	/**< Event port identifier; the adapter enqueues events to this
	 * port and dequeues crypto request events in
	 * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
	 */
	uint32_t max_nb;
	/**< The adapter can return early if it has processed at least
	 * max_nb crypto ops. This isn't treated as a requirement; batching
	 * may cause the adapter to process more than max_nb crypto ops.
	 */
};

#define RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR	0x1
/**< This flag indicates that crypto operations processed on the crypto
 * adapter need to be vectorized.
 * @see rte_event_crypto_adapter_queue_conf::flags
 */

/**
 * Adapter queue configuration structure
 */
struct rte_event_crypto_adapter_queue_conf {
	uint32_t flags;
	/**< Flags for handling crypto operations
	 * @see RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
	 */
	struct rte_event ev;
	/**< If HW supports cryptodev queue pair to event queue binding,
	 * application is expected to fill in event information.
	 * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
	 */
	uint16_t vector_sz;
	/**< Indicates the maximum number of crypto operations to combine and
	 * form a vector.
	 * @see rte_event_crypto_adapter_vector_limits::min_sz
	 * @see rte_event_crypto_adapter_vector_limits::max_sz
	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
	 * @see rte_event_crypto_adapter_queue_conf::flags
	 */
	uint64_t vector_timeout_ns;
	/**<
	 * Indicates the maximum number of nanoseconds to wait for aggregating
	 * crypto operations. Should be within the vectorization limits of the
	 * adapter.
	 * @see rte_event_crypto_adapter_vector_limits::min_timeout_ns
	 * @see rte_event_crypto_adapter_vector_limits::max_timeout_ns
	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
	 * @see rte_event_crypto_adapter_queue_conf::flags
	 */
	struct rte_mempool *vector_mp;
	/**< Indicates the mempool that should be used for allocating
	 * rte_event_vector container.
	 * Should be created by using `rte_event_vector_pool_create`.
	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
	 * @see rte_event_crypto_adapter_queue_conf::flags.
	 */
};

/**
 * A structure used to retrieve event crypto adapter vector limits.
 */
struct rte_event_crypto_adapter_vector_limits {
	uint16_t min_sz;
	/**< Minimum vector limit configurable.
	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
	 */
	uint16_t max_sz;
	/**< Maximum vector limit configurable.
	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
	 */
	uint8_t log2_sz;
	/**< True if the size configured should be in log2.
	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
	 */
	uint64_t min_timeout_ns;
	/**< Minimum vector timeout configurable.
	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
	 */
	uint64_t max_timeout_ns;
	/**< Maximum vector timeout configurable.
	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
	 */
};
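
/*
 * Example (illustrative sketch, not part of the API): configuring event
 * vectorization on a queue pair within the limits reported by
 * rte_event_crypto_adapter_vector_limits_get(). The pool size, vector size
 * and timeout chosen below, as well as adapter_id, evdev_id and cdev_id,
 * are application-defined placeholders.
 *
 *	struct rte_event_crypto_adapter_vector_limits limits;
 *	struct rte_event_crypto_adapter_queue_conf qp_conf = {0};
 *
 *	rte_event_crypto_adapter_vector_limits_get(evdev_id, cdev_id, &limits);
 *
 *	qp_conf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
 *	qp_conf.vector_sz = RTE_MIN(32, limits.max_sz);
 *	qp_conf.vector_timeout_ns = limits.min_timeout_ns;
 *	qp_conf.vector_mp = rte_event_vector_pool_create("crypto_vec_pool",
 *			1024, 0, qp_conf.vector_sz, rte_socket_id());
 *
 *	rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id, -1,
 *						&qp_conf);
 */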

/**
 * Function type used for adapter configuration callback. The callback is
 * used to fill in members of the struct rte_event_crypto_adapter_conf; this
 * callback is invoked when creating a SW service for packet transfer from a
 * cryptodev queue pair to the event device. The SW service is created within
 * the rte_event_crypto_adapter_queue_pair_add() function if SW based packet
 * transfers from the cryptodev queue pair to the event device are required.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param dev_id
 *  Event device identifier.
 *
 * @param conf
 *  Structure that needs to be populated by this callback.
 *
 * @param arg
 *  Argument to the callback. This is the same as the conf_arg passed to
 *  rte_event_crypto_adapter_create_ext().
 */
typedef int (*rte_event_crypto_adapter_conf_cb) (uint8_t id, uint8_t dev_id,
			struct rte_event_crypto_adapter_conf *conf,
			void *arg);
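
/*
 * Example (illustrative sketch, not part of the API): a minimal configuration
 * callback for rte_event_crypto_adapter_create_ext() that fills in the
 * adapter configuration from an event port the application has already set up
 * and passes through conf_arg. A production callback may instead reconfigure
 * the event device to add a dedicated adapter port.
 *
 *	static int
 *	app_crypto_adapter_conf_cb(uint8_t id, uint8_t dev_id,
 *				   struct rte_event_crypto_adapter_conf *conf,
 *				   void *arg)
 *	{
 *		uint8_t *adapter_port_id = arg;
 *
 *		RTE_SET_USED(id);
 *		RTE_SET_USED(dev_id);
 *		conf->event_port_id = *adapter_port_id;
 *		conf->max_nb = 128;
 *		return 0;
 *	}
 */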

/**
 * A structure used to retrieve statistics for an event crypto adapter
 * instance.
 */
struct rte_event_crypto_adapter_stats {
	uint64_t event_poll_count;
	/**< Event port poll count */
	uint64_t event_deq_count;
	/**< Event dequeue count */
	uint64_t crypto_enq_count;
	/**< Cryptodev enqueue count */
	uint64_t crypto_enq_fail;
	/**< Cryptodev enqueue failed count */
	uint64_t crypto_deq_count;
	/**< Cryptodev dequeue count */
	uint64_t event_enq_count;
	/**< Event enqueue count */
	uint64_t event_enq_retry_count;
	/**< Event enqueue retry count */
	uint64_t event_enq_fail_count;
	/**< Event enqueue fail count */
};

/**
 * Create a new event crypto adapter with the specified identifier.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param dev_id
 *  Event device identifier.
 *
 * @param conf_cb
 *  Callback function that fills in members of the
 *  struct rte_event_crypto_adapter_conf passed to it.
 *
 * @param mode
 *  Flag to indicate the mode of the adapter.
 *  @see rte_event_crypto_adapter_mode
 *
 * @param conf_arg
 *  Argument that is passed to the conf_cb function.
 *
 * @return
 *   - 0: Success
 *   - <0: Error code on failure
 */
int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				    rte_event_crypto_adapter_conf_cb conf_cb,
				    enum rte_event_crypto_adapter_mode mode,
				    void *conf_arg);

/**
 * Create a new event crypto adapter with the specified identifier.
 * This function uses an internal configuration function that creates an event
 * port. This default function reconfigures the event device with an
 * additional event port and sets up the event port using the port_config
 * parameter passed into this function. In case the application needs more
 * control over the configuration of the service, it should use the
 * rte_event_crypto_adapter_create_ext() version.
 *
 * When this API is used to create an adapter instance,
 * ``rte_event_dev_config::nb_event_ports`` is automatically incremented,
 * and the event device is reconfigured with an additional event port during
 * service initialization. This event device reconfigure logic also increments
 * the ``rte_event_dev_config::nb_single_link_event_port_queues``
 * parameter if the adapter event port config is of type
 * ``RTE_EVENT_PORT_CFG_SINGLE_LINK``.
 *
 * The application no longer needs to account for the
 * ``rte_event_dev_config::nb_event_ports`` and
 * ``rte_event_dev_config::nb_single_link_event_port_queues``
 * parameters required for the crypto adapter in the event device
 * configuration when the adapter is created with this API.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param dev_id
 *  Event device identifier.
 *
 * @param port_config
 *  Argument of type *rte_event_port_conf* that is passed to the conf_cb
 *  function.
 *
 * @param mode
 *  Flag to indicate the mode of the adapter.
 *  @see rte_event_crypto_adapter_mode
 *
 * @return
 *   - 0: Success
 *   - <0: Error code on failure
 */
int
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config,
				enum rte_event_crypto_adapter_mode mode);

/**
 * Free an event crypto adapter
 *
 * @param id
 *  Adapter identifier.
 *
 * @return
 *   - 0: Success
 *   - <0: Error code on failure. If the adapter still has queue pairs
 *      added to it, the function returns -EBUSY.
 */
int
rte_event_crypto_adapter_free(uint8_t id);

/**
 * Add a queue pair to an event crypto adapter.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param cdev_id
 *  Cryptodev identifier.
 *
 * @param queue_pair_id
 *  Cryptodev queue pair identifier. If queue_pair_id is set to -1,
 *  the adapter adds all the pre-configured queue pairs to the instance.
 *
 * @param conf
 *  Additional configuration structure of type
 *  *rte_event_crypto_adapter_queue_conf*
 *
 * @return
 *  - 0: Success, queue pair added correctly.
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
			uint8_t cdev_id,
			int32_t queue_pair_id,
			const struct rte_event_crypto_adapter_queue_conf *conf);

/**
 * Delete a queue pair from an event crypto adapter.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param cdev_id
 *  Cryptodev identifier.
 *
 * @param queue_pair_id
 *  Cryptodev queue pair identifier.
 *
 * @return
 *  - 0: Success, queue pair deleted successfully.
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
					int32_t queue_pair_id);

/**
 * Start event crypto adapter
 *
 * @param id
 *  Adapter identifier.
 *
 * @return
 *  - 0: Success, adapter started successfully.
 *  - <0: Error code on failure.
 *
 * @note
 *  The eventdev and cryptodev to which the event_crypto_adapter is connected
 *  need to be started before calling rte_event_crypto_adapter_start().
 */
int
rte_event_crypto_adapter_start(uint8_t id);

/**
 * Stop event crypto adapter
 *
 * @param id
 *  Adapter identifier.
 *
 * @return
 *  - 0: Success, adapter stopped successfully.
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_stop(uint8_t id);

/**
 * Retrieve statistics for an adapter
 *
 * @param id
 *  Adapter identifier.
 *
 * @param [out] stats
 *  A pointer to structure used to retrieve statistics for an adapter.
 *
 * @return
 *  - 0: Success, retrieved successfully.
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_stats_get(uint8_t id,
				struct rte_event_crypto_adapter_stats *stats);

/**
 * Reset statistics for an adapter.
 *
 * @param id
 *  Adapter identifier.
 *
 * @return
 *  - 0: Success, statistics reset successfully.
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_stats_reset(uint8_t id);

/**
 * Retrieve the service ID of an adapter. If the adapter doesn't use
 * an rte_service function, this function returns -ESRCH.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param [out] service_id
 *  A pointer to a uint32_t, to be filled in with the service id.
 *
 * @return
 *  - 0: Success
 *  - <0: Error code on failure; if the adapter doesn't use an rte_service
 *    function, -ESRCH is returned.
 */
int
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id);
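
/*
 * Example (illustrative sketch, not part of the API): when the adapter uses
 * an rte_service function, the application is expected to map and run that
 * service, for instance on a service lcore. adapter_id and service_lcore_id
 * are application-defined placeholders.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_crypto_adapter_service_id_get(adapter_id,
 *						    &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, service_lcore_id, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */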

/**
 * Retrieve the event port of an adapter.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param [out] event_port_id
 *  Event port identifier used by the adapter. The application links its
 *  event queue to this port in RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
 *
 * @return
 *  - 0: Success
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);

/**
 * Retrieve vector limits for a given event dev and crypto dev pair.
 * @see rte_event_crypto_adapter_vector_limits
 *
 * @param dev_id
 *  Event device identifier.
 * @param cdev_id
 *  Crypto device identifier.
 * @param [out] limits
 *  A pointer to rte_event_crypto_adapter_vector_limits structure that has to
 *  be filled.
 *
 * @return
 *  - 0: Success.
 *  - <0: Error code on failure.
 */
int rte_event_crypto_adapter_vector_limits_get(
	uint8_t dev_id, uint16_t cdev_id,
	struct rte_event_crypto_adapter_vector_limits *limits);

/**
 * Enqueue a burst of crypto operations as event objects supplied in *rte_event*
 * structure on an event crypto adapter designated by its event *dev_id* through
 * the event port specified by *port_id*. This function is supported if the
 * eventdev PMD has the #RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
 * capability flag set.
 *
 * The *nb_events* parameter is the number of event objects to enqueue which are
 * supplied in the *ev* array of *rte_event* structure.
 *
 * The rte_event_crypto_adapter_enqueue() function returns the number of
 * event objects it actually enqueued. A return value equal to *nb_events*
 * means that all event objects have been enqueued.
 *
 * @param dev_id
 *  The identifier of the device.
 * @param port_id
 *  The identifier of the event port.
 * @param ev
 *  Points to an array of *nb_events* objects of type *rte_event* structure
 *  which contain the event object enqueue operations to be processed.
 * @param nb_events
 *  The number of event objects to enqueue, typically the enqueue depth
 *  available for this port as returned by rte_event_port_attr_get() for
 *  RTE_EVENT_PORT_ATTR_ENQ_DEPTH.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified in
 *   a *rte_event*. If the return value is less than *nb_events*, the remaining
 *   events at the end of ev[] are not consumed and the caller has to take care
 *   of them, and rte_errno is set accordingly. Possible errno values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 */
static inline uint16_t
rte_event_crypto_adapter_enqueue(uint8_t dev_id,
				uint8_t port_id,
				struct rte_event ev[],
				uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Validate the indices before they are used to look up fp_ops/port. */
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_crypto_adapter_enqueue(dev_id, port_id, ev,
		nb_events);

	return fp_ops->ca_enqueue(port, ev, nb_events);
}
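
/*
 * Example (illustrative sketch, not part of the API): forwarding crypto
 * request events through the adapter when the eventdev reports the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability. The events in
 * ev[] are assumed to carry crypto ops in event_ptr with request/response
 * metadata already set; evdev_id, app_port_id and nb are placeholders.
 *
 *	uint16_t sent;
 *
 *	sent = rte_event_crypto_adapter_enqueue(evdev_id, app_port_id, ev, nb);
 *	if (sent < nb) {
 *		// ev[sent..nb-1] were not consumed; the application must
 *		// retry or drop them, rte_errno holds the reason
 *	}
 */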

#ifdef __cplusplus
}
#endif
#endif	/* _RTE_EVENT_CRYPTO_ADAPTER_ */