/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#ifndef _RTE_EVENT_CRYPTO_ADAPTER_
#define _RTE_EVENT_CRYPTO_ADAPTER_

/**
 * @file
 *
 * RTE Event crypto adapter
 *
 * The eventdev library provides a couple of adapters that bridge between
 * various components to provide new event sources. The event crypto adapter
 * is one of those adapters; it is intended to bridge between event devices
 * and crypto devices.
 *
 * The crypto adapter adds support to enqueue/dequeue crypto operations to/
 * from the event device. The packet flow between the crypto device and the
 * event device can be accomplished using both SW and HW based transfer
 * mechanisms. The adapter uses an EAL service core function for SW based
 * packet transfer and uses the eventdev PMD functions to configure HW based
 * packet transfer between the crypto device and the event device.
 *
 * The application can choose to submit a crypto operation directly to the
 * crypto device or send it to the crypto adapter via eventdev, based on the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
 * The first mode is known as the event new (RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
 * mode and the second as the event forward
 * (RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) mode. The mode is specified while
 * creating the adapter. In the former mode, it is the application's
 * responsibility to enable ingress packet ordering. In the latter mode, it is
 * the adapter's responsibility to enable ingress packet ordering.
 *
 *
 * Working model of RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode:
 *
 *                +--------------+         +--------------+
 *                |              |         | Crypto stage |
 *                | Application  |---[2]-->| + enqueue to |
 *                |              |         |   cryptodev  |
 *                +--------------+         +--------------+
 *                    ^   ^                       |
 *                    |   |                      [3]
 *                   [6] [1]                      |
 *                    |   |                       |
 *                +--------------+                |
 *                |              |                |
 *                | Event device |                |
 *                |              |                |
 *                +--------------+                |
 *                       ^                        |
 *                       |                        |
 *                      [5]                       |
 *                       |                        v
 *                +--------------+         +--------------+
 *                |              |         |              |
 *                |Crypto adapter|<--[4]---|  Cryptodev   |
 *                |              |         |              |
 *                +--------------+         +--------------+
 *
 *
 *         [1] Application dequeues events from the previous stage.
 *         [2] Application prepares the crypto operations.
 *         [3] Crypto operations are submitted to cryptodev by application.
 *         [4] Crypto adapter dequeues crypto completions from cryptodev.
 *         [5] Crypto adapter enqueues events to the eventdev.
 *         [6] Application dequeues from eventdev and prepares for further
 *             processing.
 *
 * In the RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode, the application submits crypto
 * operations directly to the crypto device. The adapter then dequeues crypto
 * completions from the crypto device and enqueues events to the event device.
 * This mode does not ensure ingress ordering if the application enqueues to
 * the cryptodev directly without going through a crypto/atomic stage, i.e.
 * items [1] and [2] are skipped.
 * Events dequeued from the adapter will be treated as new events.
 * In this mode, the application needs to specify the event information
 * (response information) which is needed to enqueue an event after the crypto
 * operation is completed.
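 *
 * A minimal, illustrative sketch of preparing a session-less operation for
 * this mode is shown below. Identifiers such as op, cdev_id, qp_id,
 * resp_queue_id and flow_id are placeholders set up elsewhere by the
 * application, and error handling is omitted:
 *
 * @code
 *	union rte_event_crypto_metadata *m_data;
 *
 *	// Response information lives at rte_crypto_op::private_data_offset
 *	// for session-less operations.
 *	m_data = (union rte_event_crypto_metadata *)((uint8_t *)op +
 *			op->private_data_offset);
 *	m_data->response_info.queue_id = resp_queue_id;
 *	m_data->response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	m_data->response_info.flow_id = flow_id;
 *
 *	// Step [3]: submit the prepared operation directly to the cryptodev.
 *	rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1);
 * @endcode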
 *
 *
 * Working model of RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode:
 *
 *                +--------------+         +--------------+
 *        --[1]-->|              |---[2]-->|  Application |
 *                | Event device |         |      in      |
 *        <--[8]--|              |<--[3]---| Ordered stage|
 *                +--------------+         +--------------+
 *                    ^      |
 *                    |     [4]
 *                   [7]     |
 *                    |      v
 *               +----------------+       +--------------+
 *               |                |--[5]->|              |
 *               | Crypto adapter |       |   Cryptodev  |
 *               |                |<-[6]--|              |
 *               +----------------+       +--------------+
 *
 *
 *         [1] Events from the previous stage.
 *         [2] Application in ordered stage dequeues events from eventdev.
 *         [3] Application enqueues crypto operations as events to eventdev.
 *         [4] Crypto adapter dequeues events from eventdev.
 *         [5] Crypto adapter submits crypto operations to cryptodev
 *             (Atomic stage).
 *         [6] Crypto adapter dequeues crypto completions from cryptodev.
 *         [7] Crypto adapter enqueues events to the eventdev.
 *         [8] Events to the next stage.
 *
 * In the RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, if the HW supports the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability, the
 * application can directly submit the crypto operations to the cryptodev.
 * If not, the application retrieves the crypto adapter's event port using the
 * rte_event_crypto_adapter_event_port_get() API, links its event queue to
 * this port and starts enqueuing crypto operations as events to the eventdev.
 * The adapter then dequeues the events and submits the crypto operations to
 * the cryptodev. After the crypto operations are completed, the adapter
 * enqueues events to the event device.
 * The application can use this mode when ingress packet ordering is needed.
 * Events dequeued from the adapter will be treated as forwarded events.
 * In this mode, the application needs to specify the cryptodev ID
 * and queue pair ID (request information) needed to enqueue a crypto
 * operation in addition to the event information (response information)
 * needed to enqueue an event after the crypto operation has completed.
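 *
 * A minimal, illustrative sketch of step [3] for a device without the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability is shown
 * below. Identifiers such as crypto_adp_id, ev_dev_id, app_port_id and
 * adapter_q_id are placeholders, the op metadata is assumed to have been
 * filled in as described above, the exact event field settings are
 * illustrative, and error handling is omitted:
 *
 * @code
 *	uint8_t adapter_port_id;
 *	uint8_t adapter_q_id = 0; // event queue reserved for the adapter
 *	struct rte_event ev = {0};
 *
 *	// One-time setup: link an event queue to the adapter's event port.
 *	rte_event_crypto_adapter_event_port_get(crypto_adp_id,
 *						&adapter_port_id);
 *	rte_event_port_link(ev_dev_id, adapter_port_id, &adapter_q_id,
 *			    NULL, 1);
 *
 *	// Per-op: wrap the prepared crypto op in an event and forward it
 *	// to the queue linked to the adapter port.
 *	ev.queue_id = adapter_q_id;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.op = RTE_EVENT_OP_FORWARD;
 *	ev.event_type = RTE_EVENT_TYPE_CRYPTODEV;
 *	ev.event_ptr = op;
 *	rte_event_enqueue_burst(ev_dev_id, app_port_id, &ev, 1);
 * @endcode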
 *
 *
 * The event crypto adapter provides common APIs to configure the packet flow
 * from the crypto device to event devices for both SW and HW based transfers.
 * The event crypto adapter's functions are:
 *  - rte_event_crypto_adapter_create_ext()
 *  - rte_event_crypto_adapter_create()
 *  - rte_event_crypto_adapter_free()
 *  - rte_event_crypto_adapter_queue_pair_add()
 *  - rte_event_crypto_adapter_queue_pair_del()
 *  - rte_event_crypto_adapter_start()
 *  - rte_event_crypto_adapter_stop()
 *  - rte_event_crypto_adapter_stats_get()
 *  - rte_event_crypto_adapter_stats_reset()
 *  - rte_event_crypto_adapter_runtime_params_get()
 *  - rte_event_crypto_adapter_runtime_params_init()
 *  - rte_event_crypto_adapter_runtime_params_set()
 *
 * The application creates an instance using rte_event_crypto_adapter_create()
 * or rte_event_crypto_adapter_create_ext().
 *
 * Cryptodev queue pair addition/deletion is done using the
 * rte_event_crypto_adapter_queue_pair_xxx() APIs. If HW supports the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability, event
 * information must be passed to the add API.
 *
 * The SW adapter or HW PMD uses rte_crypto_op::sess_type to decide whether
 * request/response (private) data is located in the crypto/security session
 * or at an offset in the rte_crypto_op.
 *
 * For session-based operations, the set and get APIs provide a mechanism for
 * an application to store and retrieve the request/response information
 * along with the crypto session.
 * The RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA capability indicates
 * whether HW or SW supports this feature.
 *
 * For session-less mode, the adapter gets the private data information placed
 * along with the ``struct rte_crypto_op``.
 * The rte_crypto_op::private_data_offset provides an offset to locate the
 * request/response information in the rte_crypto_op. This offset is counted
 * from the start of the rte_crypto_op including initialization vector (IV).
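 *
 * A minimal, illustrative sketch of providing the metadata for a
 * session-based symmetric operation is shown below, assuming the cryptodev
 * session event metadata API (rte_cryptodev_session_event_mdata_set()) is
 * the storage mechanism; sess, cdev_id, qp_id and resp_queue_id are
 * placeholders and error handling is omitted. For a session-less operation
 * the same union is instead written at rte_crypto_op::private_data_offset,
 * as shown in the OP_NEW sketch above.
 *
 * @code
 *	union rte_event_crypto_metadata m_data;
 *
 *	memset(&m_data, 0, sizeof(m_data));
 *	// Request information is used by the adapter in OP_FORWARD mode.
 *	m_data.request_info.cdev_id = cdev_id;
 *	m_data.request_info.queue_pair_id = qp_id;
 *	// Response information describes the completion event.
 *	m_data.response_info.queue_id = resp_queue_id;
 *	m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *
 *	rte_cryptodev_session_event_mdata_set(cdev_id, sess,
 *					      RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *					      RTE_CRYPTO_OP_WITH_SESSION,
 *					      &m_data, sizeof(m_data));
 * @endcode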
 */

#include <stdint.h>

#include "rte_eventdev.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Crypto event adapter mode
 */
enum rte_event_crypto_adapter_mode {
	RTE_EVENT_CRYPTO_ADAPTER_OP_NEW,
	/**< Start the crypto adapter in event new mode.
	 * @see RTE_EVENT_OP_NEW.
	 * Application submits crypto operations to the cryptodev.
	 * Adapter only dequeues the crypto completions from cryptodev
	 * and enqueues events to the eventdev.
	 */
	RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD,
	/**< Start the crypto adapter in event forward mode.
	 * @see RTE_EVENT_OP_FORWARD.
	 * Application submits crypto requests as events to the crypto
	 * adapter or crypto device based on the
	 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
	 * Crypto completions are enqueued back to the eventdev by the
	 * crypto adapter.
	 */
};

/**
 * Crypto event request structure, filled in by the application to
 * provide event request information to the adapter.
 */
struct rte_event_crypto_request {
	uint8_t resv[8];
	/**< Overlaps with the first 8 bytes of struct rte_event
	 * that encode the response event information. Application
	 * is expected to fill in struct rte_event response_info.
	 */
	uint16_t cdev_id;
	/**< cryptodev ID to be used */
	uint16_t queue_pair_id;
	/**< cryptodev queue pair ID to be used */
	uint32_t resv1;
	/**< Reserved bits */
};

/**
 * Crypto event metadata structure, filled in by the application
 * to provide crypto request and event response information.
 *
 * If crypto events are enqueued using an HW mechanism, the cryptodev
 * PMD will use the event response information to set up the event
 * that is enqueued back to eventdev after completion of the crypto
 * operation. If the transfer is done by SW, the event response information
 * will be used by the adapter.
 */
union rte_event_crypto_metadata {
	struct rte_event_crypto_request request_info;
	/**< Request information to be filled in by the application
	 * for RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
	 * The first 8 bytes of request_info are reserved for response_info.
	 */
	struct rte_event response_info;
	/**< Response information to be filled in by the application
	 * for RTE_EVENT_CRYPTO_ADAPTER_OP_NEW and
	 * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD modes.
	 */
};

/**
 * Adapter configuration structure that the adapter configuration callback
 * function is expected to fill out.
 * @see rte_event_crypto_adapter_conf_cb
 */
struct rte_event_crypto_adapter_conf {
	uint8_t event_port_id;
	/**< Event port identifier; the adapter enqueues events to this
	 * port and dequeues crypto request events from it in
	 * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
	 */
	uint32_t max_nb;
	/**< The adapter can return early if it has processed at least
	 * max_nb crypto ops. This isn't treated as a requirement; batching
	 * may cause the adapter to process more than max_nb crypto ops.
	 */
};

/**
 * Adapter runtime configuration parameters
 */
struct rte_event_crypto_adapter_runtime_params {
	uint32_t max_nb;
	/**< The adapter can return early if it has processed at least
	 * max_nb crypto ops. This isn't treated as a requirement; batching
	 * may cause the adapter to process more than max_nb crypto ops.
	 *
	 * rte_event_crypto_adapter_create() configures the
	 * adapter with a default value of max_nb.
	 * rte_event_crypto_adapter_create_ext() configures the adapter with
	 * a user provided value of max_nb through the
	 * rte_event_crypto_adapter_conf::max_nb parameter.
	 * rte_event_crypto_adapter_runtime_params_set() allows max_nb to be
	 * re-configured during runtime (after adding at least one queue pair).
	 *
	 * This is valid for devices without the
	 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD or
	 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW capability.
	 */
	uint32_t rsvd[15];
	/**< Reserved fields for future expansion */
};

#define RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR	0x1
/**< This flag indicates that crypto operations processed on the crypto
 * adapter need to be vectorized.
 * @see rte_event_crypto_adapter_queue_conf::flags
 */

/**
 * Adapter queue configuration structure
 */
struct rte_event_crypto_adapter_queue_conf {
	uint32_t flags;
	/**< Flags for handling crypto operations
	 * @see RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
	 */
	struct rte_event ev;
	/**< If HW supports cryptodev queue pair to event queue binding,
	 * application is expected to fill in event information.
	 * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
	 */
	uint16_t vector_sz;
	/**< Indicates the maximum number of crypto operations to combine and
	 * form a vector.
	 * @see rte_event_crypto_adapter_vector_limits::min_sz
	 * @see rte_event_crypto_adapter_vector_limits::max_sz
	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
	 * @see rte_event_crypto_adapter_queue_conf::flags
	 */
	uint64_t vector_timeout_ns;
	/**<
	 * Indicates the maximum number of nanoseconds to wait for aggregating
	 * crypto operations. Should be within the vectorization limits of the
	 * adapter.
	 * @see rte_event_crypto_adapter_vector_limits::min_timeout_ns
	 * @see rte_event_crypto_adapter_vector_limits::max_timeout_ns
	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
	 * @see rte_event_crypto_adapter_queue_conf::flags
	 */
	struct rte_mempool *vector_mp;
	/**< Indicates the mempool that should be used for allocating the
	 * rte_event_vector container.
	 * Should be created by using `rte_event_vector_pool_create`.
	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
	 * @see rte_event_crypto_adapter_queue_conf::flags.
	 */
};

/**
 * A structure used to retrieve event crypto adapter vector limits.
 */
struct rte_event_crypto_adapter_vector_limits {
	uint16_t min_sz;
	/**< Minimum vector limit configurable.
	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
	 */
	uint16_t max_sz;
	/**< Maximum vector limit configurable.
	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
	 */
	uint8_t log2_sz;
	/**< True if the size configured should be in log2.
	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
	 */
	uint64_t min_timeout_ns;
	/**< Minimum vector timeout configurable.
	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
	 */
	uint64_t max_timeout_ns;
	/**< Maximum vector timeout configurable.
	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
	 */
};

/**
 * Function type used for adapter configuration callback. The callback is
 * used to fill in members of the struct rte_event_crypto_adapter_conf; this
 * callback is invoked when creating a SW service for packet transfer from
 * cryptodev queue pair to the event device. The SW service is created within
 * the rte_event_crypto_adapter_queue_pair_add() function if SW based packet
 * transfers from cryptodev queue pair to the event device are required.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param dev_id
 *  Event device identifier.
 *
 * @param conf
 *  Structure that needs to be populated by this callback.
 *
 * @param arg
 *  Argument to the callback. This is the same as the conf_arg passed to
 *  rte_event_crypto_adapter_create_ext().
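 *
 * A minimal, illustrative callback sketch is shown below. It assumes the
 * event device has already been configured with a spare event port whose
 * identifier is passed through conf_arg; production code would typically
 * reconfigure the device instead, and error handling is omitted:
 *
 * @code
 *	static int
 *	crypto_adapter_conf_cb(uint8_t id, uint8_t dev_id,
 *			       struct rte_event_crypto_adapter_conf *conf,
 *			       void *arg)
 *	{
 *		uint8_t *spare_port_id = arg;
 *
 *		RTE_SET_USED(id);
 *		RTE_SET_USED(dev_id);
 *
 *		conf->event_port_id = *spare_port_id;
 *		conf->max_nb = 128;
 *		return 0;
 *	}
 * @endcode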
 */
typedef int (*rte_event_crypto_adapter_conf_cb) (uint8_t id, uint8_t dev_id,
			struct rte_event_crypto_adapter_conf *conf,
			void *arg);

/**
 * A structure used to retrieve statistics for an event crypto adapter
 * instance.
 */

struct rte_event_crypto_adapter_stats {
	uint64_t event_poll_count;
	/**< Event port poll count */
	uint64_t event_deq_count;
	/**< Event dequeue count */
	uint64_t crypto_enq_count;
	/**< Cryptodev enqueue count */
	uint64_t crypto_enq_fail;
	/**< Cryptodev enqueue failed count */
	uint64_t crypto_deq_count;
	/**< Cryptodev dequeue count */
	uint64_t event_enq_count;
	/**< Event enqueue count */
	uint64_t event_enq_retry_count;
	/**< Event enqueue retry count */
	uint64_t event_enq_fail_count;
	/**< Event enqueue fail count */
};

/**
 * Create a new event crypto adapter with the specified identifier.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param dev_id
 *  Event device identifier.
 *
 * @param conf_cb
 *  Callback function that fills in members of the
 *  struct rte_event_crypto_adapter_conf passed into it.
 *
 * @param mode
 *  Flag to indicate the mode of the adapter.
 *  @see rte_event_crypto_adapter_mode
 *
 * @param conf_arg
 *  Argument that is passed to the conf_cb function.
 *
 * @return
 *   - 0: Success
 *   - <0: Error code on failure
 */
int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				    rte_event_crypto_adapter_conf_cb conf_cb,
				    enum rte_event_crypto_adapter_mode mode,
				    void *conf_arg);

/**
 * Create a new event crypto adapter with the specified identifier.
 * This function uses an internal configuration function that creates an event
 * port. This default function reconfigures the event device with an
 * additional event port and sets up the event port using the port_config
 * parameter passed into this function. In case the application needs more
 * control over the configuration of the service, it should use the
 * rte_event_crypto_adapter_create_ext() version.
 *
 * When this API is used for creating an adapter instance,
 * ``rte_event_dev_config::nb_event_ports`` is automatically incremented,
 * and the event device is reconfigured with an additional event port during
 * service initialization. This event device reconfigure logic also increments
 * the ``rte_event_dev_config::nb_single_link_event_port_queues``
 * parameter if the adapter event port config is of type
 * ``RTE_EVENT_PORT_CFG_SINGLE_LINK``.
 *
 * The application no longer needs to account for the
 * ``rte_event_dev_config::nb_event_ports`` and
 * ``rte_event_dev_config::nb_single_link_event_port_queues``
 * parameters required for the crypto adapter in the event device
 * configuration when the adapter is created with this API.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param dev_id
 *  Event device identifier.
 *
 * @param port_config
 *  Argument of type *rte_event_port_conf* that is passed to the conf_cb
 *  function.
 *
 * @param mode
 *  Flag to indicate the mode of the adapter.
 *  @see rte_event_crypto_adapter_mode
 *
 * @return
 *   - 0: Success
 *   - <0: Error code on failure
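 *
 * A minimal, illustrative creation sketch (CRYPTO_ADAPTER_ID, ev_dev_id and
 * the port configuration values are placeholders; error handling omitted):
 *
 * @code
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *
 *	rte_event_crypto_adapter_create(CRYPTO_ADAPTER_ID, ev_dev_id,
 *					&port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
 * @endcode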
 */
int
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config,
				enum rte_event_crypto_adapter_mode mode);

/**
 * Free an event crypto adapter
 *
 * @param id
 *  Adapter identifier.
 *
 * @return
 *   - 0: Success
 *   - <0: Error code on failure. If the adapter still has queue pairs
 *      added to it, the function returns -EBUSY.
 */
int
rte_event_crypto_adapter_free(uint8_t id);

/**
 * Add a queue pair to an event crypto adapter.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param cdev_id
 *  Cryptodev identifier.
 *
 * @param queue_pair_id
 *  Cryptodev queue pair identifier. If queue_pair_id is set to -1,
 *  the adapter adds all the pre-configured queue pairs to the instance.
 *
 * @param conf
 *  Additional configuration structure of type
 *  *rte_event_crypto_adapter_queue_conf*
 *
 * @return
 *  - 0: Success, queue pair added correctly.
 *  - <0: Error code on failure.
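 *
 * A minimal, illustrative sketch of adding all pre-configured queue pairs of
 * a cryptodev (CRYPTO_ADAPTER_ID, cdev_id and ev_queue_id are placeholders;
 * the ev field only needs to be filled in when the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability is
 * reported; error handling omitted):
 *
 * @code
 *	struct rte_event_crypto_adapter_queue_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.ev.queue_id = ev_queue_id;
 *	conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *
 *	rte_event_crypto_adapter_queue_pair_add(CRYPTO_ADAPTER_ID, cdev_id,
 *						-1, &conf);
 * @endcode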
 */
int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
			uint8_t cdev_id,
			int32_t queue_pair_id,
			const struct rte_event_crypto_adapter_queue_conf *conf);

/**
 * Delete a queue pair from an event crypto adapter.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param cdev_id
 *  Cryptodev identifier.
 *
 * @param queue_pair_id
 *  Cryptodev queue pair identifier.
 *
 * @return
 *  - 0: Success, queue pair deleted successfully.
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
					int32_t queue_pair_id);

/**
 * Start event crypto adapter
 *
 * @param id
 *  Adapter identifier.
 *
 * @return
 *  - 0: Success, adapter started successfully.
 *  - <0: Error code on failure.
 *
 * @note
 *  The eventdev and cryptodev to which the event_crypto_adapter is connected
 *  need to be started before calling rte_event_crypto_adapter_start().
 */
int
rte_event_crypto_adapter_start(uint8_t id);

/**
 * Stop event crypto adapter
 *
 * @param id
 *  Adapter identifier.
 *
 * @return
 *  - 0: Success, adapter stopped successfully.
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_stop(uint8_t id);

/**
 * Retrieve statistics for an adapter
 *
 * @param id
 *  Adapter identifier.
 *
 * @param [out] stats
 *  A pointer to structure used to retrieve statistics for an adapter.
 *
 * @return
 *  - 0: Success, retrieved successfully.
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_stats_get(uint8_t id,
				struct rte_event_crypto_adapter_stats *stats);

/**
 * Reset statistics for an adapter.
 *
 * @param id
 *  Adapter identifier.
 *
 * @return
 *  - 0: Success, statistics reset successfully.
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_stats_reset(uint8_t id);

/**
 * Retrieve the service ID of an adapter. If the adapter doesn't use
 * a rte_service function, this function returns -ESRCH.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param [out] service_id
 *  A pointer to a uint32_t, to be filled in with the service id.
 *
 * @return
 *  - 0: Success
 *  - <0: Error code on failure; if the adapter doesn't use a rte_service
 *    function, this function returns -ESRCH.
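 *
 * A minimal, illustrative sketch of running a SW adapter's service on a
 * dedicated service lcore (CRYPTO_ADAPTER_ID and service_lcore_id are
 * placeholders; error handling omitted):
 *
 * @code
 *	uint32_t service_id;
 *
 *	if (rte_event_crypto_adapter_service_id_get(CRYPTO_ADAPTER_ID,
 *						    &service_id) == 0) {
 *		rte_service_lcore_add(service_lcore_id);
 *		rte_service_map_lcore_set(service_id, service_lcore_id, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 * @endcode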
 */
int
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id);

/**
 * Retrieve the event port of an adapter.
 *
 * @param id
 *  Adapter identifier.
 *
 * @param [out] event_port_id
 *  Event port identifier used by the adapter. The application links its
 *  event queue to this port in RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
 *
 * @return
 *  - 0: Success
 *  - <0: Error code on failure.
 */
int
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);

/**
 * Initialize the adapter runtime configuration parameters
 *
 * @param params
 *  A pointer to structure of type struct rte_event_crypto_adapter_runtime_params
 *
 * @return
 *  -  0: Success
 *  - <0: Error code on failure
 */
__rte_experimental
int
rte_event_crypto_adapter_runtime_params_init(
		struct rte_event_crypto_adapter_runtime_params *params);

/**
 * Set the adapter runtime configuration parameters
 *
 * @param id
 *  Adapter identifier
 *
 * @param params
 *  A pointer to structure of type struct rte_event_crypto_adapter_runtime_params
 *  with configuration parameter values. The reserved fields of this structure
 *  must be initialized to zero and the valid fields need to be set appropriately.
 *  This struct can be initialized to default values using the
 *  rte_event_crypto_adapter_runtime_params_init() API, or the application may
 *  reset this struct and update the required fields.
 *
 * @return
 *  -  0: Success
 *  - <0: Error code on failure
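 *
 * A minimal, illustrative sketch of updating max_nb at runtime
 * (CRYPTO_ADAPTER_ID is a placeholder; error handling omitted):
 *
 * @code
 *	struct rte_event_crypto_adapter_runtime_params params;
 *
 *	rte_event_crypto_adapter_runtime_params_init(&params);
 *	params.max_nb = 256;
 *	rte_event_crypto_adapter_runtime_params_set(CRYPTO_ADAPTER_ID,
 *						    &params);
 * @endcode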
 */
__rte_experimental
int
rte_event_crypto_adapter_runtime_params_set(uint8_t id,
		struct rte_event_crypto_adapter_runtime_params *params);

/**
 * Get the adapter runtime configuration parameters
 *
 * @param id
 *  Adapter identifier
 *
 * @param[out] params
 *  A pointer to structure of type struct rte_event_crypto_adapter_runtime_params
 *  containing valid adapter parameters when return value is 0.
 *
 * @return
 *  -  0: Success
 *  - <0: Error code on failure
 */
__rte_experimental
int
rte_event_crypto_adapter_runtime_params_get(uint8_t id,
		struct rte_event_crypto_adapter_runtime_params *params);

/**
 * Retrieve vector limits for a given event dev and crypto dev pair.
 * @see rte_event_crypto_adapter_vector_limits
 *
 * @param dev_id
 *  Event device identifier.
 * @param cdev_id
 *  Crypto device identifier.
 * @param [out] limits
 *  A pointer to rte_event_crypto_adapter_vector_limits structure that has to
 *  be filled.
 *
 * @return
 *  - 0: Success.
 *  - <0: Error code on failure.
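 *
 * A minimal, illustrative sketch of configuring a vectorized queue pair
 * within the reported limits (ev_dev_id, cdev_id, qp_id, CRYPTO_ADAPTER_ID
 * and the vector mempool parameters are placeholders; error handling
 * omitted):
 *
 * @code
 *	struct rte_event_crypto_adapter_vector_limits limits;
 *	struct rte_event_crypto_adapter_queue_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	rte_event_crypto_adapter_vector_limits_get(ev_dev_id, cdev_id,
 *						   &limits);
 *
 *	conf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
 *	conf.vector_sz = limits.max_sz;
 *	conf.vector_timeout_ns = limits.min_timeout_ns;
 *	conf.vector_mp = rte_event_vector_pool_create("crypto_vec_pool",
 *			1024, 0, limits.max_sz, rte_socket_id());
 *
 *	rte_event_crypto_adapter_queue_pair_add(CRYPTO_ADAPTER_ID, cdev_id,
 *						qp_id, &conf);
 * @endcode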
 */
int rte_event_crypto_adapter_vector_limits_get(
	uint8_t dev_id, uint16_t cdev_id,
	struct rte_event_crypto_adapter_vector_limits *limits);

/**
 * Enqueue a burst of crypto operations as event objects supplied in *rte_event*
 * structures on an event crypto adapter designated by its event *dev_id*
 * through the event port specified by *port_id*. This function is supported if
 * the eventdev PMD has the #RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
 * capability flag set.
 *
 * The *nb_events* parameter is the number of event objects to enqueue, which
 * are supplied in the *ev* array of *rte_event* structures.
 *
 * The rte_event_crypto_adapter_enqueue() function returns the number of
 * event objects it actually enqueued. A return value equal to *nb_events*
 * means that all event objects have been enqueued.
 *
 * @param dev_id
 *  The identifier of the device.
 * @param port_id
 *  The identifier of the event port.
 * @param ev
 *  Points to an array of *nb_events* objects of type *rte_event* structure
 *  which contain the event object enqueue operations to be processed.
 * @param nb_events
 *  The number of event objects to enqueue, typically the number of
 *  rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
 *  available for this port.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified in
 *   a *rte_event*. If the return value is less than *nb_events*, the remaining
 *   events at the end of ev[] are not consumed and the caller has to take care
 *   of them, and rte_errno is set accordingly. Possible errno values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
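 *
 * A minimal, illustrative sketch for a device that reports the
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability (ev_dev_id,
 * app_port_id and op are placeholders; the crypto op metadata is assumed to
 * have been filled in as described in the file-level documentation; the
 * exact event field requirements may vary by PMD; error handling omitted):
 *
 * @code
 *	struct rte_event ev = {0};
 *
 *	ev.op = RTE_EVENT_OP_FORWARD;
 *	ev.event_type = RTE_EVENT_TYPE_CRYPTODEV;
 *	ev.event_ptr = op;
 *
 *	rte_event_crypto_adapter_enqueue(ev_dev_id, app_port_id, &ev, 1);
 * @endcode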
 */
static inline uint16_t
rte_event_crypto_adapter_enqueue(uint8_t dev_id,
				uint8_t port_id,
				struct rte_event ev[],
				uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_crypto_adapter_enqueue(dev_id, port_id, ev,
		nb_events);

	return fp_ops->ca_enqueue(port, ev, nb_events);
}

#ifdef __cplusplus
}
#endif
#endif	/* _RTE_EVENT_CRYPTO_ADAPTER_ */