/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _SCHEDULER_PMD_PRIVATE_H
#define _SCHEDULER_PMD_PRIVATE_H

#include "rte_cryptodev_scheduler.h"

#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
/**< Scheduler Crypto PMD device name */

#define PER_WORKER_BUFF_SIZE	(256)

extern int scheduler_logtype_driver;

#define CR_SCHED_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \
			"%s() line %u: "fmt "\n", __func__, __LINE__, ##args)

struct scheduler_worker {
	uint8_t dev_id;
	/**< device id of the attached worker cryptodev */
	uint16_t qp_id;
	/**< queue pair id used on the worker device */
	uint32_t nb_inflight_cops;
	/**< number of crypto ops currently in flight on this worker */

	uint8_t driver_id;
	/**< driver id of the worker device */
};

struct scheduler_ctx {
	void *private_ctx;
	/**< private scheduler context pointer */

	struct rte_cryptodev_capabilities *capabilities;
	/**< merged capabilities exposed on behalf of the workers */
	uint32_t nb_capabilities;

	uint32_t max_nb_queue_pairs;

	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	uint32_t nb_workers;
	/**< number of workers currently attached */

	enum rte_cryptodev_scheduler_mode mode;
	/**< active scheduling mode */

	struct rte_cryptodev_scheduler_ops ops;
	/**< function pointers of the active scheduling mode */

	uint8_t reordering_enabled;
	/**< non-zero when crypto op reordering is enabled */

	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
	uint16_t wc_pool[RTE_MAX_LCORE];
	/**< worker lcore ids, used by the multi-core mode */
	uint16_t nb_wc;
	/**< number of worker lcores in wc_pool */

	char *init_worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	/**< worker names passed via devargs, attached at initialization */
	int nb_init_workers;
} __rte_cache_aligned;

struct scheduler_qp_ctx {
	void *private_qp_ctx;
	/**< mode-specific per-queue-pair context */

	uint32_t max_nb_objs;

	struct rte_ring *order_ring;
	/**< ring that preserves crypto op submission order */
} __rte_cache_aligned;

extern uint8_t cryptodev_scheduler_driver_id;

/**
 * Return how many of the requested nb_ops can be enqueued without
 * overflowing the order ring.
 */
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
	uint32_t count = rte_ring_free_count(order_ring);

	return count > nb_ops ? nb_ops : count;
}

/** Record the submission order of a burst of crypto ops. */
static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
}

/**
 * Drain ops from the order ring in their original submission order,
 * stopping at the first op that has not been processed yet.
 */
static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	uint32_t nb_objs, nb_ops_to_deq;

	/* Peek at up to nb_ops entries without committing the dequeue. */
	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
		nb_ops, NULL);
	if (nb_objs == 0)
		return 0;

	/* Only the prefix of already-processed ops may be released. */
	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
		op = ops[nb_ops_to_deq];
		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;
	}

	/* Commit the dequeue of exactly that completed prefix. */
	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
	return nb_ops_to_deq;
}

/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;

#endif /* _SCHEDULER_PMD_PRIVATE_H */
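
/*
 * Illustrative usage sketch, compiled out with #if 0 so the header is
 * unchanged for real builds. It shows how a scheduling mode's ordered
 * dequeue path is typically built from the helpers above. The function
 * name example_dequeue_ordering and the mode_dequeue() call are
 * assumptions for this example only, not part of the PMD; real modes
 * (e.g. scheduler_roundrobin.c) pair a mode-specific worker dequeue
 * with scheduler_order_drain() in the same way.
 */
#if 0
static uint16_t
example_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct scheduler_qp_ctx *qp_ctx = qp;
	struct rte_ring *order_ring = qp_ctx->order_ring;

	/* Let the mode pull completed ops back from its workers first
	 * (placeholder call). The same op pointers also sit in the order
	 * ring, so their status fields are now up to date.
	 */
	mode_dequeue(qp, ops, nb_ops);

	/* Re-use ops as the output buffer: the drain returns ops strictly
	 * in submission order, stopping at the first still-unprocessed op.
	 */
	return scheduler_order_drain(order_ring, ops, nb_ops);
}
#endif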