/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

/*
 * Per queue pair context: a private copy of the worker list plus the
 * round-robin positions of the last workers used for enqueue and dequeue.
 */
struct rr_scheduler_qp_ctx {
	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	uint32_t nb_workers;

	uint32_t last_enq_worker_idx;
	uint32_t last_deq_worker_idx;
};

static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	uint32_t worker_idx = rr_qp_ctx->last_enq_worker_idx;
	struct scheduler_worker *worker = &rr_qp_ctx->workers[worker_idx];
	uint16_t processed_ops;

	if (unlikely(nb_ops == 0))
		return 0;

	/*
	 * Switch the ops to the worker's sessions, enqueue the whole burst
	 * to the current worker, and restore the scheduler sessions on any
	 * ops the worker did not accept.
	 */
	scheduler_set_worker_sessions(ops, nb_ops, worker_idx);
	processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
			worker->qp_id, ops, nb_ops);
	if (processed_ops < nb_ops)
		scheduler_retrieve_sessions(ops + processed_ops,
				nb_ops - processed_ops);

	worker->nb_inflight_cops += processed_ops;

	/* Advance the round-robin enqueue position. */
	rr_qp_ctx->last_enq_worker_idx += 1;
	rr_qp_ctx->last_enq_worker_idx %= rr_qp_ctx->nb_workers;

	return processed_ops;
}

static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	/*
	 * Cap the burst at the space left in the reorder ring, then record
	 * the enqueued ops so they can be drained in their original order.
	 */
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}

static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_worker *worker;
	uint32_t last_worker_idx = rr_qp_ctx->last_deq_worker_idx;
	uint16_t nb_deq_ops;

	/*
	 * If the current worker has nothing in flight, walk the worker list
	 * until one with inflight ops is found.
	 */
	if (unlikely(rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops
			== 0)) {
		do {
			last_worker_idx += 1;

			if (unlikely(last_worker_idx >= rr_qp_ctx->nb_workers))
				last_worker_idx = 0;
			/* looped back, means no inflight cops in the queue */
			if (last_worker_idx == rr_qp_ctx->last_deq_worker_idx)
				return 0;
		} while (rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops
				== 0);
	}

	worker = &rr_qp_ctx->workers[last_worker_idx];

	nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
			worker->qp_id, ops, nb_ops);
	scheduler_retrieve_sessions(ops, nb_deq_ops);

	/* Advance the round-robin dequeue position. */
	last_worker_idx += 1;
	last_worker_idx %= rr_qp_ctx->nb_workers;

	rr_qp_ctx->last_deq_worker_idx = last_worker_idx;

	worker->nb_inflight_cops -= nb_deq_ops;

	return nb_deq_ops;
}

static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	/*
	 * Collect finished ops from the workers, then hand them out in
	 * their original submission order.
	 */
	schedule_dequeue(qp, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}

static int
worker_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t worker_id)
{
	return 0;
}

static int
worker_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t worker_id)
{
	return 0;
}

static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	/* Pick ordered or unordered burst functions. */
	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	/* Build a per queue pair copy of the worker list. */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct rr_scheduler_qp_ctx *rr_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;

		memset(rr_qp_ctx->workers, 0,
				RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
				sizeof(struct scheduler_worker));
		for (j = 0; j < sched_ctx->nb_workers; j++) {
			rr_qp_ctx->workers[j].dev_id =
					sched_ctx->workers[j].dev_id;
			rr_qp_ctx->workers[j].qp_id = i;
		}

		rr_qp_ctx->nb_workers = sched_ctx->nb_workers;

		rr_qp_ctx->last_enq_worker_idx = 0;
		rr_qp_ctx->last_deq_worker_idx = 0;
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct rr_scheduler_qp_ctx *rr_qp_ctx;

	/* Allocate the zero-initialised round-robin context for this qp. */
	rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
			rte_socket_id());
	if (!rr_qp_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;

	return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
	worker_attach,
	worker_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};

static struct rte_cryptodev_scheduler scheduler = {
		.name = "roundrobin-scheduler",
		.description = "scheduler which will round robin bursts across "
				"worker crypto devices",
		.mode = CDEV_SCHED_MODE_ROUNDROBIN,
		.ops = &scheduler_rr_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_roundrobin = &scheduler;