/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define PRIMARY_WORKER_IDX	0
#define SECONDARY_WORKER_IDX	1
#define NB_FAILOVER_WORKERS	2
#define WORKER_SWITCH_MASK	(0x01)

struct fo_scheduler_qp_ctx {
	struct scheduler_worker primary_worker;
	struct scheduler_worker secondary_worker;
	uint8_t primary_worker_index;
	uint8_t secondary_worker_index;

	uint8_t deq_idx;
};

static __rte_always_inline uint16_t
failover_worker_enqueue(struct scheduler_worker *worker,
		struct rte_crypto_op **ops, uint16_t nb_ops, uint8_t index)
{
	uint16_t processed_ops;

	scheduler_set_worker_sessions(ops, nb_ops, index);

	processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
			worker->qp_id, ops, nb_ops);
	worker->nb_inflight_cops += processed_ops;

	return processed_ops;
}

static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	uint16_t enqueued_ops;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Try the primary worker first. */
	enqueued_ops = failover_worker_enqueue(&qp_ctx->primary_worker,
			ops, nb_ops, PRIMARY_WORKER_IDX);

	if (enqueued_ops < nb_ops) {
		/* Retry any ops the primary could not accept on the
		 * secondary worker.
		 */
		scheduler_retrieve_sessions(&ops[enqueued_ops],
				nb_ops - enqueued_ops);
		enqueued_ops += failover_worker_enqueue(
				&qp_ctx->secondary_worker,
				&ops[enqueued_ops],
				nb_ops - enqueued_ops,
				SECONDARY_WORKER_IDX);
		if (enqueued_ops < nb_ops)
			scheduler_retrieve_sessions(&ops[enqueued_ops],
					nb_ops - enqueued_ops);
	}

	return enqueued_ops;
}

static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}

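/*
 * Dequeue alternates between the two workers on each call: deq_idx is
 * toggled via WORKER_SWITCH_MASK, and if the worker polled first does not
 * fill the burst, the remaining slots are dequeued from the other worker.
 */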
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_worker *workers[NB_FAILOVER_WORKERS] = {
			&qp_ctx->primary_worker, &qp_ctx->secondary_worker};
	struct scheduler_worker *worker = workers[qp_ctx->deq_idx];
	uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;

	if (worker->nb_inflight_cops) {
		nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
				worker->qp_id, ops, nb_ops);
		worker->nb_inflight_cops -= nb_deq_ops;
	}

	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_SWITCH_MASK;

	if (nb_deq_ops == nb_ops)
		goto retrieve_sessions;

	worker = workers[qp_ctx->deq_idx];

	if (worker->nb_inflight_cops) {
		nb_deq_ops2 = rte_cryptodev_dequeue_burst(worker->dev_id,
				worker->qp_id, &ops[nb_deq_ops],
				nb_ops - nb_deq_ops);
		worker->nb_inflight_cops -= nb_deq_ops2;
	}

retrieve_sessions:
	scheduler_retrieve_sessions(ops, nb_deq_ops + nb_deq_ops2);

	return nb_deq_ops + nb_deq_ops2;
}

static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	schedule_dequeue(qp, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}

static int
worker_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t worker_id)
{
	return 0;
}

static int
worker_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t worker_id)
{
	return 0;
}

static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	if (sched_ctx->nb_workers < 2) {
		CR_SCHED_LOG(ERR, "Number of workers must be no less than 2");
		return -ENOMEM;
	}

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = schedule_enqueue_ordering;
		dev->dequeue_burst = schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = schedule_enqueue;
		dev->dequeue_burst = schedule_dequeue;
	}

	/* The first two attached workers serve every queue pair:
	 * index 0 is the primary, index 1 is the secondary.
	 */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)
				dev->data->queue_pairs[i])->private_qp_ctx;

		sched_ctx->workers[PRIMARY_WORKER_IDX].qp_id = i;
		sched_ctx->workers[SECONDARY_WORKER_IDX].qp_id = i;

		rte_memcpy(&qp_ctx->primary_worker,
				&sched_ctx->workers[PRIMARY_WORKER_IDX],
				sizeof(struct scheduler_worker));
		rte_memcpy(&qp_ctx->secondary_worker,
				&sched_ctx->workers[SECONDARY_WORKER_IDX],
				sizeof(struct scheduler_worker));
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct fo_scheduler_qp_ctx *fo_qp_ctx;

	fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
			rte_socket_id());
	if (!fo_qp_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;

	return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
	worker_attach,
	worker_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};

static struct rte_cryptodev_scheduler fo_scheduler = {
	.name = "failover-scheduler",
	.description = "scheduler which enqueues to the primary worker, "
			"and enqueues to the secondary worker only when "
			"enqueuing to the primary fails",
	.mode = CDEV_SCHED_MODE_FAILOVER,
	.ops = &scheduler_fo_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_failover = &fo_scheduler;
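
/*
 * Illustrative usage sketch (an assumption, not part of this driver): once a
 * crypto_scheduler vdev and two worker cryptodevs exist, an application
 * could select this mode through the public scheduler API. "scheduler_id",
 * "primary_id" and "secondary_id" below are placeholder device IDs; the
 * first worker attached becomes the primary (index 0).
 *
 *	rte_cryptodev_scheduler_worker_attach(scheduler_id, primary_id);
 *	rte_cryptodev_scheduler_worker_attach(scheduler_id, secondary_id);
 *	rte_cryptodev_scheduler_mode_set(scheduler_id,
 *			CDEV_SCHED_MODE_FAILOVER);
 */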