/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

/* Per queue pair context: attached slaves plus round-robin cursors */
struct rr_scheduler_qp_ctx {
	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	uint32_t nb_slaves;

	uint32_t last_enq_slave_idx;
	uint32_t last_deq_slave_idx;
};

/* Enqueue the whole burst to the current slave, then advance the cursor */
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
	uint16_t i, processed_ops;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++)
		rte_prefetch0(ops[i]->sym->session);

	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	slave->nb_inflight_cops += processed_ops;

	rr_qp_ctx->last_enq_slave_idx += 1;
	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

	return processed_ops;
}

/* As schedule_enqueue(), but also record the ops in the order ring so the
 * dequeue side can restore the original enqueue order
 */
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}

/* Dequeue from the first slave, starting at the last dequeue position,
 * that still has inflight ops
 */
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_slave *slave;
	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
	uint16_t nb_deq_ops;
	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
		do {
			last_slave_idx += 1;

			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
				last_slave_idx = 0;
			/* looped back, means no inflight cops in the queue */
			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
				return 0;
		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
				== 0);
	}

	slave = &rr_qp_ctx->slaves[last_slave_idx];

	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	last_slave_idx += 1;
	last_slave_idx %= rr_qp_ctx->nb_slaves;

	rr_qp_ctx->last_deq_slave_idx = last_slave_idx;

	slave->nb_inflight_cops -= nb_deq_ops;

	return nb_deq_ops;
}

/* Dequeue from the slaves, then drain ops from the order ring in their
 * original enqueue order
 */
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	schedule_dequeue(qp, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}

static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

/* Install the burst functions and (re)build per queue pair slave lists */
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct rr_scheduler_qp_ctx *rr_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;

		memset(rr_qp_ctx->slaves, 0,
				RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
				sizeof(struct scheduler_slave));
		for (j = 0; j < sched_ctx->nb_slaves; j++) {
			rr_qp_ctx->slaves[j].dev_id =
					sched_ctx->slaves[j].dev_id;
			rr_qp_ctx->slaves[j].qp_id = i;
		}

		rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;

		rr_qp_ctx->last_enq_slave_idx = 0;
		rr_qp_ctx->last_deq_slave_idx = 0;
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/* Allocate the private round-robin context for one queue pair */
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct rr_scheduler_qp_ctx *rr_qp_ctx;

	rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
			rte_socket_id());
	if (!rr_qp_ctx) {
		CS_LOG_ERR("failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;

	return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};

struct rte_cryptodev_scheduler scheduler = {
	.name = "roundrobin-scheduler",
devices", 236 .mode = CDEV_SCHED_MODE_ROUNDROBIN, 237 .ops = &scheduler_rr_ops 238 }; 239 240 struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler; 241