/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2024 Advanced Micro Devices, Inc.
 */

#include <stdbool.h>

#include <rte_malloc.h>
#include <rte_memzone.h>

#include "ionic_crypto.h"

/*
 * Queue-type support level advertised to the firmware.
 * The value is the highest queue descriptor format version this driver
 * understands for each queue type; 0 is the base version.
 */
static const uint8_t iocpt_qtype_vers[IOCPT_QTYPE_MAX] = {
	[IOCPT_QTYPE_ADMINQ] = 0, /* 0 = Base version */
	[IOCPT_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
	[IOCPT_QTYPE_CRYPTOQ] = 0, /* 0 = Base version */
};

/* Map a dev_cmd opcode to a printable name, for log messages only */
static const char *
iocpt_opcode_to_str(enum iocpt_cmd_opcode opcode)
{
	switch (opcode) {
	case IOCPT_CMD_NOP:
		return "IOCPT_CMD_NOP";
	case IOCPT_CMD_IDENTIFY:
		return "IOCPT_CMD_IDENTIFY";
	case IOCPT_CMD_RESET:
		return "IOCPT_CMD_RESET";
	case IOCPT_CMD_LIF_IDENTIFY:
		return "IOCPT_CMD_LIF_IDENTIFY";
	case IOCPT_CMD_LIF_INIT:
		return "IOCPT_CMD_LIF_INIT";
	case IOCPT_CMD_LIF_RESET:
		return "IOCPT_CMD_LIF_RESET";
	case IOCPT_CMD_LIF_GETATTR:
		return "IOCPT_CMD_LIF_GETATTR";
	case IOCPT_CMD_LIF_SETATTR:
		return "IOCPT_CMD_LIF_SETATTR";
	case IOCPT_CMD_Q_IDENTIFY:
		return "IOCPT_CMD_Q_IDENTIFY";
	case IOCPT_CMD_Q_INIT:
		return "IOCPT_CMD_Q_INIT";
	case IOCPT_CMD_Q_CONTROL:
		return "IOCPT_CMD_Q_CONTROL";
	case IOCPT_CMD_SESS_CONTROL:
		return "IOCPT_CMD_SESS_CONTROL";
	default:
		return "DEVCMD_UNKNOWN";
	}
}

/* Dev_cmd Interface */

/*
 * Post a command to the device's dev_cmd register region.
 *
 * Copies the command words into the device registers, clears the 'done'
 * register, then rings the doorbell to start execution.  The write order
 * matters: 'done' must be cleared before the doorbell is written so that
 * a subsequent iocpt_dev_cmd_wait() does not see a stale completion.
 */
static void
iocpt_dev_cmd_go(struct iocpt_dev *dev, union iocpt_dev_cmd *cmd)
{
	uint32_t cmd_size = RTE_DIM(cmd->words);
	uint32_t i;

	IOCPT_PRINT(DEBUG, "Sending %s (%d) via dev_cmd",
		iocpt_opcode_to_str(cmd->cmd.opcode), cmd->cmd.opcode);

	for (i = 0; i < cmd_size; i++)
		iowrite32(cmd->words[i], &dev->dev_cmd->cmd.words[i]);

	iowrite32(0, &dev->dev_cmd->done);
	iowrite32(1, &dev->dev_cmd->doorbell);
}

/*
 * Poll for completion of the currently posted dev_cmd.
 *
 * @param max_wait
 *   Timeout in seconds (converted to microseconds internally).
 * @return
 *   0 once the device sets the IONIC_DEV_CMD_DONE bit, or -ETIMEDOUT
 *   if it does not within max_wait seconds.
 */
static int
iocpt_dev_cmd_wait(struct iocpt_dev *dev, unsigned long max_wait)
{
	unsigned long step_usec = IONIC_DEVCMD_CHECK_PERIOD_US;
	unsigned long max_wait_usec = max_wait * 1000000L;
	unsigned long elapsed_usec = 0;
	int done;

	/* Wait for dev cmd to complete.. but no more than max_wait sec */

	do {
		done = ioread32(&dev->dev_cmd->done) & IONIC_DEV_CMD_DONE;
		if (done != 0) {
			IOCPT_PRINT(DEBUG, "DEVCMD %d done took %lu usecs",
				ioread8(&dev->dev_cmd->cmd.cmd.opcode),
				elapsed_usec);
			return 0;
		}

		/* Busy-wait between polls; dev_cmds are rare, slow-path ops */
		rte_delay_us_block(step_usec);

		elapsed_usec += step_usec;
	} while (elapsed_usec < max_wait_usec);

	IOCPT_PRINT(ERR, "DEVCMD %d timeout after %lu usecs",
		ioread8(&dev->dev_cmd->cmd.cmd.opcode), elapsed_usec);

	return -ETIMEDOUT;
}

/*
 * Wait for the posted dev_cmd to finish, then check its completion status.
 *
 * @return
 *   0 on success, -ETIMEDOUT if the command never completed,
 *   -EAGAIN if the device reported IOCPT_RC_EAGAIN (caller may retry),
 *   or -EIO for any other nonzero device status.
 */
static int
iocpt_dev_cmd_wait_check(struct iocpt_dev *dev, unsigned long max_wait)
{
	uint8_t status;
	int err;

	err = iocpt_dev_cmd_wait(dev, max_wait);
	if (err == 0) {
		status = ioread8(&dev->dev_cmd->comp.comp.status);
		if (status == IOCPT_RC_EAGAIN)
			err = -EAGAIN;
		else if (status != 0)
			err = -EIO;
	}

	IOCPT_PRINT(DEBUG, "dev_cmd returned %d", err);
	return err;
}

/* Dev_cmds */

/* Post a full device reset command (does not wait for completion) */
static void
iocpt_dev_cmd_reset(struct iocpt_dev *dev)
{
	union iocpt_dev_cmd cmd = {
		.reset.opcode = IOCPT_CMD_RESET,
	};

	iocpt_dev_cmd_go(dev, &cmd);
}

/* Post a LIF identify command for the given identity version */
static void
iocpt_dev_cmd_lif_identify(struct iocpt_dev *dev, uint8_t ver)
{
	union iocpt_dev_cmd cmd = {
		.lif_identify.opcode = IOCPT_CMD_LIF_IDENTIFY,
		.lif_identify.type = IOCPT_LIF_TYPE_DEFAULT,
		.lif_identify.ver = ver,
	};

	iocpt_dev_cmd_go(dev, &cmd);
}

/*
 * Post a LIF init command.
 *
 * @param info_pa
 *   IOVA of the LIF info region the device will write into.
 */
static void
iocpt_dev_cmd_lif_init(struct iocpt_dev *dev, rte_iova_t info_pa)
{
	union iocpt_dev_cmd cmd = {
		.lif_init.opcode = IOCPT_CMD_LIF_INIT,
		.lif_init.type = IOCPT_LIF_TYPE_DEFAULT,
		.lif_init.info_pa = info_pa,
	};

	iocpt_dev_cmd_go(dev, &cmd);
}

/* Post a LIF reset command (does not wait for completion) */
static void
iocpt_dev_cmd_lif_reset(struct iocpt_dev *dev)
{
	union iocpt_dev_cmd cmd = {
		.lif_reset.opcode = IOCPT_CMD_LIF_RESET,
	};

	iocpt_dev_cmd_go(dev, &cmd);
}

/*
 * Post a queue-identify command for one queue type, advertising the
 * highest descriptor version (qver) the driver supports for it.
 */
static void
iocpt_dev_cmd_queue_identify(struct iocpt_dev *dev,
		uint8_t qtype, uint8_t qver)
{
	union iocpt_dev_cmd cmd = {
		.q_identify.opcode = IOCPT_CMD_Q_IDENTIFY,
		.q_identify.type = qtype,
		.q_identify.ver = qver,
	};

	iocpt_dev_cmd_go(dev, &cmd);
}

/* Dev_cmd consumers */

/*
 * Query the firmware for per-queue-type capabilities and cache them in
 * dev->qtype_info[].  Only queue types this driver knows about are
 * queried; unknown types are skipped.  When running embedded (same image
 * as the FW), the FW is assumed to match the driver and no query is made.
 *
 * Errors are logged but not returned: -EINVAL (type unsupported) skips
 * that type, while other failures (e.g. older FW without Q_IDENTIFY)
 * abort the remaining queries, leaving those qtype_info entries zeroed.
 */
static void
iocpt_queue_identify(struct iocpt_dev *dev)
{
	union iocpt_q_identity *q_ident = &dev->ident.q;
	uint32_t q_words = RTE_DIM(q_ident->words);
	uint32_t cmd_words = RTE_DIM(dev->dev_cmd->data);
	uint32_t i, nwords, qtype;
	int err;

	for (qtype = 0; qtype < RTE_DIM(iocpt_qtype_vers); qtype++) {
		struct iocpt_qtype_info *qti = &dev->qtype_info[qtype];

		/* Filter out the types this driver knows about */
		switch (qtype) {
		case IOCPT_QTYPE_ADMINQ:
		case IOCPT_QTYPE_NOTIFYQ:
		case IOCPT_QTYPE_CRYPTOQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		if (iocpt_is_embedded()) {
			/* When embedded, FW will always match the driver */
			qti->version = iocpt_qtype_vers[qtype];
			continue;
		}

		/* On the host, query the FW for info */
		iocpt_dev_cmd_queue_identify(dev,
			qtype, iocpt_qtype_vers[qtype]);
		err = iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);
		if (err == -EINVAL) {
			IOCPT_PRINT(ERR, "qtype %d not supported", qtype);
			continue;
		} else if (err == -EIO) {
			IOCPT_PRINT(ERR, "q_ident failed, older FW");
			return;
		} else if (err != 0) {
			IOCPT_PRINT(ERR, "q_ident failed, qtype %d: %d",
				qtype, err);
			return;
		}

		/* Copy the identity data out of the dev_cmd data registers */
		nwords = RTE_MIN(q_words, cmd_words);
		for (i = 0; i < nwords; i++)
			q_ident->words[i] = ioread32(&dev->dev_cmd->data[i]);

		/* Convert the little-endian FW response into host order */
		qti->version = q_ident->version;
		qti->supported = q_ident->supported;
		qti->features = rte_le_to_cpu_64(q_ident->features);
		qti->desc_sz = rte_le_to_cpu_16(q_ident->desc_sz);
		qti->comp_sz = rte_le_to_cpu_16(q_ident->comp_sz);
		qti->sg_desc_sz = rte_le_to_cpu_16(q_ident->sg_desc_sz);
		qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems);
		qti->sg_desc_stride =
			rte_le_to_cpu_16(q_ident->sg_desc_stride);

		IOCPT_PRINT(DEBUG, " qtype[%d].version = %d",
			qtype, qti->version);
		IOCPT_PRINT(DEBUG, " qtype[%d].supported = %#x",
			qtype, qti->supported);
		IOCPT_PRINT(DEBUG, " qtype[%d].features = %#jx",
			qtype, qti->features);
		IOCPT_PRINT(DEBUG, " qtype[%d].desc_sz = %d",
			qtype, qti->desc_sz);
		IOCPT_PRINT(DEBUG, " qtype[%d].comp_sz = %d",
			qtype, qti->comp_sz);
		IOCPT_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d",
			qtype, qti->sg_desc_sz);
		IOCPT_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d",
			qtype, qti->max_sg_elems);
		IOCPT_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d",
			qtype, qti->sg_desc_stride);
	}
}

/*
 * Identify the crypto LIF: fetch the device's identity data, record the
 * queue/session limits, and translate the device feature bits into
 * rte_cryptodev RTE_CRYPTODEV_FF_* feature flags on dev->features.
 * Finishes by querying per-queue-type info (best-effort).
 *
 * @return
 *   0 on success, or the negative errno from the identify dev_cmd.
 */
int
iocpt_dev_identify(struct iocpt_dev *dev)
{
	union iocpt_lif_identity *ident = &dev->ident.lif;
	union iocpt_lif_config *cfg = &ident->config;
	uint64_t features;
	uint32_t cmd_size = RTE_DIM(dev->dev_cmd->data);
	uint32_t dev_size = RTE_DIM(ident->words);
	uint32_t i, nwords;
	int err;

	memset(ident, 0, sizeof(*ident));

	iocpt_dev_cmd_lif_identify(dev, IOCPT_IDENTITY_VERSION_1);
	err = iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);
	if (err != 0)
		return err;

	/* Copy the identity data out of the dev_cmd data registers */
	nwords = RTE_MIN(dev_size, cmd_size);
	for (i = 0; i < nwords; i++)
		ident->words[i] = ioread32(&dev->dev_cmd->data[i]);

	dev->max_qps =
		rte_le_to_cpu_32(cfg->queue_count[IOCPT_QTYPE_CRYPTOQ]);
	dev->max_sessions =
		rte_le_to_cpu_32(ident->max_nb_sessions);

	/* Translate device feature bits into cryptodev feature flags */
	features = rte_le_to_cpu_64(ident->features);
	dev->features = RTE_CRYPTODEV_FF_HW_ACCELERATED;
	if (features & IOCPT_HW_SYM)
		dev->features |= RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO;
	if (features & IOCPT_HW_ASYM)
		dev->features |= RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
	if (features & IOCPT_HW_CHAIN)
		dev->features |= RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
	if (features & IOCPT_HW_IP)
		dev->features |= RTE_CRYPTODEV_FF_IN_PLACE_SGL;
	if (features & IOCPT_HW_OOP) {
		/* Out-of-place support covers all in/out buffer layouts */
		dev->features |= RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;
		dev->features |= RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT;
		dev->features |= RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
		dev->features |= RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT;
	}

	IOCPT_PRINT(INFO, "crypto.features %#jx",
		rte_le_to_cpu_64(ident->features));
	IOCPT_PRINT(INFO, "crypto.features_active %#jx",
		rte_le_to_cpu_64(cfg->features));
	IOCPT_PRINT(INFO, "crypto.queue_count[IOCPT_QTYPE_ADMINQ] %#x",
		rte_le_to_cpu_32(cfg->queue_count[IOCPT_QTYPE_ADMINQ]));
	IOCPT_PRINT(INFO, "crypto.queue_count[IOCPT_QTYPE_NOTIFYQ] %#x",
		rte_le_to_cpu_32(cfg->queue_count[IOCPT_QTYPE_NOTIFYQ]));
	IOCPT_PRINT(INFO, "crypto.queue_count[IOCPT_QTYPE_CRYPTOQ] %#x",
		rte_le_to_cpu_32(cfg->queue_count[IOCPT_QTYPE_CRYPTOQ]));
	IOCPT_PRINT(INFO, "crypto.max_sessions %u",
		rte_le_to_cpu_32(ident->max_nb_sessions));

	iocpt_queue_identify(dev);

	return 0;
}

/*
 * Initialize the crypto LIF.
 *
 * Retries up to 5 times (with a delay) if the device reports EAGAIN,
 * e.g. while the FW is still finishing a prior reset.
 *
 * @param info_pa
 *   IOVA of the LIF info region passed to the device.
 * @return
 *   0 on success, or a negative errno from the dev_cmd.
 */
int
iocpt_dev_init(struct iocpt_dev *dev, rte_iova_t info_pa)
{
	uint32_t retries = 5;
	int err;

retry_lif_init:
	iocpt_dev_cmd_lif_init(dev, info_pa);

	err = iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);
	if (err == -EAGAIN && retries > 0) {
		retries--;
		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
		goto retry_lif_init;
	}

	return err;
}

/*
 * Reset the device: first reset the LIF, then the whole device.
 * Best-effort teardown path, so completion errors are ignored.
 */
void
iocpt_dev_reset(struct iocpt_dev *dev)
{
	iocpt_dev_cmd_lif_reset(dev);
	(void)iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);

	iocpt_dev_cmd_reset(dev);
	(void)iocpt_dev_cmd_wait_check(dev, IONIC_DEVCMD_TIMEOUT);
}