1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018 Cavium, Inc 3 */ 4 #include <assert.h> 5 #include <string.h> 6 #include <unistd.h> 7 8 #include <rte_branch_prediction.h> 9 #include <rte_common.h> 10 #include <rte_errno.h> 11 #include <rte_memzone.h> 12 #include <rte_string_fns.h> 13 14 #include "otx_cryptodev_hw_access.h" 15 #include "otx_cryptodev_mbox.h" 16 17 #include "cpt_pmd_logs.h" 18 #include "cpt_hw_types.h" 19 20 /* 21 * VF HAL functions 22 * Access its own BAR0/4 registers by passing VF number as 0. 23 * OS/PCI maps them accordingly. 24 */ 25 26 static int 27 otx_cpt_vf_init(struct cpt_vf *cptvf) 28 { 29 int ret = 0; 30 31 /* Check ready with PF */ 32 /* Gets chip ID / device Id from PF if ready */ 33 ret = otx_cpt_check_pf_ready(cptvf); 34 if (ret) { 35 CPT_LOG_ERR("%s: PF not responding to READY msg", 36 cptvf->dev_name); 37 ret = -EBUSY; 38 goto exit; 39 } 40 41 CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__); 42 43 exit: 44 return ret; 45 } 46 47 /* 48 * Read Interrupt status of the VF 49 * 50 * @param cptvf cptvf structure 51 */ 52 static uint64_t 53 otx_cpt_read_vf_misc_intr_status(struct cpt_vf *cptvf) 54 { 55 return CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), CPTX_VQX_MISC_INT(0, 0)); 56 } 57 58 /* 59 * Clear mailbox interrupt of the VF 60 * 61 * @param cptvf cptvf structure 62 */ 63 static void 64 otx_cpt_clear_mbox_intr(struct cpt_vf *cptvf) 65 { 66 cptx_vqx_misc_int_t vqx_misc_int; 67 68 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 69 CPTX_VQX_MISC_INT(0, 0)); 70 /* W1C for the VF */ 71 vqx_misc_int.s.mbox = 1; 72 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 73 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); 74 } 75 76 /* 77 * Clear instruction NCB read error interrupt of the VF 78 * 79 * @param cptvf cptvf structure 80 */ 81 static void 82 otx_cpt_clear_irde_intr(struct cpt_vf *cptvf) 83 { 84 cptx_vqx_misc_int_t vqx_misc_int; 85 86 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 87 CPTX_VQX_MISC_INT(0, 0)); 88 /* W1C for the VF */ 89 vqx_misc_int.s.irde = 1; 90 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 91 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); 92 } 93 94 /* 95 * Clear NCB result write response error interrupt of the VF 96 * 97 * @param cptvf cptvf structure 98 */ 99 static void 100 otx_cpt_clear_nwrp_intr(struct cpt_vf *cptvf) 101 { 102 cptx_vqx_misc_int_t vqx_misc_int; 103 104 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 105 CPTX_VQX_MISC_INT(0, 0)); 106 /* W1C for the VF */ 107 vqx_misc_int.s.nwrp = 1; 108 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 109 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); 110 } 111 112 /* 113 * Clear swerr interrupt of the VF 114 * 115 * @param cptvf cptvf structure 116 */ 117 static void 118 otx_cpt_clear_swerr_intr(struct cpt_vf *cptvf) 119 { 120 cptx_vqx_misc_int_t vqx_misc_int; 121 122 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 123 CPTX_VQX_MISC_INT(0, 0)); 124 /* W1C for the VF */ 125 vqx_misc_int.s.swerr = 1; 126 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 127 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); 128 } 129 130 /* 131 * Clear hwerr interrupt of the VF 132 * 133 * @param cptvf cptvf structure 134 */ 135 static void 136 otx_cpt_clear_hwerr_intr(struct cpt_vf *cptvf) 137 { 138 cptx_vqx_misc_int_t vqx_misc_int; 139 140 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 141 CPTX_VQX_MISC_INT(0, 0)); 142 /* W1C for the VF */ 143 vqx_misc_int.s.hwerr = 1; 144 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 145 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); 146 } 147 148 /* 149 * Clear translation fault interrupt of the VF 150 * 151 * @param cptvf cptvf structure 152 */ 153 static void 154 otx_cpt_clear_fault_intr(struct cpt_vf *cptvf) 155 { 156 cptx_vqx_misc_int_t vqx_misc_int; 157 158 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 159 CPTX_VQX_MISC_INT(0, 0)); 160 /* W1C for the VF */ 161 vqx_misc_int.s.fault = 1; 162 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 163 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); 164 } 165 166 /* 167 * Clear doorbell overflow interrupt of the VF 168 * 169 * @param cptvf cptvf structure 170 */ 171 static void 172 otx_cpt_clear_dovf_intr(struct cpt_vf *cptvf) 173 { 174 cptx_vqx_misc_int_t vqx_misc_int; 175 176 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 177 CPTX_VQX_MISC_INT(0, 0)); 178 /* W1C for the VF */ 179 vqx_misc_int.s.dovf = 1; 180 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 181 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); 182 } 183 184 /* Write to VQX_CTL register 185 */ 186 static void 187 otx_cpt_write_vq_ctl(struct cpt_vf *cptvf, bool val) 188 { 189 cptx_vqx_ctl_t vqx_ctl; 190 191 vqx_ctl.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 192 CPTX_VQX_CTL(0, 0)); 193 vqx_ctl.s.ena = val; 194 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 195 CPTX_VQX_CTL(0, 0), vqx_ctl.u); 196 } 197 198 /* Write to VQX_INPROG register 199 */ 200 static void 201 otx_cpt_write_vq_inprog(struct cpt_vf *cptvf, uint8_t val) 202 { 203 cptx_vqx_inprog_t vqx_inprg; 204 205 vqx_inprg.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 206 CPTX_VQX_INPROG(0, 0)); 207 vqx_inprg.s.inflight = val; 208 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 209 CPTX_VQX_INPROG(0, 0), vqx_inprg.u); 210 } 211 212 /* Write to VQX_DONE_WAIT NUMWAIT register 213 */ 214 static void 215 otx_cpt_write_vq_done_numwait(struct cpt_vf *cptvf, uint32_t val) 216 { 217 cptx_vqx_done_wait_t vqx_dwait; 218 219 vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 220 CPTX_VQX_DONE_WAIT(0, 0)); 221 vqx_dwait.s.num_wait = val; 222 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 223 CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u); 224 } 225 226 /* Write to VQX_DONE_WAIT NUM_WAIT register 227 */ 228 static void 229 otx_cpt_write_vq_done_timewait(struct cpt_vf *cptvf, uint16_t val) 230 { 231 cptx_vqx_done_wait_t vqx_dwait; 232 233 vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), 234 CPTX_VQX_DONE_WAIT(0, 0)); 235 vqx_dwait.s.time_wait = val; 236 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 237 CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u); 238 } 239 240 /* Write to VQX_SADDR register 241 */ 242 static void 243 otx_cpt_write_vq_saddr(struct cpt_vf *cptvf, uint64_t val) 244 { 245 cptx_vqx_saddr_t vqx_saddr; 246 247 vqx_saddr.u = val; 248 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf), 249 CPTX_VQX_SADDR(0, 0), vqx_saddr.u); 250 } 251 252 static void 253 otx_cpt_vfvq_init(struct cpt_vf *cptvf) 254 { 255 uint64_t base_addr = 0; 256 257 /* Disable the VQ */ 258 otx_cpt_write_vq_ctl(cptvf, 0); 259 260 /* Reset the doorbell */ 261 otx_cpt_write_vq_doorbell(cptvf, 0); 262 /* Clear inflight */ 263 otx_cpt_write_vq_inprog(cptvf, 0); 264 265 /* Write VQ SADDR */ 266 base_addr = (uint64_t)(cptvf->cqueue.chead[0].dma_addr); 267 otx_cpt_write_vq_saddr(cptvf, base_addr); 268 269 /* Configure timerhold / coalescence */ 270 otx_cpt_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD); 271 otx_cpt_write_vq_done_numwait(cptvf, CPT_COUNT_THOLD); 272 273 /* Enable the VQ */ 274 otx_cpt_write_vq_ctl(cptvf, 1); 275 } 276 277 static int 278 cpt_vq_init(struct cpt_vf *cptvf, uint8_t group) 279 { 280 int err; 281 282 /* Convey VQ LEN to PF */ 283 err = otx_cpt_send_vq_size_msg(cptvf); 284 if (err) { 285 CPT_LOG_ERR("%s: PF not responding to QLEN msg", 286 cptvf->dev_name); 287 err = -EBUSY; 288 goto cleanup; 289 } 290 291 /* CPT VF device initialization */ 292 otx_cpt_vfvq_init(cptvf); 293 294 /* Send msg to PF to assign currnet Q to required group */ 295 cptvf->vfgrp = group; 296 err = otx_cpt_send_vf_grp_msg(cptvf, group); 297 if (err) { 298 CPT_LOG_ERR("%s: PF not responding to VF_GRP msg", 299 cptvf->dev_name); 300 err = -EBUSY; 301 goto cleanup; 302 } 303 304 CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__); 305 return 0; 306 307 cleanup: 308 return err; 309 } 310 311 void 312 otx_cpt_poll_misc(struct cpt_vf *cptvf) 313 { 314 uint64_t intr; 315 316 intr = otx_cpt_read_vf_misc_intr_status(cptvf); 317 318 if (!intr) 319 return; 320 321 /* Check for MISC interrupt types */ 322 if (likely(intr & CPT_VF_INTR_MBOX_MASK)) { 323 CPT_LOG_DP_DEBUG("%s: Mailbox interrupt 0x%lx on CPT VF %d", 324 cptvf->dev_name, (unsigned int long)intr, cptvf->vfid); 325 otx_cpt_handle_mbox_intr(cptvf); 326 otx_cpt_clear_mbox_intr(cptvf); 327 } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) { 328 otx_cpt_clear_irde_intr(cptvf); 329 CPT_LOG_DP_DEBUG("%s: Instruction NCB read error interrupt " 330 "0x%lx on CPT VF %d", cptvf->dev_name, 331 (unsigned int long)intr, cptvf->vfid); 332 } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) { 333 otx_cpt_clear_nwrp_intr(cptvf); 334 CPT_LOG_DP_DEBUG("%s: NCB response write error interrupt 0x%lx" 335 " on CPT VF %d", cptvf->dev_name, 336 (unsigned int long)intr, cptvf->vfid); 337 } else if (unlikely(intr & CPT_VF_INTR_SWERR_MASK)) { 338 otx_cpt_clear_swerr_intr(cptvf); 339 CPT_LOG_DP_DEBUG("%s: Software error interrupt 0x%lx on CPT VF " 340 "%d", cptvf->dev_name, (unsigned int long)intr, 341 cptvf->vfid); 342 } else if (unlikely(intr & CPT_VF_INTR_HWERR_MASK)) { 343 otx_cpt_clear_hwerr_intr(cptvf); 344 CPT_LOG_DP_DEBUG("%s: Hardware error interrupt 0x%lx on CPT VF " 345 "%d", cptvf->dev_name, (unsigned int long)intr, 346 cptvf->vfid); 347 } else if (unlikely(intr & CPT_VF_INTR_FAULT_MASK)) { 348 otx_cpt_clear_fault_intr(cptvf); 349 CPT_LOG_DP_DEBUG("%s: Translation fault interrupt 0x%lx on CPT VF " 350 "%d", cptvf->dev_name, (unsigned int long)intr, 351 cptvf->vfid); 352 } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) { 353 otx_cpt_clear_dovf_intr(cptvf); 354 CPT_LOG_DP_DEBUG("%s: Doorbell overflow interrupt 0x%lx on CPT VF " 355 "%d", cptvf->dev_name, (unsigned int long)intr, 356 cptvf->vfid); 357 } else 358 CPT_LOG_DP_ERR("%s: Unhandled interrupt 0x%lx in CPT VF %d", 359 cptvf->dev_name, (unsigned int long)intr, 360 cptvf->vfid); 361 } 362 363 int 364 otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name) 365 { 366 memset(cptvf, 0, sizeof(struct cpt_vf)); 367 368 /* Bar0 base address */ 369 cptvf->reg_base = reg_base; 370 371 /* Save device name */ 372 strlcpy(cptvf->dev_name, name, (sizeof(cptvf->dev_name))); 373 374 cptvf->pdev = pdev; 375 376 /* To clear if there are any pending mbox msgs */ 377 otx_cpt_poll_misc(cptvf); 378 379 if (otx_cpt_vf_init(cptvf)) { 380 CPT_LOG_ERR("Failed to initialize CPT VF device"); 381 return -1; 382 } 383 384 return 0; 385 } 386 387 int 388 otx_cpt_deinit_device(void *dev) 389 { 390 struct cpt_vf *cptvf = (struct cpt_vf *)dev; 391 392 /* Do misc work one last time */ 393 otx_cpt_poll_misc(cptvf); 394 395 return 0; 396 } 397 398 int 399 otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance) 400 { 401 int ret = -ENOENT, len, qlen, i; 402 int chunk_len, chunks, chunk_size; 403 struct cpt_vf *cptvf = (struct cpt_vf *)dev; 404 struct cpt_instance *cpt_instance; 405 struct command_chunk *chunk_head = NULL, *chunk_prev = NULL; 406 struct command_chunk *chunk = NULL; 407 uint8_t *mem; 408 const struct rte_memzone *rz; 409 uint64_t dma_addr = 0, alloc_len, used_len; 410 uint64_t *next_ptr; 411 uint64_t pg_sz = sysconf(_SC_PAGESIZE); 412 413 CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name); 414 415 cpt_instance = &cptvf->instance; 416 417 memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue)); 418 memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue)); 419 420 /* Chunks are of fixed size buffers */ 421 chunks = DEFAULT_CMD_QCHUNKS; 422 chunk_len = DEFAULT_CMD_QCHUNK_SIZE; 423 424 qlen = chunks * chunk_len; 425 /* Chunk size includes 8 bytes of next chunk ptr */ 426 chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE; 427 428 /* For command chunk structures */ 429 len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8); 430 431 /* For pending queue */ 432 len += qlen * RTE_ALIGN(sizeof(struct rid), 8); 433 434 /* So that instruction queues start as pg size aligned */ 435 len = RTE_ALIGN(len, pg_sz); 436 437 /* For Instruction queues */ 438 len += chunks * RTE_ALIGN(chunk_size, 128); 439 440 /* Wastage after instruction queues */ 441 len = RTE_ALIGN(len, pg_sz); 442 443 rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node, 444 RTE_MEMZONE_SIZE_HINT_ONLY | 445 RTE_MEMZONE_256MB, 446 RTE_CACHE_LINE_SIZE); 447 if (!rz) { 448 ret = rte_errno; 449 goto cleanup; 450 } 451 452 mem = rz->addr; 453 dma_addr = rz->phys_addr; 454 alloc_len = len; 455 456 memset(mem, 0, len); 457 458 cpt_instance->rsvd = (uintptr_t)rz; 459 460 /* Pending queue setup */ 461 cptvf->pqueue.rid_queue = (struct rid *)mem; 462 cptvf->pqueue.enq_tail = 0; 463 cptvf->pqueue.deq_head = 0; 464 cptvf->pqueue.pending_count = 0; 465 466 mem += qlen * RTE_ALIGN(sizeof(struct rid), 8); 467 len -= qlen * RTE_ALIGN(sizeof(struct rid), 8); 468 dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8); 469 470 /* Alignment wastage */ 471 used_len = alloc_len - len; 472 mem += RTE_ALIGN(used_len, pg_sz) - used_len; 473 len -= RTE_ALIGN(used_len, pg_sz) - used_len; 474 dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len; 475 476 /* Init instruction queues */ 477 chunk_head = &cptvf->cqueue.chead[0]; 478 i = qlen; 479 480 chunk_prev = NULL; 481 for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) { 482 int csize; 483 484 chunk = &cptvf->cqueue.chead[i]; 485 chunk->head = mem; 486 chunk->dma_addr = dma_addr; 487 488 csize = RTE_ALIGN(chunk_size, 128); 489 mem += csize; 490 dma_addr += csize; 491 len -= csize; 492 493 if (chunk_prev) { 494 next_ptr = (uint64_t *)(chunk_prev->head + 495 chunk_size - 8); 496 *next_ptr = (uint64_t)chunk->dma_addr; 497 } 498 chunk_prev = chunk; 499 } 500 /* Circular loop */ 501 next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8); 502 *next_ptr = (uint64_t)chunk_head->dma_addr; 503 504 assert(!len); 505 506 /* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */ 507 cptvf->qsize = chunk_size / 8; 508 cptvf->cqueue.qhead = chunk_head->head; 509 cptvf->cqueue.idx = 0; 510 cptvf->cqueue.cchunk = 0; 511 512 if (cpt_vq_init(cptvf, group)) { 513 CPT_LOG_ERR("Failed to initialize CPT VQ of device %s", 514 cptvf->dev_name); 515 ret = -EBUSY; 516 goto cleanup; 517 } 518 519 *instance = cpt_instance; 520 521 CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name); 522 523 return 0; 524 cleanup: 525 rte_memzone_free(rz); 526 *instance = NULL; 527 return ret; 528 } 529 530 int 531 otx_cpt_put_resource(struct cpt_instance *instance) 532 { 533 struct cpt_vf *cptvf = (struct cpt_vf *)instance; 534 struct rte_memzone *rz; 535 536 if (!cptvf) { 537 CPT_LOG_ERR("Invalid CPTVF handle"); 538 return -EINVAL; 539 } 540 541 CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name); 542 543 rz = (struct rte_memzone *)instance->rsvd; 544 rte_memzone_free(rz); 545 return 0; 546 } 547 548 int 549 otx_cpt_start_device(void *dev) 550 { 551 int rc; 552 struct cpt_vf *cptvf = (struct cpt_vf *)dev; 553 554 rc = otx_cpt_send_vf_up(cptvf); 555 if (rc) { 556 CPT_LOG_ERR("Failed to mark CPT VF device %s UP, rc = %d", 557 cptvf->dev_name, rc); 558 return -EFAULT; 559 } 560 561 if ((cptvf->vftype != SE_TYPE) && (cptvf->vftype != AE_TYPE)) { 562 CPT_LOG_ERR("Fatal error, unexpected vf type %u, for CPT VF " 563 "device %s", cptvf->vftype, cptvf->dev_name); 564 return -ENOENT; 565 } 566 567 return 0; 568 } 569 570 void 571 otx_cpt_stop_device(void *dev) 572 { 573 int rc; 574 uint32_t pending, retries = 5; 575 struct cpt_vf *cptvf = (struct cpt_vf *)dev; 576 577 /* Wait for pending entries to complete */ 578 pending = otx_cpt_read_vq_doorbell(cptvf); 579 while (pending) { 580 CPT_LOG_DP_DEBUG("%s: Waiting for pending %u cmds to complete", 581 cptvf->dev_name, pending); 582 sleep(1); 583 pending = otx_cpt_read_vq_doorbell(cptvf); 584 retries--; 585 if (!retries) 586 break; 587 } 588 589 if (!retries && pending) { 590 CPT_LOG_ERR("%s: Timeout waiting for commands(%u)", 591 cptvf->dev_name, pending); 592 return; 593 } 594 595 rc = otx_cpt_send_vf_down(cptvf); 596 if (rc) { 597 CPT_LOG_ERR("Failed to bring down vf %s, rc %d", 598 cptvf->dev_name, rc); 599 return; 600 } 601 } 602