/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
#include <assert.h>
#include <string.h>
#include <unistd.h>

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>

#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_mbox.h"

#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_hw_types.h"

#define METABUF_POOL_CACHE_SIZE	512

/*
 * VF HAL functions
 * Access its own BAR0/4 registers by passing VF number as 0.
 * OS/PCI maps them accordingly.
 */

static int
otx_cpt_vf_init(struct cpt_vf *cptvf)
{
	int ret = 0;

	/* Check ready with PF */
	/* Gets chip ID / device Id from PF if ready */
	ret = otx_cpt_check_pf_ready(cptvf);
	if (ret) {
		CPT_LOG_ERR("%s: PF not responding to READY msg",
			    cptvf->dev_name);
		ret = -EBUSY;
		goto exit;
	}

	CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);

exit:
	return ret;
}

/*
 * Read Interrupt status of the VF
 *
 * @param cptvf cptvf structure
 */
static uint64_t
otx_cpt_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), CPTX_VQX_MISC_INT(0, 0));
}

/*
 * Clear mailbox interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_mbox_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear instruction NCB read error interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_irde_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear NCB result write response error interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_nwrp_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear swerr interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_swerr_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear hwerr interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_hwerr_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.hwerr = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear translation fault interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_fault_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.fault = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/*
 * Clear doorbell overflow interrupt of the VF
 *
 * @param cptvf cptvf structure
 */
static void
otx_cpt_clear_dovf_intr(struct cpt_vf *cptvf)
{
	cptx_vqx_misc_int_t vqx_misc_int;

	vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				      CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

/* Write to VQX_CTL register */
static void
otx_cpt_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
	cptx_vqx_ctl_t vqx_ctl;

	vqx_ctl.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				 CPTX_VQX_CTL(0, 0));
	vqx_ctl.s.ena = val;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

/* Write to VQX_INPROG register */
static void
otx_cpt_write_vq_inprog(struct cpt_vf *cptvf, uint8_t val)
{
	cptx_vqx_inprog_t vqx_inprg;

	vqx_inprg.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				   CPTX_VQX_INPROG(0, 0));
	vqx_inprg.s.inflight = val;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

/* Write to the NUM_WAIT field of the VQX_DONE_WAIT register */
static void
otx_cpt_write_vq_done_numwait(struct cpt_vf *cptvf, uint32_t val)
{
	cptx_vqx_done_wait_t vqx_dwait;

	vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				   CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.num_wait = val;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
}

/* Write to the TIME_WAIT field of the VQX_DONE_WAIT register */
static void
otx_cpt_write_vq_done_timewait(struct cpt_vf *cptvf, uint16_t val)
{
	cptx_vqx_done_wait_t vqx_dwait;

	vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				   CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.time_wait = val;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
}

/* Write to VQX_SADDR register */
static void
otx_cpt_write_vq_saddr(struct cpt_vf *cptvf, uint64_t val)
{
	cptx_vqx_saddr_t vqx_saddr;

	vqx_saddr.u = val;
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

static void
otx_cpt_vfvq_init(struct cpt_vf *cptvf)
{
	uint64_t base_addr = 0;

	/* Disable the VQ */
	otx_cpt_write_vq_ctl(cptvf, 0);

	/* Reset the doorbell */
	otx_cpt_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	otx_cpt_write_vq_inprog(cptvf, 0);

	/* Write VQ SADDR */
	base_addr = (uint64_t)(cptvf->cqueue.chead[0].dma_addr);
	otx_cpt_write_vq_saddr(cptvf, base_addr);

	/* Configure timer hold / coalescing thresholds */
	otx_cpt_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
	otx_cpt_write_vq_done_numwait(cptvf, CPT_COUNT_THOLD);

	/* Enable the VQ */
	otx_cpt_write_vq_ctl(cptvf, 1);
}
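
/*
 * Set up the VF virtual queue: convey the queue length to the PF, program
 * the VQ CSRs, and ask the PF to assign the queue to the requested group.
 *
 * @param cptvf cptvf structure
 * @param group group to assign the queue to
 */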
static int
cpt_vq_init(struct cpt_vf *cptvf, uint8_t group)
{
	int err;

	/* Convey VQ LEN to PF */
	err = otx_cpt_send_vq_size_msg(cptvf);
	if (err) {
		CPT_LOG_ERR("%s: PF not responding to QLEN msg",
			    cptvf->dev_name);
		err = -EBUSY;
		goto cleanup;
	}

	/* CPT VF device initialization */
	otx_cpt_vfvq_init(cptvf);

	/* Send msg to PF to assign current Q to required group */
	cptvf->vfgrp = group;
	err = otx_cpt_send_vf_grp_msg(cptvf, group);
	if (err) {
		CPT_LOG_ERR("%s: PF not responding to VF_GRP msg",
			    cptvf->dev_name);
		err = -EBUSY;
		goto cleanup;
	}

	CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
	return 0;

cleanup:
	return err;
}

void
otx_cpt_poll_misc(struct cpt_vf *cptvf)
{
	uint64_t intr;

	intr = otx_cpt_read_vf_misc_intr_status(cptvf);

	if (!intr)
		return;

	/* Check for MISC interrupt types */
	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
		CPT_LOG_DP_DEBUG("%s: Mailbox interrupt 0x%lx on CPT VF %d",
				 cptvf->dev_name, (unsigned long)intr,
				 cptvf->vfid);
		otx_cpt_handle_mbox_intr(cptvf);
		otx_cpt_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
		otx_cpt_clear_irde_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: Instruction NCB read error interrupt "
				 "0x%lx on CPT VF %d", cptvf->dev_name,
				 (unsigned long)intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
		otx_cpt_clear_nwrp_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: NCB response write error interrupt 0x%lx"
				 " on CPT VF %d", cptvf->dev_name,
				 (unsigned long)intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_SWERR_MASK)) {
		otx_cpt_clear_swerr_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: Software error interrupt 0x%lx on CPT VF "
				 "%d", cptvf->dev_name, (unsigned long)intr,
				 cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_HWERR_MASK)) {
		otx_cpt_clear_hwerr_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: Hardware error interrupt 0x%lx on CPT VF "
				 "%d", cptvf->dev_name, (unsigned long)intr,
				 cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_FAULT_MASK)) {
		otx_cpt_clear_fault_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: Translation fault interrupt 0x%lx on CPT "
				 "VF %d", cptvf->dev_name, (unsigned long)intr,
				 cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
		otx_cpt_clear_dovf_intr(cptvf);
		CPT_LOG_DP_DEBUG("%s: Doorbell overflow interrupt 0x%lx on CPT "
				 "VF %d", cptvf->dev_name, (unsigned long)intr,
				 cptvf->vfid);
	} else
		CPT_LOG_DP_ERR("%s: Unhandled interrupt 0x%lx on CPT VF %d",
			       cptvf->dev_name, (unsigned long)intr,
			       cptvf->vfid);
}

int
otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name)
{
	memset(cptvf, 0, sizeof(struct cpt_vf));

	/* BAR0 base address */
	cptvf->reg_base = reg_base;

	/* Save device name */
	strlcpy(cptvf->dev_name, name, sizeof(cptvf->dev_name));

	cptvf->pdev = pdev;

	/* Clear any pending mbox msgs */
	otx_cpt_poll_misc(cptvf);

	if (otx_cpt_vf_init(cptvf)) {
		CPT_LOG_ERR("Failed to initialize CPT VF device");
		return -1;
	}

	return 0;
}

int
otx_cpt_deinit_device(void *dev)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;

	/* Do misc work one last time */
	otx_cpt_poll_misc(cptvf);

	return 0;
}
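
/*
 * Create the metabuf mempool backing a queue pair's per-op metadata.
 * Each element is sized for the larger of the direct (linear buffer) and
 * scatter-gather mode requirements, plus a small reserve.
 *
 * @param dev cryptodev
 * @param instance cpt instance whose meta_info is populated
 * @param qp_id queue pair id, used in the mempool name
 * @param nb_elements number of metabufs in the pool
 */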
static int
otx_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
			       struct cpt_instance *instance, uint8_t qp_id,
			       int nb_elements)
{
	char mempool_name[RTE_MEMPOOL_NAMESIZE];
	int sg_mlen, lb_mlen, max_mlen, ret;
	struct cpt_qp_meta_info *meta_info;
	struct rte_mempool *pool;

	/* Get meta len for scatter gather mode */
	sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();

	/* Extra 32B saved for future considerations */
	sg_mlen += 4 * sizeof(uint64_t);

	/* Get meta len for linear buffer (direct) mode */
	lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();

	/* Extra 32B saved for future considerations */
	lb_mlen += 4 * sizeof(uint64_t);

	/* Check max requirement for meta buffer */
	max_mlen = RTE_MAX(lb_mlen, sg_mlen);

	/* Allocate mempool */

	snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx_cpt_mb_%u:%u",
		 dev->data->dev_id, qp_id);

	pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen,
					METABUF_POOL_CACHE_SIZE, 0,
					rte_socket_id(), 0);

	if (pool == NULL) {
		CPT_LOG_ERR("Could not create mempool for metabuf");
		return rte_errno;
	}

	ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
					 NULL);
	if (ret) {
		CPT_LOG_ERR("Could not set mempool ops");
		goto mempool_free;
	}

	ret = rte_mempool_populate_default(pool);
	if (ret <= 0) {
		CPT_LOG_ERR("Could not populate metabuf pool");
		goto mempool_free;
	}

	meta_info = &instance->meta_info;

	meta_info->pool = pool;
	meta_info->lb_mlen = lb_mlen;
	meta_info->sg_mlen = sg_mlen;

	return 0;

mempool_free:
	rte_mempool_free(pool);
	return ret;
}

static void
otx_cpt_metabuf_mempool_destroy(struct cpt_instance *instance)
{
	struct cpt_qp_meta_info *meta_info = &instance->meta_info;

	rte_mempool_free(meta_info->pool);

	meta_info->pool = NULL;
	meta_info->lb_mlen = 0;
	meta_info->sg_mlen = 0;
}

int
otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group,
		     struct cpt_instance **instance, uint16_t qp_id)
{
	int ret = -ENOENT, len, qlen, i;
	int chunk_len, chunks, chunk_size;
	struct cpt_vf *cptvf = dev->data->dev_private;
	struct cpt_instance *cpt_instance;
	struct command_chunk *chunk_head = NULL, *chunk_prev = NULL;
	struct command_chunk *chunk = NULL;
	uint8_t *mem;
	const struct rte_memzone *rz;
	uint64_t dma_addr = 0, alloc_len, used_len;
	uint64_t *next_ptr;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);

	CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name);

	cpt_instance = &cptvf->instance;

	memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue));
	memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue));

	/* Chunks are fixed-size buffers */
	chunks = DEFAULT_CMD_QCHUNKS;
	chunk_len = DEFAULT_CMD_QCHUNK_SIZE;

	qlen = chunks * chunk_len;
	/* Chunk size includes 8 bytes of next chunk ptr */
	chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE;

	/* For command chunk structures */
	len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);

	/* For pending queue */
	len += qlen * RTE_ALIGN(sizeof(struct rid), 8);

	/* So that instruction queues start page-size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For instruction queues */
	len += chunks * RTE_ALIGN(chunk_size, 128);

	/* Wastage after instruction queues */
	len = RTE_ALIGN(len, pg_sz);
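
	/*
	 * The carving below lays the reservation out as:
	 *   - pending queue ring: qlen * RTE_ALIGN(sizeof(struct rid), 8)
	 *     bytes
	 *   - padding up to the next page boundary
	 *   - DEFAULT_CMD_QCHUNKS instruction queue chunks of
	 *     RTE_ALIGN(chunk_size, 128) bytes each, linked into a ring
	 *     through the 8-byte next-chunk pointer at the end of each chunk
	 */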
	rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node,
					 RTE_MEMZONE_SIZE_HINT_ONLY |
					 RTE_MEMZONE_256MB,
					 RTE_CACHE_LINE_SIZE);
	if (!rz) {
		ret = rte_errno;
		goto exit;
	}

	mem = rz->addr;
	dma_addr = rz->phys_addr;
	alloc_len = len;

	memset(mem, 0, len);

	cpt_instance->rsvd = (uintptr_t)rz;

	ret = otx_cpt_metabuf_mempool_create(dev, cpt_instance, qp_id, qlen);
	if (ret) {
		CPT_LOG_ERR("Could not create mempool for metabuf");
		goto memzone_free;
	}

	/* Pending queue setup */
	cptvf->pqueue.rid_queue = (struct rid *)mem;
	cptvf->pqueue.enq_tail = 0;
	cptvf->pqueue.deq_head = 0;
	cptvf->pqueue.pending_count = 0;

	mem += qlen * RTE_ALIGN(sizeof(struct rid), 8);
	len -= qlen * RTE_ALIGN(sizeof(struct rid), 8);
	dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8);

	/* Alignment wastage */
	used_len = alloc_len - len;
	mem += RTE_ALIGN(used_len, pg_sz) - used_len;
	len -= RTE_ALIGN(used_len, pg_sz) - used_len;
	dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len;

	/* Init instruction queues */
	chunk_head = &cptvf->cqueue.chead[0];
	i = qlen;

	chunk_prev = NULL;
	for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) {
		int csize;

		chunk = &cptvf->cqueue.chead[i];
		chunk->head = mem;
		chunk->dma_addr = dma_addr;

		csize = RTE_ALIGN(chunk_size, 128);
		mem += csize;
		dma_addr += csize;
		len -= csize;

		if (chunk_prev) {
			next_ptr = (uint64_t *)(chunk_prev->head +
						chunk_size - 8);
			*next_ptr = (uint64_t)chunk->dma_addr;
		}
		chunk_prev = chunk;
	}
	/* Circular loop */
	next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8);
	*next_ptr = (uint64_t)chunk_head->dma_addr;

	assert(!len);

	/* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */
	cptvf->qsize = chunk_size / 8;
	cptvf->cqueue.qhead = chunk_head->head;
	cptvf->cqueue.idx = 0;
	cptvf->cqueue.cchunk = 0;

	if (cpt_vq_init(cptvf, group)) {
		CPT_LOG_ERR("Failed to initialize CPT VQ of device %s",
			    cptvf->dev_name);
		ret = -EBUSY;
		goto mempool_destroy;
	}

	*instance = cpt_instance;

	CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name);

	return 0;

mempool_destroy:
	otx_cpt_metabuf_mempool_destroy(cpt_instance);
memzone_free:
	rte_memzone_free(rz);
exit:
	*instance = NULL;
	return ret;
}

int
otx_cpt_put_resource(struct cpt_instance *instance)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct rte_memzone *rz;

	if (!cptvf) {
		CPT_LOG_ERR("Invalid CPTVF handle");
		return -EINVAL;
	}

	CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name);

	otx_cpt_metabuf_mempool_destroy(instance);

	rz = (struct rte_memzone *)instance->rsvd;
	rte_memzone_free(rz);
	return 0;
}

int
otx_cpt_start_device(void *dev)
{
	int rc;
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;

	rc = otx_cpt_send_vf_up(cptvf);
	if (rc) {
		CPT_LOG_ERR("Failed to mark CPT VF device %s UP, rc = %d",
			    cptvf->dev_name, rc);
		return -EFAULT;
	}

	if ((cptvf->vftype != SE_TYPE) && (cptvf->vftype != AE_TYPE)) {
		CPT_LOG_ERR("Fatal error, unexpected vf type %u, for CPT VF "
			    "device %s", cptvf->vftype, cptvf->dev_name);
		return -ENOENT;
	}

	return 0;
}
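
/*
 * Stop the device: poll the VQ doorbell until outstanding commands drain
 * (retrying for a few seconds) and then request the PF to bring the VF down.
 *
 * @param dev cpt_vf handle
 */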
void
otx_cpt_stop_device(void *dev)
{
	int rc;
	uint32_t pending, retries = 5;
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;

	/* Wait for pending entries to complete */
	pending = otx_cpt_read_vq_doorbell(cptvf);
	while (pending) {
		CPT_LOG_DP_DEBUG("%s: Waiting for pending %u cmds to complete",
				 cptvf->dev_name, pending);
		sleep(1);
		pending = otx_cpt_read_vq_doorbell(cptvf);
		retries--;
		if (!retries)
			break;
	}

	if (!retries && pending) {
		CPT_LOG_ERR("%s: Timeout waiting for commands(%u)",
			    cptvf->dev_name, pending);
		return;
	}

	rc = otx_cpt_send_vf_down(cptvf);
	if (rc) {
		CPT_LOG_ERR("Failed to bring down vf %s, rc %d",
			    cptvf->dev_name, rc);
		return;
	}
}
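
/*
 * Rough sketch of how the entry points above are typically sequenced by the
 * PMD layer (illustrative only; variable names and the surrounding setup are
 * not taken from this file):
 *
 *	otx_cpt_hw_init(cptvf, pci_dev, bar0_va, name);
 *	otx_cpt_get_resource(cryptodev, group, &instance, qp_id);
 *	otx_cpt_start_device(cptvf);
 *	...enqueue/dequeue crypto ops, calling otx_cpt_poll_misc(cptvf) to
 *	   service mailbox/error interrupts...
 *	otx_cpt_stop_device(cptvf);
 *	otx_cpt_put_resource(instance);
 *	otx_cpt_deinit_device(cptvf);
 */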