/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/mman.h>

#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_mbuf.h>

#include "octeontx_mbox.h"
#include "octeontx_fpavf.h"

/* FPA Mbox Message */
#define IDENTIFY		0x0

#define FPA_CONFIGSET		0x1
#define FPA_CONFIGGET		0x2
#define FPA_START_COUNT		0x3
#define FPA_STOP_COUNT		0x4
#define FPA_ATTACHAURA		0x5
#define FPA_DETACHAURA		0x6
#define FPA_SETAURALVL		0x7
#define FPA_GETAURALVL		0x8

#define FPA_COPROC		0x1

/* fpa mbox struct */
struct octeontx_mbox_fpa_cfg {
	int		aid;
	uint64_t	pool_cfg;
	uint64_t	pool_stack_base;
	uint64_t	pool_stack_end;
	uint64_t	aura_cfg;
};

struct __attribute__((__packed__)) gen_req {
	uint32_t	value;
};

struct __attribute__((__packed__)) idn_req {
	uint8_t	domain_id;
};

struct __attribute__((__packed__)) gen_resp {
	uint16_t	domain_id;
	uint16_t	vfid;
};

struct __attribute__((__packed__)) dcfg_resp {
	uint8_t	sso_count;
	uint8_t	ssow_count;
	uint8_t	fpa_count;
	uint8_t	pko_count;
	uint8_t	tim_count;
	uint8_t	net_port_count;
	uint8_t	virt_port_count;
};

#define FPA_MAX_POOL	32
#define FPA_PF_PAGE_SZ	4096

#define FPA_LN_SIZE	128
#define FPA_ROUND_UP(x, size) \
	((((unsigned long)(x)) + ((size) - 1)) & (~((size) - 1)))
#define FPA_OBJSZ_2_CACHE_LINE(sz)	(((sz) + RTE_CACHE_LINE_MASK) >> 7)
#define FPA_CACHE_LINE_2_OBJSZ(sz)	((sz) << 7)

#define POOL_ENA		(0x1 << 0)
#define POOL_DIS		(0x0 << 0)
#define POOL_SET_NAT_ALIGN	(0x1 << 1)
#define POOL_DIS_NAT_ALIGN	(0x0 << 1)
#define POOL_STYPE(x)		(((x) & 0x1) << 2)
#define POOL_LTYPE(x)		(((x) & 0x3) << 3)
#define POOL_BUF_OFFSET(x)	(((x) & 0x7fffULL) << 16)
#define POOL_BUF_SIZE(x)	(((x) & 0x7ffULL) << 32)

struct fpavf_res {
	void		*pool_stack_base;
	void		*bar0;
	uint64_t	stack_ln_ptr;
	uint16_t	domain_id;
	uint16_t	vf_id;	/* gpool_id */
	uint16_t	sz128;	/* Block size in cache lines */
	bool		is_inuse;
};

struct octeontx_fpadev {
	rte_spinlock_t		lock;
	uint8_t			total_gpool_cnt;
	struct fpavf_res	pool[FPA_VF_MAX];
};

static struct octeontx_fpadev fpadev;

int octeontx_logtype_fpavf;
int octeontx_logtype_fpavf_mbox;

RTE_INIT(otx_pool_init_log)
{
	octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
	if (octeontx_logtype_fpavf >= 0)
		rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
}

/* lock is taken by caller */
static int
octeontx_fpa_gpool_alloc(unsigned int object_size)
{
	uint16_t global_domain = octeontx_get_global_domain();
	struct fpavf_res *res = NULL;
	unsigned int sz128;
	int i;

	sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);

	for (i = 0; i < FPA_VF_MAX; i++) {

		/* Skip VFs that are unmapped, already in use, or in
		 * another domain.
		 */
		if ((fpadev.pool[i].bar0 == NULL) ||
		    (fpadev.pool[i].is_inuse == true) ||
		    (fpadev.pool[i].domain_id != global_domain))
			continue;

		res = &fpadev.pool[i];

		RTE_ASSERT(res->domain_id != (uint16_t)~0);
		RTE_ASSERT(res->vf_id != (uint16_t)~0);
		RTE_ASSERT(res->stack_ln_ptr != 0);

		if (res->sz128 == 0) {
			res->sz128 = sz128;
			fpavf_log_dbg("gpool %d blk_sz %d\n", res->vf_id,
				      sz128);

			return res->vf_id;
		}
	}

	return -ENOSPC;
}
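/*
 * Worked example of the sz128 block-size encoding used above (a sketch for
 * illustration only, assuming the usual 128-byte cache line on OCTEON TX,
 * i.e. RTE_CACHE_LINE_MASK == 127): an object_size of 2000 bytes gives
 *
 *	sz128 = FPA_OBJSZ_2_CACHE_LINE(2000) = (2000 + 127) >> 7 = 16
 *
 * and converting back,
 *
 *	FPA_CACHE_LINE_2_OBJSZ(16) = 16 << 7 = 2048
 *
 * i.e. the pool rounds every object up to a whole number of cache lines.
 */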
static __rte_always_inline struct fpavf_res *
octeontx_get_fpavf(uint16_t gpool)
{
	uint16_t global_domain = octeontx_get_global_domain();
	int i;

	for (i = 0; i < FPA_VF_MAX; i++) {
		if (fpadev.pool[i].domain_id != global_domain)
			continue;
		if (fpadev.pool[i].vf_id != gpool)
			continue;

		return &fpadev.pool[i];
	}

	return NULL;
}

/* lock is taken by caller */
static __rte_always_inline uintptr_t
octeontx_fpa_gpool2handle(uint16_t gpool)
{
	struct fpavf_res *res = NULL;

	RTE_ASSERT(gpool < FPA_VF_MAX);
	res = octeontx_get_fpavf(gpool);
	if (res == NULL)
		return 0;

	return (uintptr_t)res->bar0 | gpool;
}

static __rte_always_inline bool
octeontx_fpa_handle_valid(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;
	int i;
	bool ret = false;

	if (unlikely(!handle))
		return ret;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* get the bar address */
	handle &= ~(uint64_t)FPA_GPOOL_MASK;
	for (i = 0; i < FPA_VF_MAX; i++) {
		if ((uintptr_t)fpadev.pool[i].bar0 != handle)
			continue;

		/* validate gpool */
		if (gpool != fpadev.pool[i].vf_id)
			return false;

		res = &fpadev.pool[i];

		if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
		    res->stack_ln_ptr == 0)
			ret = false;
		else
			ret = true;
		break;
	}

	return ret;
}

static int
octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
			  signed short buf_offset, unsigned int max_buf_count)
{
	void *memptr = NULL;
	rte_iova_t phys_addr;
	unsigned int memsz;
	struct fpavf_res *fpa = NULL;
	uint64_t reg;
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = -1;

	fpa = octeontx_get_fpavf(gpool);
	if (fpa == NULL)
		return -EINVAL;

	memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
			FPA_LN_SIZE;

	/* Round-up to page size */
	memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ - 1);
	memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
	if (memptr == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/* Configure stack */
	fpa->pool_stack_base = memptr;
	phys_addr = rte_malloc_virt2iova(memptr);

	buf_size /= FPA_LN_SIZE;

	/* POOL setup */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	buf_offset /= FPA_LN_SIZE;
	reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
		POOL_ENA;

	cfg.aid = FPA_AURA_IDX(gpool);
	cfg.pool_cfg = reg;
	cfg.pool_stack_base = phys_addr;
	cfg.pool_stack_end = phys_addr + memsz;
	cfg.aura_cfg = (1 << 9);

	ret = octeontx_mbox_send(&hdr, &cfg,
				 sizeof(struct octeontx_mbox_fpa_cfg),
				 &resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	fpavf_log_dbg("vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64 " aura_cfg %" PRIx64 "\n",
		      fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
		      cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);

	/* The pool is now in use */
	fpa->is_inuse = true;

err:
	if (ret < 0)
		rte_free(memptr);

	return ret;
}
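/*
 * Illustrative sizing of the pool stack configured above. The numbers are
 * assumptions for the example, not values read from hardware: taking
 * stack_ln_ptr = 16 (16 eight-byte buffer pointers per 128-byte stack line)
 * and max_buf_count = 10000,
 *
 *	lines = 10000 / 16                       = 625
 *	memsz = FPA_ROUND_UP(625, 128) * 128     = 640 * 128 = 81920 bytes
 *
 * and the subsequent page round-up leaves memsz at 81920, which is already
 * a multiple of FPA_PF_PAGE_SZ (4096).
 */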
static int
octeontx_fpapf_pool_destroy(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	struct fpavf_res *fpa = NULL;
	int ret = -1;

	fpa = octeontx_get_fpavf(gpool_index);
	if (fpa == NULL)
		return -EINVAL;

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	/* reset and free the pool */
	cfg.aid = 0;
	cfg.pool_cfg = 0;
	cfg.pool_stack_base = 0;
	cfg.pool_stack_end = 0;
	cfg.aura_cfg = 0;

	ret = octeontx_mbox_send(&hdr, &cfg,
				 sizeof(struct octeontx_mbox_fpa_cfg),
				 &resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	ret = 0;
err:
	/* In any case, free the pool stack memory */
	rte_free(fpa->pool_stack_base);
	fpa->pool_stack_base = NULL;
	return ret;
}

static int
octeontx_fpapf_aura_attach(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_ATTACHAURA;
	hdr.vfid = gpool_index;
	hdr.res_code = 0;
	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
	cfg.aid = FPA_AURA_IDX(gpool_index);

	ret = octeontx_mbox_send(&hdr, &cfg,
				 sizeof(struct octeontx_mbox_fpa_cfg),
				 &resp, sizeof(resp));
	if (ret < 0) {
		fpavf_log_err("Could not attach fpa aura %d to pool %d. Err=%d. FuncErr=%d\n",
			      FPA_AURA_IDX(gpool_index), gpool_index, ret,
			      hdr.res_code);
		ret = -EACCES;
		goto err;
	}
err:
	return ret;
}

static int
octeontx_fpapf_aura_detach(unsigned int gpool_index)
{
	struct octeontx_mbox_fpa_cfg cfg = {0};
	struct octeontx_mbox_hdr hdr = {0};
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	cfg.aid = FPA_AURA_IDX(gpool_index);
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_DETACHAURA;
	hdr.vfid = gpool_index;
	ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
			      FPA_AURA_IDX(gpool_index), ret,
			      hdr.res_code);
		ret = -EINVAL;
	}

err:
	return ret;
}
int
octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
			      void *memva, uint16_t gpool)
{
	uint64_t va_end;

	if (unlikely(!handle))
		return -ENODEV;

	va_end = (uintptr_t)memva + memsz;
	va_end &= ~RTE_CACHE_LINE_MASK;

	/* VHPOOL setup */
	fpavf_write64((uintptr_t)memva,
		      (void *)((uintptr_t)handle +
		      FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(va_end,
		      (void *)((uintptr_t)handle +
		      FPA_VF_VHPOOL_END_ADDR(gpool)));
	return 0;
}

static int
octeontx_fpapf_start_count(uint16_t gpool_index)
{
	int ret = 0;
	struct octeontx_mbox_hdr hdr = {0};

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_START_COUNT;
	hdr.vfid = gpool_index;
	ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Could not start buffer counting for FPA pool %d. Err=%d. FuncErr=%d\n",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
		goto err;
	}

err:
	return ret;
}

static __rte_always_inline int
octeontx_fpavf_free(unsigned int gpool)
{
	struct fpavf_res *res = octeontx_get_fpavf(gpool);
	int ret = 0;

	if (gpool >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	/* Pool is free */
	if (res != NULL)
		res->is_inuse = false;

err:
	return ret;
}

static __rte_always_inline int
octeontx_gpool_free(uint16_t gpool)
{
	struct fpavf_res *res = octeontx_get_fpavf(gpool);

	if (res && res->sz128 != 0) {
		res->sz128 = 0;
		return 0;
	}
	return -EINVAL;
}

/*
 * Return buffer size for a given pool
 */
int
octeontx_fpa_bufpool_block_size(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	res = octeontx_get_fpavf(gpool);
	return res ? FPA_CACHE_LINE_2_OBJSZ(res->sz128) : 0;
}
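/*
 * Handle layout sketch, for illustration only. The handle packs the VF's
 * BAR0 address (page aligned, so its low bits are zero) with the gpool id
 * in the bits covered by FPA_GPOOL_MASK; the mask and accessor helpers are
 * defined in octeontx_fpavf.h. With an arbitrary example BAR0 of
 * 0x840000200000 and gpool 3:
 *
 *	handle = 0x840000200000 | 3          -> 0x840000200003
 *	octeontx_fpa_bufpool_gpool(handle)   -> 3
 *	handle & ~(uint64_t)FPA_GPOOL_MASK   -> 0x840000200000 (BAR0)
 */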
int
octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
	uint64_t cnt, limit, avail;
	uint8_t gpool;
	uint16_t gaura;
	uintptr_t pool_bar;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	/* get the aura */
	gaura = octeontx_fpa_bufpool_gaura(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT(gaura)));
	limit = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT_LIMIT(gaura)));

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	return RTE_MIN(avail, (limit - cnt));
}

uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
			    unsigned int buf_offset, int node_id)
{
	unsigned int gpool;
	unsigned int gaura;
	uintptr_t gpool_handle;
	uintptr_t pool_bar;
	int res;

	RTE_SET_USED(node_id);
	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);

	octeontx_mbox_init();
	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
	if (object_size > FPA_MAX_OBJ_SIZE) {
		errno = EINVAL;
		goto error_end;
	}

	rte_spinlock_lock(&fpadev.lock);
	res = octeontx_fpa_gpool_alloc(object_size);

	/* Bail if failed; res is a negative errno, so negate it for errno */
	if (unlikely(res < 0)) {
		errno = -res;
		goto error_unlock;
	}

	/* get fpavf */
	gpool = res;

	/* get pool handle */
	gpool_handle = octeontx_fpa_gpool2handle(gpool);
	if (!octeontx_fpa_handle_valid(gpool_handle)) {
		errno = ENOSPC;
		goto error_gpool_free;
	}

	/* Get pool bar address from handle */
	pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;

	res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
					object_count);
	if (res < 0) {
		errno = -res;
		goto error_gpool_free;
	}

	/* populate AURA fields */
	res = octeontx_fpapf_aura_attach(gpool);
	if (res < 0) {
		errno = -res;
		goto error_pool_destroy;
	}

	gaura = FPA_AURA_IDX(gpool);

	/* Release lock */
	rte_spinlock_unlock(&fpadev.lock);

	/* populate AURA registers */
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT(gaura)));
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	octeontx_fpapf_start_count(gpool);

	return gpool_handle;

error_pool_destroy:
	octeontx_fpavf_free(gpool);
	octeontx_fpapf_pool_destroy(gpool);
error_gpool_free:
	octeontx_gpool_free(gpool);
error_unlock:
	rte_spinlock_unlock(&fpadev.lock);
error_end:
	return (uintptr_t)NULL;
}
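/*
 * Usage sketch for the exported pool API, illustrative only: a mempool
 * backend might drive these calls roughly as below. The object size/count
 * are assumptions for the example, and memva/memsz stand for the VA and
 * length of the caller's buffer memory (error handling trimmed).
 *
 *	uintptr_t handle;
 *
 *	handle = octeontx_fpa_bufpool_create(2048, 8192,
 *					     OCTEONTX_FPAVF_BUF_OFFSET,
 *					     rte_socket_id());
 *	if (!handle)
 *		return -errno;
 *
 *	// Register the VA range backing the buffers before enqueuing them
 *	octeontx_fpavf_pool_set_range(handle, memsz, memva,
 *				      octeontx_fpa_bufpool_gpool(handle));
 *	...
 *	octeontx_fpa_bufpool_destroy(handle, rte_socket_id());
 */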
/*
 * Destroy a buffer pool.
 */
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
{
	void **node, **curr, *head = NULL;
	uint64_t sz;
	uint64_t cnt, avail;
	uint8_t gpool;
	uint16_t gaura;
	uintptr_t pool_bar;
	int ret;

	RTE_SET_USED(node_id);

	/* Wait for all outstanding writes to be committed */
	rte_smp_wmb();

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the pool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	/* get the aura */
	gaura = octeontx_fpa_bufpool_gaura(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	/* Check for no outstanding buffers */
	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
					FPA_VF_VHAURA_CNT(gaura)));
	if (cnt) {
		fpavf_log_dbg("buffers exist in pool, cnt %" PRId64 "\n", cnt);
		return -EBUSY;
	}

	rte_spinlock_lock(&fpadev.lock);

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	/* Prepare to empty the entire POOL */
	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	/* Empty the pool */
	/* Invalidate the POOL */
	octeontx_gpool_free(gpool);

	/* Process all buffers in the pool */
	while (avail--) {

		/* Yank a buffer from the pool */
		node = (void *)(uintptr_t)
			fpavf_read64((void *)
				(pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));

		if (node == NULL) {
			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
				      gaura, avail);
			break;
		}

		/* Insert it into an ordered linked list */
		for (curr = &head; curr[0] != NULL; curr = curr[0]) {
			if ((uintptr_t)node <= (uintptr_t)curr[0])
				break;
		}
		node[0] = curr[0];
		curr[0] = node;
	}

	/* Verify the linked list to be a perfect series: consecutive
	 * natural-aligned buffers must be spaced by exactly the block size
	 * in bytes, which octeontx_fpa_bufpool_block_size() already returns
	 * (shifting it again by 7 would double-convert from cache lines).
	 */
	sz = octeontx_fpa_bufpool_block_size(handle);
	for (curr = head; curr != NULL && curr[0] != NULL;
	     curr = curr[0]) {
		if (curr == curr[0] ||
		    ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
			fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
				      gpool, curr, curr[0]);
		}
	}

	/* Disable pool operation */
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_END_ADDR(gpool)));

	(void)octeontx_fpapf_pool_destroy(gpool);

	/* Deactivate the AURA */
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	ret = octeontx_fpapf_aura_detach(gpool);
	if (ret) {
		fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
			      gpool, ret);
	}

	/* Free VF */
	(void)octeontx_fpavf_free(gpool);

	rte_spinlock_unlock(&fpadev.lock);
	return 0;
}
static void
octeontx_fpavf_setup(void)
{
	uint8_t i;
	static bool init_once;

	if (!init_once) {
		rte_spinlock_init(&fpadev.lock);
		fpadev.total_gpool_cnt = 0;

		for (i = 0; i < FPA_VF_MAX; i++) {

			fpadev.pool[i].domain_id = ~0;
			fpadev.pool[i].stack_ln_ptr = 0;
			fpadev.pool[i].sz128 = 0;
			fpadev.pool[i].bar0 = NULL;
			fpadev.pool[i].pool_stack_base = NULL;
			fpadev.pool[i].is_inuse = false;
		}
		init_once = true;
	}
}

static int
octeontx_fpavf_identify(void *bar0)
{
	uint64_t val;
	uint16_t domain_id;
	uint16_t vf_id;
	uint64_t stack_ln_ptr;
	static uint16_t vf_idx;

	val = fpavf_read64((void *)((uintptr_t)bar0 +
				FPA_VF_VHAURA_CNT_THRESHOLD(0)));

	domain_id = (val >> 8) & 0xffff;
	vf_id = (val >> 24) & 0xffff;

	stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
					FPA_VF_VHPOOL_THRESHOLD(0)));
	if (vf_idx >= FPA_VF_MAX) {
		fpavf_log_err("vf_id(%d) exceeds max VFs (%d)\n",
			      vf_id, FPA_VF_MAX);
		return -E2BIG;
	}

	fpadev.pool[vf_idx].domain_id = domain_id;
	fpadev.pool[vf_idx].vf_id = vf_id;
	fpadev.pool[vf_idx].bar0 = bar0;
	fpadev.pool[vf_idx].stack_ln_ptr = stack_ln_ptr;

	/* SUCCESS */
	return vf_idx++;
}

/* FPAVF pcie device aka mempool probe */
static int
fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint8_t *idreg;
	int res;
	struct fpavf_res *fpa = NULL;

	RTE_SET_USED(pci_drv);
	RTE_SET_USED(fpa);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL) {
		fpavf_log_err("Empty BAR0 %p\n", pci_dev->mem_resource[0].addr);
		return -ENODEV;
	}
	idreg = pci_dev->mem_resource[0].addr;

	octeontx_fpavf_setup();

	res = octeontx_fpavf_identify(idreg);
	if (res < 0)
		return -1;

	fpa = &fpadev.pool[res];
	fpadev.total_gpool_cnt++;
	rte_wmb();

	fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
		      fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
		      fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);

	return 0;
}

static const struct rte_pci_id pci_fpavf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_FPA_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_fpavf = {
	.id_table = pci_fpavf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = fpavf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);