/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/mman.h>

#include <rte_atomic.h>
#include <rte_eal.h>
#include <bus_pci_driver.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_mbuf.h>

#include "octeontx_mbox.h"
#include "octeontx_fpavf.h"

/* FPA Mbox Message */
#define IDENTIFY		0x0

#define FPA_CONFIGSET		0x1
#define FPA_CONFIGGET		0x2
#define FPA_START_COUNT		0x3
#define FPA_STOP_COUNT		0x4
#define FPA_ATTACHAURA		0x5
#define FPA_DETACHAURA		0x6
#define FPA_SETAURALVL		0x7
#define FPA_GETAURALVL		0x8

#define FPA_COPROC		0x1

/* fpa mbox struct */
struct octeontx_mbox_fpa_cfg {
	int		aid;
	uint64_t	pool_cfg;
	uint64_t	pool_stack_base;
	uint64_t	pool_stack_end;
	uint64_t	aura_cfg;
};

struct __rte_packed_begin gen_req {
	uint32_t	value;
} __rte_packed_end;

struct __rte_packed_begin idn_req {
	uint8_t	domain_id;
} __rte_packed_end;

struct __rte_packed_begin gen_resp {
	uint16_t	domain_id;
	uint16_t	vfid;
} __rte_packed_end;

struct __rte_packed_begin dcfg_resp {
	uint8_t	sso_count;
	uint8_t	ssow_count;
	uint8_t	fpa_count;
	uint8_t	pko_count;
	uint8_t	tim_count;
	uint8_t	net_port_count;
	uint8_t	virt_port_count;
} __rte_packed_end;

#define FPA_MAX_POOL	32
#define FPA_PF_PAGE_SZ	4096

#define FPA_LN_SIZE	128
#define FPA_ROUND_UP(x, size) \
	((((unsigned long)(x)) + size-1) & (~(size-1)))
#define FPA_OBJSZ_2_CACHE_LINE(sz)	(((sz) + RTE_CACHE_LINE_MASK) >> 7)
#define FPA_CACHE_LINE_2_OBJSZ(sz)	((sz) << 7)

#define POOL_ENA			(0x1 << 0)
#define POOL_DIS			(0x0 << 0)
#define POOL_SET_NAT_ALIGN		(0x1 << 1)
#define POOL_DIS_NAT_ALIGN		(0x0 << 1)
#define POOL_STYPE(x)			(((x) & 0x1) << 2)
#define POOL_LTYPE(x)			(((x) & 0x3) << 3)
#define POOL_BUF_OFFSET(x)		(((x) & 0x7fffULL) << 16)
#define POOL_BUF_SIZE(x)		(((x) & 0x7ffULL) << 32)

struct fpavf_res {
	void		*pool_stack_base;
	void		*bar0;
	uint64_t	stack_ln_ptr;
	uint16_t	domain_id;
	uint16_t	vf_id;	/* gpool_id */
	uint16_t	sz128;	/* Block size in cache lines */
	bool		is_inuse;
};

struct octeontx_fpadev {
	rte_spinlock_t lock;
	uint8_t	total_gpool_cnt;
	struct fpavf_res pool[FPA_VF_MAX];
};

static struct octeontx_fpadev fpadev;

RTE_LOG_REGISTER(octeontx_logtype_fpavf, pmd.mempool.octeontx, NOTICE);

/* lock is taken by caller */
static int
octeontx_fpa_gpool_alloc(unsigned int object_size)
{
	uint16_t global_domain = octeontx_get_global_domain();
	struct fpavf_res *res = NULL;
	unsigned int sz128;
	int i;

	sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);

	for (i = 0; i < FPA_VF_MAX; i++) {

		/* Skip VFs that are not mapped or are already in use */
		if ((fpadev.pool[i].bar0 == NULL) ||
		    (fpadev.pool[i].is_inuse == true) ||
		    (fpadev.pool[i].domain_id != global_domain))
			continue;

		res = &fpadev.pool[i];

		RTE_ASSERT(res->domain_id != (uint16_t)~0);
		RTE_ASSERT(res->vf_id != (uint16_t)~0);
		RTE_ASSERT(res->stack_ln_ptr != 0);

		if (res->sz128 == 0) {
			res->sz128 = sz128;
			fpavf_log_dbg("gpool %d blk_sz %d", res->vf_id,
				      sz128);

			return res->vf_id;
		}
	}

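	/* No unused FPA VF matching this domain is available */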
	return -ENOSPC;
}

static __rte_always_inline struct fpavf_res *
octeontx_get_fpavf(uint16_t gpool)
{
	uint16_t global_domain = octeontx_get_global_domain();
	int i;

	for (i = 0; i < FPA_VF_MAX; i++) {
		if (fpadev.pool[i].domain_id != global_domain)
			continue;
		if (fpadev.pool[i].vf_id != gpool)
			continue;

		return &fpadev.pool[i];
	}

	return NULL;
}

/* lock is taken by caller */
static __rte_always_inline uintptr_t
octeontx_fpa_gpool2handle(uint16_t gpool)
{
	struct fpavf_res *res = NULL;

	RTE_ASSERT(gpool < FPA_VF_MAX);
	res = octeontx_get_fpavf(gpool);
	if (res == NULL)
		return 0;

	return (uintptr_t)res->bar0 | gpool;
}

static __rte_always_inline bool
octeontx_fpa_handle_valid(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;
	int i;
	bool ret = false;

	if (unlikely(!handle))
		return ret;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* get the bar address */
	handle &= ~(uint64_t)FPA_GPOOL_MASK;
	for (i = 0; i < FPA_VF_MAX; i++) {
		if ((uintptr_t)fpadev.pool[i].bar0 != handle)
			continue;

		/* validate gpool */
		if (gpool != fpadev.pool[i].vf_id)
			return false;

		res = &fpadev.pool[i];

		if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
		    res->stack_ln_ptr == 0)
			ret = false;
		else
			ret = true;
		break;
	}

	return ret;
}

static int
octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
			  signed short buf_offset, unsigned int max_buf_count)
{
	void *memptr = NULL;
	rte_iova_t phys_addr;
	unsigned int memsz;
	struct fpavf_res *fpa = NULL;
	uint64_t reg;
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = -1;

	fpa = octeontx_get_fpavf(gpool);
	if (fpa == NULL)
		return -EINVAL;

	memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
			FPA_LN_SIZE;

	/* Round-up to page size */
	memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1);
	memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
	if (memptr == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/* Configure stack */
	fpa->pool_stack_base = memptr;
	phys_addr = rte_malloc_virt2iova(memptr);

	buf_size /= FPA_LN_SIZE;

	/* POOL setup */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	buf_offset /= FPA_LN_SIZE;
	reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
		POOL_ENA;

	cfg.aid = 0;
	cfg.pool_cfg = reg;
	cfg.pool_stack_base = phys_addr;
	cfg.pool_stack_end = phys_addr + memsz;
	cfg.aura_cfg = (1 << 9);

	ret = octeontx_mbox_send(&hdr, &cfg,
				 sizeof(struct octeontx_mbox_fpa_cfg),
				 &resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64 " aura_cfg %" PRIx64,
		      fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
		      cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);

	/* The pool is now in use */
	fpa->is_inuse = true;

err:
	if (ret < 0)
		rte_free(memptr);

	return ret;
}

static int
octeontx_fpapf_pool_destroy(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	struct fpavf_res *fpa = NULL;
	int ret = -1;

	fpa = octeontx_get_fpavf(gpool_index);
	if (fpa == NULL)
		return -EINVAL;

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	/* reset and free the pool */
	cfg.aid = 0;
	cfg.pool_cfg = 0;
	cfg.pool_stack_base = 0;
	cfg.pool_stack_end = 0;
	cfg.aura_cfg = 0;

	ret = octeontx_mbox_send(&hdr, &cfg,
				 sizeof(struct octeontx_mbox_fpa_cfg),
				 &resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	ret = 0;
err:
	/* In any case, free the pool stack memory */
	rte_free(fpa->pool_stack_base);
	fpa->pool_stack_base = NULL;
	return ret;
}

static int
octeontx_fpapf_aura_attach(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_ATTACHAURA;
	hdr.vfid = gpool_index;
	hdr.res_code = 0;
	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
	cfg.aid = 0;

	ret = octeontx_mbox_send(&hdr, &cfg,
				 sizeof(struct octeontx_mbox_fpa_cfg),
				 &resp, sizeof(resp));
	if (ret < 0) {
		fpavf_log_err("Could not attach fpa aura %d to pool %d. Err=%d. FuncErr=%d",
			      FPA_AURA_IDX(gpool_index), gpool_index, ret,
			      hdr.res_code);
		ret = -EACCES;
		goto err;
	}
err:
	return ret;
}

static int
octeontx_fpapf_aura_detach(unsigned int gpool_index)
{
	struct octeontx_mbox_fpa_cfg cfg = {0};
	struct octeontx_mbox_hdr hdr = {0};
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	cfg.aid = 0;
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_DETACHAURA;
	hdr.vfid = gpool_index;
	ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d",
			      FPA_AURA_IDX(gpool_index), ret,
			      hdr.res_code);
		ret = -EINVAL;
	}

err:
	return ret;
}

int
octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
			      void *memva, uint16_t gpool)
{
	uint64_t va_end;

	if (unlikely(!handle))
		return -ENODEV;

	va_end = (uintptr_t)memva + memsz;
	va_end &= ~RTE_CACHE_LINE_MASK;

	/* VHPOOL setup */
	fpavf_write64((uintptr_t)memva,
		      (void *)((uintptr_t)handle +
		      FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(va_end,
		      (void *)((uintptr_t)handle +
		      FPA_VF_VHPOOL_END_ADDR(gpool)));
	return 0;
}

static int
octeontx_fpapf_start_count(uint16_t gpool_index)
{
	int ret = 0;
	struct octeontx_mbox_hdr hdr = {0};

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_START_COUNT;
	hdr.vfid = gpool_index;
	ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Could not start buffer counting for FPA pool %d. Err=%d. FuncErr=%d",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
		goto err;
	}

err:
	return ret;
}

static __rte_always_inline int
octeontx_fpavf_free(unsigned int gpool)
{
	struct fpavf_res *res = octeontx_get_fpavf(gpool);
	int ret = 0;

	if (gpool >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	/* Pool is free */
	if (res != NULL)
		res->is_inuse = false;

err:
	return ret;
}

static __rte_always_inline int
octeontx_gpool_free(uint16_t gpool)
{
	struct fpavf_res *res = octeontx_get_fpavf(gpool);

	if (res && res->sz128 != 0) {
		res->sz128 = 0;
		return 0;
	}
	return -EINVAL;
}

/*
 * Return buffer size for a given pool
 */
int
octeontx_fpa_bufpool_block_size(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	res = octeontx_get_fpavf(gpool);
	return res ? FPA_CACHE_LINE_2_OBJSZ(res->sz128) : 0;
}

int
octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
	uint64_t cnt, limit, avail;
	uint8_t gpool;
	uint16_t gaura;
	uintptr_t pool_bar;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	/* get the aura */
	gaura = octeontx_fpa_bufpool_gaura(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
			   FPA_VF_VHAURA_CNT(gaura)));
	limit = fpavf_read64((void *)((uintptr_t)pool_bar +
			     FPA_VF_VHAURA_CNT_LIMIT(gaura)));

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
			     FPA_VF_VHPOOL_AVAILABLE(gpool)));

	return RTE_MIN(avail, (limit - cnt));
}

uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
			    unsigned int buf_offset, int node_id)
{
	unsigned int gpool;
	unsigned int gaura;
	uintptr_t gpool_handle;
	uintptr_t pool_bar;
	int res;

	RTE_SET_USED(node_id);
	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);

	octeontx_mbox_init();
	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
	if (object_size > FPA_MAX_OBJ_SIZE) {
		errno = EINVAL;
		goto error_end;
	}

	rte_spinlock_lock(&fpadev.lock);
	res = octeontx_fpa_gpool_alloc(object_size);

	/* Bail if failed */
	if (unlikely(res < 0)) {
		errno = res;
		goto error_unlock;
	}

	/* get fpavf */
	gpool = res;

	/* get pool handle */
	gpool_handle = octeontx_fpa_gpool2handle(gpool);
	if (!octeontx_fpa_handle_valid(gpool_handle)) {
		errno = ENOSPC;
		goto error_gpool_free;
	}

	/* Get pool bar address from handle */
	pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;

	res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
					object_count);
	if (res < 0) {
		errno = res;
		goto error_gpool_free;
	}

	/* populate AURA fields */
	res = octeontx_fpapf_aura_attach(gpool);
	if (res < 0) {
		errno = res;
		goto error_pool_destroy;
	}

	gaura = FPA_AURA_IDX(gpool);

	/* Release lock */
	rte_spinlock_unlock(&fpadev.lock);

	/* populate AURA registers */
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
		      FPA_VF_VHAURA_CNT(gaura)));
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
		      FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
		      FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	octeontx_fpapf_start_count(gpool);

	return gpool_handle;

error_pool_destroy:
	octeontx_fpavf_free(gpool);
	octeontx_fpapf_pool_destroy(gpool);
error_gpool_free:
	octeontx_gpool_free(gpool);
error_unlock:
	rte_spinlock_unlock(&fpadev.lock);
error_end:
	return (uintptr_t)NULL;
}

/*
 * Destroy a buffer pool.
 */
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
{
	void **node, **curr, *head = NULL;
	uint64_t sz;
	uint64_t cnt, avail;
	uint8_t gpool;
	uint16_t gaura;
	uintptr_t pool_bar;
	int ret;

	RTE_SET_USED(node_id);

	/* Wait for all outstanding writes to be committed */
	rte_smp_wmb();

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the pool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	/* get the aura */
	gaura = octeontx_fpa_bufpool_gaura(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	/* Check for no outstanding buffers */
	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
			   FPA_VF_VHAURA_CNT(gaura)));
	if (cnt) {
		fpavf_log_dbg("buffer exist in pool cnt %" PRId64, cnt);
		return -EBUSY;
	}

	rte_spinlock_lock(&fpadev.lock);

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
			     FPA_VF_VHPOOL_AVAILABLE(gpool)));

	/* Prepare to empty the entire POOL */
	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
		      FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
		      FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	/* Empty the pool */
	/* Invalidate the POOL */
	octeontx_gpool_free(gpool);

	/* Process all buffers in the pool */
	while (avail--) {

		/* Yank a buffer from the pool */
		node = (void *)(uintptr_t)
			fpavf_read64((void *)
				     (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));

		if (node == NULL) {
			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf",
				      gaura, avail);
			break;
		}

		/* Insert it into an ordered linked list */
		for (curr = &head; curr[0] != NULL; curr = curr[0]) {
			if ((uintptr_t)node <= (uintptr_t)curr[0])
				break;
		}
		node[0] = curr[0];
		curr[0] = node;
	}

	/* Verify the linked list to be a perfect series */
	sz = octeontx_fpa_bufpool_block_size(handle) << 7;
	for (curr = head; curr != NULL && curr[0] != NULL;
	     curr = curr[0]) {
		if (curr == curr[0] ||
		    ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
			fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)",
				      gpool, curr, curr[0]);
		}
	}

	/* Disable pool operation */
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
		      FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
		      FPA_VF_VHPOOL_END_ADDR(gpool)));

	(void)octeontx_fpapf_pool_destroy(gpool);

	/* Deactivate the AURA */
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
		      FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
		      FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	ret = octeontx_fpapf_aura_detach(gpool);
	if (ret) {
		fpavf_log_err("Failed to detach gaura %u. error code=%d",
			      gpool, ret);
	}

	/* Free VF */
	(void)octeontx_fpavf_free(gpool);

	rte_spinlock_unlock(&fpadev.lock);
	return 0;
}

static void
octeontx_fpavf_setup(void)
{
	uint8_t i;
	static bool init_once;

	if (!init_once) {
		rte_spinlock_init(&fpadev.lock);
		fpadev.total_gpool_cnt = 0;

		for (i = 0; i < FPA_VF_MAX; i++) {

			fpadev.pool[i].domain_id = ~0;
			fpadev.pool[i].stack_ln_ptr = 0;
			fpadev.pool[i].sz128 = 0;
			fpadev.pool[i].bar0 = NULL;
			fpadev.pool[i].pool_stack_base = NULL;
			fpadev.pool[i].is_inuse = false;
		}
		init_once = 1;
	}
}

static int
octeontx_fpavf_identify(void *bar0)
{
	uint64_t val;
	uint16_t domain_id;
	uint16_t vf_id;
	uint64_t stack_ln_ptr;
	static uint16_t vf_idx;

	val = fpavf_read64((void *)((uintptr_t)bar0 +
			   FPA_VF_VHAURA_CNT_THRESHOLD(0)));

	domain_id = (val >> 8) & 0xffff;
	vf_id = (val >> 24) & 0xffff;

	stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
				    FPA_VF_VHPOOL_THRESHOLD(0)));
	if (vf_idx >= FPA_VF_MAX) {
		fpavf_log_err("vf_id(%d) greater than max vf (32)", vf_id);
		return -E2BIG;
	}

	fpadev.pool[vf_idx].domain_id = domain_id;
	fpadev.pool[vf_idx].vf_id = vf_id;
	fpadev.pool[vf_idx].bar0 = bar0;
	fpadev.pool[vf_idx].stack_ln_ptr = stack_ln_ptr;

	/* SUCCESS */
	return vf_idx++;
}

/* FPAVF PCIe device aka mempool probe */
static int
fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint8_t *idreg;
	int res;
	struct fpavf_res *fpa = NULL;

	RTE_SET_USED(pci_drv);
	RTE_SET_USED(fpa);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL) {
		fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
		return -ENODEV;
	}
	idreg = pci_dev->mem_resource[0].addr;

	octeontx_fpavf_setup();

	res = octeontx_fpavf_identify(idreg);
	if (res < 0)
		return -1;

	fpa = &fpadev.pool[res];
	fpadev.total_gpool_cnt++;
	rte_wmb();

	fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
		      fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
		      fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);

	return 0;
}

static const struct rte_pci_id pci_fpavf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVICE_ID_OCTEONTX_FPA_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_fpavf = {
	.id_table = pci_fpavf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = fpavf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);
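
/*
 * Illustrative usage sketch (comment only, not part of the driver logic):
 * a mempool backend is expected to drive the helpers exported above roughly
 * as follows. The sizes, the VA range (memva/memsz) and the omitted error
 * handling are placeholder assumptions, not values taken from this file.
 *
 *	uintptr_t handle;
 *
 *	handle = octeontx_fpa_bufpool_create(obj_size, obj_count, buf_off, 0);
 *	octeontx_fpavf_pool_set_range(handle, memsz, memva,
 *				      octeontx_fpa_bufpool_gpool(handle));
 *	... buffers are then allocated/freed through the VF BAR registers ...
 *	octeontx_fpa_bufpool_destroy(handle, 0);
 */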