/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/mman.h>

#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_mbuf.h>

#include "octeontx_mbox.h"
#include "octeontx_fpavf.h"

/* FPA Mbox Message */
#define IDENTIFY		0x0

#define FPA_CONFIGSET		0x1
#define FPA_CONFIGGET		0x2
#define FPA_START_COUNT		0x3
#define FPA_STOP_COUNT		0x4
#define FPA_ATTACHAURA		0x5
#define FPA_DETACHAURA		0x6
#define FPA_SETAURALVL		0x7
#define FPA_GETAURALVL		0x8

#define FPA_COPROC		0x1

/* fpa mbox struct */
struct octeontx_mbox_fpa_cfg {
	int		aid;
	uint64_t	pool_cfg;
	uint64_t	pool_stack_base;
	uint64_t	pool_stack_end;
	uint64_t	aura_cfg;
};

struct __attribute__((__packed__)) gen_req {
	uint32_t	value;
};

struct __attribute__((__packed__)) idn_req {
	uint8_t	domain_id;
};

struct __attribute__((__packed__)) gen_resp {
	uint16_t	domain_id;
	uint16_t	vfid;
};

struct __attribute__((__packed__)) dcfg_resp {
	uint8_t	sso_count;
	uint8_t	ssow_count;
	uint8_t	fpa_count;
	uint8_t	pko_count;
	uint8_t	tim_count;
	uint8_t	net_port_count;
	uint8_t	virt_port_count;
};

#define FPA_MAX_POOL	32
#define FPA_PF_PAGE_SZ	4096

#define FPA_LN_SIZE	128
#define FPA_ROUND_UP(x, size) \
	((((unsigned long)(x)) + size-1) & (~(size-1)))
#define FPA_OBJSZ_2_CACHE_LINE(sz)	(((sz) + RTE_CACHE_LINE_MASK) >> 7)
#define FPA_CACHE_LINE_2_OBJSZ(sz)	((sz) << 7)

#define POOL_ENA			(0x1 << 0)
#define POOL_DIS			(0x0 << 0)
#define POOL_SET_NAT_ALIGN		(0x1 << 1)
#define POOL_DIS_NAT_ALIGN		(0x0 << 1)
#define POOL_STYPE(x)			(((x) & 0x1) << 2)
#define POOL_LTYPE(x)			(((x) & 0x3) << 3)
#define POOL_BUF_OFFSET(x)		(((x) & 0x7fffULL) << 16)
#define POOL_BUF_SIZE(x)		(((x) & 0x7ffULL) << 32)

struct fpavf_res {
	void		*pool_stack_base;
	void		*bar0;
	uint64_t	stack_ln_ptr;
	uint16_t	domain_id;
	uint16_t	vf_id;	/* gpool_id */
	uint16_t	sz128;	/* Block size in cache lines */
	bool		is_inuse;
};

struct octeontx_fpadev {
	rte_spinlock_t		lock;
	uint8_t			total_gpool_cnt;
	struct fpavf_res	pool[FPA_VF_MAX];
};

static struct octeontx_fpadev fpadev;

int octeontx_logtype_fpavf;
int octeontx_logtype_fpavf_mbox;

RTE_INIT(otx_pool_init_log);
static void
otx_pool_init_log(void)
{
	octeontx_logtype_fpavf = rte_log_register("pmd.otx.mempool");
	if (octeontx_logtype_fpavf >= 0)
		rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);

	octeontx_logtype_fpavf_mbox = rte_log_register("pmd.otx.mempool.mbox");
	if (octeontx_logtype_fpavf_mbox >= 0)
		rte_log_set_level(octeontx_logtype_fpavf_mbox, RTE_LOG_NOTICE);
}

/* lock is taken by caller */
static int
octeontx_fpa_gpool_alloc(unsigned int object_size)
{
	struct fpavf_res *res = NULL;
	uint16_t gpool;
	unsigned int sz128;

	sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);

	for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {

		/* Skip VFs that are not mapped or already in use */
		if ((fpadev.pool[gpool].bar0 == NULL) ||
		    (fpadev.pool[gpool].is_inuse == true))
			continue;

		res = &fpadev.pool[gpool];

		RTE_ASSERT(res->domain_id != (uint16_t)~0);
		RTE_ASSERT(res->vf_id != (uint16_t)~0);
		RTE_ASSERT(res->stack_ln_ptr != 0);

		if (res->sz128 == 0) {
			res->sz128 = sz128;

			fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
			return gpool;
		}
	}

	return -ENOSPC;
}

/* lock is taken by caller */
static __rte_always_inline uintptr_t
octeontx_fpa_gpool2handle(uint16_t gpool)
{
	struct fpavf_res *res = NULL;

	RTE_ASSERT(gpool < FPA_VF_MAX);

	res = &fpadev.pool[gpool];
	return (uintptr_t)res->bar0 | gpool;
}

static __rte_always_inline bool
octeontx_fpa_handle_valid(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;
	int i;
	bool ret = false;

	if (unlikely(!handle))
		return ret;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* get the bar address */
	handle &= ~(uint64_t)FPA_GPOOL_MASK;
	for (i = 0; i < FPA_VF_MAX; i++) {
		if ((uintptr_t)fpadev.pool[i].bar0 != handle)
			continue;

		/* validate gpool */
		if (gpool != i)
			return false;

		res = &fpadev.pool[i];

		if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
		    res->stack_ln_ptr == 0)
			ret = false;
		else
			ret = true;
		break;
	}

	return ret;
}

static int
octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
			  signed short buf_offset, unsigned int max_buf_count)
{
	void *memptr = NULL;
	rte_iova_t phys_addr;
	unsigned int memsz;
	struct fpavf_res *fpa = NULL;
	uint64_t reg;
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = -1;

	fpa = &fpadev.pool[gpool];
	memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
			FPA_LN_SIZE;

	/* Round-up to page size */
	memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1);
	memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
	if (memptr == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/* Configure stack */
	fpa->pool_stack_base = memptr;
	phys_addr = rte_malloc_virt2iova(memptr);

	buf_size /= FPA_LN_SIZE;

	/* POOL setup */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	buf_offset /= FPA_LN_SIZE;
	reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
		POOL_ENA;

	cfg.aid = 0;
	cfg.pool_cfg = reg;
	cfg.pool_stack_base = phys_addr;
	cfg.pool_stack_end = phys_addr + memsz;
	cfg.aura_cfg = (1 << 9);

	ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64 " aura_cfg %" PRIx64 "\n",
		      fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
		      cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);

	/* Now pool is in_use */
	fpa->is_inuse = true;

err:
	if (ret < 0)
		rte_free(memptr);

	return ret;
}

static int
octeontx_fpapf_pool_destroy(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	struct fpavf_res *fpa = NULL;
	int ret = -1;

	fpa = &fpadev.pool[gpool_index];

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	/* reset and free the pool */
	cfg.aid = 0;
	cfg.pool_cfg = 0;
	cfg.pool_stack_base = 0;
	cfg.pool_stack_end = 0;
	cfg.aura_cfg = 0;

	ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	ret = 0;
err:
	/* In any case, free the pool stack memory */
	rte_free(fpa->pool_stack_base);
	fpa->pool_stack_base = NULL;
	return ret;
}

static int
octeontx_fpapf_aura_attach(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_ATTACHAURA;
	hdr.vfid = gpool_index;
	hdr.res_code = 0;
	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
	cfg.aid = gpool_index; /* gpool is gaura */

	ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		fpavf_log_err("Could not attach fpa ");
		fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
			      gpool_index, gpool_index, ret, hdr.res_code);
		ret = -EACCES;
		goto err;
	}
err:
	return ret;
}

static int
octeontx_fpapf_aura_detach(unsigned int gpool_index)
{
	struct octeontx_mbox_fpa_cfg cfg = {0};
	struct octeontx_mbox_hdr hdr = {0};
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	cfg.aid = gpool_index; /* gpool is gaura */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_DETACHAURA;
	hdr.vfid = gpool_index;
	ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
	}

err:
	return ret;
}

int
octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
			      void *memva, uint16_t gpool)
{
	uint64_t va_end;

	if (unlikely(!handle))
		return -ENODEV;

	va_end = (uintptr_t)memva + memsz;
	va_end &= ~RTE_CACHE_LINE_MASK;

	/* VHPOOL setup */
	fpavf_write64((uintptr_t)memva,
		      (void *)((uintptr_t)handle +
		      FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(va_end,
		      (void *)((uintptr_t)handle +
		      FPA_VF_VHPOOL_END_ADDR(gpool)));
	return 0;
}

static int
octeontx_fpapf_start_count(uint16_t gpool_index)
{
	int ret = 0;
	struct octeontx_mbox_hdr hdr = {0};

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_START_COUNT;
	hdr.vfid = gpool_index;
	ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Could not start buffer counting for ");
		fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
		goto err;
	}

err:
	return ret;
}

static __rte_always_inline int
octeontx_fpavf_free(unsigned int gpool)
{
	int ret = 0;

	if (gpool >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	/* Pool is free */
	fpadev.pool[gpool].is_inuse = false;

err:
	return ret;
}

static __rte_always_inline int
octeontx_gpool_free(uint16_t gpool)
{
	if (fpadev.pool[gpool].sz128 != 0) {
		fpadev.pool[gpool].sz128 = 0;
		return 0;
	}
	return -EINVAL;
}

/*
 * Return buffer size for a given pool
 */
int
octeontx_fpa_bufpool_block_size(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	res = &fpadev.pool[gpool];
	return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
}

int
octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
	uint64_t cnt, limit, avail;
	uint8_t gpool;
	uintptr_t pool_bar;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT(gpool)));
	limit = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT_LIMIT(gpool)));

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	return RTE_MIN(avail, (limit - cnt));
}

uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
			    unsigned int buf_offset, int node_id)
{
	unsigned int gpool;
	uintptr_t gpool_handle;
	uintptr_t pool_bar;
	int res;

	RTE_SET_USED(node_id);
	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);

	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
	if (object_size > FPA_MAX_OBJ_SIZE) {
		errno = EINVAL;
		goto error_end;
	}

	rte_spinlock_lock(&fpadev.lock);
	res = octeontx_fpa_gpool_alloc(object_size);

	/* Bail if failed */
	if (unlikely(res < 0)) {
		errno = res;
		goto error_unlock;
	}

	/* get fpavf */
	gpool = res;

	/* get pool handle */
	gpool_handle = octeontx_fpa_gpool2handle(gpool);
	if (!octeontx_fpa_handle_valid(gpool_handle)) {
		errno = ENOSPC;
		goto error_gpool_free;
	}

	/* Get pool bar address from handle */
	pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;

	res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
					object_count);
	if (res < 0) {
		errno = res;
		goto error_gpool_free;
	}

	/* populate AURA fields */
	res = octeontx_fpapf_aura_attach(gpool);
	if (res < 0) {
		errno = res;
		goto error_pool_destroy;
	}

	/* Release lock */
	rte_spinlock_unlock(&fpadev.lock);

	/* populate AURA registers */
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT(gpool)));
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gpool)));
	fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));

	octeontx_fpapf_start_count(gpool);

	return gpool_handle;

error_pool_destroy:
	octeontx_fpavf_free(gpool);
	octeontx_fpapf_pool_destroy(gpool);
error_gpool_free:
	octeontx_gpool_free(gpool);
error_unlock:
	rte_spinlock_unlock(&fpadev.lock);
error_end:
	return (uintptr_t)NULL;
}

/*
 * Destroy a buffer pool.
 */
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
{
	void **node, **curr, *head = NULL;
	uint64_t sz;
	uint64_t cnt, avail;
	uint8_t gpool;
	uintptr_t pool_bar;
	int ret;

	RTE_SET_USED(node_id);

	/* Wait for all outstanding writes to be committed */
	rte_smp_wmb();

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the pool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	/* Check for no outstanding buffers */
	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
					FPA_VF_VHAURA_CNT(gpool)));
	if (cnt) {
		fpavf_log_dbg("buffers exist in pool, cnt %" PRId64 "\n", cnt);
		return -EBUSY;
	}

	rte_spinlock_lock(&fpadev.lock);

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	/* Prepare to empty the entire POOL */
	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gpool)));
	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));

	/* Empty the pool */
	/* Invalidate the POOL */
	octeontx_gpool_free(gpool);

	/* Process all buffers in the pool */
	while (avail--) {

		/* Yank a buffer from the pool */
		node = (void *)(uintptr_t)
			fpavf_read64((void *)
				(pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));

		if (node == NULL) {
			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
				      gpool, avail);
			break;
		}

		/* Insert it into an ordered linked list */
		for (curr = &head; curr[0] != NULL; curr = curr[0]) {
			if ((uintptr_t)node <= (uintptr_t)curr[0])
				break;
		}
		node[0] = curr[0];
		curr[0] = node;
	}

	/* Verify the linked list to be a perfect series */
	sz = octeontx_fpa_bufpool_block_size(handle) << 7;
	for (curr = head; curr != NULL && curr[0] != NULL;
	     curr = curr[0]) {
		if (curr == curr[0] ||
		    ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
			fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
				      gpool, curr, curr[0]);
		}
	}

	/* Disable pool operation */
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_END_ADDR(gpool)));

	(void)octeontx_fpapf_pool_destroy(gpool);

	/* Deactivate the AURA */
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gpool)));
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));

	ret = octeontx_fpapf_aura_detach(gpool);
	if (ret) {
		fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
			      gpool, ret);
	}

	/* Free VF */
	(void)octeontx_fpavf_free(gpool);

	rte_spinlock_unlock(&fpadev.lock);
	return 0;
}

static void
octeontx_fpavf_setup(void)
{
	uint8_t i;
	static bool init_once;

	if (!init_once) {
		rte_spinlock_init(&fpadev.lock);
		fpadev.total_gpool_cnt = 0;

		for (i = 0; i < FPA_VF_MAX; i++) {

			fpadev.pool[i].domain_id = ~0;
			fpadev.pool[i].stack_ln_ptr = 0;
			fpadev.pool[i].sz128 = 0;
			fpadev.pool[i].bar0 = NULL;
			fpadev.pool[i].pool_stack_base = NULL;
			fpadev.pool[i].is_inuse = false;
		}
		init_once = 1;
	}
}

/* Read domain id, VF id and stack line pointer from BAR0 and register the VF */
static int
octeontx_fpavf_identify(void *bar0)
{
	uint64_t val;
	uint16_t domain_id;
	uint16_t vf_id;
	uint64_t stack_ln_ptr;

	val = fpavf_read64((void *)((uintptr_t)bar0 +
				FPA_VF_VHAURA_CNT_THRESHOLD(0)));

	domain_id = (val >> 8) & 0xffff;
	vf_id = (val >> 24) & 0xffff;

	stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
					FPA_VF_VHPOOL_THRESHOLD(0)));
	if (vf_id >= FPA_VF_MAX) {
		fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
		return -1;
	}

	if (fpadev.pool[vf_id].is_inuse) {
		fpavf_log_err("vf_id %d is_inuse\n", vf_id);
		return -1;
	}

	fpadev.pool[vf_id].domain_id = domain_id;
	fpadev.pool[vf_id].vf_id = vf_id;
	fpadev.pool[vf_id].bar0 = bar0;
	fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;

	/* SUCCESS */
	return vf_id;
}

/* FPAVF pcie device aka mempool probe */
static int
fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint8_t *idreg;
	int res;
	struct fpavf_res *fpa = NULL;

	RTE_SET_USED(pci_drv);
	RTE_SET_USED(fpa);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL) {
		fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
		return -ENODEV;
	}
	idreg = pci_dev->mem_resource[0].addr;

	octeontx_fpavf_setup();

	res = octeontx_fpavf_identify(idreg);
	if (res < 0)
		return -1;

	fpa = &fpadev.pool[res];
	fpadev.total_gpool_cnt++;
	rte_wmb();

	fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
		      fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
		      fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);

	return 0;
}

static const struct rte_pci_id pci_fpavf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_FPA_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_fpavf = {
	.id_table = pci_fpavf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = fpavf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);
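
/*
 * Illustrative usage sketch (not part of the driver): a mempool handler
 * built on this VF would typically create a pool, program the VA range
 * that backs the objects, and tear the pool down when done. The function
 * names below are the exported helpers in this file/octeontx_fpavf.h; the
 * variables obj_size, obj_count, memva, memsz and node are hypothetical
 * placeholders supplied by the caller.
 *
 *	uintptr_t handle;
 *
 *	handle = octeontx_fpa_bufpool_create(obj_size, obj_count,
 *					     OCTEONTX_FPAVF_BUF_OFFSET, node);
 *	if (!handle)
 *		return -errno;	(errno is set on failure)
 *
 *	(Program the VHPOOL start/end registers with the object memory)
 *	octeontx_fpavf_pool_set_range(handle, memsz, memva,
 *				      octeontx_fpa_bufpool_gpool(handle));
 *
 *	...
 *	octeontx_fpa_bufpool_destroy(handle, node);
 */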