/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/mman.h>

#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_mbuf.h>

#include "octeontx_mbox.h"
#include "octeontx_fpavf.h"

/* FPA Mbox Message */
#define IDENTIFY		0x0

#define FPA_CONFIGSET		0x1
#define FPA_CONFIGGET		0x2
#define FPA_START_COUNT		0x3
#define FPA_STOP_COUNT		0x4
#define FPA_ATTACHAURA		0x5
#define FPA_DETACHAURA		0x6
#define FPA_SETAURALVL		0x7
#define FPA_GETAURALVL		0x8

#define FPA_COPROC		0x1

/* fpa mbox struct */
struct octeontx_mbox_fpa_cfg {
	int		aid;
	uint64_t	pool_cfg;
	uint64_t	pool_stack_base;
	uint64_t	pool_stack_end;
	uint64_t	aura_cfg;
};

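/*
 * The packed request/response structures below travel over the PF/VF
 * mailbox (octeontx_ssovf_mbox_send()), so their layout presumably has
 * to match what the PF driver expects byte for byte.
 */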
struct __attribute__((__packed__)) gen_req {
	uint32_t	value;
};

struct __attribute__((__packed__)) idn_req {
	uint8_t	domain_id;
};

struct __attribute__((__packed__)) gen_resp {
	uint16_t	domain_id;
	uint16_t	vfid;
};

struct __attribute__((__packed__)) dcfg_resp {
	uint8_t	sso_count;
	uint8_t	ssow_count;
	uint8_t	fpa_count;
	uint8_t	pko_count;
	uint8_t	tim_count;
	uint8_t	net_port_count;
	uint8_t	virt_port_count;
};

#define FPA_MAX_POOL	32
#define FPA_PF_PAGE_SZ	4096

#define FPA_LN_SIZE	128
#define FPA_ROUND_UP(x, size) \
	((((unsigned long)(x)) + (size) - 1) & (~((size) - 1)))
#define FPA_OBJSZ_2_CACHE_LINE(sz)	(((sz) + RTE_CACHE_LINE_MASK) >> 7)
#define FPA_CACHE_LINE_2_OBJSZ(sz)	((sz) << 7)
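/*
 * Worked example, assuming the 128B cache line these macros encode:
 * FPA_OBJSZ_2_CACHE_LINE(2048) = (2048 + 127) >> 7 = 16 lines, and
 * FPA_CACHE_LINE_2_OBJSZ(16) = 16 << 7 = 2048 bytes again.
 */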

#define POOL_ENA			(0x1 << 0)
#define POOL_DIS			(0x0 << 0)
#define POOL_SET_NAT_ALIGN		(0x1 << 1)
#define POOL_DIS_NAT_ALIGN		(0x0 << 1)
#define POOL_STYPE(x)			(((x) & 0x1) << 2)
#define POOL_LTYPE(x)			(((x) & 0x3) << 3)
#define POOL_BUF_OFFSET(x)		(((x) & 0x7fffULL) << 16)
#define POOL_BUF_SIZE(x)		(((x) & 0x7ffULL) << 32)
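/*
 * Resulting pool config word layout (bit positions follow directly from
 * the shifts above; field meanings are inferred from the macro names):
 *   bit 0       pool enable
 *   bit 1       natural-alignment enable
 *   bit 2       STYPE
 *   bits 4:3    LTYPE
 *   bits 30:16  buffer offset, in cache lines
 *   bits 42:32  buffer size, in cache lines
 */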

struct fpavf_res {
	void		*pool_stack_base;
	void		*bar0;
	uint64_t	stack_ln_ptr;
	uint16_t	domain_id;
	uint16_t	vf_id;	/* gpool_id */
	uint16_t	sz128;	/* Block size in cache lines */
	bool		is_inuse;
};

struct octeontx_fpadev {
	rte_spinlock_t lock;
	uint8_t	total_gpool_cnt;
	struct fpavf_res pool[FPA_VF_MAX];
};

static struct octeontx_fpadev fpadev;

/* lock is taken by caller */
static int
octeontx_fpa_gpool_alloc(unsigned int object_size)
{
	struct fpavf_res *res = NULL;
	uint16_t gpool;
	unsigned int sz128;

	sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);

	for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {

		/* Skip VFs that are unmapped or already in use */
		if (fpadev.pool[gpool].bar0 == NULL ||
		    fpadev.pool[gpool].is_inuse)
			continue;

		res = &fpadev.pool[gpool];

		RTE_ASSERT(res->domain_id != (uint16_t)~0);
		RTE_ASSERT(res->vf_id != (uint16_t)~0);
		RTE_ASSERT(res->stack_ln_ptr != 0);

		if (res->sz128 == 0) {
			res->sz128 = sz128;

			fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
			return gpool;
		}
	}

	return -ENOSPC;
}

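/*
 * A pool handle packs the VF's BAR0 address and the gpool id into one
 * word: BAR0 is at least page aligned, so its low bits are free to carry
 * the id, and octeontx_fpa_bufpool_gpool()/FPA_GPOOL_MASK split the
 * handle back apart.
 */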
/* lock is taken by caller */
static __rte_always_inline uintptr_t
octeontx_fpa_gpool2handle(uint16_t gpool)
{
	struct fpavf_res *res = NULL;

	RTE_ASSERT(gpool < FPA_VF_MAX);

	res = &fpadev.pool[gpool];
	return (uintptr_t)res->bar0 | gpool;
}

static __rte_always_inline bool
octeontx_fpa_handle_valid(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;
	int i;
	bool ret = false;

	if (unlikely(!handle))
		return ret;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* get the bar address */
	handle &= ~(uint64_t)FPA_GPOOL_MASK;
	for (i = 0; i < FPA_VF_MAX; i++) {
		if ((uintptr_t)fpadev.pool[i].bar0 != handle)
			continue;

		/* validate gpool */
		if (gpool != i)
			return false;

		res = &fpadev.pool[i];

		if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
		    res->stack_ln_ptr == 0)
			ret = false;
		else
			ret = true;
		break;
	}

	return ret;
}

static int
octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
			  signed short buf_offset, unsigned int max_buf_count)
{
	void *memptr = NULL;
	rte_iova_t phys_addr;
	unsigned int memsz;
	struct fpavf_res *fpa = NULL;
	uint64_t reg;
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = -1;

	fpa = &fpadev.pool[gpool];

	/*
	 * Size the pool stack: stack_ln_ptr, read back from the VF at
	 * probe time, gives the pointer capacity of one stack line.
	 */
	memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
			FPA_LN_SIZE;

	/* Round-up to page size */
	memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ - 1);
	memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
	if (memptr == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/* Configure stack */
	fpa->pool_stack_base = memptr;
	phys_addr = rte_malloc_virt2iova(memptr);

	buf_size /= FPA_LN_SIZE;

	/* POOL setup */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	buf_offset /= FPA_LN_SIZE;
	reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
		POOL_ENA;

	cfg.aid = 0;
	cfg.pool_cfg = reg;
	cfg.pool_stack_base = phys_addr;
	cfg.pool_stack_end = phys_addr + memsz;
	cfg.aura_cfg = (1 << 9);

	ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	fpavf_log_dbg("vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64 " aura_cfg %" PRIx64 "\n",
		      fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
		      cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);

	/* Now the pool is in use */
	fpa->is_inuse = true;

err:
	if (ret < 0)
		rte_free(memptr);

	return ret;
}

static int
octeontx_fpapf_pool_destroy(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	struct fpavf_res *fpa = NULL;
	int ret = -1;

	fpa = &fpadev.pool[gpool_index];

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	/* reset and free the pool */
	cfg.aid = 0;
	cfg.pool_cfg = 0;
	cfg.pool_stack_base = 0;
	cfg.pool_stack_end = 0;
	cfg.aura_cfg = 0;

	ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	ret = 0;
err:
	/* free the pool stack memory in any case */
	rte_free(fpa->pool_stack_base);
	fpa->pool_stack_base = NULL;
	return ret;
}

static int
octeontx_fpapf_aura_attach(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_ATTACHAURA;
	hdr.vfid = gpool_index;
	hdr.res_code = 0;
	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
	cfg.aid = gpool_index; /* gpool is gaura */

	ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		fpavf_log_err("Could not attach FPA aura %d to pool %d. Err=%d. FuncErr=%d\n",
			      gpool_index, gpool_index, ret, hdr.res_code);
		ret = -EACCES;
		goto err;
	}
err:
	return ret;
}

static int
octeontx_fpapf_aura_detach(unsigned int gpool_index)
{
	struct octeontx_mbox_fpa_cfg cfg = {0};
	struct octeontx_mbox_hdr hdr = {0};
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	cfg.aid = gpool_index; /* gpool is gaura */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_DETACHAURA;
	hdr.vfid = gpool_index;
	ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
	}

err:
	return ret;
}

int
octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
			  void *memva, uint16_t gpool)
{
	uint64_t va_end;

	if (unlikely(!handle))
		return -ENODEV;

	va_end = (uintptr_t)memva + memsz;
	va_end &= ~RTE_CACHE_LINE_MASK;

	/* VHPOOL setup */
	fpavf_write64((uintptr_t)memva,
			 (void *)((uintptr_t)handle +
			 FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(va_end,
			 (void *)((uintptr_t)handle +
			 FPA_VF_VHPOOL_END_ADDR(gpool)));
	return 0;
}

static int
octeontx_fpapf_start_count(uint16_t gpool_index)
{
	int ret = 0;
	struct octeontx_mbox_hdr hdr = {0};

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_START_COUNT;
	hdr.vfid = gpool_index;
	ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Could not start buffer counting for FPA pool %d. Err=%d. FuncErr=%d\n",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
		goto err;
	}

err:
	return ret;
}

static __rte_always_inline int
octeontx_fpavf_free(unsigned int gpool)
{
	int ret = 0;

	if (gpool >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	/* Pool is free */
	fpadev.pool[gpool].is_inuse = false;

err:
	return ret;
}

static __rte_always_inline int
octeontx_gpool_free(uint16_t gpool)
{
	if (fpadev.pool[gpool].sz128 != 0) {
		fpadev.pool[gpool].sz128 = 0;
		return 0;
	}
	return -EINVAL;
}

/*
 * Return buffer size for a given pool
 */
int
octeontx_fpa_bufpool_block_size(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	res = &fpadev.pool[gpool];
	return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
}

int
octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
	uint64_t cnt, limit, avail;
	uint8_t gpool;
	uintptr_t pool_bar;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT(gpool)));
	limit = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT_LIMIT(gpool)));

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	return RTE_MIN(avail, (limit - cnt));
}

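/*
 * Create a pool + aura pair and return an opaque handle. A sketch of the
 * expected calling sequence, using only this file's own API (error
 * handling elided; the real caller lives in the octeontx mempool ops):
 *
 *	handle = octeontx_fpa_bufpool_create(obj_sz, obj_cnt,
 *					     OCTEONTX_FPAVF_BUF_OFFSET, node);
 *	gpool = octeontx_fpa_bufpool_gpool(handle);
 *	octeontx_fpavf_pool_set_range(handle, memsz, memva, gpool);
 *	...
 *	octeontx_fpa_bufpool_destroy(handle, node);
 */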
uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
				unsigned int buf_offset, int node_id)
{
	unsigned int gpool;
	uintptr_t gpool_handle;
	uintptr_t pool_bar;
	int res;

	RTE_SET_USED(node_id);
	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);

	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
	if (object_size > FPA_MAX_OBJ_SIZE) {
		errno = EINVAL;
		goto error_end;
	}

	rte_spinlock_lock(&fpadev.lock);
	res = octeontx_fpa_gpool_alloc(object_size);

	/* Bail if failed */
	if (unlikely(res < 0)) {
		errno = -res; /* res holds a negative errno */
		goto error_unlock;
	}

	/* get fpavf */
	gpool = res;

	/* get pool handle */
	gpool_handle = octeontx_fpa_gpool2handle(gpool);
	if (!octeontx_fpa_handle_valid(gpool_handle)) {
		errno = ENOSPC;
		goto error_gpool_free;
	}

	/* Get pool bar address from handle */
	pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;

	res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
					object_count);
	if (res < 0) {
		errno = -res;
		goto error_gpool_free;
	}

	/* populate AURA fields */
	res = octeontx_fpapf_aura_attach(gpool);
	if (res < 0) {
		errno = -res;
		goto error_pool_destroy;
	}

	/* Release lock */
	rte_spinlock_unlock(&fpadev.lock);

	/* populate AURA registers */
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT(gpool)));
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
	fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));

	octeontx_fpapf_start_count(gpool);

	return gpool_handle;

error_pool_destroy:
	octeontx_fpavf_free(gpool);
	octeontx_fpapf_pool_destroy(gpool);
error_gpool_free:
	octeontx_gpool_free(gpool);
error_unlock:
	rte_spinlock_unlock(&fpadev.lock);
error_end:
	return (uintptr_t)NULL;
}

/*
 * Destroy a buffer pool.
 */
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
{
	void **node, **curr, *head = NULL;
	uint64_t sz;
	uint64_t cnt, avail;
	uint8_t gpool;
	uintptr_t pool_bar;
	int ret;

	RTE_SET_USED(node_id);

	/* Wait for all outstanding writes to be committed */
	rte_smp_wmb();

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the pool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	/* Check for no outstanding buffers */
	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
					FPA_VF_VHAURA_CNT(gpool)));
	if (cnt) {
		fpavf_log_dbg("buffers exist in pool, cnt %" PRId64 "\n", cnt);
		return -EBUSY;
	}

	rte_spinlock_lock(&fpadev.lock);

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	/* Prepare to empty the entire POOL */
	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));

	/* Empty the pool */
	/* Invalidate the POOL */
	octeontx_gpool_free(gpool);

	/* Process all buffers in the pool */
	while (avail--) {

		/* Yank a buffer from the pool */
		node = (void *)(uintptr_t)
			fpavf_read64((void *)
				    (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));

		if (node == NULL) {
			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
				      gpool, avail);
			break;
		}

		/* Insert it into an ordered linked list */
		for (curr = &head; curr[0] != NULL; curr = curr[0]) {
			if ((uintptr_t)node <= (uintptr_t)curr[0])
				break;
		}
		node[0] = curr[0];
		curr[0] = node;
	}

	/* Verify the linked list to be a perfect series */
	sz = octeontx_fpa_bufpool_block_size(handle) << 7;
	for (curr = head; curr != NULL && curr[0] != NULL;
		curr = curr[0]) {
		if (curr == curr[0] ||
			((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
			fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
				      gpool, curr, curr[0]);
		}
	}

	/* Disable pool operation */
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_END_ADDR(gpool)));

	(void)octeontx_fpapf_pool_destroy(gpool);

	/* Deactivate the AURA */
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gpool)));
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));

	ret = octeontx_fpapf_aura_detach(gpool);
	if (ret) {
		fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
			      gpool, ret);
	}

	/* Free VF */
	(void)octeontx_fpavf_free(gpool);

	rte_spinlock_unlock(&fpadev.lock);
	return 0;
}

static void
octeontx_fpavf_setup(void)
{
	uint8_t i;
	static bool init_once;

	if (!init_once) {
		rte_spinlock_init(&fpadev.lock);
		fpadev.total_gpool_cnt = 0;

		for (i = 0; i < FPA_VF_MAX; i++) {

			fpadev.pool[i].domain_id = ~0;
			fpadev.pool[i].stack_ln_ptr = 0;
			fpadev.pool[i].sz128 = 0;
			fpadev.pool[i].bar0 = NULL;
			fpadev.pool[i].pool_stack_base = NULL;
			fpadev.pool[i].is_inuse = false;
		}
		init_once = true;
	}
}

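/*
 * Decode the identity word the VF exposes through its first aura's
 * CNT_THRESHOLD register: per the shifts below, bits [23:8] carry the
 * domain id and bits [39:24] the VF id.
 */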
static int
octeontx_fpavf_identify(void *bar0)
{
	uint64_t val;
	uint16_t domain_id;
	uint16_t vf_id;
	uint64_t stack_ln_ptr;

	val = fpavf_read64((void *)((uintptr_t)bar0 +
				FPA_VF_VHAURA_CNT_THRESHOLD(0)));

	domain_id = (val >> 8) & 0xffff;
	vf_id = (val >> 24) & 0xffff;

	stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
					FPA_VF_VHPOOL_THRESHOLD(0)));
	if (vf_id >= FPA_VF_MAX) {
		fpavf_log_err("vf_id(%d) greater than max vf (%d)\n",
			      vf_id, FPA_VF_MAX);
		return -1;
	}

	if (fpadev.pool[vf_id].is_inuse) {
		fpavf_log_err("vf_id %d is_inuse\n", vf_id);
		return -1;
	}

	fpadev.pool[vf_id].domain_id = domain_id;
	fpadev.pool[vf_id].vf_id = vf_id;
	fpadev.pool[vf_id].bar0 = bar0;
	fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;

	/* SUCCESS */
	return vf_id;
}

/* FPAVF PCIe device aka mempool probe */
static int
fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint8_t *idreg;
	int res;
	struct fpavf_res *fpa = NULL;

	RTE_SET_USED(pci_drv);
	RTE_SET_USED(fpa);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL) {
		fpavf_log_err("Empty bar0 %p\n", pci_dev->mem_resource[0].addr);
		return -ENODEV;
	}
	idreg = pci_dev->mem_resource[0].addr;

	octeontx_fpavf_setup();

	res = octeontx_fpavf_identify(idreg);
	if (res < 0)
		return -1;

	fpa = &fpadev.pool[res];
	fpadev.total_gpool_cnt++;
	rte_wmb();

	fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
		       fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
		       fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);

	return 0;
}

static const struct rte_pci_id pci_fpavf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_FPA_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_fpavf = {
	.id_table = pci_fpavf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = fpavf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);