/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/mman.h>

#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_mbuf.h>

#include "octeontx_mbox.h"
#include "octeontx_fpavf.h"

/* FPA Mbox Message */
#define IDENTIFY		0x0

#define FPA_CONFIGSET		0x1
#define FPA_CONFIGGET		0x2
#define FPA_START_COUNT		0x3
#define FPA_STOP_COUNT		0x4
#define FPA_ATTACHAURA		0x5
#define FPA_DETACHAURA		0x6
#define FPA_SETAURALVL		0x7
#define FPA_GETAURALVL		0x8

#define FPA_COPROC		0x1

/* fpa mbox struct */
struct octeontx_mbox_fpa_cfg {
	int		aid;
	uint64_t	pool_cfg;
	uint64_t	pool_stack_base;
	uint64_t	pool_stack_end;
	uint64_t	aura_cfg;
};

struct __rte_packed gen_req {
	uint32_t	value;
};

struct __rte_packed idn_req {
	uint8_t	domain_id;
};

struct __rte_packed gen_resp {
	uint16_t	domain_id;
	uint16_t	vfid;
};

struct __rte_packed dcfg_resp {
	uint8_t	sso_count;
	uint8_t	ssow_count;
	uint8_t	fpa_count;
	uint8_t	pko_count;
	uint8_t	tim_count;
	uint8_t	net_port_count;
	uint8_t	virt_port_count;
};

#define FPA_MAX_POOL	32
#define FPA_PF_PAGE_SZ	4096

#define FPA_LN_SIZE	128
#define FPA_ROUND_UP(x, size) \
	((((unsigned long)(x)) + (size) - 1) & (~((size) - 1)))
#define FPA_OBJSZ_2_CACHE_LINE(sz)	(((sz) + RTE_CACHE_LINE_MASK) >> 7)
#define FPA_CACHE_LINE_2_OBJSZ(sz)	((sz) << 7)

#define POOL_ENA			(0x1 << 0)
#define POOL_DIS			(0x0 << 0)
#define POOL_SET_NAT_ALIGN		(0x1 << 1)
#define POOL_DIS_NAT_ALIGN		(0x0 << 1)
#define POOL_STYPE(x)			(((x) & 0x1) << 2)
#define POOL_LTYPE(x)			(((x) & 0x3) << 3)
#define POOL_BUF_OFFSET(x)		(((x) & 0x7fffULL) << 16)
#define POOL_BUF_SIZE(x)		(((x) & 0x7ffULL) << 32)

struct fpavf_res {
	void		*pool_stack_base;
	void		*bar0;
	uint64_t	stack_ln_ptr;
	uint16_t	domain_id;
	uint16_t	vf_id;	/* gpool_id */
	uint16_t	sz128;	/* Block size in cache lines */
	bool		is_inuse;
};

struct octeontx_fpadev {
	rte_spinlock_t lock;
	uint8_t	total_gpool_cnt;
	struct fpavf_res pool[FPA_VF_MAX];
};

static struct octeontx_fpadev fpadev;

RTE_LOG_REGISTER(octeontx_logtype_fpavf, pmd.mempool.octeontx, NOTICE);

/* lock is taken by caller */
static int
octeontx_fpa_gpool_alloc(unsigned int object_size)
{
	uint16_t global_domain = octeontx_get_global_domain();
	struct fpavf_res *res = NULL;
	unsigned int sz128;
	int i;

	sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);

	for (i = 0; i < FPA_VF_MAX; i++) {

		/* Skip VFs that are not mapped or are already in use */
		if ((fpadev.pool[i].bar0 == NULL) ||
		    (fpadev.pool[i].is_inuse == true) ||
		    (fpadev.pool[i].domain_id != global_domain))
			continue;

		res = &fpadev.pool[i];

		RTE_ASSERT(res->domain_id != (uint16_t)~0);
		RTE_ASSERT(res->vf_id != (uint16_t)~0);
		RTE_ASSERT(res->stack_ln_ptr != 0);

		if (res->sz128 == 0) {
			res->sz128 = sz128;
			fpavf_log_dbg("gpool %d blk_sz %d\n", res->vf_id,
				      sz128);

			return res->vf_id;
		}
	}

	return -ENOSPC;
}

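/* Look up the fpavf_res entry that owns the given gpool id within the
 * current application domain; returns NULL if no such VF is mapped.
 */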
static __rte_always_inline struct fpavf_res *
octeontx_get_fpavf(uint16_t gpool)
{
	uint16_t global_domain = octeontx_get_global_domain();
	int i;

	for (i = 0; i < FPA_VF_MAX; i++) {
		if (fpadev.pool[i].domain_id != global_domain)
			continue;
		if (fpadev.pool[i].vf_id != gpool)
			continue;

		return &fpadev.pool[i];
	}

	return NULL;
}

/* lock is taken by caller */
static __rte_always_inline uintptr_t
octeontx_fpa_gpool2handle(uint16_t gpool)
{
	struct fpavf_res *res = NULL;

	RTE_ASSERT(gpool < FPA_VF_MAX);
	res = octeontx_get_fpavf(gpool);
	if (res == NULL)
		return 0;

	return (uintptr_t)res->bar0 | gpool;
}

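/* A pool handle is the VF BAR0 address with the gpool id packed into the
 * low FPA_GPOOL_MASK bits; a handle is valid only if it maps back to a
 * configured VF in the local fpadev table.
 */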
static __rte_always_inline bool
octeontx_fpa_handle_valid(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;
	int i;
	bool ret = false;

	if (unlikely(!handle))
		return ret;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* get the bar address */
	handle &= ~(uint64_t)FPA_GPOOL_MASK;
	for (i = 0; i < FPA_VF_MAX; i++) {
		if ((uintptr_t)fpadev.pool[i].bar0 != handle)
			continue;

		/* validate gpool */
		if (gpool != fpadev.pool[i].vf_id)
			return false;

		res = &fpadev.pool[i];

		if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
		    res->stack_ln_ptr == 0)
			ret = false;
		else
			ret = true;
		break;
	}

	return ret;
}

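/* Allocate the pool stack memory for the given gpool and request the FPA PF,
 * over the mailbox, to configure the pool (buffer size, offset, natural
 * alignment) and its stack range.
 */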
static int
octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
			  signed short buf_offset, unsigned int max_buf_count)
{
	void *memptr = NULL;
	rte_iova_t phys_addr;
	unsigned int memsz;
	struct fpavf_res *fpa = NULL;
	uint64_t reg;
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = -1;

	fpa = octeontx_get_fpavf(gpool);
	if (fpa == NULL)
		return -EINVAL;

	memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
			FPA_LN_SIZE;

	/* Round-up to page size */
	memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1);
	memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
	if (memptr == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/* Configure stack */
	fpa->pool_stack_base = memptr;
	phys_addr = rte_malloc_virt2iova(memptr);

	buf_size /= FPA_LN_SIZE;

	/* POOL setup */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	buf_offset /= FPA_LN_SIZE;
	reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
		POOL_ENA;

	cfg.aid = 0;
	cfg.pool_cfg = reg;
	cfg.pool_stack_base = phys_addr;
	cfg.pool_stack_end = phys_addr + memsz;
	cfg.aura_cfg = (1 << 9);

	ret = octeontx_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64 "\n",
		      fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
		      cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);

	/* Now pool is in_use */
	fpa->is_inuse = true;

err:
	if (ret < 0)
		rte_free(memptr);

	return ret;
}

static int
octeontx_fpapf_pool_destroy(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	struct fpavf_res *fpa = NULL;
	int ret = -1;

	fpa = octeontx_get_fpavf(gpool_index);
	if (fpa == NULL)
		return -EINVAL;

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	/* reset and free the pool */
	cfg.aid = 0;
	cfg.pool_cfg = 0;
	cfg.pool_stack_base = 0;
	cfg.pool_stack_end = 0;
	cfg.aura_cfg = 0;

	ret = octeontx_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	ret = 0;
err:
	/* In any case, free the pool stack memory */
	rte_free(fpa->pool_stack_base);
	fpa->pool_stack_base = NULL;
	return ret;
}

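/* Ask the FPA PF to attach an aura to the given gpool so that buffers can be
 * allocated and freed through it.
 */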
static int
octeontx_fpapf_aura_attach(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_ATTACHAURA;
	hdr.vfid = gpool_index;
	hdr.res_code = 0;
	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
	cfg.aid = 0;

	ret = octeontx_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		fpavf_log_err("Could not attach fpa ");
		fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
			      FPA_AURA_IDX(gpool_index), gpool_index, ret,
			      hdr.res_code);
		ret = -EACCES;
		goto err;
	}
err:
	return ret;
}

static int
octeontx_fpapf_aura_detach(unsigned int gpool_index)
{
	struct octeontx_mbox_fpa_cfg cfg = {0};
	struct octeontx_mbox_hdr hdr = {0};
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	cfg.aid = 0;
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_DETACHAURA;
	hdr.vfid = gpool_index;
	ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
			      FPA_AURA_IDX(gpool_index), ret,
			      hdr.res_code);
		ret = -EINVAL;
	}

err:
	return ret;
}

int
octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
			  void *memva, uint16_t gpool)
{
	uint64_t va_end;

	if (unlikely(!handle))
		return -ENODEV;

	va_end = (uintptr_t)memva + memsz;
	va_end &= ~RTE_CACHE_LINE_MASK;

	/* VHPOOL setup */
	fpavf_write64((uintptr_t)memva,
			 (void *)((uintptr_t)handle +
			 FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(va_end,
			 (void *)((uintptr_t)handle +
			 FPA_VF_VHPOOL_END_ADDR(gpool)));
	return 0;
}

static int
octeontx_fpapf_start_count(uint16_t gpool_index)
{
	int ret = 0;
	struct octeontx_mbox_hdr hdr = {0};

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_START_COUNT;
	hdr.vfid = gpool_index;
	ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Could not start buffer counting for ");
		fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
		goto err;
	}

err:
	return ret;
}

static __rte_always_inline int
octeontx_fpavf_free(unsigned int gpool)
{
	struct fpavf_res *res = octeontx_get_fpavf(gpool);
	int ret = 0;

	if (gpool >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	/* Pool is free */
	if (res != NULL)
		res->is_inuse = false;

err:
	return ret;
}

static __rte_always_inline int
octeontx_gpool_free(uint16_t gpool)
{
	struct fpavf_res *res = octeontx_get_fpavf(gpool);

	if (res && res->sz128 != 0) {
		res->sz128 = 0;
		return 0;
	}
	return -EINVAL;
}

/*
 * Return buffer size for a given pool
 */
int
octeontx_fpa_bufpool_block_size(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	res = octeontx_get_fpavf(gpool);
	return res ? FPA_CACHE_LINE_2_OBJSZ(res->sz128) : 0;
}

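/* The free buffer count is bounded both by the pool's available count and by
 * the aura's remaining allocation budget (limit - cnt); report the smaller.
 */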
int
octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
	uint64_t cnt, limit, avail;
	uint8_t gpool;
	uint16_t gaura;
	uintptr_t pool_bar;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	/* get the aura */
	gaura = octeontx_fpa_bufpool_gaura(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT(gaura)));
	limit = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT_LIMIT(gaura)));

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	return RTE_MIN(avail, (limit - cnt));
}

uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
				unsigned int buf_offset, int node_id)
{
	unsigned int gpool;
	unsigned int gaura;
	uintptr_t gpool_handle;
	uintptr_t pool_bar;
	int res;

	RTE_SET_USED(node_id);
	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);

	octeontx_mbox_init();
	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
	if (object_size > FPA_MAX_OBJ_SIZE) {
		errno = EINVAL;
		goto error_end;
	}

	rte_spinlock_lock(&fpadev.lock);
	res = octeontx_fpa_gpool_alloc(object_size);

	/* Bail if failed */
	if (unlikely(res < 0)) {
		errno = res;
		goto error_unlock;
	}

	/* get fpavf */
	gpool = res;

	/* get pool handle */
	gpool_handle = octeontx_fpa_gpool2handle(gpool);
	if (!octeontx_fpa_handle_valid(gpool_handle)) {
		errno = ENOSPC;
		goto error_gpool_free;
	}

	/* Get pool bar address from handle */
	pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;

	res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
					object_count);
	if (res < 0) {
		errno = res;
		goto error_gpool_free;
	}

	/* populate AURA fields */
	res = octeontx_fpapf_aura_attach(gpool);
	if (res < 0) {
		errno = res;
		goto error_pool_destroy;
	}

	gaura = FPA_AURA_IDX(gpool);

	/* Release lock */
	rte_spinlock_unlock(&fpadev.lock);

	/* populate AURA registers */
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT(gaura)));
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	octeontx_fpapf_start_count(gpool);

	return gpool_handle;

error_pool_destroy:
	octeontx_fpavf_free(gpool);
	octeontx_fpapf_pool_destroy(gpool);
error_gpool_free:
	octeontx_gpool_free(gpool);
error_unlock:
	rte_spinlock_unlock(&fpadev.lock);
error_end:
	return (uintptr_t)NULL;
}

/*
 * Destroy a buffer pool.
 */
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
{
	void **node, **curr, *head = NULL;
	uint64_t sz;
	uint64_t cnt, avail;
	uint8_t gpool;
	uint16_t gaura;
	uintptr_t pool_bar;
	int ret;

	RTE_SET_USED(node_id);

	/* Wait for all outstanding writes to be committed */
	rte_smp_wmb();

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the pool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	/* get the aura */
	gaura = octeontx_fpa_bufpool_gaura(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	/* Check for no outstanding buffers */
	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
					FPA_VF_VHAURA_CNT(gaura)));
	if (cnt) {
		fpavf_log_dbg("buffers still in pool, cnt %" PRId64 "\n", cnt);
		return -EBUSY;
	}

	rte_spinlock_lock(&fpadev.lock);

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	/* Prepare to empty the entire POOL */
	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	/* Empty the pool */
	/* Invalidate the POOL */
	octeontx_gpool_free(gpool);

	/* Process all buffers in the pool */
	while (avail--) {

		/* Yank a buffer from the pool */
		node = (void *)(uintptr_t)
			fpavf_read64((void *)
				    (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));

		if (node == NULL) {
			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
				      gaura, avail);
			break;
		}

		/* Insert it into an ordered linked list */
		for (curr = &head; curr[0] != NULL; curr = curr[0]) {
			if ((uintptr_t)node <= (uintptr_t)curr[0])
				break;
		}
		node[0] = curr[0];
		curr[0] = node;
	}

	/* Verify the linked list to be a perfect series */
	sz = octeontx_fpa_bufpool_block_size(handle) << 7;
	for (curr = head; curr != NULL && curr[0] != NULL;
		curr = curr[0]) {
		if (curr == curr[0] ||
			((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
			fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
				      gpool, curr, curr[0]);
		}
	}

	/* Disable pool operation */
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_END_ADDR(gpool)));

	(void)octeontx_fpapf_pool_destroy(gpool);

	/* Deactivate the AURA */
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	ret = octeontx_fpapf_aura_detach(gpool);
	if (ret) {
		fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
			      gpool, ret);
	}

	/* Free VF */
	(void)octeontx_fpavf_free(gpool);

	rte_spinlock_unlock(&fpadev.lock);
	return 0;
}

static void
octeontx_fpavf_setup(void)
{
	uint8_t i;
	static bool init_once;

	if (!init_once) {
		rte_spinlock_init(&fpadev.lock);
		fpadev.total_gpool_cnt = 0;

		for (i = 0; i < FPA_VF_MAX; i++) {

			fpadev.pool[i].domain_id = ~0;
			fpadev.pool[i].stack_ln_ptr = 0;
			fpadev.pool[i].sz128 = 0;
			fpadev.pool[i].bar0 = NULL;
			fpadev.pool[i].pool_stack_base = NULL;
			fpadev.pool[i].is_inuse = false;
		}
		init_once = 1;
	}
}

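/* Read the domain id, VF id and stack line pointer advertised through BAR0
 * and record this VF in the next free slot of the fpadev table.
 */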
static int
octeontx_fpavf_identify(void *bar0)
{
	uint64_t val;
	uint16_t domain_id;
	uint16_t vf_id;
	uint64_t stack_ln_ptr;
	static uint16_t vf_idx;

	val = fpavf_read64((void *)((uintptr_t)bar0 +
				FPA_VF_VHAURA_CNT_THRESHOLD(0)));

	domain_id = (val >> 8) & 0xffff;
	vf_id = (val >> 24) & 0xffff;

	stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
					FPA_VF_VHPOOL_THRESHOLD(0)));
	if (vf_idx >= FPA_VF_MAX) {
		fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
		return -E2BIG;
	}

	fpadev.pool[vf_idx].domain_id = domain_id;
	fpadev.pool[vf_idx].vf_id = vf_id;
	fpadev.pool[vf_idx].bar0 = bar0;
	fpadev.pool[vf_idx].stack_ln_ptr = stack_ln_ptr;

	/* SUCCESS */
	return vf_idx++;
}

/* FPAVF pcie device aka mempool probe */
static int
fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint8_t *idreg;
	int res;
	struct fpavf_res *fpa = NULL;

	RTE_SET_USED(pci_drv);
	RTE_SET_USED(fpa);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL) {
		fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
		return -ENODEV;
	}
	idreg = pci_dev->mem_resource[0].addr;

	octeontx_fpavf_setup();

	res = octeontx_fpavf_identify(idreg);
	if (res < 0)
		return -1;

	fpa = &fpadev.pool[res];
	fpadev.total_gpool_cnt++;
	rte_wmb();

	fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
		       fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
		       fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);

	return 0;
}

static const struct rte_pci_id pci_fpavf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_FPA_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_fpavf = {
	.id_table = pci_fpavf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = fpavf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);