/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/mman.h>

#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_mbuf.h>

#include "octeontx_mbox.h"
#include "octeontx_fpavf.h"

/* FPA mbox messages */
#define IDENTIFY		0x0

#define FPA_CONFIGSET		0x1
#define FPA_CONFIGGET		0x2
#define FPA_START_COUNT		0x3
#define FPA_STOP_COUNT		0x4
#define FPA_ATTACHAURA		0x5
#define FPA_DETACHAURA		0x6
#define FPA_SETAURALVL		0x7
#define FPA_GETAURALVL		0x8

#define FPA_COPROC		0x1

/* FPA mbox configuration payload */
struct octeontx_mbox_fpa_cfg {
	int		aid;
	uint64_t	pool_cfg;
	uint64_t	pool_stack_base;
	uint64_t	pool_stack_end;
	uint64_t	aura_cfg;
};
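
/*
 * All PF requests below follow the same pattern: fill an
 * octeontx_mbox_hdr (coprocessor, message id, vfid) plus this
 * configuration payload and hand both to octeontx_mbox_send(), e.g.:
 *
 *	hdr.coproc = FPA_COPROC;
 *	hdr.msg = FPA_CONFIGSET;
 *	hdr.vfid = fpa->vf_id;
 *	ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), &resp, sizeof(resp));
 */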

struct __attribute__((__packed__)) gen_req {
	uint32_t	value;
};

struct __attribute__((__packed__)) idn_req {
	uint8_t	domain_id;
};

struct __attribute__((__packed__)) gen_resp {
	uint16_t	domain_id;
	uint16_t	vfid;
};

struct __attribute__((__packed__)) dcfg_resp {
	uint8_t	sso_count;
	uint8_t	ssow_count;
	uint8_t	fpa_count;
	uint8_t	pko_count;
	uint8_t	tim_count;
	uint8_t	net_port_count;
	uint8_t	virt_port_count;
};

#define FPA_MAX_POOL	32
#define FPA_PF_PAGE_SZ	4096

#define FPA_LN_SIZE	128
#define FPA_ROUND_UP(x, size) \
	((((unsigned long)(x)) + ((size) - 1)) & (~((size) - 1)))
#define FPA_OBJSZ_2_CACHE_LINE(sz)	(((sz) + RTE_CACHE_LINE_MASK) >> 7)
#define FPA_CACHE_LINE_2_OBJSZ(sz)	((sz) << 7)

#define POOL_ENA			(0x1 << 0)
#define POOL_DIS			(0x0 << 0)
#define POOL_SET_NAT_ALIGN		(0x1 << 1)
#define POOL_DIS_NAT_ALIGN		(0x0 << 1)
#define POOL_STYPE(x)			(((x) & 0x1) << 2)
#define POOL_LTYPE(x)			(((x) & 0x3) << 3)
#define POOL_BUF_OFFSET(x)		(((x) & 0x7fffULL) << 16)
#define POOL_BUF_SIZE(x)		(((x) & 0x7ffULL) << 32)

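/*
 * Worked example (mirroring octeontx_fpapf_pool_setup() below): for
 * 2048-byte naturally-aligned buffers at offset 0, sizes are first
 * scaled to 128-byte line units and the pool config word becomes
 *
 *	reg = POOL_BUF_SIZE(2048 / FPA_LN_SIZE) | POOL_BUF_OFFSET(0) |
 *		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
 *		POOL_ENA;
 */
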
struct fpavf_res {
	void		*pool_stack_base;
	void		*bar0;
	uint64_t	stack_ln_ptr;
	uint16_t	domain_id;
	uint16_t	vf_id;	/* gpool_id */
	uint16_t	sz128;	/* Block size in cache lines */
	bool		is_inuse;
};

struct octeontx_fpadev {
	rte_spinlock_t lock;
	uint8_t	total_gpool_cnt;
	struct fpavf_res pool[FPA_VF_MAX];
};

static struct octeontx_fpadev fpadev;

int octeontx_logtype_fpavf;
int octeontx_logtype_fpavf_mbox;

RTE_INIT(otx_pool_init_log)
{
	octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
	if (octeontx_logtype_fpavf >= 0)
		rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
}

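/*
 * Pick the first FPA VF that is mapped and idle (sz128 == 0) and
 * reserve it by recording the requested block size; the returned
 * gpool id is simply the VF's index in the fpadev table.
 */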
/* lock is taken by caller */
static int
octeontx_fpa_gpool_alloc(unsigned int object_size)
{
	struct fpavf_res *res = NULL;
	uint16_t gpool;
	unsigned int sz128;

	sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);

	for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {

		/* Skip VFs that are not mapped or are already in use */
		if ((fpadev.pool[gpool].bar0 == NULL) ||
		    (fpadev.pool[gpool].is_inuse == true))
			continue;

		res = &fpadev.pool[gpool];

		RTE_ASSERT(res->domain_id != (uint16_t)~0);
		RTE_ASSERT(res->vf_id != (uint16_t)~0);
		RTE_ASSERT(res->stack_ln_ptr != 0);

		if (res->sz128 == 0) {
			res->sz128 = sz128;

			fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
			return gpool;
		}
	}

	return -ENOSPC;
}

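/*
 * A pool handle packs the VF BAR0 virtual address and the gpool id
 * into one word: BAR0 mappings are assumed to be aligned beyond
 * FPA_GPOOL_MASK, leaving the low bits free for the gpool id, which
 * octeontx_fpa_bufpool_gpool() later extracts.
 */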
/* lock is taken by caller */
static __rte_always_inline uintptr_t
octeontx_fpa_gpool2handle(uint16_t gpool)
{
	struct fpavf_res *res = NULL;

	RTE_ASSERT(gpool < FPA_VF_MAX);

	res = &fpadev.pool[gpool];
	return (uintptr_t)res->bar0 | gpool;
}

static __rte_always_inline bool
octeontx_fpa_handle_valid(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;
	int i;
	bool ret = false;

	if (unlikely(!handle))
		return ret;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* get the bar address */
	handle &= ~(uint64_t)FPA_GPOOL_MASK;
	for (i = 0; i < FPA_VF_MAX; i++) {
		if ((uintptr_t)fpadev.pool[i].bar0 != handle)
			continue;

		/* validate gpool */
		if (gpool != i)
			return false;

		res = &fpadev.pool[i];

		if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
		    res->stack_ln_ptr == 0)
			ret = false;
		else
			ret = true;
		break;
	}

	return ret;
}

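/*
 * The PF-managed pool stack stores free-buffer pointers in 128-byte
 * lines; stack_ln_ptr, read back at identify time, evidently gives the
 * pointers stored per line, so roughly max_buf_count / stack_ln_ptr
 * lines are needed, rounded up to FPA_LN_SIZE and then to the PF page
 * size below.
 */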
static int
octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
			  signed short buf_offset, unsigned int max_buf_count)
{
	void *memptr = NULL;
	rte_iova_t phys_addr;
	unsigned int memsz;
	struct fpavf_res *fpa = NULL;
	uint64_t reg;
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = -1;

	fpa = &fpadev.pool[gpool];
	memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
			FPA_LN_SIZE;

	/* Round-up to page size */
	memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1);
	memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
	if (memptr == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/* Configure stack */
	fpa->pool_stack_base = memptr;
	phys_addr = rte_malloc_virt2iova(memptr);

	buf_size /= FPA_LN_SIZE;

	/* POOL setup */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	buf_offset /= FPA_LN_SIZE;
	reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
		POOL_ENA;

	cfg.aid = FPA_AURA_IDX(gpool);
	cfg.pool_cfg = reg;
	cfg.pool_stack_base = phys_addr;
	cfg.pool_stack_end = phys_addr + memsz;
	cfg.aura_cfg = (1 << 9);

	ret = octeontx_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64 "\n",
		      fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
		      cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);

	/* The pool is now in use */
	fpa->is_inuse = true;

err:
	if (ret < 0)
		rte_free(memptr);

	return ret;
}

static int
octeontx_fpapf_pool_destroy(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	struct fpavf_res *fpa = NULL;
	int ret = -1;

	fpa = &fpadev.pool[gpool_index];

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	/* reset and free the pool */
	cfg.aid = 0;
	cfg.pool_cfg = 0;
	cfg.pool_stack_base = 0;
	cfg.pool_stack_end = 0;
	cfg.aura_cfg = 0;

	ret = octeontx_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	ret = 0;
err:
	/* In any case, free the pool stack memory */
	rte_free(fpa->pool_stack_base);
	fpa->pool_stack_base = NULL;
	return ret;
}

static int
octeontx_fpapf_aura_attach(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_ATTACHAURA;
	hdr.vfid = gpool_index;
	hdr.res_code = 0;
	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
	cfg.aid = FPA_AURA_IDX(gpool_index);

	ret = octeontx_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		fpavf_log_err("Could not attach fpa aura %d to pool %d. Err=%d. FuncErr=%d\n",
			      FPA_AURA_IDX(gpool_index), gpool_index, ret,
			      hdr.res_code);
		ret = -EACCES;
		goto err;
	}
err:
	return ret;
}

static int
octeontx_fpapf_aura_detach(unsigned int gpool_index)
{
	struct octeontx_mbox_fpa_cfg cfg = {0};
	struct octeontx_mbox_hdr hdr = {0};
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	cfg.aid = FPA_AURA_IDX(gpool_index);
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_DETACHAURA;
	hdr.vfid = gpool_index;
	ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
			      FPA_AURA_IDX(gpool_index), ret,
			      hdr.res_code);
		ret = -EINVAL;
	}

err:
	return ret;
}

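/*
 * Program the VHPOOL start/end registers with the VA range backing the
 * pool, presumably so the hardware can sanity-check buffer addresses;
 * note the end address is truncated to cache-line alignment.
 * octeontx_fpa_bufpool_destroy() later writes ~0 to both registers to
 * disable the pool.
 */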
int
octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
			  void *memva, uint16_t gpool)
{
	uint64_t va_end;

	if (unlikely(!handle))
		return -ENODEV;

	va_end = (uintptr_t)memva + memsz;
	va_end &= ~RTE_CACHE_LINE_MASK;

	/* VHPOOL setup */
	fpavf_write64((uintptr_t)memva,
			 (void *)((uintptr_t)handle +
			 FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(va_end,
			 (void *)((uintptr_t)handle +
			 FPA_VF_VHPOOL_END_ADDR(gpool)));
	return 0;
}

static int
octeontx_fpapf_start_count(uint16_t gpool_index)
{
	int ret = 0;
	struct octeontx_mbox_hdr hdr = {0};

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_START_COUNT;
	hdr.vfid = gpool_index;
	ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Could not start buffer counting for FPA pool %d. Err=%d. FuncErr=%d\n",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
		goto err;
	}

err:
	return ret;
}

static __rte_always_inline int
octeontx_fpavf_free(unsigned int gpool)
{
	int ret = 0;

	if (gpool >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	/* Pool is free */
	fpadev.pool[gpool].is_inuse = false;

err:
	return ret;
}

static __rte_always_inline int
octeontx_gpool_free(uint16_t gpool)
{
	if (fpadev.pool[gpool].sz128 != 0) {
		fpadev.pool[gpool].sz128 = 0;
		return 0;
	}
	return -EINVAL;
}

/*
 * Return buffer size for a given pool
 */
int
octeontx_fpa_bufpool_block_size(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	res = &fpadev.pool[gpool];
	return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
}

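/*
 * The usable free count is bounded both by what the pool still holds
 * (VHPOOL_AVAILABLE) and by the aura's remaining allocation quota
 * (limit - cnt), hence the RTE_MIN of the two below.
 */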
int
octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
	uint64_t cnt, limit, avail;
	uint8_t gpool;
	uint16_t gaura;
	uintptr_t pool_bar;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	/* get the aura */
	gaura = octeontx_fpa_bufpool_gaura(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT(gaura)));
	limit = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT_LIMIT(gaura)));

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	return RTE_MIN(avail, (limit - cnt));
}

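/*
 * Pool creation sequence: reserve a free gpool, derive its handle,
 * have the PF set up the pool stack, attach the aura, then program the
 * aura count/limit/threshold registers directly and start buffer
 * counting.
 *
 * Minimal usage sketch (illustrative only; the sizes and offset below
 * are assumptions, not requirements):
 *
 *	uintptr_t handle;
 *
 *	handle = octeontx_fpa_bufpool_create(2048, 4096,
 *					     OCTEONTX_FPAVF_BUF_OFFSET, 0);
 *	if (!handle)
 *		... errno holds the failure reason ...
 */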
uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
				unsigned int buf_offset, int node_id)
{
	unsigned int gpool;
	unsigned int gaura;
	uintptr_t gpool_handle;
	uintptr_t pool_bar;
	int res;

	RTE_SET_USED(node_id);
	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);

	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
	if (object_size > FPA_MAX_OBJ_SIZE) {
		errno = EINVAL;
		goto error_end;
	}

	rte_spinlock_lock(&fpadev.lock);
	res = octeontx_fpa_gpool_alloc(object_size);

	/* Bail if failed */
	if (unlikely(res < 0)) {
		errno = res;
		goto error_unlock;
	}

	/* get fpavf */
	gpool = res;

	/* get pool handle */
	gpool_handle = octeontx_fpa_gpool2handle(gpool);
	if (!octeontx_fpa_handle_valid(gpool_handle)) {
		errno = ENOSPC;
		goto error_gpool_free;
	}

	/* Get pool bar address from handle */
	pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;

	res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
					object_count);
	if (res < 0) {
		errno = res;
		goto error_gpool_free;
	}

	/* populate AURA fields */
	res = octeontx_fpapf_aura_attach(gpool);
	if (res < 0) {
		errno = res;
		goto error_pool_destroy;
	}

	gaura = FPA_AURA_IDX(gpool);

	/* Release lock */
	rte_spinlock_unlock(&fpadev.lock);

	/* populate AURA registers */
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT(gaura)));
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	octeontx_fpapf_start_count(gpool);

	return gpool_handle;

error_pool_destroy:
	octeontx_fpavf_free(gpool);
	octeontx_fpapf_pool_destroy(gpool);
error_gpool_free:
	octeontx_gpool_free(gpool);
error_unlock:
	rte_spinlock_unlock(&fpadev.lock);
error_end:
	return (uintptr_t)NULL;
}

/*
 * Destroy a buffer pool.
 */
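/*
 * Draining works by popping every buffer through the VHAURA alloc
 * doorbell and threading the buffers into an address-ordered linked
 * list, using each buffer's first word as the next pointer. The
 * verification loop below then checks that successive addresses form a
 * perfect series separated by the computed stride before the pool and
 * aura are torn down.
 */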
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
{
	void **node, **curr, *head = NULL;
	uint64_t sz;
	uint64_t cnt, avail;
	uint8_t gpool;
	uint16_t gaura;
	uintptr_t pool_bar;
	int ret;

	RTE_SET_USED(node_id);

	/* Wait for all outstanding writes to be committed */
	rte_smp_wmb();

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the pool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	/* get the aura */
	gaura = octeontx_fpa_bufpool_gaura(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	/* Check for no outstanding buffers */
	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
					FPA_VF_VHAURA_CNT(gaura)));
	if (cnt) {
		fpavf_log_dbg("buffers exist in pool, cnt %" PRId64 "\n", cnt);
		return -EBUSY;
	}

	rte_spinlock_lock(&fpadev.lock);

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	/* Prepare to empty the entire POOL */
	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	/* Invalidate the POOL */
	octeontx_gpool_free(gpool);

	/* Process all buffers in the pool */
	while (avail--) {

		/* Yank a buffer from the pool */
		node = (void *)(uintptr_t)
			fpavf_read64((void *)
				    (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));

		if (node == NULL) {
			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
				      gaura, avail);
			break;
		}

		/* Insert it into an ordered linked list */
		for (curr = &head; curr[0] != NULL; curr = curr[0]) {
			if ((uintptr_t)node <= (uintptr_t)curr[0])
				break;
		}
		node[0] = curr[0];
		curr[0] = node;
	}

	/* Verify the linked list to be a perfect series */
	sz = octeontx_fpa_bufpool_block_size(handle) << 7;
	for (curr = head; curr != NULL && curr[0] != NULL;
		curr = curr[0]) {
		if (curr == curr[0] ||
			((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
			fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
				      gpool, curr, curr[0]);
		}
	}

	/* Disable pool operation */
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_END_ADDR(gpool)));

	(void)octeontx_fpapf_pool_destroy(gpool);

	/* Deactivate the AURA */
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gaura)));
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

	ret = octeontx_fpapf_aura_detach(gpool);
	if (ret) {
		fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
			      gpool, ret);
	}

	/* Free VF */
	(void)octeontx_fpavf_free(gpool);

	rte_spinlock_unlock(&fpadev.lock);
	return 0;
}

static void
octeontx_fpavf_setup(void)
{
	uint8_t i;
	static bool init_once;

	if (!init_once) {
		rte_spinlock_init(&fpadev.lock);
		fpadev.total_gpool_cnt = 0;

		for (i = 0; i < FPA_VF_MAX; i++) {

			fpadev.pool[i].domain_id = ~0;
			fpadev.pool[i].stack_ln_ptr = 0;
			fpadev.pool[i].sz128 = 0;
			fpadev.pool[i].bar0 = NULL;
			fpadev.pool[i].pool_stack_base = NULL;
			fpadev.pool[i].is_inuse = false;
		}
		init_once = true;
	}
}

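/*
 * At probe time the PF is expected to have seeded two VF registers:
 * VHAURA_CNT_THRESHOLD(0) carries the domain id (bits 23:8) and the
 * VF id (bits 39:24), and VHPOOL_THRESHOLD(0) carries the stack line
 * pointer count; decode them and register the VF in the fpadev table.
 */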
static int
octeontx_fpavf_identify(void *bar0)
{
	uint64_t val;
	uint16_t domain_id;
	uint16_t vf_id;
	uint64_t stack_ln_ptr;

	val = fpavf_read64((void *)((uintptr_t)bar0 +
				FPA_VF_VHAURA_CNT_THRESHOLD(0)));

	domain_id = (val >> 8) & 0xffff;
	vf_id = (val >> 24) & 0xffff;

	stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
					FPA_VF_VHPOOL_THRESHOLD(0)));
	if (vf_id >= FPA_VF_MAX) {
		fpavf_log_err("vf_id(%d) greater than max vf (%d)\n",
			      vf_id, FPA_VF_MAX);
		return -1;
	}

	if (fpadev.pool[vf_id].is_inuse) {
		fpavf_log_err("vf_id %d is_inuse\n", vf_id);
		return -1;
	}

	fpadev.pool[vf_id].domain_id = domain_id;
	fpadev.pool[vf_id].vf_id = vf_id;
	fpadev.pool[vf_id].bar0 = bar0;
	fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;

	/* SUCCESS */
	return vf_id;
}

/* FPAVF PCIe device aka mempool probe */
static int
fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint8_t *idreg;
	int res;
	struct fpavf_res *fpa = NULL;

	RTE_SET_USED(pci_drv);
	RTE_SET_USED(fpa);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL) {
		fpavf_log_err("Empty bars %p\n", pci_dev->mem_resource[0].addr);
		return -ENODEV;
	}
	idreg = pci_dev->mem_resource[0].addr;

	octeontx_fpavf_setup();

	res = octeontx_fpavf_identify(idreg);
	if (res < 0)
		return -1;

	fpa = &fpadev.pool[res];
	fpadev.total_gpool_cnt++;
	rte_wmb();

	fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
		       fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
		       fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);

	return 0;
}

static const struct rte_pci_id pci_fpavf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_FPA_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_fpavf = {
	.id_table = pci_fpavf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = fpavf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);
807