/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium Inc. 2017. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/mman.h>

#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_pci.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_mbuf.h>

#include <rte_pmd_octeontx_ssovf.h>
#include "octeontx_fpavf.h"

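/*
 * Overview (editorial summary of this file):
 *
 * The OCTEONTX FPA (Free Pool Allocator) is a hardware buffer manager.
 * Each FPA VF exposes one hardware pool ("gpool") and its counting
 * context ("gaura") through BAR0 registers, which this file programs
 * directly for the fast path.  Anything that needs PF privilege
 * (pool/aura configuration, buffer counting) is requested from the PF
 * over the SSO VF mailbox (octeontx_ssovf_mbox_send()) using the
 * FPA_* message codes below.
 */
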
/* FPA Mbox Message */
#define IDENTIFY		0x0

#define FPA_CONFIGSET		0x1
#define FPA_CONFIGGET		0x2
#define FPA_START_COUNT		0x3
#define FPA_STOP_COUNT		0x4
#define FPA_ATTACHAURA		0x5
#define FPA_DETACHAURA		0x6
#define FPA_SETAURALVL		0x7
#define FPA_GETAURALVL		0x8

#define FPA_COPROC		0x1

/* fpa mbox struct */
struct octeontx_mbox_fpa_cfg {
	int		aid;
	uint64_t	pool_cfg;
	uint64_t	pool_stack_base;
	uint64_t	pool_stack_end;
	uint64_t	aura_cfg;
};

struct __attribute__((__packed__)) gen_req {
	uint32_t	value;
};

struct __attribute__((__packed__)) idn_req {
	uint8_t	domain_id;
};

struct __attribute__((__packed__)) gen_resp {
	uint16_t	domain_id;
	uint16_t	vfid;
};

struct __attribute__((__packed__)) dcfg_resp {
	uint8_t	sso_count;
	uint8_t	ssow_count;
	uint8_t	fpa_count;
	uint8_t	pko_count;
	uint8_t	tim_count;
	uint8_t	net_port_count;
	uint8_t	virt_port_count;
};

#define FPA_MAX_POOL	32
#define FPA_PF_PAGE_SZ	4096

#define FPA_LN_SIZE	128
#define FPA_ROUND_UP(x, size) \
	((((unsigned long)(x)) + (size) - 1) & (~((size) - 1)))
#define FPA_OBJSZ_2_CACHE_LINE(sz)	(((sz) + RTE_CACHE_LINE_MASK) >> 7)
#define FPA_CACHE_LINE_2_OBJSZ(sz)	((sz) << 7)

#define POOL_ENA			(0x1 << 0)
#define POOL_DIS			(0x0 << 0)
#define POOL_SET_NAT_ALIGN		(0x1 << 1)
#define POOL_DIS_NAT_ALIGN		(0x0 << 1)
#define POOL_STYPE(x)			(((x) & 0x1) << 2)
#define POOL_LTYPE(x)			(((x) & 0x3) << 3)
#define POOL_BUF_OFFSET(x)		(((x) & 0x7fffULL) << 16)
#define POOL_BUF_SIZE(x)		(((x) & 0x7ffULL) << 32)
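
/*
 * Example (illustrative): the pool configuration value written at pool
 * setup is composed from the fields above.  For a 2048 B object
 * (16 cache lines) with no buffer offset:
 *
 *	reg = POOL_BUF_SIZE(16) | POOL_BUF_OFFSET(0) |
 *		POOL_LTYPE(0x2) | POOL_STYPE(0) |
 *		POOL_SET_NAT_ALIGN | POOL_ENA;
 *
 * i.e. buffer size in bits 42:32, buffer offset in bits 30:16, and the
 * type/alignment/enable controls in the low bits.
 */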

struct fpavf_res {
	void		*pool_stack_base;
	void		*bar0;
	uint64_t	stack_ln_ptr;
	uint16_t	domain_id;
	uint16_t	vf_id;	/* gpool_id */
	uint16_t	sz128;	/* Block size in cache lines */
	bool		is_inuse;
};

struct octeontx_fpadev {
	rte_spinlock_t lock;
	uint8_t	total_gpool_cnt;
	struct fpavf_res pool[FPA_VF_MAX];
};

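/*
 * Process-wide FPA VF registry: pool[] is indexed by gpool (== vf_id)
 * and populated at PCI probe time; lock serializes pool create/destroy.
 */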
static struct octeontx_fpadev fpadev;

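/*
 * Reserve the first mapped, idle FPA VF and record the pool's block
 * size (in 128 B cache lines) in it.  Returns the gpool index on
 * success, or -ENOSPC when no free VF is available.
 */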
/* lock is taken by caller */
static int
octeontx_fpa_gpool_alloc(unsigned int object_size)
{
	struct fpavf_res *res = NULL;
	uint16_t gpool;
	unsigned int sz128;

	sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);

	for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {

		/* Skip VFs that are not mapped or are already in use */
		if ((fpadev.pool[gpool].bar0 == NULL) ||
		    (fpadev.pool[gpool].is_inuse == true))
			continue;

		res = &fpadev.pool[gpool];

		RTE_ASSERT(res->domain_id != (uint16_t)~0);
		RTE_ASSERT(res->vf_id != (uint16_t)~0);
		RTE_ASSERT(res->stack_ln_ptr != 0);

		if (res->sz128 == 0) {
			res->sz128 = sz128;

			fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
			return gpool;
		}
	}

	return -ENOSPC;
}

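/*
 * Encode a pool handle as (bar0 | gpool): the gpool index is packed
 * into the low bits of the VF's BAR0 address, which relies on bar0
 * being aligned well beyond FPA_GPOOL_MASK.  The two halves are
 * recovered with octeontx_fpa_bufpool_gpool() and FPA_GPOOL_MASK.
 */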
/* lock is taken by caller */
static __rte_always_inline uintptr_t
octeontx_fpa_gpool2handle(uint16_t gpool)
{
	struct fpavf_res *res = NULL;

	RTE_ASSERT(gpool < FPA_VF_MAX);

	res = &fpadev.pool[gpool];
	if (unlikely(res == NULL))
		return 0;

	return (uintptr_t)res->bar0 | gpool;
}

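/*
 * Validate a handle: search the registry for a VF whose bar0 matches
 * the handle's address bits, then check that the embedded gpool index
 * matches and that the pool has been set up (non-zero sz128, known
 * domain, valid stack line pointer).
 */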
static __rte_always_inline bool
octeontx_fpa_handle_valid(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;
	int i;
	bool ret = false;

	if (unlikely(!handle))
		return ret;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* get the bar address */
	handle &= ~(uint64_t)FPA_GPOOL_MASK;
	for (i = 0; i < FPA_VF_MAX; i++) {
		if ((uintptr_t)fpadev.pool[i].bar0 != handle)
			continue;

		/* validate gpool */
		if (gpool != i)
			return false;

		res = &fpadev.pool[i];

		if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
		    res->stack_ln_ptr == 0)
			ret = false;
		else
			ret = true;
		break;
	}

	return ret;
}

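/*
 * Configure a pool through the PF mailbox: allocate the pool stack
 * (the memory in which the FPA hardware tracks free buffers), then
 * issue FPA_CONFIGSET with the pool and aura register images.
 *
 * Stack sizing sketch (hypothetical numbers): stack_ln_ptr reads as
 * the number of buffers tracked per 128 B stack line, so with
 * stack_ln_ptr == 16 and max_buf_count == 4096 the stack needs
 * 4096 / 16 == 256 lines; converted to bytes that is 256 * 128 ==
 * 32768 B, already a multiple of FPA_PF_PAGE_SZ after page rounding.
 */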
static int
octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
			  signed short buf_offset, unsigned int max_buf_count)
{
	void *memptr = NULL;
	phys_addr_t phys_addr;
	unsigned int memsz;
	struct fpavf_res *fpa = NULL;
	uint64_t reg;
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = -1;

	fpa = &fpadev.pool[gpool];
	memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
			FPA_LN_SIZE;

	/* Round-up to page size */
	memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1);
	memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
	if (memptr == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/* Configure stack */
	fpa->pool_stack_base = memptr;
	phys_addr = rte_malloc_virt2phy(memptr);

	buf_size /= FPA_LN_SIZE;

	/* POOL setup */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	buf_offset /= FPA_LN_SIZE;
	reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
		POOL_ENA;

	cfg.aid = 0;
	cfg.pool_cfg = reg;
	cfg.pool_stack_base = phys_addr;
	cfg.pool_stack_end = phys_addr + memsz;
	cfg.aura_cfg = (1 << 9);

	ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64 "\n",
		      fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
		      cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);

	/* The pool is now in use */
	fpa->is_inuse = true;

err:
	if (ret < 0)
		rte_free(memptr);

	return ret;
}

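/*
 * Reset the pool via FPA_CONFIGSET with an all-zero configuration.
 * The pool stack memory is freed whether or not the mailbox call
 * succeeds.
 */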
static int
octeontx_fpapf_pool_destroy(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	struct fpavf_res *fpa = NULL;
	int ret = -1;

	fpa = &fpadev.pool[gpool_index];

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_CONFIGSET;
	hdr.vfid = fpa->vf_id;
	hdr.res_code = 0;

	/* reset and free the pool */
	cfg.aid = 0;
	cfg.pool_cfg = 0;
	cfg.pool_stack_base = 0;
	cfg.pool_stack_end = 0;
	cfg.aura_cfg = 0;

	ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		ret = -EACCES;
		goto err;
	}

	ret = 0;
err:
	/* In any case, free the pool stack memory */
	rte_free(fpa->pool_stack_base);
	fpa->pool_stack_base = NULL;
	return ret;
}

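/*
 * Attach the aura to its pool.  gpool and gaura are mapped 1:1 here,
 * so the pool index serves both as the mailbox vfid and as the aura
 * id (cfg.aid).  octeontx_fpapf_aura_detach() below is the inverse.
 */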
static int
octeontx_fpapf_aura_attach(unsigned int gpool_index)
{
	struct octeontx_mbox_hdr hdr;
	struct dcfg_resp resp;
	struct octeontx_mbox_fpa_cfg cfg;
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_ATTACHAURA;
	hdr.vfid = gpool_index;
	hdr.res_code = 0;
	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
	cfg.aid = gpool_index; /* gpool is gaura */

	ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
					sizeof(struct octeontx_mbox_fpa_cfg),
					&resp, sizeof(resp));
	if (ret < 0) {
		fpavf_log_err("Could not attach fpa ");
		fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
			      gpool_index, gpool_index, ret, hdr.res_code);
		ret = -EACCES;
		goto err;
	}
err:
	return ret;
}

static int
octeontx_fpapf_aura_detach(unsigned int gpool_index)
{
	struct octeontx_mbox_fpa_cfg cfg = {0};
	struct octeontx_mbox_hdr hdr = {0};
	int ret = 0;

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	cfg.aid = gpool_index; /* gpool is gaura */
	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_DETACHAURA;
	hdr.vfid = gpool_index;
	ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
	}

err:
	return ret;
}

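/*
 * Fast-path side of pool setup: program the pool's buffer memory
 * range (VHPOOL start/end addresses) directly through the VF BAR,
 * with the end address truncated to a cache-line boundary.
 */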
static int
octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
			  void *memva, uint16_t gpool)
{
	uint64_t va_end;

	if (unlikely(!handle))
		return -ENODEV;

	va_end = (uintptr_t)memva + memsz;
	va_end &= ~RTE_CACHE_LINE_MASK;

	/* VHPOOL setup */
	fpavf_write64((uintptr_t)memva,
			 (void *)((uintptr_t)handle +
			 FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(va_end,
			 (void *)((uintptr_t)handle +
			 FPA_VF_VHPOOL_END_ADDR(gpool)));
	return 0;
}

static int
octeontx_fpapf_start_count(uint16_t gpool_index)
{
	int ret = 0;
	struct octeontx_mbox_hdr hdr = {0};

	if (gpool_index >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	hdr.coproc = FPA_COPROC;
	hdr.msg = FPA_START_COUNT;
	hdr.vfid = gpool_index;
	ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
	if (ret < 0) {
		fpavf_log_err("Could not start buffer counting for ");
		fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
			      gpool_index, ret, hdr.res_code);
		ret = -EINVAL;
		goto err;
	}

err:
	return ret;
}

static __rte_always_inline int
octeontx_fpavf_free(unsigned int gpool)
{
	int ret = 0;

	if (gpool >= FPA_MAX_POOL) {
		ret = -EINVAL;
		goto err;
	}

	/* Pool is free */
	fpadev.pool[gpool].is_inuse = false;

err:
	return ret;
}

static __rte_always_inline int
octeontx_gpool_free(uint16_t gpool)
{
	if (fpadev.pool[gpool].sz128 != 0) {
		fpadev.pool[gpool].sz128 = 0;
		return 0;
	}
	return -EINVAL;
}

/*
 * Return buffer size for a given pool
 */
int
octeontx_fpa_bufpool_block_size(uintptr_t handle)
{
	struct fpavf_res *res = NULL;
	uint8_t gpool;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);
	res = &fpadev.pool[gpool];
	return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
}

int
octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
	uint64_t cnt, limit, avail;
	uint8_t gpool;
	uintptr_t pool_bar;

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the gpool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT(gpool)));
	limit = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHAURA_CNT_LIMIT(gpool)));

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	return RTE_MIN(avail, (limit - cnt));
}

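/*
 * Create a buffer pool over the caller-provided VA region *va_start:
 * reserve a gpool, configure the pool and attach its aura through the
 * PF mailbox, program the buffer range through the VF BAR, then prime
 * the aura counters (count, limit, threshold) and start buffer
 * counting.  On failure every completed step is rolled back and errno
 * is set.  See the usage sketch after the function body.
 */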
uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
				unsigned int buf_offset, char **va_start,
				int node_id)
{
	unsigned int gpool;
	void *memva;
	unsigned long memsz;
	uintptr_t gpool_handle;
	uintptr_t pool_bar;
	int res;

	RTE_SET_USED(node_id);
	FPAVF_STATIC_ASSERTION(sizeof(struct rte_mbuf) <=
				OCTEONTX_FPAVF_BUF_OFFSET);

	if (unlikely(*va_start == NULL))
		goto error_end;

	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
	if (object_size > FPA_MAX_OBJ_SIZE) {
		errno = EINVAL;
		goto error_end;
	}

	rte_spinlock_lock(&fpadev.lock);
	res = octeontx_fpa_gpool_alloc(object_size);

	/* Bail if failed */
	if (unlikely(res < 0)) {
		errno = -res; /* helpers return negative errno codes */
		goto error_unlock;
	}

	/* get fpavf */
	gpool = res;

	/* get pool handle */
	gpool_handle = octeontx_fpa_gpool2handle(gpool);
	if (!octeontx_fpa_handle_valid(gpool_handle)) {
		errno = ENOSPC;
		goto error_gpool_free;
	}

	/* Get pool bar address from handle */
	pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;

	res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
					object_count);
	if (res < 0) {
		errno = -res;
		goto error_gpool_free;
	}

	/* populate AURA fields */
	res = octeontx_fpapf_aura_attach(gpool);
	if (res < 0) {
		errno = -res;
		goto error_pool_destroy;
	}

	/* vf pool setup */
	memsz = object_size * object_count;
	memva = *va_start;
	res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
	if (res < 0) {
		errno = -res;
		goto error_gaura_detach;
	}

	/* Release lock */
	rte_spinlock_unlock(&fpadev.lock);

	/* populate AURA registers */
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT(gpool)));
	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
	fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));

	octeontx_fpapf_start_count(gpool);

	return gpool_handle;

error_gaura_detach:
	(void) octeontx_fpapf_aura_detach(gpool);
error_pool_destroy:
	octeontx_fpavf_free(gpool);
	octeontx_fpapf_pool_destroy(gpool);
error_gpool_free:
	octeontx_gpool_free(gpool);
error_unlock:
	rte_spinlock_unlock(&fpadev.lock);
error_end:
	return (uintptr_t)NULL;
}

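/*
 * Usage sketch (illustrative only; the buffer region and sizes are
 * hypothetical and error handling is abbreviated):
 *
 *	char *va = buf_region;	(e.g. carved from a hugepage memzone)
 *	uintptr_t handle;
 *
 *	handle = octeontx_fpa_bufpool_create(2048, 4096, 0, &va, 0);
 *	if (handle == 0)
 *		return -errno;
 *
 *	... alloc/free buffers through the FPA_VF_VHAURA_OP_* regs ...
 *
 *	if (octeontx_fpa_bufpool_destroy(handle, 0) == -EBUSY)
 *		... buffers are still outstanding ...
 */
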
/*
 * Destroy a buffer pool.  The pool must have no outstanding buffers,
 * otherwise -EBUSY is returned.
 */
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
{
	void **node, **curr, *head = NULL;
	uint64_t sz;
	uint64_t cnt, avail;
	uint8_t gpool;
	uintptr_t pool_bar;
	int ret;

	RTE_SET_USED(node_id);

	/* Wait for all outstanding writes to be committed */
	rte_smp_wmb();

	if (unlikely(!octeontx_fpa_handle_valid(handle)))
		return -EINVAL;

	/* get the pool */
	gpool = octeontx_fpa_bufpool_gpool(handle);

	/* Get pool bar address from handle */
	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

	/* Check for no outstanding buffers */
	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
					FPA_VF_VHAURA_CNT(gpool)));
	if (cnt) {
		fpavf_log_dbg("buffers exist in pool, cnt %" PRIu64 "\n", cnt);
		return -EBUSY;
	}

	rte_spinlock_lock(&fpadev.lock);

	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
				FPA_VF_VHPOOL_AVAILABLE(gpool)));

	/* Prepare to empty the entire POOL */
	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));

	/* Empty the pool */
	/* Invalidate the POOL */
	octeontx_gpool_free(gpool);

	/* Process all buffers in the pool */
	while (avail--) {

		/* Yank a buffer from the pool */
		node = (void *)(uintptr_t)
			fpavf_read64((void *)
				    (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));

		if (node == NULL) {
			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
				      gpool, avail);
			break;
		}

		/* Insert it into an ordered linked list */
		for (curr = &head; curr[0] != NULL; curr = curr[0]) {
			if ((uintptr_t)node <= (uintptr_t)curr[0])
				break;
		}
		node[0] = curr[0];
		curr[0] = node;
	}

	/* Verify the linked list to be a perfect series */
	sz = octeontx_fpa_bufpool_block_size(handle) << 7;
	for (curr = head; curr != NULL && curr[0] != NULL;
		curr = curr[0]) {
		if (curr == curr[0] ||
			((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
			fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
				      gpool, curr, curr[0]);
		}
	}

	/* Disable pool operation */
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			 FPA_VF_VHPOOL_START_ADDR(gpool)));
	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHPOOL_END_ADDR(gpool)));

	(void)octeontx_fpapf_pool_destroy(gpool);

	/* Deactivate the AURA */
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_LIMIT(gpool)));
	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
			FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));

	ret = octeontx_fpapf_aura_detach(gpool);
	if (ret) {
		fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
			      gpool, ret);
	}

	/* Free VF */
	(void)octeontx_fpavf_free(gpool);

	rte_spinlock_unlock(&fpadev.lock);
	return 0;
}

static void
octeontx_fpavf_setup(void)
{
	uint8_t i;
	static bool init_once;

	if (!init_once) {
		rte_spinlock_init(&fpadev.lock);
		fpadev.total_gpool_cnt = 0;

		for (i = 0; i < FPA_VF_MAX; i++) {

			fpadev.pool[i].domain_id = ~0;
			fpadev.pool[i].stack_ln_ptr = 0;
			fpadev.pool[i].sz128 = 0;
			fpadev.pool[i].bar0 = NULL;
			fpadev.pool[i].pool_stack_base = NULL;
			fpadev.pool[i].is_inuse = false;
		}
		init_once = true;
	}
}

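/*
 * Derive the VF identity from BAR0: the threshold register of aura 0
 * carries the domain id in bits 23:8 and the vf id in bits 39:24 (per
 * the shifts below), and VHPOOL_THRESHOLD(0) supplies stack_ln_ptr.
 * Returns the vf_id on success, -1 on a bad or already-used vf_id.
 */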
static int
octeontx_fpavf_identify(void *bar0)
{
	uint64_t val;
	uint16_t domain_id;
	uint16_t vf_id;
	uint64_t stack_ln_ptr;

	val = fpavf_read64((void *)((uintptr_t)bar0 +
				FPA_VF_VHAURA_CNT_THRESHOLD(0)));

	domain_id = (val >> 8) & 0xffff;
	vf_id = (val >> 24) & 0xffff;

	stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
					FPA_VF_VHPOOL_THRESHOLD(0)));
	if (vf_id >= FPA_VF_MAX) {
		fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
		return -1;
	}

	if (fpadev.pool[vf_id].is_inuse) {
		fpavf_log_err("vf_id %d is_inuse\n", vf_id);
		return -1;
	}

	fpadev.pool[vf_id].domain_id = domain_id;
	fpadev.pool[vf_id].vf_id = vf_id;
	fpadev.pool[vf_id].bar0 = bar0;
	fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;

	/* SUCCESS */
	return vf_id;
}

/* FPAVF pcie device aka mempool probe */
static int
fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint8_t *idreg;
	int res;
	struct fpavf_res *fpa;

	RTE_SET_USED(pci_drv);
	RTE_SET_USED(fpa);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL) {
		fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
		return -ENODEV;
	}
	idreg = pci_dev->mem_resource[0].addr;

	octeontx_fpavf_setup();

	res = octeontx_fpavf_identify(idreg);
	if (res < 0)
		return -1;

	fpa = &fpadev.pool[res];
	fpadev.total_gpool_cnt++;
	rte_wmb();

	fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
		       fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
		       fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);

	return 0;
}

static const struct rte_pci_id pci_fpavf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_FPA_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_fpavf = {
	.id_table = pci_fpavf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = fpavf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);