1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
3 */
4
5 #ifndef __OCTEONTX_FPAVF_H__
6 #define __OCTEONTX_FPAVF_H__
7
8 #include <rte_io.h>
9 #include "octeontx_pool_logs.h"
10
/* fpa pool Vendor ID and Device ID */
#define PCI_VENDOR_ID_CAVIUM		0x177D
#define PCI_DEVICE_ID_OCTEONTX_FPA_VF	0xA053

/* Up to 32 FPA VFs; the gpool id lives in the low 5 bits of a handle. */
#define FPA_VF_MAX			32
#define FPA_GPOOL_MASK			(FPA_VF_MAX - 1)
/* Each gpool owns an aura set of (1 << FPA_GAURA_SHIFT) auras. */
#define FPA_GAURA_SHIFT			4

/* FPA VF register offsets; (x) selects the 4 MB VF aperture (bit 22). */
#define FPA_VF_INT(x)		(0x200ULL | ((x) << 22))
#define FPA_VF_INT_W1S(x)	(0x210ULL | ((x) << 22))
#define FPA_VF_INT_ENA_W1S(x)	(0x220ULL | ((x) << 22))
#define FPA_VF_INT_ENA_W1C(x)	(0x230ULL | ((x) << 22))

/* Single pool per VF: the vhpool index is masked down to zero. */
#define FPA_VF_VHPOOL_AVAILABLE(vhpool)		(0x04150 | ((vhpool) & 0x0))
#define FPA_VF_VHPOOL_THRESHOLD(vhpool)		(0x04160 | ((vhpool) & 0x0))
#define FPA_VF_VHPOOL_START_ADDR(vhpool)	(0x04200 | ((vhpool) & 0x0))
#define FPA_VF_VHPOOL_END_ADDR(vhpool)		(0x04210 | ((vhpool) & 0x0))

/* Per-aura registers: aura index (0..15) is encoded at bits [21:18]. */
#define FPA_VF_VHAURA_CNT(vaura)		(0x20120 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_CNT_ADD(vaura)		(0x20128 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_CNT_LIMIT(vaura)		(0x20130 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_CNT_THRESHOLD(vaura)	(0x20140 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_OP_ALLOC(vaura)		(0x30000 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_OP_FREE(vaura)		(0x38000 | (((vaura) & 0xf) << 18))

/* Compose a free-address store: addr | 9-bit (y) at bit 3 | 1-bit (z) at 14. */
#define FPA_VF_FREE_ADDRS_S(x, y, z) \
	((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14))

/*
 * First gaura id of a gpool's aura set.
 * Fix: parenthesize the argument so expression arguments (e.g. a + b)
 * expand correctly — the previous form shifted only the last operand.
 */
#define FPA_AURA_IDX(gpool)	((gpool) << FPA_GAURA_SHIFT)
/* FPA VF register offsets from VF_BAR4, size 2 MByte */
#define FPA_VF_MSIX_VEC_ADDR	0x00000
#define FPA_VF_MSIX_VEC_CTL	0x00008
#define FPA_VF_MSIX_PBA		0xF0000

#define FPA_VF0_APERTURE_SHIFT	22
#define FPA_AURA_SET_SIZE	16

#define FPA_MAX_OBJ_SIZE	(128 * 1024)
#define OCTEONTX_FPAVF_BUF_OFFSET	128
51
/*
 * In Cavium OCTEON TX SoC, all accesses to the device registers are
 * implicitly strongly ordered. So, the relaxed version of IO operation is
 * safe to use without any IO memory barriers.
 */
#define fpavf_read64 rte_read64_relaxed
#define fpavf_write64 rte_write64_relaxed
59
/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)
/*
 * Read 128 bits from 'addr' into (val0, val1) with a single LDP instruction.
 * NOTE(review): no "memory" clobber is declared, so the compiler may reorder
 * this access against other memory operations — presumably acceptable given
 * the strongly-ordered device aperture noted above; confirm.
 */
#define fpavf_load_pair(val0, val1, addr) __extension__ ({ \
			asm volatile( \
			"ldp %x[x0], %x[x1], [%x[p1]]" \
			:[x0]"=r"(val0), [x1]"=r"(val1) \
			:[p1]"r"(addr) \
			); })

/* Write (val0, val1) as one 128-bit STP store to 'addr'. */
#define fpavf_store_pair(val0, val1, addr) __extension__ ({ \
			asm volatile( \
			"stp %x[x0], %x[x1], [%x[p1]]" \
			::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
			); })
#else /* Un optimized functions for building on non arm64 arch */

/* Portable fallback: two separate 64-bit reads (not one 128-bit access). */
#define fpavf_load_pair(val0, val1, addr) \
do { \
	val0 = rte_read64(addr); \
	val1 = rte_read64(((uint8_t *)addr) + 8); \
} while (0)

/* Portable fallback: two separate 64-bit writes (not one 128-bit access). */
#define fpavf_store_pair(val0, val1, addr) \
do { \
	rte_write64(val0, addr); \
	rte_write64(val1, (((uint8_t *)addr) + 8)); \
} while (0)
#endif
88
/**
 * Create an FPA hardware buffer pool.
 *
 * @param object_size   Size in bytes of each object in the pool.
 * @param object_count  Number of objects to provision.
 * @param buf_offset    Offset applied within each buffer (presumably where
 *                      payload starts, cf. OCTEONTX_FPAVF_BUF_OFFSET) —
 *                      confirm against the implementation.
 * @param node          NUMA node id for the backing memory.
 * @return Opaque pool handle encoding the gpool id in its low bits (see the
 *         inline helpers below); error encoding defined by the implementation.
 */
uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
				unsigned int buf_offset, int node);
/**
 * Register the memory range [memva, memva + memsz) with the hardware pool
 * identified by gpool.
 * @return 0 on success, negative on error (convention — confirm in the .c).
 */
int
octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
				void *memva, uint16_t gpool);
/** Destroy the pool behind @handle on NUMA @node. 0 on success — confirm. */
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
/** @return the pool's object/block size, presumably negative on error. */
int
octeontx_fpa_bufpool_block_size(uintptr_t handle);
/** @return current count of free buffers in the pool — confirm in the .c. */
int
octeontx_fpa_bufpool_free_count(uintptr_t handle);
101
102 static __rte_always_inline uint8_t
octeontx_fpa_bufpool_gpool(uintptr_t handle)103 octeontx_fpa_bufpool_gpool(uintptr_t handle)
104 {
105 return (uint8_t)handle & FPA_GPOOL_MASK;
106 }
107
108 static __rte_always_inline uint16_t
octeontx_fpa_bufpool_gaura(uintptr_t handle)109 octeontx_fpa_bufpool_gaura(uintptr_t handle)
110 {
111 return octeontx_fpa_bufpool_gpool(handle) << FPA_GAURA_SHIFT;
112 }
113
114 #endif /* __OCTEONTX_FPAVF_H__ */
115