/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef __OCTEONTX_FPAVF_H__
#define __OCTEONTX_FPAVF_H__

#include <rte_io.h>
#include "octeontx_pool_logs.h"

/* FPA pool Vendor ID and Device ID */
#define PCI_VENDOR_ID_CAVIUM		0x177D
#define PCI_DEVICE_ID_OCTEONTX_FPA_VF	0xA053

#define FPA_VF_MAX			32
#define FPA_GPOOL_MASK			(FPA_VF_MAX - 1)

/* FPA VF register offsets */
#define FPA_VF_INT(x)			(0x200ULL | ((x) << 22))
#define FPA_VF_INT_W1S(x)		(0x210ULL | ((x) << 22))
#define FPA_VF_INT_ENA_W1S(x)		(0x220ULL | ((x) << 22))
#define FPA_VF_INT_ENA_W1C(x)		(0x230ULL | ((x) << 22))

#define FPA_VF_VHPOOL_AVAILABLE(vhpool)		(0x04150 | ((vhpool) & 0x0))
#define FPA_VF_VHPOOL_THRESHOLD(vhpool)		(0x04160 | ((vhpool) & 0x0))
#define FPA_VF_VHPOOL_START_ADDR(vhpool)	(0x04200 | ((vhpool) & 0x0))
#define FPA_VF_VHPOOL_END_ADDR(vhpool)		(0x04210 | ((vhpool) & 0x0))

#define FPA_VF_VHAURA_CNT(vaura)		(0x20120 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_CNT_ADD(vaura)		(0x20128 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_CNT_LIMIT(vaura)		(0x20130 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_CNT_THRESHOLD(vaura)	(0x20140 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_OP_ALLOC(vaura)		(0x30000 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_OP_FREE(vaura)		(0x38000 | (((vaura) & 0xf) << 18))

#define FPA_VF_FREE_ADDRS_S(x, y, z)	\
	((x) | (((y) & 0x1ff) << 3) | (((z) & 1) << 14))

/* FPA VF register offsets from VF_BAR4, size 2 MByte */
#define FPA_VF_MSIX_VEC_ADDR		0x00000
#define FPA_VF_MSIX_VEC_CTL		0x00008
#define FPA_VF_MSIX_PBA			0xF0000

#define FPA_VF0_APERTURE_SHIFT		22
#define FPA_AURA_SET_SIZE		16

#define FPA_MAX_OBJ_SIZE		(128 * 1024)
#define OCTEONTX_FPAVF_BUF_OFFSET	128

/*
 * In the Cavium OcteonTX SoC, all accesses to the device registers are
 * implicitly strongly ordered, so the relaxed versions of the I/O
 * operations are safe to use without any explicit I/O memory barriers.
 */
#define fpavf_read64	rte_read64_relaxed
#define fpavf_write64	rte_write64_relaxed

/* ARM64-specific paired 64-bit register access helpers */
#if defined(RTE_ARCH_ARM64)
#define fpavf_load_pair(val0, val1, addr) ({		\
	asm volatile(					\
	"ldp %x[x0], %x[x1], [%x[p1]]"			\
	:[x0]"=r"(val0), [x1]"=r"(val1)			\
	:[p1]"r"(addr)					\
	); })

#define fpavf_store_pair(val0, val1, addr) ({		\
	asm volatile(					\
	"stp %x[x0], %x[x1], [%x[p1]]"			\
	::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr)	\
	); })
#else /* Unoptimized fallbacks for building on non-arm64 architectures */

#define fpavf_load_pair(val0, val1, addr)		\
do {							\
	val0 = rte_read64(addr);			\
	val1 = rte_read64(((uint8_t *)addr) + 8);	\
} while (0)

#define fpavf_store_pair(val0, val1, addr)		\
do {							\
	rte_write64(val0, addr);			\
	rte_write64(val1, (((uint8_t *)addr) + 8));	\
} while (0)
#endif

uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
			    unsigned int buf_offset, int node);
int
octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
			      void *memva, uint16_t gpool);
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
int
octeontx_fpa_bufpool_block_size(uintptr_t handle);
int
octeontx_fpa_bufpool_free_count(uintptr_t handle);

/* Extract the global pool (gpool) index encoded in the low bits of a handle */
static __rte_always_inline uint8_t
octeontx_fpa_bufpool_gpool(uintptr_t handle)
{
	return (uint8_t)handle & FPA_GPOOL_MASK;
}
#endif /* __OCTEONTX_FPAVF_H__ */
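
/*
 * Illustrative usage sketch (not part of this API): how a mempool handler
 * might drive the bufpool calls declared above.  The object size/count,
 * the memva/memsz variables and the "zero handle means failure" check are
 * assumptions made for this example only, not guarantees of this header.
 *
 *	uintptr_t handle;
 *	uint16_t gpool;
 *
 *	handle = octeontx_fpa_bufpool_create(2048, 8192,
 *					     OCTEONTX_FPAVF_BUF_OFFSET,
 *					     rte_socket_id());
 *	if (!handle)
 *		return -ENOMEM;	/+ assumed failure convention +/
 *
 *	gpool = octeontx_fpa_bufpool_gpool(handle);
 *	/+ Register the memory (memva/memsz) backing the pool objects. +/
 *	if (octeontx_fpavf_pool_set_range(handle, memsz, memva, gpool) < 0)
 *		octeontx_fpa_bufpool_destroy(handle, rte_socket_id());
 */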