/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium Inc. 2017. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __OCTEONTX_IO_H__
#define __OCTEONTX_IO_H__

#include <stddef.h>
#include <stdint.h>

#include <rte_io.h>

/* In the Cavium OcteonTX SoC, all accesses to the device registers are
 * implicitly strongly ordered, so the relaxed versions of the I/O operations
 * are safe to use without any I/O memory barriers.
 */
#define octeontx_read64 rte_read64_relaxed
#define octeontx_write64 rte_write64_relaxed
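
/*
 * Illustrative sketch (not part of the original header): a read-modify-write
 * of a BAR-mapped 64-bit register using the relaxed accessors above. The
 * helper name and its 'reg_va'/'bits' parameters are hypothetical; the point
 * is that no explicit I/O barrier is required around the two accesses.
 */
static inline uint64_t
octeontx_example_reg_setbits(void *reg_va, uint64_t bits)
{
        uint64_t val;

        val = octeontx_read64(reg_va);          /* relaxed 64-bit register read */
        octeontx_write64(val | bits, reg_va);   /* relaxed 64-bit register write */
        return val;
}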

/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)
/* Prefetch the cache line at '_ptr' into L1 in anticipation of a store */
#define octeontx_prefetch_store_keep(_ptr) ({\
        asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })

/* Load two adjacent 64-bit words from 'addr' with a single LDP */
#define octeontx_load_pair(val0, val1, addr) ({         \
        asm volatile(                                   \
        "ldp %x[x0], %x[x1], [%x[p1]]"                  \
        :[x0]"=r"(val0), [x1]"=r"(val1)                 \
        :[p1]"r"(addr)                                  \
        ); })

/* Store two adjacent 64-bit words to 'addr' with a single STP */
#define octeontx_store_pair(val0, val1, addr) ({        \
        asm volatile(                                   \
        "stp %x[x0], %x[x1], [%x[p1]]"                  \
        ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr)   \
        ); })
#else /* Unoptimized fallbacks for building on non-arm64 architectures */

#define octeontx_prefetch_store_keep(_ptr) do {} while (0)

#define octeontx_load_pair(val0, val1, addr)            \
do {                                                    \
        val0 = rte_read64(addr);                        \
        val1 = rte_read64(((uint8_t *)addr) + 8);       \
} while (0)

#define octeontx_store_pair(val0, val1, addr)           \
do {                                                    \
        rte_write64(val0, addr);                        \
        rte_write64(val1, (((uint8_t *)addr) + 8));     \
} while (0)
#endif
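
/*
 * Illustrative sketch (not part of the original header): copying a 16-byte
 * descriptor with the pair accessors above. On arm64 this maps to single
 * LDP/STP instructions; elsewhere it falls back to two 64-bit accesses. The
 * helper name and its parameters are hypothetical.
 */
static inline void
octeontx_example_copy_desc(void *dst, void *src)
{
        uint64_t w0, w1;

        octeontx_prefetch_store_keep(dst);      /* hint: 'dst' is about to be written */
        octeontx_load_pair(w0, w1, src);        /* read words 0 and 1 from 'src' */
        octeontx_store_pair(w0, w1, dst);       /* write them as one pair to 'dst' */
}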

#if defined(RTE_ARCH_ARM64)
/**
 * Perform an atomic fetch-and-add operation.
 *
 * @param addr is the address of the 64-bit location to update
 * @param off is the value to add
 *
 * @return the 64-bit value at 'addr' before the addition
 */
static inline uint64_t
octeontx_reg_ldadd_u64(void *addr, int64_t off)
{
        uint64_t old_val;

        __asm__ volatile(
                " .cpu generic+lse\n"
                " ldadd %1, %0, [%2]\n"
                : "=r" (old_val) : "r" (off), "r" (addr) : "memory");

        return old_val;
}

/**
 * Perform an LMTST operation - an atomic write of up to 128 bytes to
 * an I/O block that supports this operation type.
 *
 * @param lmtline_va is the address where the LMTLINE is mapped
 * @param ioreg_va is the virtual address of the device register
 * @param cmdbuf is the array of peripheral commands to execute
 * @param cmdsize is the number of 64-bit words in 'cmdbuf'
 *
 * @return N/A
 */
static inline void
octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[],
                   uint64_t cmdsize)
{
        uint64_t result;
        uint64_t word_count;
        uint64_t *lmtline = lmtline_va;

        word_count = cmdsize;

        do {
                /* Copy commands to the LMTLINE */
                for (result = 0; result < word_count; result += 2) {
                        lmtline[result + 0] = cmdbuf[result + 0];
                        lmtline[result + 1] = cmdbuf[result + 1];
                }

                /* LDEOR initiates the atomic transfer to the I/O device;
                 * a zero result means the transfer did not complete and
                 * the whole sequence must be retried.
                 */
                __asm__ volatile(
                        " .cpu generic+lse\n"
                        " ldeor xzr, %0, [%1]\n"
                        : "=r" (result) : "r" (ioreg_va) : "memory");
        } while (!result);
}

#else

static inline uint64_t
octeontx_reg_ldadd_u64(void *addr, int64_t off)
{
        RTE_SET_USED(addr);
        RTE_SET_USED(off);
        return 0;
}

static inline void
octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[],
                   uint64_t cmdsize)
{
        RTE_SET_USED(lmtline_va);
        RTE_SET_USED(ioreg_va);
        RTE_SET_USED(cmdbuf);
        RTE_SET_USED(cmdsize);
}

#endif
#endif /* __OCTEONTX_IO_H__ */
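
/*
 * Illustrative sketch (not part of the original header): issuing a two-word
 * command with octeontx_reg_lmtst(). The helper name, the command contents
 * and the way 'lmtline_va'/'ioreg_va' are obtained are hypothetical; a real
 * caller derives both addresses from the device's BAR mappings. It carries
 * its own guard because it sits outside the main include guard above.
 */
#ifndef __OCTEONTX_IO_EXAMPLE__
#define __OCTEONTX_IO_EXAMPLE__

static inline void
octeontx_example_send_cmd(void *lmtline_va, void *ioreg_va, uint64_t w0,
                          uint64_t w1)
{
        uint64_t cmd[2];

        cmd[0] = w0;
        cmd[1] = w1;

        /* On arm64 this atomically transfers the 16-byte command and retries
         * internally until the LMTST completes; on other architectures the
         * function above is a no-op stub.
         */
        octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd, 2);
}

#endif /* __OCTEONTX_IO_EXAMPLE__ */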