/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef __SSOVF_EVDEV_H__
#define __SSOVF_EVDEV_H__

#include <rte_event_eth_tx_adapter.h>
#include <eventdev_pmd_vdev.h>
#include <rte_io.h>

#include <octeontx_mbox.h>
#include <octeontx_ethdev.h>

#include "octeontx_rxtx.h"

#define SSO_RX_ADPTR_ENQ_FASTPATH_FUNC OCCTX_RX_FASTPATH_MODES
#define SSO_TX_ADPTR_ENQ_FASTPATH_FUNC OCCTX_TX_FASTPATH_MODES

#define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx

#define SSOVF_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, otx_logtype_ssovf, \
			"[%s] %s() " fmt "\n", \
			RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)

#define ssovf_log_info(fmt, ...) SSOVF_LOG(INFO, fmt, ##__VA_ARGS__)
#define ssovf_log_dbg(fmt, ...) SSOVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
#define ssovf_log_err(fmt, ...) SSOVF_LOG(ERR, fmt, ##__VA_ARGS__)
#define ssovf_func_trace ssovf_log_dbg
#define ssovf_log_selftest ssovf_log_info

#define SSO_MAX_VHGRP (64)
#define SSO_MAX_VHWS (32)

/* SSO VF register offsets */
#define SSO_VHGRP_QCTL (0x010ULL)
#define SSO_VHGRP_INT (0x100ULL)
#define SSO_VHGRP_INT_W1S (0x108ULL)
#define SSO_VHGRP_INT_ENA_W1S (0x110ULL)
#define SSO_VHGRP_INT_ENA_W1C (0x118ULL)
#define SSO_VHGRP_INT_THR (0x140ULL)
#define SSO_VHGRP_INT_CNT (0x180ULL)
#define SSO_VHGRP_XAQ_CNT (0x1B0ULL)
#define SSO_VHGRP_AQ_CNT (0x1C0ULL)
#define SSO_VHGRP_AQ_THR (0x1E0ULL)

/* BAR2 */
#define SSO_VHGRP_OP_ADD_WORK0 (0x00ULL)
#define SSO_VHGRP_OP_ADD_WORK1 (0x08ULL)

/* SSOW VF register offsets (BAR0) */
#define SSOW_VHWS_GRPMSK_CHGX(x) (0x080ULL | ((x) << 3))
#define SSOW_VHWS_TAG (0x300ULL)
#define SSOW_VHWS_WQP (0x308ULL)
#define SSOW_VHWS_LINKS (0x310ULL)
#define SSOW_VHWS_PENDTAG (0x340ULL)
#define SSOW_VHWS_PENDWQP (0x348ULL)
#define SSOW_VHWS_SWTP (0x400ULL)
#define SSOW_VHWS_OP_ALLOC_WE (0x410ULL)
#define SSOW_VHWS_OP_UPD_WQP_GRP0 (0x440ULL)
#define SSOW_VHWS_OP_UPD_WQP_GRP1 (0x448ULL)
#define SSOW_VHWS_OP_SWTAG_UNTAG (0x490ULL)
#define SSOW_VHWS_OP_SWTAG_CLR (0x820ULL)
#define SSOW_VHWS_OP_DESCHED (0x860ULL)
#define SSOW_VHWS_OP_DESCHED_NOSCH (0x870ULL)
#define SSOW_VHWS_OP_SWTAG_DESCHED (0x8C0ULL)
#define SSOW_VHWS_OP_SWTAG_NOSCHED (0x8D0ULL)
#define SSOW_VHWS_OP_SWTP_SET (0xC20ULL)
#define SSOW_VHWS_OP_SWTAG_NORM (0xC80ULL)
#define SSOW_VHWS_OP_SWTAG_FULL0 (0xCA0ULL)
#define SSOW_VHWS_OP_SWTAG_FULL1 (0xCA8ULL)
#define SSOW_VHWS_OP_CLR_NSCHED (0x10000ULL)
#define SSOW_VHWS_OP_GET_WORK0 (0x80000ULL)
#define SSOW_VHWS_OP_GET_WORK1 (0x80008ULL)

/* Mailbox message constants */
#define SSO_COPROC 0x2

#define SSO_GETDOMAINCFG 0x1
#define SSO_IDENTIFY 0x2
#define SSO_GET_DEV_INFO 0x3
#define SSO_GET_GETWORK_WAIT 0x4
#define SSO_SET_GETWORK_WAIT 0x5
#define SSO_CONVERT_NS_GETWORK_ITER 0x6
#define SSO_GRP_GET_PRIORITY 0x7
#define SSO_GRP_SET_PRIORITY 0x8

/*
 * In the Cavium OCTEON TX SoC, all accesses to the device registers are
 * implicitly strongly ordered, so the relaxed I/O operations are safe to
 * use without any I/O memory barriers.
 */
#define ssovf_read64 rte_read64_relaxed
#define ssovf_write64 rte_write64_relaxed

/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)
#define ssovf_load_pair(val0, val1, addr) ({		\
	asm volatile(					\
	"ldp %x[x0], %x[x1], [%x[p1]]"			\
	:[x0]"=r"(val0), [x1]"=r"(val1)			\
	:[p1]"r"(addr)					\
	); })

#define ssovf_store_pair(val0, val1, addr) ({		\
	asm volatile(					\
	"stp %x[x0], %x[x1], [%x[p1]]"			\
	::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr)	\
	); })
#else /* Unoptimized fallbacks for building on non-arm64 architectures */

#define ssovf_load_pair(val0, val1, addr)		\
do {							\
	val0 = rte_read64(addr);			\
	val1 = rte_read64(((uint8_t *)addr) + 8);	\
} while (0)

#define ssovf_store_pair(val0, val1, addr)		\
do {							\
	rte_write64(val0, addr);			\
	rte_write64(val1, (((uint8_t *)addr) + 8));	\
} while (0)
#endif
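
/*
 * Illustrative sketch only, not part of the driver API: the two helpers
 * below are hypothetical and exist purely to document how the relaxed
 * accessors and the load/store-pair macros above are intended to be used
 * together with the register offsets. The real fast-path code lives in
 * ssovf_worker.c.
 */
static inline uint64_t
ssovf_example_grp_aq_cnt(uint8_t *grp_base)
{
	/* A relaxed read is sufficient; see the ordering note above. */
	return ssovf_read64(grp_base + SSO_VHGRP_AQ_CNT);
}

static inline void
ssovf_example_add_work(uint8_t *grp_base2, uint64_t add_work0, uint64_t wqe)
{
	/*
	 * ADD_WORK0 and ADD_WORK1 are adjacent in BAR2, so the metadata
	 * word and the work pointer can be written with one 16-byte
	 * store pair.
	 */
	ssovf_store_pair(add_work0, wqe, grp_base2 + SSO_VHGRP_OP_ADD_WORK0);
}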

struct ssovf_info {
	uint16_t domain; /* Domain id */
	uint8_t total_ssovfs; /* Total sso groups available in domain */
	uint8_t total_ssowvfs; /* Total sso hws available in domain */
};

enum ssovf_type {
	OCTEONTX_SSO_GROUP, /* SSO group vf */
	OCTEONTX_SSO_HWS,   /* SSO hardware workslot vf */
};

struct ssovf_evdev {
	OFFLOAD_FLAGS; /* Sequence should not be changed */
	uint8_t max_event_queues;
	uint8_t max_event_ports;
	uint8_t is_timeout_deq;
	uint8_t nb_event_queues;
	uint8_t nb_event_ports;
	uint32_t min_deq_timeout_ns;
	uint32_t max_deq_timeout_ns;
	int32_t max_num_events;
	uint32_t available_events;
	uint16_t rxq_pools;
	uint64_t *rxq_pool_array;
	uint8_t *rxq_pool_rcnt;
	uint16_t tim_ring_cnt;
	uint16_t *tim_ring_ids;
} __rte_cache_aligned;

/* Event port aka HWS */
struct ssows {
	uint8_t cur_tt;
	uint8_t cur_grp;
	uint8_t swtag_req;
	uint8_t *base;
	uint8_t *getwork;
	uint8_t *grps[SSO_MAX_VHGRP];
	uint8_t port;
	void *lookup_mem;
} __rte_cache_aligned;

static inline struct ssovf_evdev *
ssovf_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}

extern int otx_logtype_ssovf;

uint16_t ssows_enq(void *port, const struct rte_event *ev);
uint16_t ssows_enq_burst(void *port,
		const struct rte_event ev[], uint16_t nb_events);
uint16_t ssows_enq_new_burst(void *port,
		const struct rte_event ev[], uint16_t nb_events);
uint16_t ssows_enq_fwd_burst(void *port,
		const struct rte_event ev[], uint16_t nb_events);
typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
		ssows_handle_event_t fn, void *arg);
void ssows_reset(struct ssows *ws);
int ssovf_info(struct ssovf_info *info);
void *ssovf_bar(enum ssovf_type type, uint8_t id, uint8_t bar);
int test_eventdev_octeontx(void);
void ssovf_fastpath_fns_set(struct rte_eventdev *dev);
void *octeontx_fastpath_lookup_mem_get(void);

#endif /* __SSOVF_EVDEV_H__ */
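
/*
 * Illustrative sketch only, not part of the driver: one possible discovery
 * sequence built from the helpers declared above, kept here as usage
 * documentation. It assumes ssovf_bar() takes the VF index and BAR number
 * as its last two arguments; error handling and the probe context are
 * simplified (see ssovf_evdev.c for the real flow).
 *
 *	struct ssovf_info info;
 *	uint8_t *grp0_base;
 *
 *	if (ssovf_info(&info) < 0)
 *		return -ENODEV;
 *	ssovf_log_info("domain=%u grps=%u hws=%u", info.domain,
 *			info.total_ssovfs, info.total_ssowvfs);
 *
 *	grp0_base = ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
 *	if (grp0_base == NULL)
 *		return -ENODEV;
 *	ssovf_log_dbg("grp0 xaq_cnt=%" PRIu64,
 *			ssovf_read64(grp0_base + SSO_VHGRP_XAQ_CNT));
 */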