/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <string.h>

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_spinlock.h>

#include "octeontx_mbox.h"

/* Mbox operation timeout in seconds */
#define MBOX_WAIT_TIME_SEC 3
#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)

/* Mbox channel state */
enum {
	MBOX_CHAN_STATE_REQ = 1,
	MBOX_CHAN_STATE_RES = 0,
};

/* Response messages */
enum {
	MBOX_RET_SUCCESS,
	MBOX_RET_INVALID,
	MBOX_RET_INTERNAL_ERR,
};

struct mbox {
	int init_once;
	uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
	uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
	uint16_t tag_own; /* Last tag which was written to own channel */
	rte_spinlock_t lock;
};

static struct mbox octeontx_mbox;

/*
 * Structure used for mbox synchronization.
 * This structure sits at the beginning of the Mbox RAM and is used as the
 * main synchronization point for channel communication.
 */
struct mbox_ram_hdr {
	union {
		uint64_t u64;
		struct {
			uint8_t chan_state : 1;
			uint8_t coproc : 7;
			uint8_t msg;
			uint8_t vfid;
			uint8_t res_code;
			uint16_t tag;
			uint16_t len;
		};
	};
};

int octeontx_logtype_mbox;

RTE_INIT(otx_init_log);
static void
otx_init_log(void)
{
	octeontx_logtype_mbox = rte_log_register("pmd.octeontx.mbox");
	if (octeontx_logtype_mbox >= 0)
		rte_log_set_level(octeontx_logtype_mbox, RTE_LOG_NOTICE);
}

static inline void
mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
{
	uint16_t i;

	for (i = 0; i < size; i++)
		d[i] = s[i];
}
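/*
 * Request/response handshake as seen from the VF side: the message body is
 * copied just after the 8-byte mbox_ram_hdr, the header is then written with
 * chan_state = MBOX_CHAN_STATE_REQ and an even tag, and a store to the mbox
 * register raises the PF interrupt. The PF is expected to place its response
 * after the header, advance the tag by one, fill in res_code/len and flip
 * chan_state back to MBOX_CHAN_STATE_RES, which mbox_wait_response() polls
 * for.
 */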
static inline void
mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
			const void *txmsg, uint16_t txsize)
{
	struct mbox_ram_hdr old_hdr;
	struct mbox_ram_hdr new_hdr = { {0} };
	uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
	uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);

	/*
	 * Initialize the channel with the tag left by the last send.
	 * On successful completion of a mbox send, the PF increments the
	 * tag by one; the sender can use this scheme to validate the
	 * integrity of the PF message.
	 */
	old_hdr.u64 = rte_read64(ram_mbox_hdr);
	m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */

	/* Copy msg body */
	if (txmsg)
		mbox_msgcpy(ram_mbox_msg, txmsg, txsize);

	/* Prepare new hdr */
	new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
	new_hdr.coproc = hdr->coproc;
	new_hdr.msg = hdr->msg;
	new_hdr.vfid = hdr->vfid;
	new_hdr.tag = m->tag_own;
	new_hdr.len = txsize;

	/* Write the msg header */
	rte_write64(new_hdr.u64, ram_mbox_hdr);
	rte_smp_wmb();
	/* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
	rte_write64(0, m->reg);
}

static inline int
mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
			void *rxmsg, uint16_t rxsize)
{
	int res = 0, wait;
	uint16_t len;
	struct mbox_ram_hdr rx_hdr;
	uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
	uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);

	/* Wait for response */
	wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
	while (wait > 0) {
		rte_delay_us(100);
		rx_hdr.u64 = rte_read64(ram_mbox_hdr);
		if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
			break;
		--wait;
	}

	hdr->res_code = rx_hdr.res_code;
	m->tag_own++;

	/* Timeout */
	if (wait <= 0) {
		res = -ETIMEDOUT;
		goto error;
	}

	/* Tag mismatch */
	if (m->tag_own != rx_hdr.tag) {
		res = -EINVAL;
		goto error;
	}

	/* PF nacked the msg */
	if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
		res = -EBADMSG;
		goto error;
	}

	len = RTE_MIN(rx_hdr.len, rxsize);
	if (rxmsg)
		mbox_msgcpy(rxmsg, ram_mbox_msg, len);

	return len;

error:
	mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
			m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
			hdr->res_code);
	return res;
}

static inline int
mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
		uint16_t txsize, void *rxmsg, uint16_t rxsize)
{
	int res = -EINVAL;

	if (m->init_once == 0 || hdr == NULL ||
		txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
		mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
				m->init_once, hdr, txsize, rxsize);
		return res;
	}

	rte_spinlock_lock(&m->lock);

	mbox_send_request(m, hdr, txmsg, txsize);
	res = mbox_wait_response(m, hdr, rxmsg, rxsize);

	rte_spinlock_unlock(&m->lock);
	return res;
}

int
octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base)
{
	struct mbox *m = &octeontx_mbox;

	if (m->init_once)
		return -EALREADY;

	if (ram_mbox_base == NULL) {
		mbox_log_err("Invalid ram_mbox_base=%p", ram_mbox_base);
		return -EINVAL;
	}

	m->ram_mbox_base = ram_mbox_base;

	/* The channel becomes usable only once both the RAM base and the
	 * interrupt register have been provided.
	 */
	if (m->reg != NULL) {
		rte_spinlock_init(&m->lock);
		m->init_once = 1;
	}

	return 0;
}

int
octeontx_mbox_set_reg(uint8_t *reg)
{
	struct mbox *m = &octeontx_mbox;

	if (m->init_once)
		return -EALREADY;

	if (reg == NULL) {
		mbox_log_err("Invalid reg=%p", reg);
		return -EINVAL;
	}

	m->reg = reg;

	if (m->ram_mbox_base != NULL) {
		rte_spinlock_init(&m->lock);
		m->init_once = 1;
	}

	return 0;
}
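/*
 * Public entry point: send a request to the PF and wait for its response.
 * Only the primary process may use the mailbox. On success, the number of
 * response bytes copied into rxdata is returned; on failure, a negative
 * errno value is returned and hdr->res_code is updated with the response
 * code read from the channel.
 */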
int
octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
		uint16_t txlen, void *rxdata, uint16_t rxlen)
{
	struct mbox *m = &octeontx_mbox;

	RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EINVAL;

	return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
}
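/*
 * Illustrative sketch only (not part of the driver): one way a caller could
 * bring the mailbox up and issue a request, assuming the RAM mbox base and
 * the interrupt register pointer were obtained from the device BARs
 * elsewhere. EXAMPLE_COPROC and EXAMPLE_MSG_GET_INFO are hypothetical
 * placeholders, not real OCTEON TX mailbox identifiers.
 */
#ifdef OCTEONTX_MBOX_USAGE_EXAMPLE
#define EXAMPLE_COPROC		0x1 /* hypothetical coprocessor id */
#define EXAMPLE_MSG_GET_INFO	0x1 /* hypothetical message id */

static int
example_mbox_query(uint8_t *ram_mbox_base, uint8_t *mbox_reg)
{
	struct octeontx_mbox_hdr hdr;
	uint64_t rxdata = 0;
	int res;

	/* Both halves of the channel must be set before the first send */
	res = octeontx_mbox_set_ram_mbox_base(ram_mbox_base);
	if (res < 0 && res != -EALREADY)
		return res;

	res = octeontx_mbox_set_reg(mbox_reg);
	if (res < 0 && res != -EALREADY)
		return res;

	hdr.coproc = EXAMPLE_COPROC;
	hdr.msg = EXAMPLE_MSG_GET_INFO;
	hdr.vfid = 0;

	/* No request payload; accept up to sizeof(rxdata) response bytes */
	res = octeontx_mbox_send(&hdr, NULL, 0, &rxdata, sizeof(rxdata));
	if (res < 0)
		return res; /* -ETIMEDOUT, -EINVAL or -EBADMSG */

	/* res holds the number of response bytes copied into rxdata */
	return 0;
}
#endif /* OCTEONTX_MBOX_USAGE_EXAMPLE */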