/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <string.h>

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_spinlock.h>

#include "octeontx_mbox.h"

/* Mbox operation timeout in seconds */
#define MBOX_WAIT_TIME_SEC	3
#define MAX_RAM_MBOX_LEN	((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)

/* Mbox channel state */
enum {
	MBOX_CHAN_STATE_REQ = 1,
	MBOX_CHAN_STATE_RES = 0,
};

/* Response messages */
enum {
	MBOX_RET_SUCCESS,
	MBOX_RET_INVALID,
	MBOX_RET_INTERNAL_ERR,
};

struct mbox {
	int init_once;
	uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
	uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
	uint16_t tag_own; /* Last tag which was written to own channel */
	rte_spinlock_t lock;
};

static struct mbox octeontx_mbox;

/*
 * Structure used for mbox synchronization
 * This structure sits at the beginning of the Mbox RAM and is used as the
 * main synchronization point for channel communication
 */
struct mbox_ram_hdr {
	union {
		uint64_t u64;
		struct {
			uint8_t chan_state : 1;
			uint8_t coproc : 7;
			uint8_t msg;
			uint8_t vfid;
			uint8_t res_code;
			uint16_t tag;
			uint16_t len;
		};
	};
};

int octeontx_logtype_mbox;

RTE_INIT(otx_init_log)
{
	octeontx_logtype_mbox = rte_log_register("pmd.octeontx.mbox");
	if (octeontx_logtype_mbox >= 0)
		rte_log_set_level(octeontx_logtype_mbox, RTE_LOG_NOTICE);
}

static inline void
mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
{
	uint16_t i;

	for (i = 0; i < size; i++)
		d[i] = s[i];
}
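
/*
 * Illustrative sketch (not part of the driver logic): the request/response
 * handshake that mbox_send_request() and mbox_wait_response() below
 * implement. The tag values are hypothetical and only show how chan_state
 * and tag are expected to progress.
 *
 *   VF                                        PF
 *   --                                        --
 *   tag_own = (old_tag + 2) & ~1
 *     e.g. old_tag = 5 -> tag_own = 6
 *   write hdr {chan_state = REQ, tag = 6}
 *   store to m->reg (raises PF mbox IRQ) -->  handle request, write response
 *                                             write hdr {chan_state = RES,
 *                                                        tag = 7}
 *   poll until chan_state == RES         <--
 *   expect rx_hdr.tag == ++tag_own (7),
 *   otherwise fail with -EINVAL
 */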

static inline void
mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
			const void *txmsg, uint16_t txsize)
{
	struct mbox_ram_hdr old_hdr;
	struct mbox_ram_hdr new_hdr = { {0} };
	uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
	uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);

	/*
	 * Initialize the channel with the tag left by the last send.
	 * On successful mbox send completion, the PF increments the tag by
	 * one. The sender can validate the integrity of the PF message with
	 * this scheme.
	 */
	old_hdr.u64 = rte_read64(ram_mbox_hdr);
	m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */

	/* Copy msg body */
	if (txmsg)
		mbox_msgcpy(ram_mbox_msg, txmsg, txsize);

	/* Prepare new hdr */
	new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
	new_hdr.coproc = hdr->coproc;
	new_hdr.msg = hdr->msg;
	new_hdr.vfid = hdr->vfid;
	new_hdr.tag = m->tag_own;
	new_hdr.len = txsize;

	/* Write the msg header */
	rte_write64(new_hdr.u64, ram_mbox_hdr);
	rte_smp_wmb();
	/* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
	rte_write64(0, m->reg);
}

static inline int
mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
			void *rxmsg, uint16_t rxsize)
{
	int res = 0, wait;
	uint16_t len;
	struct mbox_ram_hdr rx_hdr;
	uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
	uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);

	/* Wait for response */
	wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
	while (wait > 0) {
		rte_delay_us(100);
		rx_hdr.u64 = rte_read64(ram_mbox_hdr);
		if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
			break;
		--wait;
	}

	hdr->res_code = rx_hdr.res_code;
	m->tag_own++;

	/* Timeout */
	if (wait <= 0) {
		res = -ETIMEDOUT;
		goto error;
	}

	/* Tag mismatch */
	if (m->tag_own != rx_hdr.tag) {
		res = -EINVAL;
		goto error;
	}

	/* PF nacked the msg */
	if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
		res = -EBADMSG;
		goto error;
	}

	len = RTE_MIN(rx_hdr.len, rxsize);
	if (rxmsg)
		mbox_msgcpy(rxmsg, ram_mbox_msg, len);

	return len;

error:
	mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
			m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
			hdr->res_code);
	return res;
}

static inline int
mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
		uint16_t txsize, void *rxmsg, uint16_t rxsize)
{
	int res = -EINVAL;

	if (m->init_once == 0 || hdr == NULL ||
			txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
		mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
				m->init_once, hdr, txsize, rxsize);
		return res;
	}

	rte_spinlock_lock(&m->lock);

	mbox_send_request(m, hdr, txmsg, txsize);
	res = mbox_wait_response(m, hdr, rxmsg, rxsize);

	rte_spinlock_unlock(&m->lock);
	return res;
}

int
octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base)
{
	struct mbox *m = &octeontx_mbox;

	if (m->init_once)
		return -EALREADY;

	if (ram_mbox_base == NULL) {
		mbox_log_err("Invalid ram_mbox_base=%p", ram_mbox_base);
		return -EINVAL;
	}

	m->ram_mbox_base = ram_mbox_base;

	if (m->reg != NULL) {
		rte_spinlock_init(&m->lock);
		m->init_once = 1;
	}

	return 0;
}

int
octeontx_mbox_set_reg(uint8_t *reg)
{
	struct mbox *m = &octeontx_mbox;

	if (m->init_once)
		return -EALREADY;

	if (reg == NULL) {
		mbox_log_err("Invalid reg=%p", reg);
		return -EINVAL;
	}

	m->reg = reg;

	if (m->ram_mbox_base != NULL) {
		rte_spinlock_init(&m->lock);
		m->init_once = 1;
	}

	return 0;
}
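
/*
 * A minimal initialization sketch (hypothetical caller, not part of this
 * file): the mbox becomes usable only after both setters have been called,
 * in either order, which is when init_once flips to 1. The pointers below
 * are assumed to come from the device probe code.
 *
 *	octeontx_mbox_set_ram_mbox_base(ram_base);  // shared mbox RAM
 *	octeontx_mbox_set_reg(intr_reg);            // PF mbox doorbell register
 *	// octeontx_mbox_send() may be used from here on
 */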

int
octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
			uint16_t txlen, void *rxdata, uint16_t rxlen)
{
	struct mbox *m = &octeontx_mbox;

	RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EINVAL;

	return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
}
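
#if 0
/*
 * Illustrative usage sketch (compiled out): how a caller could issue a
 * request once the mbox has been initialized. The coprocessor id, message id
 * and the request/response layouts below are hypothetical placeholders, not
 * definitions from this driver.
 */
static int
example_mbox_query(void)
{
	struct octeontx_mbox_hdr hdr;
	uint8_t req = 0;	/* hypothetical request payload */
	uint8_t resp[16];	/* hypothetical response buffer */
	int res;

	hdr.coproc = 1;		/* hypothetical coprocessor id */
	hdr.msg = 2;		/* hypothetical message id */
	hdr.vfid = 0;

	/* Returns the response length on success or a negative errno */
	res = octeontx_mbox_send(&hdr, &req, sizeof(req), resp, sizeof(resp));
	if (res < 0)
		return res;

	return 0;
}
#endif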