/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
#ifndef _OTX_CRYPTODEV_HW_ACCESS_H_
#define _OTX_CRYPTODEV_HW_ACCESS_H_

#include <stdbool.h>

#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_memory.h>
#include <rte_prefetch.h>

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"
#include "cpt_pmd_logs.h"

/* Interval (in ms) between successive polls of the misc interrupt status */
#define CPT_INTR_POLL_INTERVAL_MS	(50)

/* Default command queue length: the queue is a ring of
 * DEFAULT_CMD_QCHUNKS chunks of DEFAULT_CMD_QCHUNK_SIZE instructions each.
 */
#define DEFAULT_CMD_QCHUNKS		2
#define DEFAULT_CMD_QCHUNK_SIZE		1023
#define DEFAULT_CMD_QLEN \
		(DEFAULT_CMD_QCHUNK_SIZE * DEFAULT_CMD_QCHUNKS)

/* Base of the memory-mapped CSR window for a VF */
#define CPT_CSR_REG_BASE(cpt)		((cpt)->reg_base)

/* Read hw register (relaxed ordering: callers insert barriers as needed) */
#define CPT_READ_CSR(__hw_addr, __offset) \
	rte_read64_relaxed((uint8_t *)__hw_addr + __offset)

/* Write hw register (relaxed ordering: callers insert barriers as needed) */
#define CPT_WRITE_CSR(__hw_addr, __offset, __val) \
	rte_write64_relaxed((__val), ((uint8_t *)__hw_addr + __offset))

/* cpt instance: embedded at the start of struct cpt_vf so that a
 * cpt_instance pointer can be cast back to its owning cpt_vf.
 */
struct cpt_instance {
	uint32_t queue_id;
	uintptr_t rsvd;
};

struct command_chunk {
	/** 128-byte aligned real_vaddr */
	uint8_t *head;
	/** 128-byte aligned real_dma_addr */
	phys_addr_t dma_addr;
};

/**
 * Command queue structure
 */
struct command_queue {
	/** Command queue host write idx */
	uint32_t idx;
	/** Command queue chunk */
	uint32_t cchunk;
	/** Command queue head; instructions are inserted here */
	uint8_t *qhead;
	/** Command chunk list head */
	struct command_chunk chead[DEFAULT_CMD_QCHUNKS];
};

/**
 * CPT VF device structure
 */
struct cpt_vf {
	/** CPT instance (must stay first: cpt_instance* is cast to cpt_vf*) */
	struct cpt_instance instance;
	/** Register start address */
	uint8_t *reg_base;
	/** Command queue information */
	struct command_queue cqueue;
	/** Pending queue information */
	struct pending_queue pqueue;
	/** Meta information per vf */
	struct cptvf_meta_info meta_info;

	/** Below fields are accessed only in control path */

	/** Env specific pdev representing the pci dev */
	void *pdev;
	/** Calculated queue size */
	uint32_t qsize;
	/** Device index (0...CPT_MAX_VQ_NUM)*/
	uint8_t vfid;
	/** VF type of cpt_vf_type_t (SE_TYPE(2) or AE_TYPE(1) */
	uint8_t vftype;
	/** VF group (0 - 8) */
	uint8_t vfgrp;
	/** Operating node: Bits (46:44) in BAR0 address */
	uint8_t node;

	/** VF-PF mailbox communication */

	/** Flag if acked */
	bool pf_acked;
	/** Flag if not acked */
	bool pf_nacked;

	/** Device name */
	char dev_name[32];
} __rte_cache_aligned;

/*
 * CPT Registers map for 81xx
 *
 * (a) selects the CPT block, (b) the virtual queue; offsets are relative
 * to the VF BAR. The masks on (a) mirror the hardware register manual.
 */

/* VF registers */
#define CPTX_VQX_CTL(a, b)		(0x0000100ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_SADDR(a, b)		(0x0000200ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_DONE_WAIT(a, b)	(0x0000400ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_INPROG(a, b)		(0x0000410ll + 0x1000000000ll * \
					 ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_DONE(a, b)		(0x0000420ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ACK(a, b)		(0x0000440ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_INT_W1S(a, b)	(0x0000460ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_INT_W1C(a, b)	(0x0000468ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ENA_W1S(a, b)	(0x0000470ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ENA_W1C(a, b)	(0x0000478ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_INT(a, b)		(0x0000500ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_INT_W1S(a, b)	(0x0000508ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_ENA_W1S(a, b)	(0x0000510ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_ENA_W1C(a, b)	(0x0000518ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DOORBELL(a, b)		(0x0000600ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VFX_PF_MBOXX(a, b, c)	(0x0001000ll + 0x1000000000ll * \
					 ((a) & 0x1) + 0x100000ll * (b) + \
					 8ll * ((c) & 0x1))

/* VF HAL functions
 *
 * Implemented in the corresponding .c file; comments below describe the
 * contract as used by the inline helpers in this header.
 */

/* Poll and service pending misc (mailbox/error) interrupt causes on the VF.
 * NOTE(review): body not visible here; invoked from check_nb_command_id()
 * on request timeout - confirm exact semantics in the implementation.
 */
void
otx_cpt_poll_misc(struct cpt_vf *cptvf);

/* One-time hardware init of a VF: binds pdev/reg_base/name to the cptvf. */
int
otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name);

/* Tear down a device previously set up with otx_cpt_hw_init(). */
int
otx_cpt_deinit_device(void *dev);

/* Acquire a cpt_instance for the given engine group; returned via *instance. */
int
otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance);

/* Release an instance obtained from otx_cpt_get_resource(). */
int
otx_cpt_put_resource(struct cpt_instance *instance);

/* Start/stop the VF queue (control path). */
int
otx_cpt_start_device(void *cptvf);

void
otx_cpt_stop_device(void *cptvf);

/* Write to VQX_DOORBELL register
 *
 * Notifies hardware that @val new instructions are available on this VF's
 * queue. The doorbell counts 64-bit words, hence the multiply by 8
 * (one CPT instruction = 8 words).
 */
static __rte_always_inline void
otx_cpt_write_vq_doorbell(struct cpt_vf *cptvf, uint32_t val)
{
	cptx_vqx_doorbell_t vqx_dbell;

	vqx_dbell.u = 0;
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
		      CPTX_VQX_DOORBELL(0, 0), vqx_dbell.u);
}

/* Read back the current doorbell count (in 8-word instruction units
 * as programmed; hardware decrements it as instructions are fetched).
 */
static __rte_always_inline uint32_t
otx_cpt_read_vq_doorbell(struct cpt_vf *cptvf)
{
	cptx_vqx_doorbell_t vqx_dbell;

	vqx_dbell.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
				   CPTX_VQX_DOORBELL(0, 0));
	return vqx_dbell.s.dbell_cnt;
}

/* Publish @count queued instructions to hardware.
 *
 * The smp write barrier orders the instruction stores in the command
 * queue before the doorbell write, so hardware never fetches a
 * partially written instruction.
 */
static __rte_always_inline void
otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	/* Memory barrier to flush pending writes */
	rte_smp_wmb();
	otx_cpt_write_vq_doorbell(cptvf, count);
}

/* Return a pointer to the next free instruction slot in the current
 * command queue chunk (does not advance the index; see mark_cpt_inst()).
 */
static __rte_always_inline void *
get_cpt_inst(struct command_queue *cqueue)
{
	CPT_LOG_DP_DEBUG("CPT queue idx %u\n", cqueue->idx);
	return &cqueue->qhead[cqueue->idx * CPT_INST_SIZE];
}

/* Fill the next command-queue slot with the hardware instruction built
 * from @req (a struct cpt_request_info whose EI0..EI3 words are already
 * prepared by the caller). W0/W2 are zeroed; W1 carries the DMA address
 * where hardware writes the completion code; wq_ptr stays 0 (no WQE post).
 */
static __rte_always_inline void
fill_cpt_inst(struct cpt_instance *instance, void *req)
{
	struct command_queue *cqueue;
	cpt_inst_s_t *cpt_ist_p;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct cpt_request_info *user_req = (struct cpt_request_info *)req;
	cqueue = &cptvf->cqueue;
	cpt_ist_p = get_cpt_inst(cqueue);
	rte_prefetch_non_temporal(cpt_ist_p);

	/* EI0, EI1, EI2, EI3 are already prepared */
	/* HW W0 */
	cpt_ist_p->u[0] = 0;
	/* HW W1 */
	cpt_ist_p->s8x.res_addr = user_req->comp_baddr;
	/* HW W2 */
	cpt_ist_p->u[2] = 0;
	/* HW W3 */
	cpt_ist_p->s8x.wq_ptr = 0;

	/* MC EI0 */
	cpt_ist_p->s8x.ei0 = user_req->ist.ei0;
	/* MC EI1 */
	cpt_ist_p->s8x.ei1 = user_req->ist.ei1;
	/* MC EI2 */
	cpt_ist_p->s8x.ei2 = user_req->ist.ei2;
	/* MC EI3 */
	cpt_ist_p->s8x.ei3 = user_req->ist.ei3;
}

/* Commit the slot returned by get_cpt_inst(): advance the write index,
 * and when the current chunk is exhausted wrap to the next chunk in the
 * circular chunk list (MOD_INC wraps cchunk modulo DEFAULT_CMD_QCHUNKS).
 */
static __rte_always_inline void
mark_cpt_inst(struct cpt_instance *instance)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct command_queue *queue = &cptvf->cqueue;
	if (unlikely(++queue->idx >= DEFAULT_CMD_QCHUNK_SIZE)) {
		uint32_t cchunk = queue->cchunk;
		MOD_INC(cchunk, DEFAULT_CMD_QCHUNKS);
		queue->qhead = queue->chead[cchunk].head;
		queue->idx = 0;
		queue->cchunk = cchunk;
	}
}

/* Check completion status of a previously submitted request.
 *
 * Polls the hardware completion word (volatile; written by the device via
 * DMA at user_req->completion_addr) and returns:
 *   - ERR_REQ_PENDING while hardware reports CPT_8X_COMP_E_NOTDONE and the
 *     deadline (plus an extra_time grace of up to TIME_IN_RESET_COUNT
 *     polls) has not elapsed;
 *   - ERR_REQ_TIMEOUT once that grace is exhausted (also triggers a misc
 *     interrupt poll to surface any hardware error);
 *   - 0 on CPT_8X_COMP_E_GOOD with a zero microcode code, else the
 *     microcode completion code read from *alternate_caddr;
 *   - for SWERR/FAULT, the microcode code, or ERR_BAD_ALT_CCODE when the
 *     microcode wrote nothing.
 * NOTE(review): assumes *alternate_caddr holds the microcode completion
 * byte set up at request-build time - confirm against the request path.
 */
static __rte_always_inline uint8_t
check_nb_command_id(struct cpt_request_info *user_req,
		    struct cpt_instance *instance)
{
	uint8_t ret = ERR_REQ_PENDING;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	volatile cpt_res_s_t *cptres;

	cptres = (volatile cpt_res_s_t *)user_req->completion_addr;

	if (unlikely(cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE)) {
		/*
		 * Wait for some time for this command to get completed
		 * before timing out
		 */
		if (rte_get_timer_cycles() < user_req->time_out)
			return ret;
		/*
		 * TODO: See if alternate caddr can be used to not loop
		 * longer than needed.
		 */
		if ((cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE) &&
		    (user_req->extra_time < TIME_IN_RESET_COUNT)) {
			user_req->extra_time++;
			return ret;
		}

		/* Re-check: the device may have completed while we polled */
		if (cptres->s8x.compcode != CPT_8X_COMP_E_NOTDONE)
			goto complete;

		ret = ERR_REQ_TIMEOUT;
		CPT_LOG_DP_ERR("Request %p timedout", user_req);
		otx_cpt_poll_misc(cptvf);
		goto exit;
	}

complete:
	if (likely(cptres->s8x.compcode == CPT_8X_COMP_E_GOOD)) {
		ret = 0; /* success */
		if (unlikely((uint8_t)*user_req->alternate_caddr)) {
			/* Hardware OK but microcode reported an error */
			ret = (uint8_t)*user_req->alternate_caddr;
			CPT_LOG_DP_ERR("Request %p : failed with microcode"
				" error, MC completion code : 0x%x", user_req,
				ret);
		}
		CPT_LOG_DP_DEBUG("MC status %.8x\n",
			   *((volatile uint32_t *)user_req->alternate_caddr));
		CPT_LOG_DP_DEBUG("HW status %.8x\n",
			   *((volatile uint32_t *)user_req->completion_addr));
	} else if ((cptres->s8x.compcode == CPT_8X_COMP_E_SWERR) ||
		   (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT)) {
		ret = (uint8_t)*user_req->alternate_caddr;
		if (!ret)
			ret = ERR_BAD_ALT_CCODE;
		CPT_LOG_DP_DEBUG("Request %p : failed with %s : err code :%x",
			   user_req,
			   (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT) ?
			   "DMA Fault" : "Software error", ret);
	} else {
		CPT_LOG_DP_ERR("Request %p : unexpected completion code %d",
			   user_req, cptres->s8x.compcode);
		ret = (uint8_t)*user_req->alternate_caddr;
	}

exit:
	return ret;
}

#endif /* _OTX_CRYPTODEV_HW_ACCESS_H_ */