/*
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include <rte_memzone.h>
#include <rte_errno.h>

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_iov_api.h"
#include "ecore_mcp_api.h"
#include "ecore_l2_api.h"

/* Array of memzone pointers */
static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
/* Counter to track current memzone allocated */
uint16_t ecore_mz_count;

/* Round n up to the nearest power of two (0 stays 0) */
unsigned long qede_log2_align(unsigned long n)
{
	unsigned long ret = n ? 1 : 0;
	unsigned long _n = n >> 1;

	while (_n) {
		_n >>= 1;
		ret <<= 1;
	}

	if (ret < n)
		ret <<= 1;

	return ret;
}

/* Integer floor(log2(val)) */
u32 qede_osal_log2(u32 val)
{
	u32 log = 0;

	while (val >>= 1)
		log++;

	return log;
}

inline void qede_set_bit(u32 nr, unsigned long *addr)
{
	__sync_fetch_and_or(addr, (1UL << nr));
}

inline void qede_clr_bit(u32 nr, unsigned long *addr)
{
	__sync_fetch_and_and(addr, ~(1UL << nr));
}

inline bool qede_test_bit(u32 nr, unsigned long *addr)
{
	bool res;

	rte_mb();
	res = ((*addr) & (1UL << nr)) != 0;
	rte_mb();
	return res;
}

/* Index of the first set bit in word, or OSAL_BITS_PER_UL if none */
static inline u32 qede_ffb(unsigned long word)
{
	unsigned long first_bit;

	first_bit = __builtin_ffsl(word);
	return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL;
}

inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
{
	u32 i;
	u32 nwords = 0;

	OSAL_BUILD_BUG_ON(!limit);
	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
	for (i = 0; i < nwords; i++)
		if (addr[i] != 0)
			break;

	return (i == nwords) ? limit :
		i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
}

/* Index of the first clear bit in word, or OSAL_BITS_PER_UL if none */
static inline u32 qede_ffz(unsigned long word)
{
	unsigned long first_zero;

	first_zero = __builtin_ffsl(~word);
	return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
}

inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
{
	u32 i;
	u32 nwords = 0;

	OSAL_BUILD_BUG_ON(!limit);
	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
	for (i = 0; i < nwords && ~(addr[i]) == 0; i++)
		;
	return (i == nwords) ? limit :
		i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}
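/*
 * Illustrative example (not part of the original file): on a 64-bit
 * build OSAL_BITS_PER_UL is 64, so for a two-word bitmap
 *
 *     unsigned long map[2] = { 0x0UL, 0x8UL };
 *
 * qede_find_first_bit(map, 128) skips word 0 (all clear) and lands on
 * word 1, where __builtin_ffsl(0x8) == 4, yielding 64 + 3 = 67.
 * qede_find_first_zero_bit(map, 128) returns 0, the first clear bit.
 */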
void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
			      __rte_unused struct vf_pf_resc_request *resc_req,
			      struct ecore_vf_acquire_sw_info *vf_sw_info)
{
	vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
	vf_sw_info->override_fw_version = 1;
}

void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
			      dma_addr_t *phys, size_t size)
{
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	if (ecore_mz_count >= RTE_MAX_MEMZONE) {
		DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
		       RTE_MAX_MEMZONE);
		*phys = 0;
		return OSAL_NULL;
	}

	/* Derive a unique memzone name from the timer cycle counter */
	OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
	snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
		 (unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_master_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
					 RTE_MEMZONE_IOVA_CONTIG,
					 RTE_CACHE_LINE_SIZE);
	if (!mz) {
		DP_ERR(p_dev, "Unable to allocate DMA memory "
		       "of size %zu bytes - %s\n",
		       size, rte_strerror(rte_errno));
		*phys = 0;
		return OSAL_NULL;
	}
	*phys = mz->iova;
	ecore_mz_mapping[ecore_mz_count++] = mz;
	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "Allocated dma memory size=%zu phys=0x%lx virt=%p core=%d\n",
		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
	return mz->addr;
}

void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
				      dma_addr_t *phys, size_t size, int align)
{
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	if (ecore_mz_count >= RTE_MAX_MEMZONE) {
		DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
		       RTE_MAX_MEMZONE);
		*phys = 0;
		return OSAL_NULL;
	}

	/* Derive a unique memzone name from the timer cycle counter */
	OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
	snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
		 (unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_master_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
					 RTE_MEMZONE_IOVA_CONTIG, align);
	if (!mz) {
		DP_ERR(p_dev, "Unable to allocate DMA memory "
		       "of size %zu bytes - %s\n",
		       size, rte_strerror(rte_errno));
		*phys = 0;
		return OSAL_NULL;
	}
	*phys = mz->iova;
	ecore_mz_mapping[ecore_mz_count++] = mz;
	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "Allocated aligned dma memory size=%zu phys=0x%lx virt=%p core=%d\n",
		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
	return mz->addr;
}

void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
{
	uint16_t j;

	for (j = 0; j < ecore_mz_count; j++) {
		if (phys == ecore_mz_mapping[j]->iova) {
			DP_VERBOSE(p_dev, ECORE_MSG_SP,
				   "Free memzone %s\n",
				   ecore_mz_mapping[j]->name);
			rte_memzone_free(ecore_mz_mapping[j]);
			/* Compact the array so live entries stay contiguous */
			while (j < ecore_mz_count - 1) {
				ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
				j++;
			}
			ecore_mz_count--;
			return;
		}
	}

	DP_ERR(p_dev, "Unexpected memory free request\n");
}
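/*
 * Usage sketch (assumed caller pattern, not taken from this file): the
 * ecore OSAL layer pairs these as alloc-by-size / free-by-address, e.g.
 *
 *     dma_addr_t phys;
 *     void *virt = osal_dma_alloc_coherent(p_dev, &phys, 4096);
 *     if (virt)
 *             osal_dma_free_mem(p_dev, phys);
 *
 * which is why osal_dma_free_mem() must look the memzone up by its iova
 * rather than by the virtual address.
 */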
#ifdef CONFIG_ECORE_ZIPPED_FW
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
		    u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
	if (rc != Z_OK) {
		DP_ERR(p_hwfn, "zlib init failed, rc = %d\n", rc);
		return 0;
	}

	rc = inflate(p_hwfn->stream, Z_FINISH);
	inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_ERR(p_hwfn, "FW unzip error: %s, rc=%d\n",
		       p_hwfn->stream->msg, rc);
		return 0;
	}

	/* Return the decompressed size in dwords */
	return p_hwfn->stream->total_out / 4;
}
#endif

void
qede_get_mcp_proto_stats(struct ecore_dev *edev,
			 enum ecore_mcp_protocol_type type,
			 union ecore_mcp_protocol_stats *stats)
{
	struct ecore_eth_stats lan_stats;

	if (type == ECORE_MCP_LAN_STATS) {
		ecore_get_vport_stats(edev, &lan_stats);

		/* @DPDK */
		stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;

		/* fcs_err is not supported; report a sentinel value */
		stats->lan_stats.fcs_err = -1;
	} else {
		DP_INFO(edev, "Statistics request type %d not supported\n",
			type);
	}
}

void
qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
{
	char err_str[64];

	switch (err_type) {
	case ECORE_HW_ERR_FAN_FAIL:
		strcpy(err_str, "Fan Failure");
		break;
	case ECORE_HW_ERR_MFW_RESP_FAIL:
		strcpy(err_str, "MFW Response Failure");
		break;
	case ECORE_HW_ERR_HW_ATTN:
		strcpy(err_str, "HW Attention");
		break;
	case ECORE_HW_ERR_DMAE_FAIL:
		strcpy(err_str, "DMAE Failure");
		break;
	case ECORE_HW_ERR_RAMROD_FAIL:
		strcpy(err_str, "Ramrod Failure");
		break;
	case ECORE_HW_ERR_FW_ASSERT:
		strcpy(err_str, "FW Assertion");
		break;
	default:
		strcpy(err_str, "Unknown");
	}

	DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
	ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
}

/* Bit-serial reflected CRC-32 (polynomial 0xedb88320); the caller is
 * responsible for any initial seed and final inversion.
 */
u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
{
	int i;

	while (length--) {
		crc ^= *ptr++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}
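/*
 * Worked example (standard CRC-32 check value, added for illustration):
 * since qede_crc32() applies no built-in init or final inversion, a
 * caller wanting the conventional CRC-32 supplies both:
 *
 *     u32 crc = qede_crc32(0xffffffff, (u8 *)"123456789", 9) ^ 0xffffffff;
 *     // crc == 0xcbf43926, the well-known CRC-32 check value
 */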