xref: /dpdk/drivers/net/qede/base/bcm_osal.c (revision cb440babbd45a80c059f8bc80e87c48d09086fd7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6 
7 #include <rte_memzone.h>
8 #include <rte_errno.h>
9 
10 #include "bcm_osal.h"
11 #include "ecore.h"
12 #include "ecore_hw.h"
13 #include "ecore_dev_api.h"
14 #include "ecore_iov_api.h"
15 #include "ecore_mcp_api.h"
16 #include "ecore_l2_api.h"
17 
18 /* Array of memzone pointers */
19 static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
20 /* Counter to track current memzone allocated */
21 static uint16_t ecore_mz_count;
22 
/* Round @n up to the nearest power of two (0 maps to 0, 1 maps to 1). */
unsigned long qede_log2_align(unsigned long n)
{
	unsigned long pow2 = n ? 1 : 0;
	unsigned long rem = n >> 1;

	/* Grow pow2 to the highest power of two not exceeding n. */
	for (; rem != 0; rem >>= 1)
		pow2 <<= 1;

	/* n was not an exact power of two - round up to the next one. */
	if (pow2 < n)
		pow2 <<= 1;

	return pow2;
}
38 
/* Integer floor(log2(val)); returns 0 for val == 0 or val == 1. */
u32 qede_osal_log2(u32 val)
{
	u32 result;

	for (result = 0; val > 1; val >>= 1)
		result++;

	return result;
}
48 
/* Index of the least-significant set bit in @word, or OSAL_BITS_PER_UL
 * when no bit is set at all.
 */
static inline u32 qede_ffb(unsigned long word)
{
	if (word == 0)
		return OSAL_BITS_PER_UL;

	/* __builtin_ffsl() is 1-based; convert to a 0-based bit index. */
	return (u32)__builtin_ffsl(word) - 1;
}
56 
57 inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
58 {
59 	u32 i;
60 	u32 nwords = 0;
61 	OSAL_BUILD_BUG_ON(!limit);
62 	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
63 	for (i = 0; i < nwords; i++)
64 		if (addr[i] != 0)
65 			break;
66 
67 	return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
68 }
69 
/* Index of the least-significant clear bit in @word, or OSAL_BITS_PER_UL
 * when every bit is set.
 */
static inline u32 qede_ffz(unsigned long word)
{
	if (~word == 0)
		return OSAL_BITS_PER_UL;

	/* The first set bit of the complement is the first clear bit. */
	return (u32)__builtin_ffsl(~word) - 1;
}
77 
/* Return the index of the first clear bit in the bitmap @addr, scanning
 * @limit bits, or @limit if every bit is set.
 *
 * NOTE(review): @addr is a u32 array, yet nwords is computed in units of
 * OSAL_BITS_PER_UL (bits per unsigned long - 64 on LP64 targets).  On
 * 64-bit builds this walks the array in 32-bit steps while counting
 * 64-bit words, so the upper half of each counted word is never examined.
 * Confirm against callers whether 32-bit word semantics are intended here.
 */
inline u32 qede_find_first_zero_bit(u32 *addr, u32 limit)
{
	u32 i;
	u32 nwords = 0;
	OSAL_BUILD_BUG_ON(!limit);
	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
	/* Skip words that are entirely ones (contain no zero bit). */
	for (i = 0; i < nwords && ~(addr[i]) == 0; i++);
	return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}
87 
88 void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
89 			      __rte_unused struct vf_pf_resc_request *resc_req,
90 			      struct ecore_vf_acquire_sw_info *vf_sw_info)
91 {
92 	vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
93 	vf_sw_info->override_fw_version = 1;
94 }
95 
96 void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
97 			      dma_addr_t *phys, size_t size)
98 {
99 	const struct rte_memzone *mz;
100 	char mz_name[RTE_MEMZONE_NAMESIZE];
101 	uint32_t core_id = rte_lcore_id();
102 	unsigned int socket_id;
103 
104 	if (ecore_mz_count >= RTE_MAX_MEMZONE) {
105 		DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
106 		       RTE_MAX_MEMZONE);
107 		*phys = 0;
108 		return OSAL_NULL;
109 	}
110 
111 	OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
112 	snprintf(mz_name, sizeof(mz_name), "%lx",
113 					(unsigned long)rte_get_timer_cycles());
114 	if (core_id == (unsigned int)LCORE_ID_ANY)
115 		core_id = rte_get_master_lcore();
116 	socket_id = rte_lcore_to_socket_id(core_id);
117 	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
118 			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
119 	if (!mz) {
120 		DP_ERR(p_dev, "Unable to allocate DMA memory "
121 		       "of size %zu bytes - %s\n",
122 		       size, rte_strerror(rte_errno));
123 		*phys = 0;
124 		return OSAL_NULL;
125 	}
126 	*phys = mz->iova;
127 	ecore_mz_mapping[ecore_mz_count++] = mz;
128 	DP_VERBOSE(p_dev, ECORE_MSG_SP,
129 		   "Allocated dma memory size=%zu phys=0x%lx"
130 		   " virt=%p core=%d\n",
131 		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
132 	return mz->addr;
133 }
134 
135 void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
136 				      dma_addr_t *phys, size_t size, int align)
137 {
138 	const struct rte_memzone *mz;
139 	char mz_name[RTE_MEMZONE_NAMESIZE];
140 	uint32_t core_id = rte_lcore_id();
141 	unsigned int socket_id;
142 
143 	if (ecore_mz_count >= RTE_MAX_MEMZONE) {
144 		DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
145 		       RTE_MAX_MEMZONE);
146 		*phys = 0;
147 		return OSAL_NULL;
148 	}
149 
150 	OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
151 	snprintf(mz_name, sizeof(mz_name), "%lx",
152 					(unsigned long)rte_get_timer_cycles());
153 	if (core_id == (unsigned int)LCORE_ID_ANY)
154 		core_id = rte_get_master_lcore();
155 	socket_id = rte_lcore_to_socket_id(core_id);
156 	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
157 			RTE_MEMZONE_IOVA_CONTIG, align);
158 	if (!mz) {
159 		DP_ERR(p_dev, "Unable to allocate DMA memory "
160 		       "of size %zu bytes - %s\n",
161 		       size, rte_strerror(rte_errno));
162 		*phys = 0;
163 		return OSAL_NULL;
164 	}
165 	*phys = mz->iova;
166 	ecore_mz_mapping[ecore_mz_count++] = mz;
167 	DP_VERBOSE(p_dev, ECORE_MSG_SP,
168 		   "Allocated aligned dma memory size=%zu phys=0x%lx"
169 		   " virt=%p core=%d\n",
170 		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
171 	return mz->addr;
172 }
173 
174 void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
175 {
176 	uint16_t j;
177 
178 	for (j = 0 ; j < ecore_mz_count; j++) {
179 		if (phys == ecore_mz_mapping[j]->iova) {
180 			DP_VERBOSE(p_dev, ECORE_MSG_SP,
181 				"Free memzone %s\n", ecore_mz_mapping[j]->name);
182 			rte_memzone_free(ecore_mz_mapping[j]);
183 			while (j < ecore_mz_count - 1) {
184 				ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
185 				j++;
186 			}
187 			ecore_mz_count--;
188 			return;
189 		}
190 	}
191 
192 	DP_ERR(p_dev, "Unexpected memory free request\n");
193 }
194 
195 #ifdef CONFIG_ECORE_ZIPPED_FW
/* Inflate the zipped FW image @input_buf (@input_len bytes) into
 * @unzip_buf (capacity @max_size bytes) using the hwfn's pre-allocated
 * zlib stream.
 *
 * Returns the unzipped length in 32-bit dwords, or 0 on any zlib error.
 */
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
		    u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	/* Point the reusable stream at the caller's input/output buffers. */
	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	/* MAX_WBITS: expect a zlib-wrapped stream with the largest window. */
	rc = inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_ERR(p_hwfn,
			   "zlib init failed, rc = %d\n", rc);
		return 0;
	}

	/* Z_FINISH: the whole image is in memory, decompress in one call. */
	rc = inflate(p_hwfn->stream, Z_FINISH);
	inflateEnd(p_hwfn->stream);

	/* Z_STREAM_END is the normal completion code for Z_FINISH. */
	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_ERR(p_hwfn,
			   "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
			   rc);
		return 0;
	}

	/* Consumers expect the size in dwords, not bytes. */
	return p_hwfn->stream->total_out / 4;
}
226 #endif
227 
228 void
229 qede_get_mcp_proto_stats(struct ecore_dev *edev,
230 			 enum ecore_mcp_protocol_type type,
231 			 union ecore_mcp_protocol_stats *stats)
232 {
233 	struct ecore_eth_stats lan_stats;
234 
235 	if (type == ECORE_MCP_LAN_STATS) {
236 		ecore_get_vport_stats(edev, &lan_stats);
237 
238 		/* @DPDK */
239 		stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
240 		stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;
241 
242 		stats->lan_stats.fcs_err = -1;
243 	} else {
244 		DP_INFO(edev, "Statistics request type %d not supported\n",
245 		       type);
246 	}
247 }
248 
249 static void qede_hw_err_handler(void *dev, enum ecore_hw_err_type err_type)
250 {
251 	struct ecore_dev *edev = dev;
252 
253 	switch (err_type) {
254 	case ECORE_HW_ERR_FAN_FAIL:
255 		break;
256 
257 	case ECORE_HW_ERR_MFW_RESP_FAIL:
258 	case ECORE_HW_ERR_HW_ATTN:
259 	case ECORE_HW_ERR_DMAE_FAIL:
260 	case ECORE_HW_ERR_RAMROD_FAIL:
261 	case ECORE_HW_ERR_FW_ASSERT:
262 		OSAL_SAVE_FW_DUMP(0); /* Using port 0 as default port_id */
263 		break;
264 
265 	default:
266 		DP_NOTICE(edev, false, "Unknown HW error [%d]\n", err_type);
267 		return;
268 	}
269 }
270 
271 void
272 qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
273 {
274 	char err_str[64];
275 
276 	switch (err_type) {
277 	case ECORE_HW_ERR_FAN_FAIL:
278 		strcpy(err_str, "Fan Failure");
279 		break;
280 	case ECORE_HW_ERR_MFW_RESP_FAIL:
281 		strcpy(err_str, "MFW Response Failure");
282 		break;
283 	case ECORE_HW_ERR_HW_ATTN:
284 		strcpy(err_str, "HW Attention");
285 		break;
286 	case ECORE_HW_ERR_DMAE_FAIL:
287 		strcpy(err_str, "DMAE Failure");
288 		break;
289 	case ECORE_HW_ERR_RAMROD_FAIL:
290 		strcpy(err_str, "Ramrod Failure");
291 		break;
292 	case ECORE_HW_ERR_FW_ASSERT:
293 		strcpy(err_str, "FW Assertion");
294 		break;
295 	default:
296 		strcpy(err_str, "Unknown");
297 	}
298 
299 	DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
300 
301 	qede_hw_err_handler(p_hwfn->p_dev, err_type);
302 
303 	ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
304 }
305 
/* Bitwise CRC-32 (reflected polynomial 0xEDB88320) over @length bytes.
 * The caller supplies the seed and applies any final XOR; seeding and
 * finalizing with 0xFFFFFFFF yields the standard CRC-32.
 */
u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
{
	u32 byte_idx, bit;

	for (byte_idx = 0; byte_idx < length; byte_idx++) {
		crc ^= ptr[byte_idx];
		for (bit = 0; bit < 8; bit++) {
			if (crc & 1)
				crc = (crc >> 1) ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}
	return crc;
}
317 
/* Write the platform identification string ("<DPDK version>.") into
 * @buf_str; snprintf truncates safely to @buf_size.
 */
void qed_set_platform_str(struct ecore_hwfn *p_hwfn,
			  char *buf_str, u32 buf_size)
{
	snprintf(buf_str, buf_size, "%s.", rte_version());
}
323