/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_memzone.h>
#include <rte_errno.h>

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_dev_api.h"
#include "ecore_iov_api.h"
#include "ecore_mcp_api.h"
#include "ecore_l2_api.h"
#include "../qede_sriov.h"

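/* OSAL hook invoked when a PF-to-VF message event arrives; the actual
 * handling is deferred by scheduling the IOV workqueue message task.
 */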
int osal_pf_vf_msg(struct ecore_hwfn *p_hwfn)
{
	int rc;

	rc = qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
	if (rc) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to schedule alarm handler rc=%d\n", rc);
	}

	return rc;
}

void osal_vf_flr_update(struct ecore_hwfn *p_hwfn)
{
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

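/* Poll-mode substitute for the slow-path interrupt DPC: run the ecore
 * slow-path handler under the SPQ lock so it cannot race with ramrod
 * submission on the same hwfn.
 */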
void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;

	if (!p_hwfn)
		return;

	OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
	OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
}

/* Array of memzone pointers */
static const struct rte_memzone **ecore_mz_mapping;
/* Number of memzones currently allocated */
static uint16_t ecore_mz_count;

static RTE_ATOMIC(uint32_t) ref_cnt;

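/* Reference-counted setup of the memzone tracking table shared by all
 * devices: the first caller allocates one slot per possible memzone,
 * later callers only bump the reference count.
 */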
int ecore_mz_mapping_alloc(void)
{
	if (rte_atomic_fetch_add_explicit(&ref_cnt, 1, rte_memory_order_relaxed) == 0) {
		ecore_mz_mapping = rte_calloc("ecore_mz_map",
				rte_memzone_max_get(), sizeof(struct rte_memzone *), 0);
	}

	if (!ecore_mz_mapping)
		return -ENOMEM;

	return 0;
}

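/* Drop one reference; the last caller releases the tracking table. */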
void ecore_mz_mapping_free(void)
{
	if (rte_atomic_fetch_sub_explicit(&ref_cnt, 1, rte_memory_order_relaxed) - 1 == 0) {
		rte_free(ecore_mz_mapping);
		ecore_mz_mapping = NULL;
	}
}

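/* Round n up to the nearest power of two, e.g. 5 -> 8, 8 -> 8, 0 -> 0. */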
unsigned long qede_log2_align(unsigned long n)
{
	unsigned long ret = n ? 1 : 0;
	unsigned long _n = n >> 1;

	while (_n) {
		_n >>= 1;
		ret <<= 1;
	}

	if (ret < n)
		ret <<= 1;

	return ret;
}

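/* Integer floor of log2(val); returns 0 for val <= 1. */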
u32 qede_osal_log2(u32 val)
{
	u32 log = 0;

	while (val >>= 1)
		log++;

	return log;
}

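/* Index of the least significant set bit, or OSAL_BITS_PER_UL when word
 * is all zeroes (__builtin_ffsl() is 1-based, with 0 meaning no bit set).
 */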
static inline u32 qede_ffb(unsigned long word)
{
	unsigned long first_bit;

	first_bit = __builtin_ffsl(word);
	return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL;
}

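/* Find the first set bit in a bitmap of 'limit' bits: scan word by word
 * for a non-zero entry, then locate the bit within it. Returns 'limit'
 * when no bit is set.
 */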
inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
{
	u32 i;
	u32 nwords = 0;
	OSAL_BUILD_BUG_ON(!limit);
	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
	for (i = 0; i < nwords; i++)
		if (addr[i] != 0)
			break;

	return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
}

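/* Index of the least significant clear bit, or OSAL_BITS_PER_UL when
 * every bit in word is set.
 */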
static inline u32 qede_ffz(unsigned long word)
{
	unsigned long first_zero;

	first_zero = __builtin_ffsl(~word);
	return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
}

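/* Counterpart of qede_find_first_bit(): skip fully-set words, then
 * return the position of the first clear bit, or 'limit' if none.
 */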
inline u32 qede_find_first_zero_bit(u32 *addr, u32 limit)
{
	u32 i;
	u32 nwords = 0;
	OSAL_BUILD_BUG_ON(!limit);
	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
	for (i = 0; i < nwords && ~(addr[i]) == 0; i++)
		;
	return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}

void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
			      __rte_unused struct vf_pf_resc_request *resc_req,
			      struct ecore_vf_acquire_sw_info *vf_sw_info)
{
	vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
	vf_sw_info->override_fw_version = 1;
}

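/* Allocate IOVA-contiguous, cache-line aligned DMA memory from an rte
 * memzone on the caller's NUMA node, record it in the tracking table so
 * osal_dma_free_mem() can release it later, and return its physical
 * address through 'phys'. The zone is named after the current timer
 * cycle count to keep names unique.
 */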
void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
			      dma_addr_t *phys, size_t size)
{
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	if (ecore_mz_count >= rte_memzone_max_get()) {
		DP_ERR(p_dev, "Memzone allocation count exceeds %zu\n",
		       rte_memzone_max_get());
		*phys = 0;
		return OSAL_NULL;
	}

	OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
	snprintf(mz_name, sizeof(mz_name), "%lx",
		 (unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_main_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
					 RTE_MEMZONE_IOVA_CONTIG,
					 RTE_CACHE_LINE_SIZE);
	if (!mz) {
		DP_ERR(p_dev, "Unable to allocate DMA memory of size %zu bytes - %s\n",
		       size, rte_strerror(rte_errno));
		*phys = 0;
		return OSAL_NULL;
	}
	*phys = mz->iova;
	ecore_mz_mapping[ecore_mz_count++] = mz;
	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "Allocated dma memory size=%zu phys=0x%lx virt=%p core=%d\n",
		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
	return mz->addr;
}

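/* Same as osal_dma_alloc_coherent() but with a caller-supplied alignment
 * instead of the cache line size.
 */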
void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
				      dma_addr_t *phys, size_t size, int align)
{
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	if (ecore_mz_count >= rte_memzone_max_get()) {
		DP_ERR(p_dev, "Memzone allocation count exceeds %zu\n",
		       rte_memzone_max_get());
		*phys = 0;
		return OSAL_NULL;
	}

	OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
	snprintf(mz_name, sizeof(mz_name), "%lx",
		 (unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_main_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
					 RTE_MEMZONE_IOVA_CONTIG, align);
	if (!mz) {
		DP_ERR(p_dev, "Unable to allocate DMA memory of size %zu bytes - %s\n",
		       size, rte_strerror(rte_errno));
		*phys = 0;
		return OSAL_NULL;
	}
	*phys = mz->iova;
	ecore_mz_mapping[ecore_mz_count++] = mz;
	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "Allocated aligned dma memory size=%zu phys=0x%lx virt=%p core=%d\n",
		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
	return mz->addr;
}

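/* Free the memzone whose IOVA matches 'phys' and close the gap in the
 * tracking table by shifting the remaining entries down one slot.
 */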
void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
{
	uint16_t j;

	for (j = 0; j < ecore_mz_count; j++) {
		if (phys == ecore_mz_mapping[j]->iova) {
			DP_VERBOSE(p_dev, ECORE_MSG_SP,
				   "Free memzone %s\n",
				   ecore_mz_mapping[j]->name);
			rte_memzone_free(ecore_mz_mapping[j]);
			while (j < ecore_mz_count - 1) {
				ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
				j++;
			}
			ecore_mz_count--;
			return;
		}
	}

	DP_ERR(p_dev, "Unexpected memory free request\n");
}

#ifdef CONFIG_ECORE_ZIPPED_FW
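/* Inflate a zlib-compressed firmware image into unzip_buf (at most
 * max_size bytes) and return the decompressed length in 32-bit dwords,
 * or 0 on any zlib error.
 */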
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
		    u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
	if (rc != Z_OK) {
		DP_ERR(p_hwfn, "zlib init failed, rc = %d\n", rc);
		return 0;
	}

	rc = inflate(p_hwfn->stream, Z_FINISH);
	inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_ERR(p_hwfn, "FW unzip error: %s, rc=%d\n",
		       p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
#endif

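/* Fill an MCP protocol statistics request. Only LAN statistics are
 * supported; the FCS error count is not tracked and is reported as -1.
 */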
void
qede_get_mcp_proto_stats(struct ecore_dev *edev,
			 enum ecore_mcp_protocol_type type,
			 union ecore_mcp_protocol_stats *stats)
{
	struct ecore_eth_stats lan_stats;

	if (type == ECORE_MCP_LAN_STATS) {
		ecore_get_vport_stats(edev, &lan_stats);

		/* @DPDK */
		stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;

		stats->lan_stats.fcs_err = -1;
	} else {
		DP_INFO(edev, "Statistics request type %d not supported\n",
			type);
	}
}

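/* React to a classified HW error: fan failures are only reported, while
 * firmware/DMAE/ramrod/attention failures trigger a FW dump (port 0 is
 * used as the default port_id).
 */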
static void qede_hw_err_handler(void *dev, enum ecore_hw_err_type err_type)
{
	struct ecore_dev *edev = dev;

	switch (err_type) {
	case ECORE_HW_ERR_FAN_FAIL:
		break;

	case ECORE_HW_ERR_MFW_RESP_FAIL:
	case ECORE_HW_ERR_HW_ATTN:
	case ECORE_HW_ERR_DMAE_FAIL:
	case ECORE_HW_ERR_RAMROD_FAIL:
	case ECORE_HW_ERR_FW_ASSERT:
		OSAL_SAVE_FW_DUMP(0); /* Using port 0 as default port_id */
		break;

	default:
		DP_NOTICE(edev, false, "Unknown HW error [%d]\n", err_type);
		return;
	}
}

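/* Entry point ecore uses to report a HW error: log a human-readable
 * description, run the handler above, then re-enable attention clearing.
 */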
void
qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
{
	char err_str[64];

	switch (err_type) {
	case ECORE_HW_ERR_FAN_FAIL:
		strcpy(err_str, "Fan Failure");
		break;
	case ECORE_HW_ERR_MFW_RESP_FAIL:
		strcpy(err_str, "MFW Response Failure");
		break;
	case ECORE_HW_ERR_HW_ATTN:
		strcpy(err_str, "HW Attention");
		break;
	case ECORE_HW_ERR_DMAE_FAIL:
		strcpy(err_str, "DMAE Failure");
		break;
	case ECORE_HW_ERR_RAMROD_FAIL:
		strcpy(err_str, "Ramrod Failure");
		break;
	case ECORE_HW_ERR_FW_ASSERT:
		strcpy(err_str, "FW Assertion");
		break;
	default:
		strcpy(err_str, "Unknown");
	}

	DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);

	qede_hw_err_handler(p_hwfn->p_dev, err_type);

	ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
}

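/* Bitwise CRC-32 (reflected, polynomial 0xedb88320) over 'length' bytes,
 * continuing from the given crc seed.
 */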
u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
{
	int i;

	while (length--) {
		crc ^= *ptr++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

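/* Platform identification string handed back to ecore: the running DPDK
 * version followed by a '.'.
 */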
void qed_set_platform_str(struct ecore_hwfn *p_hwfn,
			  char *buf_str, u32 buf_size)
{
	snprintf(buf_str, buf_size, "%s.", rte_version());
}