/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_memzone.h>
#include <rte_errno.h>

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_dev_api.h"
#include "ecore_iov_api.h"
#include "ecore_mcp_api.h"
#include "ecore_l2_api.h"
#include "../qede_sriov.h"

int osal_pf_vf_msg(struct ecore_hwfn *p_hwfn)
{
	int rc;

	rc = qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
	if (rc) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to schedule alarm handler rc=%d\n", rc);
	}

	return rc;
}

void osal_vf_flr_update(struct ecore_hwfn *p_hwfn)
{
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;

	if (!p_hwfn)
		return;

	OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
	OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
}

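/*
 * Usage sketch (illustrative only, not part of the driver build): the DPC
 * entry point above expects the hwfn pointer smuggled through an
 * integer-sized cookie, e.g. from an interrupt or alarm callback. The
 * wrapper and the QEDE_OSAL_EXAMPLES guard below are hypothetical; the
 * guard is never defined, so this block only documents the convention.
 */
#ifdef QEDE_OSAL_EXAMPLES
static void qede_sp_dpc_trampoline(void *param)
{
	/* forward the opaque callback argument as the hwfn cookie */
	osal_poll_mode_dpc((osal_int_ptr_t)param);
}
#endif
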
/* Array of memzone pointers */
static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
/* Number of memzones currently allocated */
static uint16_t ecore_mz_count;

unsigned long qede_log2_align(unsigned long n)
{
	unsigned long ret = n ? 1 : 0;
	unsigned long _n = n >> 1;

	while (_n) {
		_n >>= 1;
		ret <<= 1;
	}

	if (ret < n)
		ret <<= 1;

	return ret;
}

u32 qede_osal_log2(u32 val)
{
	u32 log = 0;

	while (val >>= 1)
		log++;

	return log;
}

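/*
 * Usage sketch (illustrative only): expected results of the two log2
 * helpers above. RTE_ASSERT comes from <rte_debug.h>; the
 * QEDE_OSAL_EXAMPLES guard is a hypothetical macro that is never defined,
 * so this block only documents behaviour.
 */
#ifdef QEDE_OSAL_EXAMPLES
static void qede_log2_examples(void)
{
	/* qede_log2_align() rounds up to the next power of two */
	RTE_ASSERT(qede_log2_align(1000) == 1024);
	/* powers of two (and 0/1) come back unchanged */
	RTE_ASSERT(qede_log2_align(4096) == 4096);
	RTE_ASSERT(qede_log2_align(0) == 0);

	/* qede_osal_log2() is floor(log2(val)): 2^9 = 512 <= 1000 < 1024 */
	RTE_ASSERT(qede_osal_log2(1000) == 9);
}
#endif
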
static inline u32 qede_ffb(unsigned long word)
{
	unsigned long first_bit;

	first_bit = __builtin_ffsl(word);
	return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL;
}

inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
{
	u32 i;
	u32 nwords = 0;
	OSAL_BUILD_BUG_ON(!limit);
	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
	for (i = 0; i < nwords; i++)
		if (addr[i] != 0)
			break;

	return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
}

static inline u32 qede_ffz(unsigned long word)
{
	unsigned long first_zero;

	first_zero = __builtin_ffsl(~word);
	return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
}

inline u32 qede_find_first_zero_bit(u32 *addr, u32 limit)
{
	u32 i;
	u32 nwords = 0;
	OSAL_BUILD_BUG_ON(!limit);
	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
	for (i = 0; i < nwords && ~(addr[i]) == 0; i++)
		;
	return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}

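/*
 * Usage sketch (illustrative only): scanning small bitmaps with the two
 * find helpers above; when no bit qualifies within the limit, the limit
 * itself is returned. The QEDE_OSAL_EXAMPLES guard is hypothetical.
 */
#ifdef QEDE_OSAL_EXAMPLES
static void qede_bitmap_examples(void)
{
	unsigned long bmap[1] = { 1UL << 5 };	/* only bit 5 set */
	u32 zmap[1] = { 0x7 };			/* bits 0..2 set */

	RTE_ASSERT(qede_find_first_bit(bmap, OSAL_BITS_PER_UL) == 5);
	RTE_ASSERT(qede_find_first_zero_bit(zmap, 32) == 3);

	bmap[0] = 0;	/* nothing set: the limit is returned */
	RTE_ASSERT(qede_find_first_bit(bmap, OSAL_BITS_PER_UL) ==
		   OSAL_BITS_PER_UL);
}
#endif
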
void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
			      __rte_unused struct vf_pf_resc_request *resc_req,
			      struct ecore_vf_acquire_sw_info *vf_sw_info)
{
	vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
	vf_sw_info->override_fw_version = 1;
}

void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
			      dma_addr_t *phys, size_t size)
{
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	if (ecore_mz_count >= RTE_MAX_MEMZONE) {
		DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
		       RTE_MAX_MEMZONE);
		*phys = 0;
		return OSAL_NULL;
	}

	OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
	snprintf(mz_name, sizeof(mz_name), "%lx",
					(unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_main_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
	if (!mz) {
		DP_ERR(p_dev, "Unable to allocate DMA memory "
		       "of size %zu bytes - %s\n",
		       size, rte_strerror(rte_errno));
		*phys = 0;
		return OSAL_NULL;
	}
	*phys = mz->iova;
	ecore_mz_mapping[ecore_mz_count++] = mz;
	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "Allocated dma memory size=%zu phys=0x%lx"
		   " virt=%p core=%d\n",
		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
	return mz->addr;
}

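/*
 * Usage sketch (illustrative only): on failure the allocator both returns
 * OSAL_NULL and zeroes *phys, so a caller can test either one. The helper
 * and the QEDE_OSAL_EXAMPLES guard are hypothetical, not part of the
 * driver.
 */
#ifdef QEDE_OSAL_EXAMPLES
static int qede_dma_alloc_checked(struct ecore_dev *p_dev, dma_addr_t *phys,
				  size_t size, void **virt)
{
	*virt = osal_dma_alloc_coherent(p_dev, phys, size);

	/* a failed allocation leaves *phys == 0 and *virt == OSAL_NULL */
	return (*virt == OSAL_NULL) ? -1 : 0;
}
#endif
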
void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
				      dma_addr_t *phys, size_t size, int align)
{
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	if (ecore_mz_count >= RTE_MAX_MEMZONE) {
		DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
		       RTE_MAX_MEMZONE);
		*phys = 0;
		return OSAL_NULL;
	}

	OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
	snprintf(mz_name, sizeof(mz_name), "%lx",
					(unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_main_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
	if (!mz) {
		DP_ERR(p_dev, "Unable to allocate DMA memory "
		       "of size %zu bytes - %s\n",
		       size, rte_strerror(rte_errno));
		*phys = 0;
		return OSAL_NULL;
	}
	*phys = mz->iova;
	ecore_mz_mapping[ecore_mz_count++] = mz;
	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "Allocated aligned dma memory size=%zu phys=0x%lx"
		   " virt=%p core=%d\n",
		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
	return mz->addr;
}

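/*
 * Usage sketch (illustrative only): the aligned variant differs from
 * osal_dma_alloc_coherent() only in letting the caller pick the alignment
 * instead of RTE_CACHE_LINE_SIZE, e.g. a page boundary for structures the
 * device requires page-aligned. The sizes and the QEDE_OSAL_EXAMPLES
 * guard are hypothetical.
 */
#ifdef QEDE_OSAL_EXAMPLES
static void *qede_dma_alloc_page_aligned(struct ecore_dev *p_dev,
					 dma_addr_t *phys)
{
	/* an 8 KB buffer placed on a 4 KB boundary */
	return osal_dma_alloc_coherent_aligned(p_dev, phys, 8192, 4096);
}
#endif
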
void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
{
	uint16_t j;

	for (j = 0; j < ecore_mz_count; j++) {
		if (phys == ecore_mz_mapping[j]->iova) {
			DP_VERBOSE(p_dev, ECORE_MSG_SP,
				"Free memzone %s\n", ecore_mz_mapping[j]->name);
			rte_memzone_free(ecore_mz_mapping[j]);
			while (j < ecore_mz_count - 1) {
				ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
				j++;
			}
			ecore_mz_count--;
			return;
		}
	}

	DP_ERR(p_dev, "Unexpected memory free request\n");
}

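/*
 * Usage sketch (illustrative only): freeing is keyed by the IOVA that the
 * allocator reported, not by the virtual address, since the tracking array
 * above is searched by mz->iova; on a hit the array is compacted. The
 * QEDE_OSAL_EXAMPLES guard is hypothetical.
 */
#ifdef QEDE_OSAL_EXAMPLES
static void qede_dma_roundtrip_example(struct ecore_dev *p_dev)
{
	dma_addr_t phys = 0;
	void *virt = osal_dma_alloc_coherent(p_dev, &phys, 1024);

	if (virt != OSAL_NULL)
		osal_dma_free_mem(p_dev, phys);	/* pass the IOVA back */
}
#endif
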
#ifdef CONFIG_ECORE_ZIPPED_FW
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
		    u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
	if (rc != Z_OK) {
		DP_ERR(p_hwfn, "zlib init failed, rc = %d\n", rc);
		return 0;
	}

	rc = inflate(p_hwfn->stream, Z_FINISH);
	inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_ERR(p_hwfn, "FW unzip error: %s, rc=%d\n",
		       p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
#endif

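/*
 * Usage sketch (illustrative only): qede_unzip_data() inflates a zlib
 * stream into a caller-provided buffer and returns the decompressed
 * length in dwords (total_out / 4), with 0 signalling failure. The
 * QEDE_OSAL_EXAMPLES guard is hypothetical; CONFIG_ECORE_ZIPPED_FW is the
 * real build option guarding the function above.
 */
#if defined(CONFIG_ECORE_ZIPPED_FW) && defined(QEDE_OSAL_EXAMPLES)
static u32 qede_unzip_example(struct ecore_hwfn *p_hwfn, u8 *zipped,
			      u32 zipped_len, u8 *out, u32 out_size)
{
	u32 dwords = qede_unzip_data(p_hwfn, zipped_len, zipped,
				     out_size, out);

	if (dwords == 0)
		DP_ERR(p_hwfn, "FW image decompression failed\n");

	return dwords;
}
#endif
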
void
qede_get_mcp_proto_stats(struct ecore_dev *edev,
			 enum ecore_mcp_protocol_type type,
			 union ecore_mcp_protocol_stats *stats)
{
	struct ecore_eth_stats lan_stats;

	if (type == ECORE_MCP_LAN_STATS) {
		ecore_get_vport_stats(edev, &lan_stats);

		/* @DPDK */
		stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;

		stats->lan_stats.fcs_err = -1;
	} else {
		DP_INFO(edev, "Statistics request type %d not supported\n",
			type);
	}
}

static void qede_hw_err_handler(void *dev, enum ecore_hw_err_type err_type)
{
	struct ecore_dev *edev = dev;

	switch (err_type) {
	case ECORE_HW_ERR_FAN_FAIL:
		break;

	case ECORE_HW_ERR_MFW_RESP_FAIL:
	case ECORE_HW_ERR_HW_ATTN:
	case ECORE_HW_ERR_DMAE_FAIL:
	case ECORE_HW_ERR_RAMROD_FAIL:
	case ECORE_HW_ERR_FW_ASSERT:
		OSAL_SAVE_FW_DUMP(0); /* Using port 0 as default port_id */
		break;

	default:
		DP_NOTICE(edev, false, "Unknown HW error [%d]\n", err_type);
		return;
	}
}

void
qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
{
	char err_str[64];

	switch (err_type) {
	case ECORE_HW_ERR_FAN_FAIL:
		strcpy(err_str, "Fan Failure");
		break;
	case ECORE_HW_ERR_MFW_RESP_FAIL:
		strcpy(err_str, "MFW Response Failure");
		break;
	case ECORE_HW_ERR_HW_ATTN:
		strcpy(err_str, "HW Attention");
		break;
	case ECORE_HW_ERR_DMAE_FAIL:
		strcpy(err_str, "DMAE Failure");
		break;
	case ECORE_HW_ERR_RAMROD_FAIL:
		strcpy(err_str, "Ramrod Failure");
		break;
	case ECORE_HW_ERR_FW_ASSERT:
		strcpy(err_str, "FW Assertion");
		break;
	default:
		strcpy(err_str, "Unknown");
	}

	DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);

	qede_hw_err_handler(p_hwfn->p_dev, err_type);

	ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
}

u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
{
	int i;

	while (length--) {
		crc ^= *ptr++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

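/*
 * Usage sketch (illustrative only): qede_crc32() is the bit-at-a-time
 * reflected CRC-32 (polynomial 0xedb88320). The caller supplies the seed
 * and no final XOR is applied, so matching the common zlib/IEEE crc32()
 * convention means seeding with all ones and inverting the result, as
 * below. The QEDE_OSAL_EXAMPLES guard is hypothetical.
 */
#ifdef QEDE_OSAL_EXAMPLES
static u32 qede_crc32_ieee(u8 *buf, u32 len)
{
	/* seed with 0xffffffff, complement at the end: standard CRC-32 */
	return ~qede_crc32(0xffffffff, buf, len);
}
#endif
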
void qed_set_platform_str(struct ecore_hwfn *p_hwfn,
			  char *buf_str, u32 buf_size)
{
	snprintf(buf_str, buf_size, "%s.", rte_version());
}
354