xref: /dpdk/drivers/net/qede/base/bcm_osal.c (revision d80e42cce4c7017ed8c99dabb8ae444a492acc1c)
1 /*
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8 
9 #include <rte_memzone.h>
10 #include <rte_errno.h>
11 
12 #include "bcm_osal.h"
13 #include "ecore.h"
14 #include "ecore_hw.h"
15 #include "ecore_iov_api.h"
16 #include "ecore_mcp_api.h"
17 #include "ecore_l2_api.h"
18 
19 /* Array of memzone pointers */
20 static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
21 /* Counter to track current memzone allocated */
22 uint16_t ecore_mz_count;
23 
unsigned long qede_log2_align(unsigned long n)
{
	/* Round 'n' up to the nearest power of two; 0 maps to 0.
	 * If the next power of two is not representable the shift
	 * wraps to 0, matching the original overflow behavior.
	 */
	unsigned long pow2;

	if (n == 0)
		return 0;

	pow2 = 1;
	while (pow2 < n && pow2 != 0)
		pow2 <<= 1;

	return pow2;
}
39 
40 u32 qede_osal_log2(u32 val)
41 {
42 	u32 log = 0;
43 
44 	while (val >>= 1)
45 		log++;
46 
47 	return log;
48 }
49 
50 inline void qede_set_bit(u32 nr, unsigned long *addr)
51 {
52 	__sync_fetch_and_or(addr, (1UL << nr));
53 }
54 
55 inline void qede_clr_bit(u32 nr, unsigned long *addr)
56 {
57 	__sync_fetch_and_and(addr, ~(1UL << nr));
58 }
59 
60 inline bool qede_test_bit(u32 nr, unsigned long *addr)
61 {
62 	bool res;
63 
64 	rte_mb();
65 	res = ((*addr) & (1UL << nr)) != 0;
66 	rte_mb();
67 	return res;
68 }
69 
70 static inline u32 qede_ffb(unsigned long word)
71 {
72 	unsigned long first_bit;
73 
74 	first_bit = __builtin_ffsl(word);
75 	return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL;
76 }
77 
78 inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
79 {
80 	u32 i;
81 	u32 nwords = 0;
82 	OSAL_BUILD_BUG_ON(!limit);
83 	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
84 	for (i = 0; i < nwords; i++)
85 		if (addr[i] != 0)
86 			break;
87 
88 	return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
89 }
90 
91 static inline u32 qede_ffz(unsigned long word)
92 {
93 	unsigned long first_zero;
94 
95 	first_zero = __builtin_ffsl(~word);
96 	return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
97 }
98 
99 inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
100 {
101 	u32 i;
102 	u32 nwords = 0;
103 	OSAL_BUILD_BUG_ON(!limit);
104 	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
105 	for (i = 0; i < nwords && ~(addr[i]) == 0; i++);
106 	return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
107 }
108 
109 void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
110 			      __rte_unused struct vf_pf_resc_request *resc_req,
111 			      struct ecore_vf_acquire_sw_info *vf_sw_info)
112 {
113 	vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
114 	vf_sw_info->override_fw_version = 1;
115 }
116 
117 void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
118 			      dma_addr_t *phys, size_t size)
119 {
120 	const struct rte_memzone *mz;
121 	char mz_name[RTE_MEMZONE_NAMESIZE];
122 	uint32_t core_id = rte_lcore_id();
123 	unsigned int socket_id;
124 
125 	if (ecore_mz_count >= RTE_MAX_MEMZONE) {
126 		DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
127 		       RTE_MAX_MEMZONE);
128 		*phys = 0;
129 		return OSAL_NULL;
130 	}
131 
132 	OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
133 	snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
134 					(unsigned long)rte_get_timer_cycles());
135 	if (core_id == (unsigned int)LCORE_ID_ANY)
136 		core_id = rte_get_master_lcore();
137 	socket_id = rte_lcore_to_socket_id(core_id);
138 	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
139 			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
140 	if (!mz) {
141 		DP_ERR(p_dev, "Unable to allocate DMA memory "
142 		       "of size %zu bytes - %s\n",
143 		       size, rte_strerror(rte_errno));
144 		*phys = 0;
145 		return OSAL_NULL;
146 	}
147 	*phys = mz->iova;
148 	ecore_mz_mapping[ecore_mz_count++] = mz;
149 	DP_VERBOSE(p_dev, ECORE_MSG_SP,
150 		   "Allocated dma memory size=%zu phys=0x%lx"
151 		   " virt=%p core=%d\n",
152 		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
153 	return mz->addr;
154 }
155 
156 void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
157 				      dma_addr_t *phys, size_t size, int align)
158 {
159 	const struct rte_memzone *mz;
160 	char mz_name[RTE_MEMZONE_NAMESIZE];
161 	uint32_t core_id = rte_lcore_id();
162 	unsigned int socket_id;
163 
164 	if (ecore_mz_count >= RTE_MAX_MEMZONE) {
165 		DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
166 		       RTE_MAX_MEMZONE);
167 		*phys = 0;
168 		return OSAL_NULL;
169 	}
170 
171 	OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
172 	snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
173 					(unsigned long)rte_get_timer_cycles());
174 	if (core_id == (unsigned int)LCORE_ID_ANY)
175 		core_id = rte_get_master_lcore();
176 	socket_id = rte_lcore_to_socket_id(core_id);
177 	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
178 			RTE_MEMZONE_IOVA_CONTIG, align);
179 	if (!mz) {
180 		DP_ERR(p_dev, "Unable to allocate DMA memory "
181 		       "of size %zu bytes - %s\n",
182 		       size, rte_strerror(rte_errno));
183 		*phys = 0;
184 		return OSAL_NULL;
185 	}
186 	*phys = mz->iova;
187 	ecore_mz_mapping[ecore_mz_count++] = mz;
188 	DP_VERBOSE(p_dev, ECORE_MSG_SP,
189 		   "Allocated aligned dma memory size=%zu phys=0x%lx"
190 		   " virt=%p core=%d\n",
191 		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
192 	return mz->addr;
193 }
194 
195 void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
196 {
197 	uint16_t j;
198 
199 	for (j = 0 ; j < ecore_mz_count; j++) {
200 		if (phys == ecore_mz_mapping[j]->iova) {
201 			DP_VERBOSE(p_dev, ECORE_MSG_SP,
202 				"Free memzone %s\n", ecore_mz_mapping[j]->name);
203 			rte_memzone_free(ecore_mz_mapping[j]);
204 			return;
205 		}
206 	}
207 
208 	DP_ERR(p_dev, "Unexpected memory free request\n");
209 }
210 
#ifdef CONFIG_ECORE_ZIPPED_FW
/* Decompress a zlib-packed FW image into 'unzip_buf'. Returns the
 * decompressed length in 32-bit dwords, or 0 on any zlib error.
 */
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
		    u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int ret;

	/* Point the (pre-allocated) zlib stream at input and output. */
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_out = max_size;
	p_hwfn->stream->next_out = unzip_buf;

	ret = inflateInit2(p_hwfn->stream, MAX_WBITS);
	if (ret != Z_OK) {
		DP_ERR(p_hwfn,
			   "zlib init failed, rc = %d\n", ret);
		return 0;
	}

	/* Single-shot inflate; release zlib state regardless of outcome. */
	ret = inflate(p_hwfn->stream, Z_FINISH);
	inflateEnd(p_hwfn->stream);

	if (ret != Z_OK && ret != Z_STREAM_END) {
		DP_ERR(p_hwfn,
			   "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
			   ret);
		return 0;
	}

	/* Callers consume the image as dwords. */
	return p_hwfn->stream->total_out / 4;
}
#endif
243 
244 void
245 qede_get_mcp_proto_stats(struct ecore_dev *edev,
246 			 enum ecore_mcp_protocol_type type,
247 			 union ecore_mcp_protocol_stats *stats)
248 {
249 	struct ecore_eth_stats lan_stats;
250 
251 	if (type == ECORE_MCP_LAN_STATS) {
252 		ecore_get_vport_stats(edev, &lan_stats);
253 
254 		/* @DPDK */
255 		stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
256 		stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;
257 
258 		stats->lan_stats.fcs_err = -1;
259 	} else {
260 		DP_INFO(edev, "Statistics request type %d not supported\n",
261 		       type);
262 	}
263 }
264 
265 void
266 qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
267 {
268 	char err_str[64];
269 
270 	switch (err_type) {
271 	case ECORE_HW_ERR_FAN_FAIL:
272 		strcpy(err_str, "Fan Failure");
273 		break;
274 	case ECORE_HW_ERR_MFW_RESP_FAIL:
275 		strcpy(err_str, "MFW Response Failure");
276 		break;
277 	case ECORE_HW_ERR_HW_ATTN:
278 		strcpy(err_str, "HW Attention");
279 		break;
280 	case ECORE_HW_ERR_DMAE_FAIL:
281 		strcpy(err_str, "DMAE Failure");
282 		break;
283 	case ECORE_HW_ERR_RAMROD_FAIL:
284 		strcpy(err_str, "Ramrod Failure");
285 		break;
286 	case ECORE_HW_ERR_FW_ASSERT:
287 		strcpy(err_str, "FW Assertion");
288 		break;
289 	default:
290 		strcpy(err_str, "Unknown");
291 	}
292 
293 	DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
294 	ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
295 }
296 
297 u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
298 {
299 	int i;
300 
301 	while (length--) {
302 		crc ^= *ptr++;
303 		for (i = 0; i < 8; i++)
304 			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
305 	}
306 	return crc;
307 }
308