xref: /dpdk/drivers/common/sfc_efx/base/ef10_nic.c (revision bf10e750ac82949e140c5834c6cbb99f1dd9b37a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2012-2019 Solarflare Communications Inc.
5  */
6 
7 #include "efx.h"
8 #include "efx_impl.h"
9 #if EFSYS_OPT_MON_MCDI
10 #include "mcdi_mon.h"
11 #endif
12 
13 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
14 
15 #include "ef10_tlv_layout.h"
16 
17 	__checkReturn	efx_rc_t
18 efx_mcdi_get_port_assignment(
19 	__in		efx_nic_t *enp,
20 	__out		uint32_t *portp)
21 {
22 	efx_mcdi_req_t req;
23 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
24 		MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
25 	efx_rc_t rc;
26 
27 	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
28 
29 	req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
30 	req.emr_in_buf = payload;
31 	req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
32 	req.emr_out_buf = payload;
33 	req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
34 
35 	efx_mcdi_execute(enp, &req);
36 
37 	if (req.emr_rc != 0) {
38 		rc = req.emr_rc;
39 		goto fail1;
40 	}
41 
42 	if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
43 		rc = EMSGSIZE;
44 		goto fail2;
45 	}
46 
47 	*portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
48 
49 	return (0);
50 
51 fail2:
52 	EFSYS_PROBE(fail2);
53 fail1:
54 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
55 
56 	return (rc);
57 }
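
/*
 * Illustrative note (added for exposition, not from the original source):
 * the MCDI wrappers in this file share one request pattern: declare a
 * combined in/out payload buffer, fill in the request descriptor, call
 * efx_mcdi_execute(), check emr_rc, and validate emr_out_length_used
 * before decoding output fields. A hypothetical caller therefore only
 * needs to check the returned efx_rc_t, e.g.:
 *
 *	uint32_t port;
 *	efx_rc_t rc;
 *
 *	if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
 *		return (rc);
 *
 * On success, 'port' holds the MC-assigned port number for this function.
 */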
58 
59 	__checkReturn	efx_rc_t
60 efx_mcdi_get_port_modes(
61 	__in		efx_nic_t *enp,
62 	__out		uint32_t *modesp,
63 	__out_opt	uint32_t *current_modep,
64 	__out_opt	uint32_t *default_modep)
65 {
66 	efx_mcdi_req_t req;
67 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN,
68 		MC_CMD_GET_PORT_MODES_OUT_LEN);
69 	efx_rc_t rc;
70 
71 	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
72 
73 	req.emr_cmd = MC_CMD_GET_PORT_MODES;
74 	req.emr_in_buf = payload;
75 	req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
76 	req.emr_out_buf = payload;
77 	req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
78 
79 	efx_mcdi_execute(enp, &req);
80 
81 	if (req.emr_rc != 0) {
82 		rc = req.emr_rc;
83 		goto fail1;
84 	}
85 
86 	/*
87 	 * Require only Modes and DefaultMode fields, unless the current mode
88 	 * was requested (CurrentMode field was added for Medford).
89 	 */
90 	if (req.emr_out_length_used <
91 	    MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
92 		rc = EMSGSIZE;
93 		goto fail2;
94 	}
95 	if ((current_modep != NULL) && (req.emr_out_length_used <
96 	    MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
97 		rc = EMSGSIZE;
98 		goto fail3;
99 	}
100 
101 	*modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
102 
103 	if (current_modep != NULL) {
104 		*current_modep = MCDI_OUT_DWORD(req,
105 					    GET_PORT_MODES_OUT_CURRENT_MODE);
106 	}
107 
108 	if (default_modep != NULL) {
109 		*default_modep = MCDI_OUT_DWORD(req,
110 					    GET_PORT_MODES_OUT_DEFAULT_MODE);
111 	}
112 
113 	return (0);
114 
115 fail3:
116 	EFSYS_PROBE(fail3);
117 fail2:
118 	EFSYS_PROBE(fail2);
119 fail1:
120 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
121 
122 	return (rc);
123 }
124 
125 	__checkReturn	efx_rc_t
126 ef10_nic_get_port_mode_bandwidth(
127 	__in		efx_nic_t *enp,
128 	__out		uint32_t *bandwidth_mbpsp)
129 {
130 	uint32_t port_modes;
131 	uint32_t current_mode;
132 	efx_port_t *epp = &(enp->en_port);
133 
134 	uint32_t single_lane;
135 	uint32_t dual_lane;
136 	uint32_t quad_lane;
137 	uint32_t bandwidth;
138 	efx_rc_t rc;
139 
140 	if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
141 				    &current_mode, NULL)) != 0) {
142 		/* No port mode info available. */
143 		goto fail1;
144 	}
145 
146 	if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_25000FDX))
147 		single_lane = 25000;
148 	else
149 		single_lane = 10000;
150 
151 	if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_50000FDX))
152 		dual_lane = 50000;
153 	else
154 		dual_lane = 20000;
155 
156 	if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_100000FDX))
157 		quad_lane = 100000;
158 	else
159 		quad_lane = 40000;
160 
161 	switch (current_mode) {
162 	case TLV_PORT_MODE_1x1_NA:			/* mode 0 */
163 		bandwidth = single_lane;
164 		break;
165 	case TLV_PORT_MODE_1x2_NA:			/* mode 10 */
166 	case TLV_PORT_MODE_NA_1x2:			/* mode 11 */
167 		bandwidth = dual_lane;
168 		break;
169 	case TLV_PORT_MODE_1x1_1x1:			/* mode 2 */
170 		bandwidth = single_lane + single_lane;
171 		break;
172 	case TLV_PORT_MODE_4x1_NA:			/* mode 4 */
173 	case TLV_PORT_MODE_NA_4x1:			/* mode 8 */
174 		bandwidth = 4 * single_lane;
175 		break;
176 	case TLV_PORT_MODE_2x1_2x1:			/* mode 5 */
177 		bandwidth = (2 * single_lane) + (2 * single_lane);
178 		break;
179 	case TLV_PORT_MODE_1x2_1x2:			/* mode 12 */
180 		bandwidth = dual_lane + dual_lane;
181 		break;
182 	case TLV_PORT_MODE_1x2_2x1:			/* mode 17 */
183 	case TLV_PORT_MODE_2x1_1x2:			/* mode 18 */
184 		bandwidth = dual_lane + (2 * single_lane);
185 		break;
186 	/* Legacy Medford-only mode. Do not use (see bug63270) */
187 	case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:	/* mode 9 */
188 		bandwidth = 4 * single_lane;
189 		break;
190 	case TLV_PORT_MODE_1x4_NA:			/* mode 1 */
191 	case TLV_PORT_MODE_NA_1x4:			/* mode 22 */
192 		bandwidth = quad_lane;
193 		break;
194 	case TLV_PORT_MODE_2x2_NA:			/* mode 13 */
195 	case TLV_PORT_MODE_NA_2x2:			/* mode 14 */
196 		bandwidth = 2 * dual_lane;
197 		break;
198 	case TLV_PORT_MODE_1x4_2x1:			/* mode 6 */
199 	case TLV_PORT_MODE_2x1_1x4:			/* mode 7 */
200 		bandwidth = quad_lane + (2 * single_lane);
201 		break;
202 	case TLV_PORT_MODE_1x4_1x2:			/* mode 15 */
203 	case TLV_PORT_MODE_1x2_1x4:			/* mode 16 */
204 		bandwidth = quad_lane + dual_lane;
205 		break;
206 	case TLV_PORT_MODE_1x4_1x4:			/* mode 3 */
207 		bandwidth = quad_lane + quad_lane;
208 		break;
209 	default:
210 		rc = EINVAL;
211 		goto fail2;
212 	}
213 
214 	*bandwidth_mbpsp = bandwidth;
215 
216 	return (0);
217 
218 fail2:
219 	EFSYS_PROBE(fail2);
220 fail1:
221 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
222 
223 	return (rc);
224 }
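
/*
 * Worked example (added for exposition): on an adapter advertising
 * EFX_PHY_CAP_25000FDX, single_lane = 25000, so port mode
 * TLV_PORT_MODE_2x1_2x1 reports (2 * 25000) + (2 * 25000) = 100000 Mbps;
 * a 10G-only adapter in the same mode reports
 * (2 * 10000) + (2 * 10000) = 40000 Mbps.
 */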
225 
226 #endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
227 
228 #if EFX_OPTS_EF10()
229 
230 	__checkReturn		efx_rc_t
231 efx_mcdi_vadaptor_alloc(
232 	__in			efx_nic_t *enp,
233 	__in			uint32_t port_id)
234 {
235 	efx_mcdi_req_t req;
236 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN,
237 		MC_CMD_VADAPTOR_ALLOC_OUT_LEN);
238 	efx_rc_t rc;
239 
240 	req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
241 	req.emr_in_buf = payload;
242 	req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
243 	req.emr_out_buf = payload;
244 	req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
245 
246 	MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
247 	MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
248 	    VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
249 	    enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
250 
251 	efx_mcdi_execute(enp, &req);
252 
253 	if (req.emr_rc != 0) {
254 		rc = req.emr_rc;
255 		goto fail1;
256 	}
257 
258 	return (0);
259 
260 fail1:
261 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
262 
263 	return (rc);
264 }
265 
266 	__checkReturn		efx_rc_t
267 efx_mcdi_vadaptor_free(
268 	__in			efx_nic_t *enp,
269 	__in			uint32_t port_id)
270 {
271 	efx_mcdi_req_t req;
272 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN,
273 		MC_CMD_VADAPTOR_FREE_OUT_LEN);
274 	efx_rc_t rc;
275 
276 	req.emr_cmd = MC_CMD_VADAPTOR_FREE;
277 	req.emr_in_buf = payload;
278 	req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
279 	req.emr_out_buf = payload;
280 	req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
281 
282 	MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
283 
284 	efx_mcdi_execute(enp, &req);
285 
286 	if (req.emr_rc != 0) {
287 		rc = req.emr_rc;
288 		goto fail1;
289 	}
290 
291 	return (0);
292 
293 fail1:
294 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
295 
296 	return (rc);
297 }
298 
299 #endif	/* EFX_OPTS_EF10() */
300 
301 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
302 
303 	__checkReturn	efx_rc_t
304 efx_mcdi_get_mac_address_pf(
305 	__in			efx_nic_t *enp,
306 	__out_ecount_opt(6)	uint8_t mac_addrp[6])
307 {
308 	efx_mcdi_req_t req;
309 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
310 		MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
311 	efx_rc_t rc;
312 
313 	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
314 
315 	req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
316 	req.emr_in_buf = payload;
317 	req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
318 	req.emr_out_buf = payload;
319 	req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
320 
321 	efx_mcdi_execute(enp, &req);
322 
323 	if (req.emr_rc != 0) {
324 		rc = req.emr_rc;
325 		goto fail1;
326 	}
327 
328 	if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
329 		rc = EMSGSIZE;
330 		goto fail2;
331 	}
332 
333 	if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
334 		rc = ENOENT;
335 		goto fail3;
336 	}
337 
338 	if (mac_addrp != NULL) {
339 		uint8_t *addrp;
340 
341 		addrp = MCDI_OUT2(req, uint8_t,
342 		    GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
343 
344 		EFX_MAC_ADDR_COPY(mac_addrp, addrp);
345 	}
346 
347 	return (0);
348 
349 fail3:
350 	EFSYS_PROBE(fail3);
351 fail2:
352 	EFSYS_PROBE(fail2);
353 fail1:
354 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
355 
356 	return (rc);
357 }
358 
359 	__checkReturn	efx_rc_t
360 efx_mcdi_get_mac_address_vf(
361 	__in			efx_nic_t *enp,
362 	__out_ecount_opt(6)	uint8_t mac_addrp[6])
363 {
364 	efx_mcdi_req_t req;
365 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
366 		MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
367 	efx_rc_t rc;
368 
369 	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
370 
371 	req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
372 	req.emr_in_buf = payload;
373 	req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
374 	req.emr_out_buf = payload;
375 	req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
376 
377 	MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
378 	    EVB_PORT_ID_ASSIGNED);
379 
380 	efx_mcdi_execute(enp, &req);
381 
382 	if (req.emr_rc != 0) {
383 		rc = req.emr_rc;
384 		goto fail1;
385 	}
386 
387 	if (req.emr_out_length_used <
388 	    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
389 		rc = EMSGSIZE;
390 		goto fail2;
391 	}
392 
393 	if (MCDI_OUT_DWORD(req,
394 		VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
395 		rc = ENOENT;
396 		goto fail3;
397 	}
398 
399 	if (mac_addrp != NULL) {
400 		uint8_t *addrp;
401 
402 		addrp = MCDI_OUT2(req, uint8_t,
403 		    VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
404 
405 		EFX_MAC_ADDR_COPY(mac_addrp, addrp);
406 	}
407 
408 	return (0);
409 
410 fail3:
411 	EFSYS_PROBE(fail3);
412 fail2:
413 	EFSYS_PROBE(fail2);
414 fail1:
415 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
416 
417 	return (rc);
418 }
419 
420 	__checkReturn	efx_rc_t
421 efx_mcdi_get_clock(
422 	__in		efx_nic_t *enp,
423 	__out		uint32_t *sys_freqp,
424 	__out		uint32_t *dpcpu_freqp)
425 {
426 	efx_mcdi_req_t req;
427 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN,
428 		MC_CMD_GET_CLOCK_OUT_LEN);
429 	efx_rc_t rc;
430 
431 	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
432 
433 	req.emr_cmd = MC_CMD_GET_CLOCK;
434 	req.emr_in_buf = payload;
435 	req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
436 	req.emr_out_buf = payload;
437 	req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
438 
439 	efx_mcdi_execute(enp, &req);
440 
441 	if (req.emr_rc != 0) {
442 		rc = req.emr_rc;
443 		goto fail1;
444 	}
445 
446 	if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
447 		rc = EMSGSIZE;
448 		goto fail2;
449 	}
450 
451 	*sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
452 	if (*sys_freqp == 0) {
453 		rc = EINVAL;
454 		goto fail3;
455 	}
456 	*dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
457 	if (*dpcpu_freqp == 0) {
458 		rc = EINVAL;
459 		goto fail4;
460 	}
461 
462 	return (0);
463 
464 fail4:
465 	EFSYS_PROBE(fail4);
466 fail3:
467 	EFSYS_PROBE(fail3);
468 fail2:
469 	EFSYS_PROBE(fail2);
470 fail1:
471 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
472 
473 	return (rc);
474 }
475 
476 	__checkReturn	efx_rc_t
477 efx_mcdi_get_rxdp_config(
478 	__in		efx_nic_t *enp,
479 	__out		uint32_t *end_paddingp)
480 {
481 	efx_mcdi_req_t req;
482 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN,
483 		MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
484 	uint32_t end_padding;
485 	efx_rc_t rc;
486 
487 	req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
488 	req.emr_in_buf = payload;
489 	req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
490 	req.emr_out_buf = payload;
491 	req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
492 
493 	efx_mcdi_execute(enp, &req);
494 
495 	if (req.emr_rc != 0) {
496 		rc = req.emr_rc;
497 		goto fail1;
498 	}
499 
500 	if (req.emr_out_length_used < MC_CMD_GET_RXDP_CONFIG_OUT_LEN) {
501 		rc = EMSGSIZE;
502 		goto fail2;
503 	}
504 
505 	if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
506 				    GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
507 		/* RX DMA end padding is disabled */
508 		end_padding = 0;
509 	} else {
510 		switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
511 					    GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
512 		case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
513 			end_padding = 64;
514 			break;
515 		case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
516 			end_padding = 128;
517 			break;
518 		case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
519 			end_padding = 256;
520 			break;
521 		default:
522 			rc = ENOTSUP;
523 			goto fail3;
524 		}
525 	}
526 
527 	*end_paddingp = end_padding;
528 
529 	return (0);
530 
531 fail3:
532 	EFSYS_PROBE(fail3);
533 fail2:
534 	EFSYS_PROBE(fail2);
535 fail1:
536 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
537 
538 	return (rc);
539 }
540 
541 	__checkReturn	efx_rc_t
542 efx_mcdi_get_vector_cfg(
543 	__in		efx_nic_t *enp,
544 	__out_opt	uint32_t *vec_basep,
545 	__out_opt	uint32_t *pf_nvecp,
546 	__out_opt	uint32_t *vf_nvecp)
547 {
548 	efx_mcdi_req_t req;
549 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN,
550 		MC_CMD_GET_VECTOR_CFG_OUT_LEN);
551 	efx_rc_t rc;
552 
553 	req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
554 	req.emr_in_buf = payload;
555 	req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
556 	req.emr_out_buf = payload;
557 	req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
558 
559 	efx_mcdi_execute(enp, &req);
560 
561 	if (req.emr_rc != 0) {
562 		rc = req.emr_rc;
563 		goto fail1;
564 	}
565 
566 	if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
567 		rc = EMSGSIZE;
568 		goto fail2;
569 	}
570 
571 	if (vec_basep != NULL)
572 		*vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
573 	if (pf_nvecp != NULL)
574 		*pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
575 	if (vf_nvecp != NULL)
576 		*vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
577 
578 	return (0);
579 
580 fail2:
581 	EFSYS_PROBE(fail2);
582 fail1:
583 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
584 
585 	return (rc);
586 }
587 
588 	__checkReturn	efx_rc_t
589 efx_mcdi_alloc_vis(
590 	__in		efx_nic_t *enp,
591 	__in		uint32_t min_vi_count,
592 	__in		uint32_t max_vi_count,
593 	__out		uint32_t *vi_basep,
594 	__out		uint32_t *vi_countp,
595 	__out		uint32_t *vi_shiftp)
596 {
597 	efx_mcdi_req_t req;
598 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN,
599 		MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
600 	efx_rc_t rc;
601 
602 	if (vi_countp == NULL) {
603 		rc = EINVAL;
604 		goto fail1;
605 	}
606 
607 	req.emr_cmd = MC_CMD_ALLOC_VIS;
608 	req.emr_in_buf = payload;
609 	req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
610 	req.emr_out_buf = payload;
611 	req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
612 
613 	MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
614 	MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
615 
616 	efx_mcdi_execute(enp, &req);
617 
618 	if (req.emr_rc != 0) {
619 		rc = req.emr_rc;
620 		goto fail2;
621 	}
622 
623 	if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
624 		rc = EMSGSIZE;
625 		goto fail3;
626 	}
627 
628 	*vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
629 	*vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
630 
631 	/* Report VI_SHIFT if available (always zero for Huntington) */
632 	if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
633 		*vi_shiftp = 0;
634 	else
635 		*vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
636 
637 	return (0);
638 
639 fail3:
640 	EFSYS_PROBE(fail3);
641 fail2:
642 	EFSYS_PROBE(fail2);
643 fail1:
644 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
645 
646 	return (rc);
647 }
648 
649 
650 	__checkReturn	efx_rc_t
651 efx_mcdi_free_vis(
652 	__in		efx_nic_t *enp)
653 {
654 	efx_mcdi_req_t req;
655 	efx_rc_t rc;
656 
657 	EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
658 	EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
659 
660 	req.emr_cmd = MC_CMD_FREE_VIS;
661 	req.emr_in_buf = NULL;
662 	req.emr_in_length = 0;
663 	req.emr_out_buf = NULL;
664 	req.emr_out_length = 0;
665 
666 	efx_mcdi_execute_quiet(enp, &req);
667 
668 	/* Ignore EALREADY (no allocated VIs, so nothing to free) */
669 	if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
670 		rc = req.emr_rc;
671 		goto fail1;
672 	}
673 
674 	return (0);
675 
676 fail1:
677 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
678 
679 	return (rc);
680 }
681 
682 #endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
683 
684 #if EFX_OPTS_EF10()
685 
686 static	__checkReturn	efx_rc_t
687 efx_mcdi_alloc_piobuf(
688 	__in		efx_nic_t *enp,
689 	__out		efx_piobuf_handle_t *handlep)
690 {
691 	efx_mcdi_req_t req;
692 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN,
693 		MC_CMD_ALLOC_PIOBUF_OUT_LEN);
694 	efx_rc_t rc;
695 
696 	if (handlep == NULL) {
697 		rc = EINVAL;
698 		goto fail1;
699 	}
700 
701 	req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
702 	req.emr_in_buf = payload;
703 	req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
704 	req.emr_out_buf = payload;
705 	req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
706 
707 	efx_mcdi_execute_quiet(enp, &req);
708 
709 	if (req.emr_rc != 0) {
710 		rc = req.emr_rc;
711 		goto fail2;
712 	}
713 
714 	if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
715 		rc = EMSGSIZE;
716 		goto fail3;
717 	}
718 
719 	*handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
720 
721 	return (0);
722 
723 fail3:
724 	EFSYS_PROBE(fail3);
725 fail2:
726 	EFSYS_PROBE(fail2);
727 fail1:
728 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
729 
730 	return (rc);
731 }
732 
733 static	__checkReturn	efx_rc_t
734 efx_mcdi_free_piobuf(
735 	__in		efx_nic_t *enp,
736 	__in		efx_piobuf_handle_t handle)
737 {
738 	efx_mcdi_req_t req;
739 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN,
740 		MC_CMD_FREE_PIOBUF_OUT_LEN);
741 	efx_rc_t rc;
742 
743 	req.emr_cmd = MC_CMD_FREE_PIOBUF;
744 	req.emr_in_buf = payload;
745 	req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
746 	req.emr_out_buf = payload;
747 	req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
748 
749 	MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
750 
751 	efx_mcdi_execute_quiet(enp, &req);
752 
753 	if (req.emr_rc != 0) {
754 		rc = req.emr_rc;
755 		goto fail1;
756 	}
757 
758 	return (0);
759 
760 fail1:
761 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
762 
763 	return (rc);
764 }
765 
766 static	__checkReturn	efx_rc_t
767 efx_mcdi_link_piobuf(
768 	__in		efx_nic_t *enp,
769 	__in		uint32_t vi_index,
770 	__in		efx_piobuf_handle_t handle)
771 {
772 	efx_mcdi_req_t req;
773 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN,
774 		MC_CMD_LINK_PIOBUF_OUT_LEN);
775 	efx_rc_t rc;
776 
777 	req.emr_cmd = MC_CMD_LINK_PIOBUF;
778 	req.emr_in_buf = payload;
779 	req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
780 	req.emr_out_buf = payload;
781 	req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
782 
783 	MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
784 	MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
785 
786 	efx_mcdi_execute(enp, &req);
787 
788 	if (req.emr_rc != 0) {
789 		rc = req.emr_rc;
790 		goto fail1;
791 	}
792 
793 	return (0);
794 
795 fail1:
796 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
797 
798 	return (rc);
799 }
800 
801 static	__checkReturn	efx_rc_t
802 efx_mcdi_unlink_piobuf(
803 	__in		efx_nic_t *enp,
804 	__in		uint32_t vi_index)
805 {
806 	efx_mcdi_req_t req;
807 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN,
808 		MC_CMD_UNLINK_PIOBUF_OUT_LEN);
809 	efx_rc_t rc;
810 
811 	req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
812 	req.emr_in_buf = payload;
813 	req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
814 	req.emr_out_buf = payload;
815 	req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
816 
817 	MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
818 
819 	efx_mcdi_execute_quiet(enp, &req);
820 
821 	if (req.emr_rc != 0) {
822 		rc = req.emr_rc;
823 		goto fail1;
824 	}
825 
826 	return (0);
827 
828 fail1:
829 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
830 
831 	return (rc);
832 }
833 
834 static			void
835 ef10_nic_alloc_piobufs(
836 	__in		efx_nic_t *enp,
837 	__in		uint32_t max_piobuf_count)
838 {
839 	efx_piobuf_handle_t *handlep;
840 	unsigned int i;
841 
842 	EFSYS_ASSERT3U(max_piobuf_count, <=,
843 	    EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
844 
845 	enp->en_arch.ef10.ena_piobuf_count = 0;
846 
847 	for (i = 0; i < max_piobuf_count; i++) {
848 		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
849 
850 		if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
851 			goto fail1;
852 
853 		enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
854 		enp->en_arch.ef10.ena_piobuf_count++;
855 	}
856 
857 	return;
858 
859 fail1:
860 	for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
861 		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
862 
863 		(void) efx_mcdi_free_piobuf(enp, *handlep);
864 		*handlep = EFX_PIOBUF_HANDLE_INVALID;
865 	}
866 	enp->en_arch.ef10.ena_piobuf_count = 0;
867 }
868 
869 
870 static			void
871 ef10_nic_free_piobufs(
872 	__in		efx_nic_t *enp)
873 {
874 	efx_piobuf_handle_t *handlep;
875 	unsigned int i;
876 
877 	for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
878 		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
879 
880 		(void) efx_mcdi_free_piobuf(enp, *handlep);
881 		*handlep = EFX_PIOBUF_HANDLE_INVALID;
882 	}
883 	enp->en_arch.ef10.ena_piobuf_count = 0;
884 }
885 
886 /* Sub-allocate a block from a piobuf */
887 	__checkReturn	efx_rc_t
888 ef10_nic_pio_alloc(
889 	__inout		efx_nic_t *enp,
890 	__out		uint32_t *bufnump,
891 	__out		efx_piobuf_handle_t *handlep,
892 	__out		uint32_t *blknump,
893 	__out		uint32_t *offsetp,
894 	__out		size_t *sizep)
895 {
896 	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
897 	efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
898 	uint32_t blk_per_buf;
899 	uint32_t buf, blk;
900 	efx_rc_t rc;
901 
902 	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
903 	EFSYS_ASSERT(bufnump);
904 	EFSYS_ASSERT(handlep);
905 	EFSYS_ASSERT(blknump);
906 	EFSYS_ASSERT(offsetp);
907 	EFSYS_ASSERT(sizep);
908 
909 	if ((edcp->edc_pio_alloc_size == 0) ||
910 	    (enp->en_arch.ef10.ena_piobuf_count == 0)) {
911 		rc = ENOMEM;
912 		goto fail1;
913 	}
914 	blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
915 
916 	for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
917 		uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
918 
919 		if (~(*map) == 0)
920 			continue;
921 
922 		EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
923 		for (blk = 0; blk < blk_per_buf; blk++) {
924 			if ((*map & (1u << blk)) == 0) {
925 				*map |= (1u << blk);
926 				goto done;
927 			}
928 		}
929 	}
930 	rc = ENOMEM;
931 	goto fail2;
932 
933 done:
934 	*handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
935 	*bufnump = buf;
936 	*blknump = blk;
937 	*sizep = edcp->edc_pio_alloc_size;
938 	*offsetp = blk * (*sizep);
939 
940 	return (0);
941 
942 fail2:
943 	EFSYS_PROBE(fail2);
944 fail1:
945 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
946 
947 	return (rc);
948 }
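
/*
 * Worked example (added for exposition; the sizes are illustrative): with
 * enc_piobuf_size = 2048 and edc_pio_alloc_size = 256, blk_per_buf = 8,
 * so each ena_pio_alloc_map[] entry tracks eight blocks in its low bits.
 * If buffer 0 has map 0x07 (blocks 0-2 in use), the first free block is
 * blk = 3 and the allocation returns *offsetp = 3 * 256 = 768 with
 * *sizep = 256.
 */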
949 
950 /* Free a piobuf sub-allocated block */
951 	__checkReturn	efx_rc_t
952 ef10_nic_pio_free(
953 	__inout		efx_nic_t *enp,
954 	__in		uint32_t bufnum,
955 	__in		uint32_t blknum)
956 {
957 	uint32_t *map;
958 	efx_rc_t rc;
959 
960 	if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
961 	    (blknum >= (8 * sizeof (*map)))) {
962 		rc = EINVAL;
963 		goto fail1;
964 	}
965 
966 	map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
967 	if ((*map & (1u << blknum)) == 0) {
968 		rc = ENOENT;
969 		goto fail2;
970 	}
971 	*map &= ~(1u << blknum);
972 
973 	return (0);
974 
975 fail2:
976 	EFSYS_PROBE(fail2);
977 fail1:
978 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
979 
980 	return (rc);
981 }
982 
983 	__checkReturn	efx_rc_t
984 ef10_nic_pio_link(
985 	__inout		efx_nic_t *enp,
986 	__in		uint32_t vi_index,
987 	__in		efx_piobuf_handle_t handle)
988 {
989 	return (efx_mcdi_link_piobuf(enp, vi_index, handle));
990 }
991 
992 	__checkReturn	efx_rc_t
993 ef10_nic_pio_unlink(
994 	__inout		efx_nic_t *enp,
995 	__in		uint32_t vi_index)
996 {
997 	return (efx_mcdi_unlink_piobuf(enp, vi_index));
998 }
999 
1000 #endif	/* EFX_OPTS_EF10() */
1001 
1002 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
1003 
1004 static	__checkReturn	efx_rc_t
1005 ef10_mcdi_get_pf_count(
1006 	__in		efx_nic_t *enp,
1007 	__out		uint32_t *pf_countp)
1008 {
1009 	efx_mcdi_req_t req;
1010 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN,
1011 		MC_CMD_GET_PF_COUNT_OUT_LEN);
1012 	efx_rc_t rc;
1013 
1014 	req.emr_cmd = MC_CMD_GET_PF_COUNT;
1015 	req.emr_in_buf = payload;
1016 	req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
1017 	req.emr_out_buf = payload;
1018 	req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
1019 
1020 	efx_mcdi_execute(enp, &req);
1021 
1022 	if (req.emr_rc != 0) {
1023 		rc = req.emr_rc;
1024 		goto fail1;
1025 	}
1026 
1027 	if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
1028 		rc = EMSGSIZE;
1029 		goto fail2;
1030 	}
1031 
1032 	*pf_countp = *MCDI_OUT(req, uint8_t,
1033 				MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
1034 
1035 	EFSYS_ASSERT(*pf_countp != 0);
1036 
1037 	return (0);
1038 
1039 fail2:
1040 	EFSYS_PROBE(fail2);
1041 fail1:
1042 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
1043 
1044 	return (rc);
1045 }
1046 
1047 static	__checkReturn			efx_rc_t
1048 ef10_nic_get_physical_port_usage(
1049 	__in				efx_nic_t *enp,
1050 	__in_ecount(pfs_to_ports_size)	uint8_t *pfs_to_ports,
1051 	__in				size_t pfs_to_ports_size,
1052 	__out				efx_port_usage_t *port_usagep)
1053 {
1054 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1055 	efx_port_usage_t port_usage;
1056 	uint8_t phy_port;
1057 	efx_rc_t rc;
1058 	size_t pf;
1059 
1060 	/*
1061 	 * The sharing of physical ports between functions is determined
1062 	 * in the following way.
1063 	 * 1. If VFs are enabled then the physical port is shared.
1064 	 * 2. Retrieve the PFs to ports assignment.
1065 	 * 3. If the PF 0 assignment cannot be retrieved (ACCESS_DENIED), this
1066 	 *    is an unprivileged function. An unprivileged function
1067 	 *    indicates the physical port must be shared with another
1068 	 *    privileged function.
1069 	 * 4. If the PF 0 assignment can be retrieved, this function is
1070 	 *    privileged. Now, read all other PFs' physical port number
1071 	 *    assignments and check if the current PF's physical port is
1072 	 *    shared with any other PF's physical port.
1073 	 * NOTE: PF 0 is always a privileged function.
1074 	 */
1075 
1076 	if (EFX_PCI_FUNCTION_IS_VF(encp)) {
1077 		port_usage = EFX_PORT_USAGE_SHARED;
1078 		goto out;
1079 	}
1080 
1081 	if (pfs_to_ports[0] ==
1082 	    MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED) {
1083 		/*
1084 		 * This is an unprivileged function, as it does not have
1085 		 * sufficient privileges to read the value; this implies the
1086 		 * physical port is shared between this function and another
1087 		 * privileged function.
1088 		 */
1089 		port_usage = EFX_PORT_USAGE_SHARED;
1090 		goto out;
1091 	}
1092 
1093 	if (encp->enc_pf >= pfs_to_ports_size) {
1094 		rc = EINVAL;
1095 		goto fail1;
1096 	}
1097 	phy_port = pfs_to_ports[encp->enc_pf];
1098 
1099 	/*
1100 	 * This is a privileged function, as it is able to read the value
1101 	 * of PF 0. Now, check if any other function shares the same
1102 	 * physical port number as this function.
1103 	 */
1104 	for (pf = 0; pf < pfs_to_ports_size; pf++) {
1105 		if ((encp->enc_pf != pf) && (phy_port == pfs_to_ports[pf])) {
1106 			/* Found match, PFs share the same physical port */
1107 			port_usage = EFX_PORT_USAGE_SHARED;
1108 			goto out;
1109 		}
1110 	}
1111 
1112 	port_usage = EFX_PORT_USAGE_EXCLUSIVE;
1113 
1114 out:
1115 	*port_usagep = port_usage;
1116 	return (0);
1117 
1118 fail1:
1119 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
1120 
1121 	return (rc);
1122 }
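
/*
 * Worked example (added for exposition, hypothetical assignment): for a
 * privileged function with enc_pf = 1 and pfs_to_ports = { 0, 1, 0, 1 },
 * phy_port = 1 is also used by PF 3, so the result is
 * EFX_PORT_USAGE_SHARED. With pfs_to_ports = { 0, 1, 2, 3 } no other PF
 * uses port 1 and the result is EFX_PORT_USAGE_EXCLUSIVE. An
 * unprivileged function sees ACCESS_NOT_PERMITTED in entry 0 and always
 * reports EFX_PORT_USAGE_SHARED.
 */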
1123 
1124 static	__checkReturn	efx_rc_t
1125 ef10_get_datapath_caps(
1126 	__in		efx_nic_t *enp)
1127 {
1128 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1129 	efx_mcdi_req_t req;
1130 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
1131 		MC_CMD_GET_CAPABILITIES_V9_OUT_LEN);
1132 	efx_rc_t rc;
1133 
1134 	req.emr_cmd = MC_CMD_GET_CAPABILITIES;
1135 	req.emr_in_buf = payload;
1136 	req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
1137 	req.emr_out_buf = payload;
1138 	req.emr_out_length = MC_CMD_GET_CAPABILITIES_V9_OUT_LEN;
1139 
1140 	efx_mcdi_execute_quiet(enp, &req);
1141 
1142 	if (req.emr_rc != 0) {
1143 		rc = req.emr_rc;
1144 		goto fail1;
1145 	}
1146 
1147 	if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
1148 		rc = EMSGSIZE;
1149 		goto fail2;
1150 	}
1151 
1152 #define	CAP_FLAGS1(_req, _flag)						\
1153 	(MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) &		\
1154 	(1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
1155 
1156 #define	CAP_FLAGS2(_req, _flag)						\
1157 	(((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
1158 	    (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) &	\
1159 	    (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
1160 
1161 #define	CAP_FLAGS3(_req, _flag)						\
1162 	(((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V7_OUT_LEN) && \
1163 	    (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V7_OUT_FLAGS3) &	\
1164 	    (1u << (MC_CMD_GET_CAPABILITIES_V7_OUT_ ## _flag ## _LBN))))
1165 
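	/*
	 * Note (added for exposition): CAP_FLAGS2() and CAP_FLAGS3() check
	 * emr_out_length_used against the V2/V7 response length before
	 * reading the extended flag words, so a flag test such as
	 * CAP_FLAGS2(req, TX_TSO_V2) simply evaluates to false when older
	 * firmware returns a shorter GET_CAPABILITIES response, without
	 * reading past the valid output.
	 */
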
1166 	/* Check if RXDP firmware inserts 14 byte prefix */
1167 	if (CAP_FLAGS1(req, RX_PREFIX_LEN_14))
1168 		encp->enc_rx_prefix_size = 14;
1169 	else
1170 		encp->enc_rx_prefix_size = 0;
1171 
1172 #if EFSYS_OPT_RX_SCALE
1173 	/* Check if the firmware supports additional RSS modes */
1174 	if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
1175 		encp->enc_rx_scale_additional_modes_supported = B_TRUE;
1176 	else
1177 		encp->enc_rx_scale_additional_modes_supported = B_FALSE;
1178 #endif /* EFSYS_OPT_RX_SCALE */
1179 
1180 	/* Check if the firmware supports TSO */
1181 	if (CAP_FLAGS1(req, TX_TSO))
1182 		encp->enc_fw_assisted_tso_enabled = B_TRUE;
1183 	else
1184 		encp->enc_fw_assisted_tso_enabled = B_FALSE;
1185 
1186 	/* Check if the firmware supports FATSOv2 */
1187 	if (CAP_FLAGS2(req, TX_TSO_V2)) {
1188 		encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
1189 		encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
1190 		    GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
1191 	} else {
1192 		encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
1193 		encp->enc_fw_assisted_tso_v2_n_contexts = 0;
1194 	}
1195 
1196 	/* Check if the firmware supports FATSOv2 encap */
1197 	if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
1198 		encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
1199 	else
1200 		encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
1201 
1202 	/* Check if TSOv3 is supported */
1203 	if (CAP_FLAGS2(req, TX_TSO_V3))
1204 		encp->enc_tso_v3_enabled = B_TRUE;
1205 	else
1206 		encp->enc_tso_v3_enabled = B_FALSE;
1207 
1208 	/* Check if the firmware has vadapter/vport/vswitch support */
1209 	if (CAP_FLAGS1(req, EVB))
1210 		encp->enc_datapath_cap_evb = B_TRUE;
1211 	else
1212 		encp->enc_datapath_cap_evb = B_FALSE;
1213 
1214 	/* Check if the firmware supports vport reconfiguration */
1215 	if (CAP_FLAGS1(req, VPORT_RECONFIGURE))
1216 		encp->enc_vport_reconfigure_supported = B_TRUE;
1217 	else
1218 		encp->enc_vport_reconfigure_supported = B_FALSE;
1219 
1220 	/* Check if the firmware supports VLAN insertion */
1221 	if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
1222 		encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
1223 	else
1224 		encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
1225 
1226 	/* Check if firmware supports VLAN stripping. */
1227 	if (CAP_FLAGS1(req, RX_VLAN_STRIPPING))
1228 		encp->enc_rx_vlan_stripping_supported = B_TRUE;
1229 	else
1230 		encp->enc_rx_vlan_stripping_supported = B_FALSE;
1231 
1232 	/* Check if the firmware supports RX event batching */
1233 	if (CAP_FLAGS1(req, RX_BATCHING))
1234 		encp->enc_rx_batching_enabled = B_TRUE;
1235 	else
1236 		encp->enc_rx_batching_enabled = B_FALSE;
1237 
1238 	/*
1239 	 * Even if batching isn't reported as supported, we may still get
1240 	 * batched events (see bug61153).
1241 	 */
1242 	encp->enc_rx_batch_max = 16;
1243 
1244 	/* Check if the firmware supports disabling scatter on RXQs */
1245 	if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
1246 		encp->enc_rx_disable_scatter_supported = B_TRUE;
1247 	else
1248 		encp->enc_rx_disable_scatter_supported = B_FALSE;
1249 
1250 	/* No limit on maximum number of Rx scatter elements per packet. */
1251 	encp->enc_rx_scatter_max = -1;
1252 
1253 	/* Check if the firmware supports including FCS on RX */
1254 	if (CAP_FLAGS1(req, RX_INCLUDE_FCS))
1255 		encp->enc_rx_include_fcs_supported = B_TRUE;
1256 	else
1257 		encp->enc_rx_include_fcs_supported = B_FALSE;
1258 
1259 	/* Check if the firmware supports packed stream mode */
1260 	if (CAP_FLAGS1(req, RX_PACKED_STREAM))
1261 		encp->enc_rx_packed_stream_supported = B_TRUE;
1262 	else
1263 		encp->enc_rx_packed_stream_supported = B_FALSE;
1264 
1265 	/*
1266 	 * Check if the firmware supports configurable buffer sizes
1267 	 * for packed stream mode (otherwise buffer size is 1Mbyte)
1268 	 */
1269 	if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
1270 		encp->enc_rx_var_packed_stream_supported = B_TRUE;
1271 	else
1272 		encp->enc_rx_var_packed_stream_supported = B_FALSE;
1273 
1274 	/* Check if the firmware supports equal stride super-buffer mode */
1275 	if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
1276 		encp->enc_rx_es_super_buffer_supported = B_TRUE;
1277 	else
1278 		encp->enc_rx_es_super_buffer_supported = B_FALSE;
1279 
1280 	/* Check if the firmware supports FW subvariant w/o Tx checksumming */
1281 	if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
1282 		encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
1283 	else
1284 		encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
1285 
1286 	/* Check if the firmware supports set mac with running filters */
1287 	if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
1288 		encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
1289 	else
1290 		encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
1291 
1292 	/*
1293 	 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
1294 	 * specifying which parameters to configure.
1295 	 */
1296 	if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
1297 		encp->enc_enhanced_set_mac_supported = B_TRUE;
1298 	else
1299 		encp->enc_enhanced_set_mac_supported = B_FALSE;
1300 
1301 	/*
1302 	 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
1303 	 * us to let the firmware choose the settings to use on an EVQ.
1304 	 */
1305 	if (CAP_FLAGS2(req, INIT_EVQ_V2))
1306 		encp->enc_init_evq_v2_supported = B_TRUE;
1307 	else
1308 		encp->enc_init_evq_v2_supported = B_FALSE;
1309 
1310 	/*
1311 	 * Check if firmware supports extended width event queues, which have
1312 	 * a different event descriptor layout.
1313 	 */
1314 	if (CAP_FLAGS3(req, EXTENDED_WIDTH_EVQS_SUPPORTED))
1315 		encp->enc_init_evq_extended_width_supported = B_TRUE;
1316 	else
1317 		encp->enc_init_evq_extended_width_supported = B_FALSE;
1318 
1319 	/*
1320 	 * Check if the NO_CONT_EV mode for RX events is supported.
1321 	 */
1322 	if (CAP_FLAGS2(req, INIT_RXQ_NO_CONT_EV))
1323 		encp->enc_no_cont_ev_mode_supported = B_TRUE;
1324 	else
1325 		encp->enc_no_cont_ev_mode_supported = B_FALSE;
1326 
1327 	/*
1328 	 * Check if buffer size may and must be specified on INIT_RXQ.
1329 	 * It may always be specified to efx_rx_qcreate(), but will be
1330 	 * kept libefx-internal if MCDI does not support it.
1331 	 */
1332 	if (CAP_FLAGS2(req, INIT_RXQ_WITH_BUFFER_SIZE))
1333 		encp->enc_init_rxq_with_buffer_size = B_TRUE;
1334 	else
1335 		encp->enc_init_rxq_with_buffer_size = B_FALSE;
1336 
1337 	/*
1338 	 * Check if firmware-verified NVRAM updates must be used.
1339 	 *
1340 	 * The firmware trusted installer requires all NVRAM updates to use
1341 	 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
1342 	 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
1343 	 * partition and report the result).
1344 	 */
1345 	if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
1346 		encp->enc_nvram_update_verify_result_supported = B_TRUE;
1347 	else
1348 		encp->enc_nvram_update_verify_result_supported = B_FALSE;
1349 
1350 	if (CAP_FLAGS2(req, NVRAM_UPDATE_POLL_VERIFY_RESULT))
1351 		encp->enc_nvram_update_poll_verify_result_supported = B_TRUE;
1352 	else
1353 		encp->enc_nvram_update_poll_verify_result_supported = B_FALSE;
1354 
1355 	/*
1356 	 * Check if firmware update via the BUNDLE partition is supported
1357 	 */
1358 	if (CAP_FLAGS2(req, BUNDLE_UPDATE))
1359 		encp->enc_nvram_bundle_update_supported = B_TRUE;
1360 	else
1361 		encp->enc_nvram_bundle_update_supported = B_FALSE;
1362 
1363 	/*
1364 	 * Check if firmware provides packet memory and Rx datapath
1365 	 * counters.
1366 	 */
1367 	if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
1368 		encp->enc_pm_and_rxdp_counters = B_TRUE;
1369 	else
1370 		encp->enc_pm_and_rxdp_counters = B_FALSE;
1371 
1372 	/*
1373 	 * Check if the 40G MAC hardware is capable of reporting
1374 	 * statistics for Tx size bins.
1375 	 */
1376 	if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
1377 		encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
1378 	else
1379 		encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
1380 
1381 	/*
1382 	 * Check if firmware supports VXLAN and NVGRE tunnels.
1383 	 * The capability indicates Geneve protocol support as well.
1384 	 */
1385 	if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
1386 		encp->enc_tunnel_encapsulations_supported =
1387 		    (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
1388 		    (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
1389 		    (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
1390 
1391 		EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
1392 		    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
1393 		encp->enc_tunnel_config_udp_entries_max =
1394 		    EFX_TUNNEL_MAXNENTRIES;
1395 	} else {
1396 		encp->enc_tunnel_config_udp_entries_max = 0;
1397 	}
1398 
1399 #define CAP_PFS_TO_PORTS(_n)	\
1400 	(MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_ ## _n)
1401 
1402 	encp->enc_port_usage = EFX_PORT_USAGE_UNKNOWN;
1403 
1404 	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
1405 		/* PFs to ports assignment */
1406 		uint8_t pfs_to_ports[CAP_PFS_TO_PORTS(NUM)];
1407 
1408 		EFX_STATIC_ASSERT((CAP_PFS_TO_PORTS(NUM) * CAP_PFS_TO_PORTS(LEN)) ==
1409 		    EFX_ARRAY_SIZE(pfs_to_ports));
1410 
1411 		memcpy(pfs_to_ports, MCDI_OUT(req, efx_byte_t, CAP_PFS_TO_PORTS(OFST)),
1412 		    EFX_ARRAY_SIZE(pfs_to_ports));
1413 
1414 		rc = ef10_nic_get_physical_port_usage(enp, pfs_to_ports,
1415 		    EFX_ARRAY_SIZE(pfs_to_ports), &encp->enc_port_usage);
1416 		if (rc != 0) {
1417 			/* PF to port mapping lookup failed */
1418 			encp->enc_port_usage = EFX_PORT_USAGE_UNKNOWN;
1419 		}
1420 	}
1421 #undef  CAP_PFS_TO_PORTS
1422 
1423 	/*
1424 	 * Check if firmware reports the VI window mode.
1425 	 * Medford2 has a variable VI window size (8K, 16K or 64K).
1426 	 * Medford and Huntington have a fixed 8K VI window size.
1427 	 */
1428 	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
1429 		uint8_t mode =
1430 		    MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
1431 
1432 		switch (mode) {
1433 		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
1434 			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1435 			break;
1436 		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
1437 			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
1438 			break;
1439 		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
1440 			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
1441 			break;
1442 		default:
1443 			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1444 			break;
1445 		}
1446 	} else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
1447 		    (enp->en_family == EFX_FAMILY_MEDFORD)) {
1448 		/* Huntington and Medford have fixed 8K window size */
1449 		/* Huntington and Medford have a fixed 8K window size */
1450 	} else {
1451 		encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1452 	}
1453 
1454 	/* Check if firmware supports extended MAC stats. */
1455 	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
1456 		/* Extended stats buffer supported */
1457 		encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
1458 		    GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
1459 	} else {
1460 		/* Use Siena-compatible legacy MAC stats */
1461 		encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
1462 	}
1463 
1464 	if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
1465 		encp->enc_fec_counters = B_TRUE;
1466 	else
1467 		encp->enc_fec_counters = B_FALSE;
1468 
1469 	/* Check if the firmware provides head-of-line blocking counters */
1470 	if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
1471 		encp->enc_hlb_counters = B_TRUE;
1472 	else
1473 		encp->enc_hlb_counters = B_FALSE;
1474 
1475 #if EFSYS_OPT_RX_SCALE
1476 	if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
1477 		/* Only one exclusive RSS context is available per port. */
1478 		encp->enc_rx_scale_max_exclusive_contexts = 1;
1479 
1480 		switch (enp->en_family) {
1481 		case EFX_FAMILY_MEDFORD2:
1482 			encp->enc_rx_scale_hash_alg_mask =
1483 			    (1U << EFX_RX_HASHALG_TOEPLITZ);
1484 			break;
1485 
1486 		case EFX_FAMILY_MEDFORD:
1487 		case EFX_FAMILY_HUNTINGTON:
1488 			/*
1489 			 * The packed stream firmware variant maintains a
1490 			 * non-standard algorithm for hash computation. It
1491 			 * XORs together the source and destination IP
1492 			 * addresses (or their last four bytes in the case
1493 			 * of IPv6) and uses the resulting value as the
1494 			 * input to a Toeplitz hash.
1495 			 */
1496 			encp->enc_rx_scale_hash_alg_mask =
1497 			    (1U << EFX_RX_HASHALG_PACKED_STREAM);
1498 			break;
1499 
1500 		default:
1501 			rc = EINVAL;
1502 			goto fail3;
1503 		}
1504 
1505 		/* Port numbers cannot contribute to the hash value */
1506 		encp->enc_rx_scale_l4_hash_supported = B_FALSE;
1507 	} else {
1508 		/*
1509 		 * Maximum number of exclusive RSS contexts.
1510 		 * EF10 hardware supports 64 in total, but 6 are reserved
1511 		 * for shared contexts. They are a global resource so
1512 		 * not all may be available.
1513 		 */
1514 		encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
1515 
1516 		encp->enc_rx_scale_hash_alg_mask =
1517 		    (1U << EFX_RX_HASHALG_TOEPLITZ);
1518 
1519 		/*
1520 		 * It is possible to use port numbers as
1521 		 * the input data for hash computation.
1522 		 */
1523 		encp->enc_rx_scale_l4_hash_supported = B_TRUE;
1524 	}
1525 
1526 	if (CAP_FLAGS3(req, RSS_SELECTABLE_TABLE_SIZE))
1527 		encp->enc_rx_scale_tbl_entry_count_is_selectable = B_TRUE;
1528 	else
1529 		encp->enc_rx_scale_tbl_entry_count_is_selectable = B_FALSE;
1530 #endif /* EFSYS_OPT_RX_SCALE */
1531 
1532 	/* Check if the firmware supports "FLAG" and "MARK" filter actions */
1533 	if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
1534 		encp->enc_filter_action_flag_supported = B_TRUE;
1535 	else
1536 		encp->enc_filter_action_flag_supported = B_FALSE;
1537 
1538 	if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
1539 		encp->enc_filter_action_mark_supported = B_TRUE;
1540 	else
1541 		encp->enc_filter_action_mark_supported = B_FALSE;
1542 
1543 	/* Get maximum supported value for "MARK" filter action */
1544 	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
1545 		encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
1546 		    GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
1547 	else
1548 		encp->enc_filter_action_mark_max = 0;
1549 
1550 #if EFSYS_OPT_MAE
1551 	/*
1552 	 * Check support for EF100 Match Action Engine (MAE).
1553 	 * MAE hardware is present on Riverhead boards (from R2),
1554 	 * and on Keystone, and requires support in firmware.
1555 	 *
1556 	 * MAE control operations require MAE control privilege,
1557 	 * which is not available for VFs.
1558 	 *
1559 	 * Privileges can change dynamically at runtime: we assume
1560 	 * MAE support requires that the privilege is granted initially,
1561 	 * and ignore later dynamic changes.
1562 	 */
1563 	if (CAP_FLAGS3(req, MAE_SUPPORTED)) {
1564 		encp->enc_mae_supported = B_TRUE;
1565 		if (EFX_MCDI_HAVE_PRIVILEGE(encp->enc_privilege_mask, MAE))
1566 			encp->enc_mae_admin = B_TRUE;
1567 		else
1568 			encp->enc_mae_admin = B_FALSE;
1569 	} else {
1570 		encp->enc_mae_supported = B_FALSE;
1571 		encp->enc_mae_admin = B_FALSE;
1572 	}
1573 
1574 	/*
1575 	 * Check support for MAE action set v2 features.
1576 	 * These provide support for packet edits.
1577 	 */
1578 	if (CAP_FLAGS3(req, MAE_ACTION_SET_ALLOC_V2_SUPPORTED))
1579 		encp->enc_mae_aset_v2_supported = B_TRUE;
1580 	else
1581 		encp->enc_mae_aset_v2_supported = B_FALSE;
1582 #else
1583 	encp->enc_mae_supported = B_FALSE;
1584 	encp->enc_mae_admin = B_FALSE;
1585 #endif /* EFSYS_OPT_MAE */
1586 
1587 #if EFSYS_OPT_RX_SCALE
1588 	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V9_OUT_LEN) {
1589 		encp->enc_rx_scale_indirection_max_nqueues =
1590 		    MCDI_OUT_DWORD(req,
1591 			GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES);
1592 		encp->enc_rx_scale_tbl_min_nentries =
1593 		    MCDI_OUT_DWORD(req,
1594 			GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE);
1595 		encp->enc_rx_scale_tbl_max_nentries =
1596 		    MCDI_OUT_DWORD(req,
1597 			GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE);
1598 
1599 		if (CAP_FLAGS3(req, RSS_EVEN_SPREADING)) {
1600 #define	RSS_MAX_EVEN_SPREADING_QUEUES				\
1601 	GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES
1602 			/*
1603 			 * The even spreading mode distributes traffic across
1604 			 * the specified number of queues without the need to
1605 			 * allocate precious indirection entry pool resources.
1606 			 */
1607 			encp->enc_rx_scale_even_spread_max_nqueues =
1608 			    MCDI_OUT_DWORD(req, RSS_MAX_EVEN_SPREADING_QUEUES);
1609 #undef RSS_MAX_EVEN_SPREADING_QUEUES
1610 		} else {
1611 			/* There is no support for the even spread contexts. */
1612 			encp->enc_rx_scale_even_spread_max_nqueues = 0;
1613 		}
1614 	} else {
1615 		encp->enc_rx_scale_indirection_max_nqueues = EFX_MAXRSS;
1616 		encp->enc_rx_scale_tbl_min_nentries = EFX_RSS_TBL_SIZE;
1617 		encp->enc_rx_scale_tbl_max_nentries = EFX_RSS_TBL_SIZE;
1618 
1619 		/*
1620 		 * Assume that there is no support
1621 		 * for the even spread contexts.
1622 		 */
1623 		encp->enc_rx_scale_even_spread_max_nqueues = 0;
1624 	}
1625 #endif /* EFSYS_OPT_RX_SCALE */
1626 
1627 #undef CAP_FLAGS1
1628 #undef CAP_FLAGS2
1629 #undef CAP_FLAGS3
1630 
1631 	return (0);
1632 
1633 #if EFSYS_OPT_RX_SCALE
1634 fail3:
1635 	EFSYS_PROBE(fail3);
1636 #endif /* EFSYS_OPT_RX_SCALE */
1637 fail2:
1638 	EFSYS_PROBE(fail2);
1639 fail1:
1640 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
1641 
1642 	return (rc);
1643 }
1644 
1645 
1646 #define	EF10_LEGACY_PF_PRIVILEGE_MASK					\
1647 	(MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN			|	\
1648 	MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK			|	\
1649 	MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD			|	\
1650 	MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP			|	\
1651 	MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS		|	\
1652 	MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING		|	\
1653 	MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST			|	\
1654 	MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST			|	\
1655 	MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST			|	\
1656 	MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST		|	\
1657 	MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
1658 
1659 #define	EF10_LEGACY_VF_PRIVILEGE_MASK	0
1660 
1661 
1662 	__checkReturn		efx_rc_t
1663 ef10_get_privilege_mask(
1664 	__in			efx_nic_t *enp,
1665 	__out			uint32_t *maskp)
1666 {
1667 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1668 	uint32_t mask;
1669 	efx_rc_t rc;
1670 
1671 	if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
1672 					    &mask)) != 0) {
1673 		if (rc != ENOTSUP)
1674 			goto fail1;
1675 
1676 		/* Fallback for old firmware without privilege mask support */
1677 		if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1678 			/* Assume PF has admin privilege */
1679 			mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
1680 		} else {
1681 			/* VF is always unprivileged by default */
1682 			mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
1683 		}
1684 	}
1685 
1686 	*maskp = mask;
1687 
1688 	return (0);
1689 
1690 fail1:
1691 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
1692 
1693 	return (rc);
1694 }
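
/*
 * Illustrative usage (added for exposition): a privilege mask of this
 * form is tested with EFX_MCDI_HAVE_PRIVILEGE(), as in the MAE admin
 * check in ef10_get_datapath_caps() above, e.g.:
 *
 *	uint32_t mask;
 *
 *	if ((ef10_get_privilege_mask(enp, &mask) == 0) &&
 *	    EFX_MCDI_HAVE_PRIVILEGE(mask, MAE))
 *		encp->enc_mae_admin = B_TRUE;
 */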
1695 
1696 
1697 #define	EFX_EXT_PORT_MAX	4
1698 #define	EFX_EXT_PORT_NA		0xFF
1699 
1700 /*
1701  * Table of mapping schemes from port number to external number.
1702  *
1703  * Each port number ultimately corresponds to a connector: either as part of
1704  * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
1705  * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
1706  * "Salina"). In general:
1707  *
1708  * Port number (0-based)
1709  *     |
1710  *   port mapping (n:1)
1711  *     |
1712  *     v
1713  * External port number (1-based)
1714  *     |
1715  *   fixed (1:1) or cable assembly (1:m)
1716  *     |
1717  *     v
1718  * Connector
1719  *
1720  * The external numbering refers to the cages or magjacks on the board,
1721  * as visibly annotated on the board or back panel. This table describes
1722  * how to determine which external cage/magjack corresponds to the port
1723  * numbers used by the driver.
1724  *
1725  * The count of consecutive port numbers that map to each external number
1726  * is determined by the chip family and the current port mode.
1727  *
1728  * For the Huntington family, the current port mode cannot be discovered,
1729  * but a single mapping is used by all modes for a given chip variant,
1730  * so the mapping used is instead the last match in the table to the full
1731  * set of port modes to which the NIC can be configured. Therefore the
1732  * ordering of entries in the mapping table is significant.
1733  */
1734 static struct ef10_external_port_map_s {
1735 	efx_family_t	family;
1736 	uint32_t	modes_mask;
1737 	uint8_t		base_port[EFX_EXT_PORT_MAX];
1738 }	__ef10_external_port_mappings[] = {
1739 	/*
1740 	 * Modes used by Huntington family controllers where each port
1741 	 * number maps to a separate cage.
1742 	 * SFN7x22F (Torino):
1743 	 *	port 0 -> cage 1
1744 	 *	port 1 -> cage 2
1745 	 * SFN7xx4F (Pavia):
1746 	 *	port 0 -> cage 1
1747 	 *	port 1 -> cage 2
1748 	 *	port 2 -> cage 3
1749 	 *	port 3 -> cage 4
1750 	 */
1751 	{
1752 		EFX_FAMILY_HUNTINGTON,
1753 		(1U << TLV_PORT_MODE_10G) |			/* mode 0 */
1754 		(1U << TLV_PORT_MODE_10G_10G) |			/* mode 2 */
1755 		(1U << TLV_PORT_MODE_10G_10G_10G_10G),		/* mode 4 */
1756 		{ 0, 1, 2, 3 }
1757 	},
1758 	/*
1759 	 * Modes which for Huntington identify a chip variant where 2
1760 	 * adjacent port numbers map to each cage.
1761 	 * SFN7x42Q (Monza):
1762 	 *	port 0 -> cage 1
1763 	 *	port 1 -> cage 1
1764 	 *	port 2 -> cage 2
1765 	 *	port 3 -> cage 2
1766 	 */
1767 	{
1768 		EFX_FAMILY_HUNTINGTON,
1769 		(1U << TLV_PORT_MODE_40G) |			/* mode 1 */
1770 		(1U << TLV_PORT_MODE_40G_40G) |			/* mode 3 */
1771 		(1U << TLV_PORT_MODE_40G_10G_10G) |		/* mode 6 */
1772 		(1U << TLV_PORT_MODE_10G_10G_40G),		/* mode 7 */
1773 		{ 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1774 	},
1775 	/*
1776 	 * Modes that on Medford allocate each port number to a separate
1777 	 * cage.
1778 	 *	port 0 -> cage 1
1779 	 *	port 1 -> cage 2
1780 	 *	port 2 -> cage 3
1781 	 *	port 3 -> cage 4
1782 	 */
1783 	{
1784 		EFX_FAMILY_MEDFORD,
1785 		(1U << TLV_PORT_MODE_1x1_NA) |			/* mode 0 */
1786 		(1U << TLV_PORT_MODE_1x4_NA) |			/* mode 1 */
1787 		(1U << TLV_PORT_MODE_1x1_1x1),			/* mode 2 */
1788 		{ 0, 1, 2, 3 }
1789 	},
1790 	/*
1791 	 * Modes that on Medford allocate 2 adjacent port numbers to each
1792 	 * cage.
1793 	 *	port 0 -> cage 1
1794 	 *	port 1 -> cage 1
1795 	 *	port 2 -> cage 2
1796 	 *	port 3 -> cage 2
1797 	 */
1798 	{
1799 		EFX_FAMILY_MEDFORD,
1800 		(1U << TLV_PORT_MODE_1x4_1x4) |			/* mode 3 */
1801 		(1U << TLV_PORT_MODE_2x1_2x1) |			/* mode 5 */
1802 		(1U << TLV_PORT_MODE_1x4_2x1) |			/* mode 6 */
1803 		(1U << TLV_PORT_MODE_2x1_1x4) |			/* mode 7 */
1804 		/* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
1805 		(1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),	/* mode 9 */
1806 		{ 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1807 	},
1808 	/*
1809 	 * Modes that on Medford allocate 4 adjacent port numbers to
1810 	 * cage 1.
1811 	 *	port 0 -> cage 1
1812 	 *	port 1 -> cage 1
1813 	 *	port 2 -> cage 1
1814 	 *	port 3 -> cage 1
1815 	 */
1816 	{
1817 		EFX_FAMILY_MEDFORD,
1818 		/* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
1819 		(1U << TLV_PORT_MODE_4x1_NA),			/* mode 4 */
1820 		{ 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1821 	},
1822 	/*
1823 	 * Modes that on Medford allocate 4 adjacent port numbers to
1824 	 * cage 2.
1825 	 *	port 0 -> cage 2
1826 	 *	port 1 -> cage 2
1827 	 *	port 2 -> cage 2
1828 	 *	port 3 -> cage 2
1829 	 */
1830 	{
1831 		EFX_FAMILY_MEDFORD,
1832 		(1U << TLV_PORT_MODE_NA_4x1),			/* mode 8 */
1833 		{ EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1834 	},
1835 	/*
1836 	 * Modes that on Medford2 allocate each port number to a separate
1837 	 * cage.
1838 	 *	port 0 -> cage 1
1839 	 *	port 1 -> cage 2
1840 	 *	port 2 -> cage 3
1841 	 *	port 3 -> cage 4
1842 	 */
1843 	{
1844 		EFX_FAMILY_MEDFORD2,
1845 		(1U << TLV_PORT_MODE_1x1_NA) |			/* mode 0 */
1846 		(1U << TLV_PORT_MODE_1x4_NA) |			/* mode 1 */
1847 		(1U << TLV_PORT_MODE_1x1_1x1) |			/* mode 2 */
1848 		(1U << TLV_PORT_MODE_1x4_1x4) |			/* mode 3 */
1849 		(1U << TLV_PORT_MODE_1x2_NA) |			/* mode 10 */
1850 		(1U << TLV_PORT_MODE_1x2_1x2) |			/* mode 12 */
1851 		(1U << TLV_PORT_MODE_1x4_1x2) |			/* mode 15 */
1852 		(1U << TLV_PORT_MODE_1x2_1x4),			/* mode 16 */
1853 		{ 0, 1, 2, 3 }
1854 	},
1855 	/*
1856 	 * Modes that on Medford2 allocate 1 port to cage 1 and the rest
1857 	 * to cage 2.
1858 	 *	port 0 -> cage 1
1859 	 *	port 1 -> cage 2
1860 	 *	port 2 -> cage 2
1861 	 */
1862 	{
1863 		EFX_FAMILY_MEDFORD2,
1864 		(1U << TLV_PORT_MODE_1x2_2x1) |			/* mode 17 */
1865 		(1U << TLV_PORT_MODE_1x4_2x1),			/* mode 6 */
1866 		{ 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1867 	},
1868 	/*
1869 	 * Modes that on Medford2 allocate 2 adjacent port numbers to cage 1
1870 	 * and the rest to cage 2.
1871 	 *	port 0 -> cage 1
1872 	 *	port 1 -> cage 1
1873 	 *	port 2 -> cage 2
1874 	 *	port 3 -> cage 2
1875 	 */
1876 	{
1877 		EFX_FAMILY_MEDFORD2,
1878 		(1U << TLV_PORT_MODE_2x1_2x1) |			/* mode 4 */
1879 		(1U << TLV_PORT_MODE_2x1_1x4) |			/* mode 7 */
1880 		(1U << TLV_PORT_MODE_2x2_NA) |			/* mode 13 */
1881 		(1U << TLV_PORT_MODE_2x1_1x2),			/* mode 18 */
1882 		{ 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1883 	},
1884 	/*
1885 	 * Modes that on Medford2 allocate up to 4 adjacent port numbers
1886 	 * to cage 1.
1887 	 *	port 0 -> cage 1
1888 	 *	port 1 -> cage 1
1889 	 *	port 2 -> cage 1
1890 	 *	port 3 -> cage 1
1891 	 */
1892 	{
1893 		EFX_FAMILY_MEDFORD2,
1894 		(1U << TLV_PORT_MODE_4x1_NA),			/* mode 5 */
1895 		{ 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1896 	},
1897 	/*
1898 	 * Modes that on Medford2 allocate up to 4 adjacent port numbers
1899 	 * to cage 2.
1900 	 *	port 0 -> cage 2
1901 	 *	port 1 -> cage 2
1902 	 *	port 2 -> cage 2
1903 	 *	port 3 -> cage 2
1904 	 */
1905 	{
1906 		EFX_FAMILY_MEDFORD2,
1907 		(1U << TLV_PORT_MODE_NA_4x1) |			/* mode 8 */
1908 		(1U << TLV_PORT_MODE_NA_1x2) |			/* mode 11 */
1909 		(1U << TLV_PORT_MODE_NA_2x2),			/* mode 14 */
1910 		{ EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1911 	},
1912 	/*
1913 	 * Modes that on Riverhead allocate each port number to a separate
1914 	 * cage.
1915 	 *	port 0 -> cage 1
1916 	 *	port 1 -> cage 2
1917 	 */
1918 	{
1919 		EFX_FAMILY_RIVERHEAD,
1920 		(1U << TLV_PORT_MODE_1x1_NA) |			/* mode 0 */
1921 		(1U << TLV_PORT_MODE_1x4_NA) |			/* mode 1 */
1922 		(1U << TLV_PORT_MODE_1x1_1x1),			/* mode 2 */
1923 		{ 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1924 	},
1925 };
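/*
 * Illustrative walk-through of the table above (values taken from the
 * Medford 2x1_2x1 entry and its comment block): base_port is
 * { 0, 2, NA, NA }, so for MCDI port 3 the closest base that does not
 * exceed the port is 2, at index 1; ext_index becomes 1 and the reported
 * external port is ext_index + 1 = 2, i.e. cage 2. The lookup itself is
 * performed in ef10_external_port_mapping() below.
 */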
1926 
1927 static	__checkReturn	efx_rc_t
1928 ef10_external_port_mapping(
1929 	__in		efx_nic_t *enp,
1930 	__in		uint32_t port,
1931 	__out		uint8_t *external_portp)
1932 {
1933 	efx_rc_t rc;
1934 	int i;
1935 	uint32_t port_modes;
1936 	uint32_t matches;
1937 	uint32_t current;
1938 	struct ef10_external_port_map_s *mapp = NULL;
1939 	int ext_index = port; /* Default 1-1 mapping */
1940 
1941 	if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current,
1942 		    NULL)) != 0) {
1943 		/*
1944 		 * No current port mode information (i.e. Huntington)
1945 		 * - infer mapping from available modes
1946 		 */
1947 		if ((rc = efx_mcdi_get_port_modes(enp,
1948 			    &port_modes, NULL, NULL)) != 0) {
1949 			/*
1950 			 * No port mode information available
1951 			 * - use default mapping
1952 			 */
1953 			goto out;
1954 		}
1955 	} else {
1956 		/* Only need to scan the current mode */
1957 		port_modes = 1 << current;
1958 	}
1959 
1960 	/*
1961 	 * Infer the internal port -> external number mapping from
1962 	 * the possible port modes for this NIC.
1963 	 */
1964 	for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
1965 		struct ef10_external_port_map_s *eepmp =
1966 		    &__ef10_external_port_mappings[i];
1967 		if (eepmp->family != enp->en_family)
1968 			continue;
1969 		matches = (eepmp->modes_mask & port_modes);
1970 		if (matches != 0) {
1971 			/*
1972 			 * Some modes match. For some Huntington boards
1973 			 * there will be multiple matches. The mapping from
1974 			 * the last matching entry is used.
1975 			 */
1976 			mapp = eepmp;
1977 			port_modes &= ~matches;
1978 		}
1979 	}
1980 
1981 	if (port_modes != 0) {
1982 		/* Some advertised modes are not supported */
1983 		rc = ENOTSUP;
1984 		goto fail1;
1985 	}
1986 
1987 out:
1988 	if (mapp != NULL) {
1989 		/*
1990 		 * External ports are assigned a sequence of consecutive
1991 		 * port numbers, so find the one with the closest base_port.
1992 		 */
1993 		uint32_t delta = EFX_EXT_PORT_NA;
1994 
1995 		for (i = 0; i < EFX_EXT_PORT_MAX; i++) {
1996 			uint32_t base = mapp->base_port[i];
1997 			if ((base != EFX_EXT_PORT_NA) && (base <= port)) {
1998 				if ((port - base) < delta) {
1999 					delta = (port - base);
2000 					ext_index = i;
2001 				}
2002 			}
2003 		}
2004 	}
2005 	*external_portp = (uint8_t)(ext_index + 1);
2006 
2007 	return (0);
2008 
2009 fail1:
2010 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2011 
2012 	return (rc);
2013 }
2014 
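/*
 * Query how the NIC expects DMA addresses to be presented. Broadly (an
 * editorial note, not a statement from the firmware documentation): with
 * FLAT mapping, host bus addresses can be used directly in descriptors,
 * while REGIONED mapping requires the address-region information fetched
 * below so that addresses can be translated into the NIC's regioned DMA
 * view.
 */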
2015 static __checkReturn	efx_rc_t
2016 efx_mcdi_get_nic_addr_caps(
2017 	__in		efx_nic_t *enp)
2018 {
2019 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2020 	uint32_t mapping_type;
2021 	efx_rc_t rc;
2022 
2023 	rc = efx_mcdi_get_nic_addr_info(enp, &mapping_type);
2024 	if (rc != 0) {
2025 		if (rc == ENOTSUP) {
2026 			encp->enc_dma_mapping = EFX_NIC_DMA_MAPPING_FLAT;
2027 			goto out;
2028 		}
2029 		goto fail1;
2030 	}
2031 
2032 	switch (mapping_type) {
2033 	case MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_FLAT:
2034 		encp->enc_dma_mapping = EFX_NIC_DMA_MAPPING_FLAT;
2035 		break;
2036 	case MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_REGIONED:
2037 		encp->enc_dma_mapping = EFX_NIC_DMA_MAPPING_REGIONED;
2038 		rc = efx_mcdi_get_nic_addr_regions(enp,
2039 		    &enp->en_dma.end_u.endu_region_info);
2040 		if (rc != 0)
2041 			goto fail2;
2042 		break;
2043 	default:
2044 		goto fail3;
2045 	}
2046 
2047 out:
2048 	return (0);
2049 
2050 fail3:
2051 	EFSYS_PROBE(fail3);
2052 fail2:
2053 	EFSYS_PROBE(fail2);
2054 fail1:
2055 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2056 
2057 	return (rc);
2058 }
2059 
2060 	__checkReturn	efx_rc_t
2061 efx_mcdi_nic_board_cfg(
2062 	__in		efx_nic_t *enp)
2063 {
2064 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
2065 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2066 	ef10_link_state_t els;
2067 	efx_port_t *epp = &(enp->en_port);
2068 	efx_pcie_interface_t intf;
2069 	uint32_t board_type = 0;
2070 	uint32_t base, nvec;
2071 	uint32_t port;
2072 	uint32_t mask;
2073 	uint32_t pf;
2074 	uint32_t vf;
2075 	uint8_t mac_addr[6] = { 0 };
2076 	efx_rc_t rc;
2077 
2078 	/* Get the (zero-based) MCDI port number */
2079 	if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
2080 		goto fail1;
2081 
2082 	/* EFX MCDI interface uses one-based port numbers */
2083 	emip->emi_port = port + 1;
2084 
2085 	encp->enc_assigned_port = port;
2086 
2087 	if ((rc = ef10_external_port_mapping(enp, port,
2088 		    &encp->enc_external_port)) != 0)
2089 		goto fail2;
2090 
2091 	/*
2092 	 * Get PCIe function number from firmware (used for
2093 	 * per-function privilege and dynamic config info).
2094 	 *  - PCIe PF: pf = PF number, vf = 0xffff.
2095 	 *  - PCIe VF: pf = parent PF, vf = VF number.
2096 	 */
2097 	if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf, &intf)) != 0)
2098 		goto fail3;
2099 
2100 	encp->enc_pf = pf;
2101 	encp->enc_vf = vf;
2102 	encp->enc_intf = intf;
2103 
2104 	if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
2105 		goto fail4;
2106 
2107 	rc = efx_mcdi_client_mac_addr_get(enp, CLIENT_HANDLE_SELF, mac_addr);
2108 	if ((rc != 0) && EFX_PCI_FUNCTION_IS_PF(encp)) {
2109 		/* Fallback for legacy MAC address get approach (PF) */
2110 		rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
2111 #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
2112 		/*
2113 		 * Disable static config checking, ONLY for manufacturing test
2114 		 * and setup at the factory, to allow the static config to be
2115 		 * installed.
2116 		 */
2117 #else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
2118 		if ((rc == 0) && (mac_addr[0] & 0x02)) {
2119 			/*
2120 			 * If the static config does not include a global MAC
2121 			 * address pool then the board may return a locally
2122 			 * administered MAC address (this should only happen on
2123 			 * incorrectly programmed boards).
2124 			 */
2125 			rc = EINVAL;
2126 		}
2127 #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
2128 	} else if (rc != 0) {
2129 		/* Fallback for legacy MAC address get approach (VF) */
2130 		rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
2131 	}
2132 
2133 	if (rc != 0)
2134 		goto fail5;
2135 
2136 	EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
2137 
2138 	/*
2139 	 * Get the current privilege mask. Note that this may be modified
2140 	 * dynamically, so for most cases the value is informational only.
2141 	 * If the privilege being discovered can't be granted dynamically,
2142 	 * it's fine to rely on the value. In all other cases, DO NOT use
2143 	 * the privilege mask to check for sufficient privileges, as that
2144 	 * can result in time-of-check/time-of-use bugs.
2145 	 */
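	/*
	 * Editorial example of the time-of-check/time-of-use concern: caching
	 * a privilege bit from this mask and acting on it later can go wrong
	 * if an administrator grants or revokes that privilege in between;
	 * attempt the privileged operation and handle its failure instead.
	 */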
2146 	if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
2147 		goto fail6;
2148 	encp->enc_privilege_mask = mask;
2149 
2150 	/* Board configuration (legacy) */
2151 	rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
2152 	if (rc != 0) {
2153 		/* Unprivileged functions may not be able to read board cfg */
2154 		if (rc == EACCES)
2155 			board_type = 0;
2156 		else
2157 			goto fail7;
2158 	}
2159 
2160 	encp->enc_board_type = board_type;
2161 
2162 	/* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
2163 	if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
2164 		goto fail8;
2165 
2166 	/*
2167 	 * Firmware with support for *_FEC capability bits does not
2168 	 * report that the corresponding *_FEC_REQUESTED bits are supported.
2169 	 * Add them here so that drivers understand that they are supported.
2170 	 */
2171 	if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC))
2172 		epp->ep_phy_cap_mask |=
2173 		    (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED);
2174 	if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC))
2175 		epp->ep_phy_cap_mask |=
2176 		    (1u << EFX_PHY_CAP_RS_FEC_REQUESTED);
2177 	if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC))
2178 		epp->ep_phy_cap_mask |=
2179 		    (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);
2180 
2181 	/* Obtain the default PHY advertised capabilities */
2182 	if ((rc = ef10_phy_get_link(enp, &els)) != 0)
2183 		goto fail9;
2184 	epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask;
2185 	epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask;
2186 
2187 	/* Check capabilities of running datapath firmware */
2188 	if ((rc = ef10_get_datapath_caps(enp)) != 0)
2189 		goto fail10;
2190 
2191 	/* Get interrupt vector limits */
2192 	if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
2193 		if (EFX_PCI_FUNCTION_IS_PF(encp))
2194 			goto fail11;
2195 
2196 		/* Ignore error (cannot query vector limits from a VF). */
2197 		base = 0;
2198 		nvec = 1024;
2199 	}
2200 	encp->enc_intr_vec_base = base;
2201 	encp->enc_intr_limit = nvec;
2202 
2203 	rc = efx_mcdi_get_nic_addr_caps(enp);
2204 	if (rc != 0)
2205 		goto fail12;
2206 
2207 	return (0);
2208 
2209 fail12:
2210 	EFSYS_PROBE(fail12);
2211 fail11:
2212 	EFSYS_PROBE(fail11);
2213 fail10:
2214 	EFSYS_PROBE(fail10);
2215 fail9:
2216 	EFSYS_PROBE(fail9);
2217 fail8:
2218 	EFSYS_PROBE(fail8);
2219 fail7:
2220 	EFSYS_PROBE(fail7);
2221 fail6:
2222 	EFSYS_PROBE(fail6);
2223 fail5:
2224 	EFSYS_PROBE(fail5);
2225 fail4:
2226 	EFSYS_PROBE(fail4);
2227 fail3:
2228 	EFSYS_PROBE(fail3);
2229 fail2:
2230 	EFSYS_PROBE(fail2);
2231 fail1:
2232 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2233 
2234 	return (rc);
2235 }
2236 
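/*
 * Ask the MC to reset this PCIe function's resources. A brief editorial
 * note (see the MCDI definitions for the authoritative description): the
 * FUNCTION_RESOURCE_RESET flag requests that resources owned by the calling
 * function be released, which is why ef10_nic_reset() below uses this
 * command when recovering the function.
 */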
2237 	__checkReturn	efx_rc_t
2238 efx_mcdi_entity_reset(
2239 	__in		efx_nic_t *enp)
2240 {
2241 	efx_mcdi_req_t req;
2242 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
2243 		MC_CMD_ENTITY_RESET_OUT_LEN);
2244 	efx_rc_t rc;
2245 
2246 	req.emr_cmd = MC_CMD_ENTITY_RESET;
2247 	req.emr_in_buf = payload;
2248 	req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
2249 	req.emr_out_buf = payload;
2250 	req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
2251 
2252 	MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
2253 	    ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
2254 
2255 	efx_mcdi_execute(enp, &req);
2256 
2257 	if (req.emr_rc != 0) {
2258 		rc = req.emr_rc;
2259 		goto fail1;
2260 	}
2261 
2262 	return (0);
2263 
2264 fail1:
2265 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2266 
2267 	return (rc);
2268 }
2269 
2270 #endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
2271 
2272 #if EFX_OPTS_EF10()
2273 
2274 static	__checkReturn	efx_rc_t
2275 ef10_set_workaround_bug26807(
2276 	__in		efx_nic_t *enp)
2277 {
2278 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2279 	uint32_t flags;
2280 	efx_rc_t rc;
2281 
2282 	/*
2283 	 * If the bug26807 workaround is enabled, then firmware has enabled
2284 	 * support for chained multicast filters. Firmware will reset (FLR)
2285 	 * functions which have filters in the hardware filter table when the
2286 	 * workaround is enabled/disabled.
2287 	 *
2288 	 * We must recheck if the workaround is enabled after inserting the
2289 	 * first hardware filter, in case it has been changed since this check.
2290 	 */
2291 	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
2292 	    B_TRUE, &flags);
2293 	if (rc == 0) {
2294 		encp->enc_bug26807_workaround = B_TRUE;
2295 		if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
2296 			/*
2297 			 * Other functions had installed filters before the
2298 			 * workaround was enabled, and they have been reset
2299 			 * by firmware.
2300 			 */
2301 			EFSYS_PROBE(bug26807_workaround_flr_done);
2302 			/* FIXME: bump MC warm boot count ? */
2303 		}
2304 	} else if (rc == EACCES) {
2305 		/*
2306 		 * Unprivileged functions cannot enable the workaround in older
2307 		 * firmware.
2308 		 */
2309 		encp->enc_bug26807_workaround = B_FALSE;
2310 	} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
2311 		encp->enc_bug26807_workaround = B_FALSE;
2312 	} else {
2313 		goto fail1;
2314 	}
2315 
2316 	return (0);
2317 
2318 fail1:
2319 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2320 
2321 	return (rc);
2322 }
2323 
2324 static	__checkReturn	efx_rc_t
2325 ef10_nic_board_cfg(
2326 	__in		efx_nic_t *enp)
2327 {
2328 	const efx_nic_ops_t *enop = enp->en_enop;
2329 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2330 	efx_rc_t rc;
2331 
2332 	if ((rc = efx_mcdi_nic_board_cfg(enp)) != 0)
2333 		goto fail1;
2334 
2335 	/*
2336 	 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
2337 	 * We only support the 14 byte prefix here.
2338 	 */
2339 	if (encp->enc_rx_prefix_size != 14) {
2340 		rc = ENOTSUP;
2341 		goto fail2;
2342 	}
2343 
2344 	encp->enc_clk_mult = 1; /* not used for EF10 */
2345 
2346 	/* Alignment for WPTR updates */
2347 	encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
2348 
2349 	encp->enc_rx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
2350 	encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_TX_KER_BYTE_CNT);
2351 	/* No boundary crossing limits */
2352 	encp->enc_tx_dma_desc_boundary = 0;
2353 
2354 	/*
2355 	 * Maximum number of bytes into the frame at which the TCP header can
2356 	 * start for firmware-assisted TSO to work.
2357 	 */
2358 	encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
2359 
2360 	/* EF10 TSO engine demands that packet header be contiguous. */
2361 	encp->enc_tx_tso_max_header_ndescs = 1;
2362 
2363 	/* The overall TSO header length is not limited. */
2364 	encp->enc_tx_tso_max_header_length = UINT32_MAX;
2365 
2366 	/*
2367 	 * There are no specific limitations on the number of
2368 	 * TSO payload descriptors.
2369 	 */
2370 	encp->enc_tx_tso_max_payload_ndescs = UINT32_MAX;
2371 
2372 	/* TSO superframe payload length is not limited. */
2373 	encp->enc_tx_tso_max_payload_length = UINT32_MAX;
2374 
2375 	/*
2376 	 * Limitation on the maximum number of outgoing packets per
2377 	 * TSO transaction described in SF-108452-SW.
2378 	 */
2379 	encp->enc_tx_tso_max_nframes = 32767;
2380 
2381 	/*
2382 	 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
2383 	 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
2384 	 * resources (allocated to this PCIe function), which is zero until
2385 	 * after we have allocated VIs.
2386 	 */
2387 	encp->enc_evq_limit = 1024;
2388 	encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
2389 	encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
2390 
2391 	encp->enc_buftbl_limit = UINT32_MAX;
2392 
2393 	if ((rc = ef10_set_workaround_bug26807(enp)) != 0)
2394 		goto fail3;
2395 
2396 	/* Get remaining controller-specific board config */
2397 	if ((rc = enop->eno_board_cfg(enp)) != 0)
2398 		if (rc != EACCES)
2399 			goto fail4;
2400 
2401 	return (0);
2402 
2403 fail4:
2404 	EFSYS_PROBE(fail4);
2405 fail3:
2406 	EFSYS_PROBE(fail3);
2407 fail2:
2408 	EFSYS_PROBE(fail2);
2409 fail1:
2410 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2411 
2412 	return (rc);
2413 }
2414 
2415 	__checkReturn	efx_rc_t
2416 ef10_nic_probe(
2417 	__in		efx_nic_t *enp)
2418 {
2419 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2420 	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2421 	efx_rc_t rc;
2422 
2423 	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2424 
2425 	/* Read and clear any assertion state */
2426 	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
2427 		goto fail1;
2428 
2429 	/* Exit the assertion handler */
2430 	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
2431 		if (rc != EACCES)
2432 			goto fail2;
2433 
2434 	if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
2435 		goto fail3;
2436 
2437 	if ((rc = ef10_nic_board_cfg(enp)) != 0)
2438 		goto fail4;
2439 
2440 	/*
2441 	 * Set default driver config limits (based on board config).
2442 	 *
2443 	 * FIXME: For now allocate a fixed number of VIs which is likely to be
2444 	 * sufficient and small enough to allow multiple functions on the same
2445 	 * port.
2446 	 */
2447 	edcp->edc_min_vi_count = edcp->edc_max_vi_count =
2448 	    MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
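	/*
	 * Illustrative numbers only (the actual limits come from the board
	 * config above): if enc_rxq_limit and enc_txq_limit were both 64,
	 * this would reserve 64 VIs; the MIN(128, ...) cap keeps the fixed
	 * reservation small enough for several functions to share a port.
	 */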
2449 
2450 	/* The client driver must configure and enable PIO buffer support */
2451 	edcp->edc_max_piobuf_count = 0;
2452 	edcp->edc_pio_alloc_size = 0;
2453 
2454 #if EFSYS_OPT_MAC_STATS
2455 	/* Wipe the MAC statistics */
2456 	if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
2457 		goto fail5;
2458 #endif
2459 
2460 #if EFSYS_OPT_LOOPBACK
2461 	if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
2462 		goto fail6;
2463 #endif
2464 
2465 #if EFSYS_OPT_MON_STATS
2466 	if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
2467 		/* Unprivileged functions do not have access to sensors */
2468 		if (rc != EACCES)
2469 			goto fail7;
2470 	}
2471 #endif
2472 
2473 	return (0);
2474 
2475 #if EFSYS_OPT_MON_STATS
2476 fail7:
2477 	EFSYS_PROBE(fail7);
2478 #endif
2479 #if EFSYS_OPT_LOOPBACK
2480 fail6:
2481 	EFSYS_PROBE(fail6);
2482 #endif
2483 #if EFSYS_OPT_MAC_STATS
2484 fail5:
2485 	EFSYS_PROBE(fail5);
2486 #endif
2487 fail4:
2488 	EFSYS_PROBE(fail4);
2489 fail3:
2490 	EFSYS_PROBE(fail3);
2491 fail2:
2492 	EFSYS_PROBE(fail2);
2493 fail1:
2494 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2495 
2496 	return (rc);
2497 }
2498 
2499 	__checkReturn	efx_rc_t
2500 ef10_nic_set_drv_limits(
2501 	__inout		efx_nic_t *enp,
2502 	__in		efx_drv_limits_t *edlp)
2503 {
2504 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2505 	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2506 	uint32_t min_evq_count, max_evq_count;
2507 	uint32_t min_rxq_count, max_rxq_count;
2508 	uint32_t min_txq_count, max_txq_count;
2509 	efx_rc_t rc;
2510 
2511 	if (edlp == NULL) {
2512 		rc = EINVAL;
2513 		goto fail1;
2514 	}
2515 
2516 	/* Get minimum required and maximum usable VI limits */
2517 	min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
2518 	min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
2519 	min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
2520 
2521 	edcp->edc_min_vi_count =
2522 	    MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
2523 
2524 	max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
2525 	max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
2526 	max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
2527 
2528 	edcp->edc_max_vi_count =
2529 	    MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
2530 
2531 	/*
2532 	 * Check limits for sub-allocated piobuf blocks.
2533 	 * PIO is optional, so don't fail if the limits are incorrect.
2534 	 */
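	/*
	 * Worked example of the sizing below, with hypothetical values (not
	 * taken from any particular board): enc_piobuf_size = 2048,
	 * enc_piobuf_limit = 16, enc_piobuf_min_alloc_size = 64 and
	 * edl_min_pio_alloc_size = 64 give blk_size = 64, blks_per_piobuf = 32
	 * and blk_count = 512; capping with edl_max_pio_alloc_count = 100
	 * yields edc_max_piobuf_count = (100 + 31) / 32 = 4, with
	 * edc_pio_alloc_size = 64.
	 */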
2535 	if ((encp->enc_piobuf_size == 0) ||
2536 	    (encp->enc_piobuf_limit == 0) ||
2537 	    (edlp->edl_min_pio_alloc_size == 0) ||
2538 	    (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
2539 		/* Disable PIO */
2540 		edcp->edc_max_piobuf_count = 0;
2541 		edcp->edc_pio_alloc_size = 0;
2542 	} else {
2543 		uint32_t blk_size, blk_count, blks_per_piobuf;
2544 
2545 		blk_size =
2546 		    MAX(edlp->edl_min_pio_alloc_size,
2547 			    encp->enc_piobuf_min_alloc_size);
2548 
2549 		blks_per_piobuf = encp->enc_piobuf_size / blk_size;
2550 		EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
2551 
2552 		blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
2553 
2554 		/* A zero max pio alloc count means unlimited */
2555 		if ((edlp->edl_max_pio_alloc_count > 0) &&
2556 		    (edlp->edl_max_pio_alloc_count < blk_count)) {
2557 			blk_count = edlp->edl_max_pio_alloc_count;
2558 		}
2559 
2560 		edcp->edc_pio_alloc_size = blk_size;
2561 		edcp->edc_max_piobuf_count =
2562 		    (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
2563 	}
2564 
2565 	return (0);
2566 
2567 fail1:
2568 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2569 
2570 	return (rc);
2571 }
2572 
2573 
2574 	__checkReturn	efx_rc_t
2575 ef10_nic_reset(
2576 	__in		efx_nic_t *enp)
2577 {
2578 	efx_rc_t rc;
2579 
2580 	/* ef10_nic_reset() is called to recover from BADASSERT failures. */
2581 	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
2582 		goto fail1;
2583 	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
2584 		goto fail2;
2585 
2586 	if ((rc = efx_mcdi_entity_reset(enp)) != 0)
2587 		goto fail3;
2588 
2589 	/* Clear RX/TX DMA queue errors */
2590 	enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
2591 
2592 	return (0);
2593 
2594 fail3:
2595 	EFSYS_PROBE(fail3);
2596 fail2:
2597 	EFSYS_PROBE(fail2);
2598 fail1:
2599 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2600 
2601 	return (rc);
2602 }
2603 
2604 #endif	/* EFX_OPTS_EF10() */
2605 
2606 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
2607 
2608 	__checkReturn	efx_rc_t
2609 ef10_upstream_port_vadaptor_alloc(
2610 	__in		efx_nic_t *enp)
2611 {
2612 	uint32_t retry;
2613 	uint32_t delay_us;
2614 	efx_rc_t rc;
2615 
2616 	/*
2617 	 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
2618 	 * driver has yet to bring up the EVB port. See bug 56147. In this case,
2619 	 * retry the request several times after waiting a while. The wait time
2620 	 * between retries starts small (10ms) and exponentially increases.
2621 	 * Total wait time is a little over two seconds. Retry logic in the
2622 	 * client driver may mean this whole loop is repeated if it continues to
2623 	 * fail.
2624 	 */
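	/*
	 * With the loop below this works out to sleeps of roughly 10 ms,
	 * 40 ms, 160 ms and then 640 ms for the remaining attempts before
	 * giving up after six retries (about 2.1 seconds in total).
	 */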
2625 	retry = 0;
2626 	delay_us = 10000;
2627 	while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
2628 		if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
2629 		    (rc != ENOENT)) {
2630 			/*
2631 			 * Do not retry alloc for PF, or for other errors on
2632 			 * a VF.
2633 			 */
2634 			goto fail1;
2635 		}
2636 
2637 		/* VF startup before PF is ready. Retry allocation. */
2638 		if (retry > 5) {
2639 			/* Too many attempts */
2640 			rc = EINVAL;
2641 			goto fail2;
2642 		}
2643 		EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
2644 		EFSYS_SLEEP(delay_us);
2645 		retry++;
2646 		if (delay_us < 500000)
2647 			delay_us <<= 2;
2648 	}
2649 
2650 	return (0);
2651 
2652 fail2:
2653 	EFSYS_PROBE(fail2);
2654 fail1:
2655 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2656 
2657 	return (rc);
2658 }
2659 
2660 #endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
2661 
2662 #if EFX_OPTS_EF10()
2663 
2664 	__checkReturn	efx_rc_t
2665 ef10_nic_init(
2666 	__in		efx_nic_t *enp)
2667 {
2668 	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2669 	uint32_t min_vi_count, max_vi_count;
2670 	uint32_t vi_count, vi_base, vi_shift;
2671 	uint32_t i;
2672 	uint32_t vi_window_size;
2673 	efx_rc_t rc;
2674 	boolean_t alloc_vadaptor = B_TRUE;
2675 
2676 	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2677 
2678 	/* Enable reporting of some events (e.g. link change) */
2679 	if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
2680 		goto fail1;
2681 
2682 	/* Allocate (optional) on-chip PIO buffers */
2683 	ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
2684 
2685 	/*
2686 	 * For best performance, PIO writes should use a write-combined
2687 	 * (WC) memory mapping. Using a separate WC mapping for the PIO
2688 	 * aperture of each VI would be a burden to drivers (and not
2689 	 * possible if the host page size is >4Kbyte).
2690 	 *
2691 	 * To avoid this we use a single uncached (UC) mapping for VI
2692 	 * register access, and a single WC mapping for extra VIs used
2693 	 * for PIO writes.
2694 	 *
2695 	 * Each piobuf must be linked to a VI in the WC mapping, and to
2696 	 * each VI that is using a sub-allocated block from the piobuf.
2697 	 */
2698 	min_vi_count = edcp->edc_min_vi_count;
2699 	max_vi_count =
2700 	    edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
2701 
2702 	/* Ensure that the previously attached driver's VIs are freed */
2703 	if ((rc = efx_mcdi_free_vis(enp)) != 0)
2704 		goto fail2;
2705 
2706 	/*
2707 	 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
2708 	 * fails then retrying the request for fewer VI resources may succeed.
2709 	 */
2710 	vi_count = 0;
2711 	if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
2712 		    &vi_base, &vi_count, &vi_shift)) != 0)
2713 		goto fail3;
2714 
2715 	EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
2716 
2717 	if (vi_count < min_vi_count) {
2718 		rc = ENOMEM;
2719 		goto fail4;
2720 	}
2721 
2722 	enp->en_arch.ef10.ena_vi_base = vi_base;
2723 	enp->en_arch.ef10.ena_vi_count = vi_count;
2724 	enp->en_arch.ef10.ena_vi_shift = vi_shift;
2725 
2726 	if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
2727 		/* Not enough extra VIs to map piobufs */
2728 		ef10_nic_free_piobufs(enp);
2729 	}
2730 
2731 	enp->en_arch.ef10.ena_pio_write_vi_base =
2732 	    vi_count - enp->en_arch.ef10.ena_piobuf_count;
2733 
2734 	EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
2735 	    EFX_VI_WINDOW_SHIFT_INVALID);
2736 	EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
2737 	    EFX_VI_WINDOW_SHIFT_64K);
2738 	vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
2739 
2740 	/* Save UC memory mapping details */
2741 	enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
2742 	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2743 		enp->en_arch.ef10.ena_uc_mem_map_size =
2744 		    (vi_window_size *
2745 		    enp->en_arch.ef10.ena_pio_write_vi_base);
2746 	} else {
2747 		enp->en_arch.ef10.ena_uc_mem_map_size =
2748 		    (vi_window_size *
2749 		    enp->en_arch.ef10.ena_vi_count);
2750 	}
2751 
2752 	/* Save WC memory mapping details */
2753 	enp->en_arch.ef10.ena_wc_mem_map_offset =
2754 	    enp->en_arch.ef10.ena_uc_mem_map_offset +
2755 	    enp->en_arch.ef10.ena_uc_mem_map_size;
2756 
2757 	enp->en_arch.ef10.ena_wc_mem_map_size =
2758 	    (vi_window_size *
2759 	    enp->en_arch.ef10.ena_piobuf_count);
2760 
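	/*
	 * Resulting BAR layout, with purely illustrative numbers: for
	 * vi_count = 64, ena_piobuf_count = 4 and an 8KB VI window,
	 * ena_pio_write_vi_base = 60, so the UC mapping covers
	 * [0, 60 * 8KB) for ordinary VI registers and the WC mapping
	 * covers [60 * 8KB, 64 * 8KB) for the PIO write VIs.
	 */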
2761 	/* Link piobufs to extra VIs in WC mapping */
2762 	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2763 		for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2764 			rc = efx_mcdi_link_piobuf(enp,
2765 			    enp->en_arch.ef10.ena_pio_write_vi_base + i,
2766 			    enp->en_arch.ef10.ena_piobuf_handle[i]);
2767 			if (rc != 0)
2768 				break;
2769 		}
2770 	}
2771 
2772 	/*
2773 	 * For the SR-IOV use case, a vAdaptor is allocated for the PF and the
2774 	 * associated VFs during NIC initialization, when the vSwitch is created
2775 	 * and vPorts are allocated. Hence, skip vAdaptor allocation for EVB and
2776 	 * update the vPort ID in the NIC structure with the one allocated for the PF.
2777 	 */
2778 
2779 	enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
2780 #if EFSYS_OPT_EVB
2781 	if ((enp->en_vswitchp != NULL) && (enp->en_vswitchp->ev_evcp != NULL)) {
2782 		/* For EVB use vport allocated on vswitch */
2783 		enp->en_vport_id = enp->en_vswitchp->ev_evcp->evc_vport_id;
2784 		alloc_vadaptor = B_FALSE;
2785 	}
2786 #endif
2787 	if (alloc_vadaptor != B_FALSE) {
2788 		/* Allocate a vAdaptor attached to our upstream vPort/pPort */
2789 		if ((rc = ef10_upstream_port_vadaptor_alloc(enp)) != 0)
2790 			goto fail5;
2791 	}
2792 	enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
2793 
2794 	return (0);
2795 
2796 fail5:
2797 	EFSYS_PROBE(fail5);
2798 fail4:
2799 	EFSYS_PROBE(fail4);
2800 fail3:
2801 	EFSYS_PROBE(fail3);
2802 fail2:
2803 	EFSYS_PROBE(fail2);
2804 
2805 	ef10_nic_free_piobufs(enp);
2806 
2807 fail1:
2808 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2809 
2810 	return (rc);
2811 }
2812 
2813 	__checkReturn	efx_rc_t
2814 ef10_nic_get_vi_pool(
2815 	__in		efx_nic_t *enp,
2816 	__out		uint32_t *vi_countp)
2817 {
2818 	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2819 
2820 	/*
2821 	 * Report VIs that the client driver can use.
2822 	 * Do not include VIs used for PIO buffer writes.
2823 	 */
2824 	*vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
2825 
2826 	return (0);
2827 }
2828 
2829 	__checkReturn	efx_rc_t
2830 ef10_nic_get_bar_region(
2831 	__in		efx_nic_t *enp,
2832 	__in		efx_nic_region_t region,
2833 	__out		uint32_t *offsetp,
2834 	__out		size_t *sizep)
2835 {
2836 	efx_rc_t rc;
2837 
2838 	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2839 
2840 	/*
2841 	 * TODO: Specify host memory mapping alignment and granularity
2842 	 * in efx_drv_limits_t so that they can be taken into account
2843 	 * when allocating extra VIs for PIO writes.
2844 	 */
2845 	switch (region) {
2846 	case EFX_REGION_VI:
2847 		/* UC mapped memory BAR region for VI registers */
2848 		*offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
2849 		*sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
2850 		break;
2851 
2852 	case EFX_REGION_PIO_WRITE_VI:
2853 		/* WC mapped memory BAR region for piobuf writes */
2854 		*offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
2855 		*sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
2856 		break;
2857 
2858 	default:
2859 		rc = EINVAL;
2860 		goto fail1;
2861 	}
2862 
2863 	return (0);
2864 
2865 fail1:
2866 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2867 
2868 	return (rc);
2869 }
2870 
2871 	__checkReturn	boolean_t
2872 ef10_nic_hw_unavailable(
2873 	__in		efx_nic_t *enp)
2874 {
2875 	efx_dword_t dword;
2876 
2877 	if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
2878 		return (B_TRUE);
2879 
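	/*
	 * A read of all-ones from the MC soft status register is treated as
	 * the device being unreachable (for example after hot-removal or
	 * while held in reset), since failed PCI reads typically return
	 * 0xffffffff.
	 */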
2880 	EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE);
2881 	if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
2882 		goto unavail;
2883 
2884 	return (B_FALSE);
2885 
2886 unavail:
2887 	ef10_nic_set_hw_unavailable(enp);
2888 
2889 	return (B_TRUE);
2890 }
2891 
2892 			void
2893 ef10_nic_set_hw_unavailable(
2894 	__in		efx_nic_t *enp)
2895 {
2896 	EFSYS_PROBE(hw_unavail);
2897 	enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
2898 }
2899 
2900 
2901 			void
2902 ef10_nic_fini(
2903 	__in		efx_nic_t *enp)
2904 {
2905 	uint32_t i;
2906 	efx_rc_t rc;
2907 	boolean_t do_vadaptor_free = B_TRUE;
2908 
2909 #if EFSYS_OPT_EVB
2910 	if (enp->en_vswitchp != NULL) {
2911 		/*
2912 		 * For SR-IOV the vAdaptor is freed with the vswitch,
2913 		 * so do not free it here.
2914 		 */
2915 		do_vadaptor_free = B_FALSE;
2916 	}
2917 #endif
2918 	if (do_vadaptor_free != B_FALSE) {
2919 		(void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
2920 		enp->en_vport_id = EVB_PORT_ID_NULL;
2921 	}
2922 
2923 	/* Unlink piobufs from extra VIs in WC mapping */
2924 	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2925 		for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2926 			rc = efx_mcdi_unlink_piobuf(enp,
2927 			    enp->en_arch.ef10.ena_pio_write_vi_base + i);
2928 			if (rc != 0)
2929 				break;
2930 		}
2931 	}
2932 
2933 	ef10_nic_free_piobufs(enp);
2934 
2935 	(void) efx_mcdi_free_vis(enp);
2936 	enp->en_arch.ef10.ena_vi_count = 0;
2937 }
2938 
2939 			void
2940 ef10_nic_unprobe(
2941 	__in		efx_nic_t *enp)
2942 {
2943 #if EFSYS_OPT_MON_STATS
2944 	mcdi_mon_cfg_free(enp);
2945 #endif /* EFSYS_OPT_MON_STATS */
2946 	(void) efx_mcdi_drv_attach(enp, B_FALSE);
2947 }
2948 
2949 #if EFSYS_OPT_DIAG
2950 
2951 	__checkReturn	efx_rc_t
2952 ef10_nic_register_test(
2953 	__in		efx_nic_t *enp)
2954 {
2955 	efx_rc_t rc;
2956 
2957 	/* FIXME */
2958 	_NOTE(ARGUNUSED(enp))
2959 	_NOTE(CONSTANTCONDITION)
2960 	if (B_FALSE) {
2961 		rc = ENOTSUP;
2962 		goto fail1;
2963 	}
2964 	/* FIXME */
2965 
2966 	return (0);
2967 
2968 fail1:
2969 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2970 
2971 	return (rc);
2972 }
2973 
2974 #endif	/* EFSYS_OPT_DIAG */
2975 
2976 #if EFSYS_OPT_FW_SUBVARIANT_AWARE
2977 
2978 	__checkReturn	efx_rc_t
2979 efx_mcdi_get_nic_global(
2980 	__in		efx_nic_t *enp,
2981 	__in		uint32_t key,
2982 	__out		uint32_t *valuep)
2983 {
2984 	efx_mcdi_req_t req;
2985 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN,
2986 		MC_CMD_GET_NIC_GLOBAL_OUT_LEN);
2987 	efx_rc_t rc;
2988 
2989 	req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
2990 	req.emr_in_buf = payload;
2991 	req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
2992 	req.emr_out_buf = payload;
2993 	req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;
2994 
2995 	MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);
2996 
2997 	efx_mcdi_execute(enp, &req);
2998 
2999 	if (req.emr_rc != 0) {
3000 		rc = req.emr_rc;
3001 		goto fail1;
3002 	}
3003 
3004 	if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {
3005 		rc = EMSGSIZE;
3006 		goto fail2;
3007 	}
3008 
3009 	*valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);
3010 
3011 	return (0);
3012 
3013 fail2:
3014 	EFSYS_PROBE(fail2);
3015 fail1:
3016 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
3017 
3018 	return (rc);
3019 }
3020 
3021 	__checkReturn	efx_rc_t
3022 efx_mcdi_set_nic_global(
3023 	__in		efx_nic_t *enp,
3024 	__in		uint32_t key,
3025 	__in		uint32_t value)
3026 {
3027 	efx_mcdi_req_t req;
3028 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0);
3029 	efx_rc_t rc;
3030 
3031 	req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
3032 	req.emr_in_buf = payload;
3033 	req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
3034 	req.emr_out_buf = NULL;
3035 	req.emr_out_length = 0;
3036 
3037 	MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
3038 	MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);
3039 
3040 	efx_mcdi_execute(enp, &req);
3041 
3042 	if (req.emr_rc != 0) {
3043 		rc = req.emr_rc;
3044 		goto fail1;
3045 	}
3046 
3047 	return (0);
3048 
3049 fail1:
3050 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
3051 
3052 	return (rc);
3053 }
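/*
 * Hedged usage sketch for the two helpers above; "key" and "desired" are
 * placeholders for values defined by the MCDI headers, not names from this
 * file:
 *
 *	uint32_t value;
 *
 *	if (efx_mcdi_get_nic_global(enp, key, &value) == 0 &&
 *	    value != desired)
 *		(void) efx_mcdi_set_nic_global(enp, key, desired);
 */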
3054 
3055 #endif	/* EFSYS_OPT_FW_SUBVARIANT_AWARE */
3056 
3057 #endif	/* EFX_OPTS_EF10() */
3058