1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2012-2019 Solarflare Communications Inc.
5  */
6 
7 #include "efx.h"
8 #include "efx_impl.h"
9 #if EFSYS_OPT_MON_MCDI
10 #include "mcdi_mon.h"
11 #endif
12 
13 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
14 
15 #include "ef10_tlv_layout.h"
16 
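/*
 * Query the (zero-based) port number that the firmware has assigned to
 * this PCIe function, using MC_CMD_GET_PORT_ASSIGNMENT.
 */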
17 	__checkReturn	efx_rc_t
18 efx_mcdi_get_port_assignment(
19 	__in		efx_nic_t *enp,
20 	__out		uint32_t *portp)
21 {
22 	efx_mcdi_req_t req;
23 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
24 		MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
25 	efx_rc_t rc;
26 
27 	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
28 
29 	req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
30 	req.emr_in_buf = payload;
31 	req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
32 	req.emr_out_buf = payload;
33 	req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
34 
35 	efx_mcdi_execute(enp, &req);
36 
37 	if (req.emr_rc != 0) {
38 		rc = req.emr_rc;
39 		goto fail1;
40 	}
41 
42 	if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
43 		rc = EMSGSIZE;
44 		goto fail2;
45 	}
46 
47 	*portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
48 
49 	return (0);
50 
51 fail2:
52 	EFSYS_PROBE(fail2);
53 fail1:
54 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
55 
56 	return (rc);
57 }
58 
59 	__checkReturn	efx_rc_t
60 efx_mcdi_get_port_modes(
61 	__in		efx_nic_t *enp,
62 	__out		uint32_t *modesp,
63 	__out_opt	uint32_t *current_modep,
64 	__out_opt	uint32_t *default_modep)
65 {
66 	efx_mcdi_req_t req;
67 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN,
68 		MC_CMD_GET_PORT_MODES_OUT_LEN);
69 	efx_rc_t rc;
70 
71 	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
72 
73 	req.emr_cmd = MC_CMD_GET_PORT_MODES;
74 	req.emr_in_buf = payload;
75 	req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
76 	req.emr_out_buf = payload;
77 	req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
78 
79 	efx_mcdi_execute(enp, &req);
80 
81 	if (req.emr_rc != 0) {
82 		rc = req.emr_rc;
83 		goto fail1;
84 	}
85 
86 	/*
87 	 * Require only Modes and DefaultMode fields, unless the current mode
88 	 * was requested (CurrentMode field was added for Medford).
89 	 */
90 	if (req.emr_out_length_used <
91 	    MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
92 		rc = EMSGSIZE;
93 		goto fail2;
94 	}
95 	if ((current_modep != NULL) && (req.emr_out_length_used <
96 	    MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
97 		rc = EMSGSIZE;
98 		goto fail3;
99 	}
100 
101 	*modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
102 
103 	if (current_modep != NULL) {
104 		*current_modep = MCDI_OUT_DWORD(req,
105 					    GET_PORT_MODES_OUT_CURRENT_MODE);
106 	}
107 
108 	if (default_modep != NULL) {
109 		*default_modep = MCDI_OUT_DWORD(req,
110 					    GET_PORT_MODES_OUT_DEFAULT_MODE);
111 	}
112 
113 	return (0);
114 
115 fail3:
116 	EFSYS_PROBE(fail3);
117 fail2:
118 	EFSYS_PROBE(fail2);
119 fail1:
120 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
121 
122 	return (rc);
123 }
124 
125 	__checkReturn	efx_rc_t
126 ef10_nic_get_port_mode_bandwidth(
127 	__in		efx_nic_t *enp,
128 	__out		uint32_t *bandwidth_mbpsp)
129 {
130 	uint32_t port_modes;
131 	uint32_t current_mode;
132 	efx_port_t *epp = &(enp->en_port);
133 
134 	uint32_t single_lane;
135 	uint32_t dual_lane;
136 	uint32_t quad_lane;
137 	uint32_t bandwidth;
138 	efx_rc_t rc;
139 
140 	if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
141 				    &current_mode, NULL)) != 0) {
142 		/* No port mode info available. */
143 		goto fail1;
144 	}
145 
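	/*
	 * Derive per-lane-count speeds from the PHY capability mask:
	 * 25000 vs 10000 Mbps for a single lane, 50000 vs 20000 for dual
	 * lanes and 100000 vs 40000 for quad lanes, depending on whether
	 * the corresponding speed capability is advertised. For example,
	 * port mode TLV_PORT_MODE_2x1_2x1 on a 25G-capable adapter yields
	 * (2 + 2) * 25000 = 100000 Mbps.
	 */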
146 	if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_25000FDX))
147 		single_lane = 25000;
148 	else
149 		single_lane = 10000;
150 
151 	if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_50000FDX))
152 		dual_lane = 50000;
153 	else
154 		dual_lane = 20000;
155 
156 	if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_100000FDX))
157 		quad_lane = 100000;
158 	else
159 		quad_lane = 40000;
160 
161 	switch (current_mode) {
162 	case TLV_PORT_MODE_1x1_NA:			/* mode 0 */
163 		bandwidth = single_lane;
164 		break;
165 	case TLV_PORT_MODE_1x2_NA:			/* mode 10 */
166 	case TLV_PORT_MODE_NA_1x2:			/* mode 11 */
167 		bandwidth = dual_lane;
168 		break;
169 	case TLV_PORT_MODE_1x1_1x1:			/* mode 2 */
170 		bandwidth = single_lane + single_lane;
171 		break;
172 	case TLV_PORT_MODE_4x1_NA:			/* mode 4 */
173 	case TLV_PORT_MODE_NA_4x1:			/* mode 8 */
174 		bandwidth = 4 * single_lane;
175 		break;
176 	case TLV_PORT_MODE_2x1_2x1:			/* mode 5 */
177 		bandwidth = (2 * single_lane) + (2 * single_lane);
178 		break;
179 	case TLV_PORT_MODE_1x2_1x2:			/* mode 12 */
180 		bandwidth = dual_lane + dual_lane;
181 		break;
182 	case TLV_PORT_MODE_1x2_2x1:			/* mode 17 */
183 	case TLV_PORT_MODE_2x1_1x2:			/* mode 18 */
184 		bandwidth = dual_lane + (2 * single_lane);
185 		break;
186 	/* Legacy Medford-only mode. Do not use (see bug63270) */
187 	case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:	/* mode 9 */
188 		bandwidth = 4 * single_lane;
189 		break;
190 	case TLV_PORT_MODE_1x4_NA:			/* mode 1 */
191 	case TLV_PORT_MODE_NA_1x4:			/* mode 22 */
192 		bandwidth = quad_lane;
193 		break;
194 	case TLV_PORT_MODE_2x2_NA:			/* mode 13 */
195 	case TLV_PORT_MODE_NA_2x2:			/* mode 14 */
196 		bandwidth = 2 * dual_lane;
197 		break;
198 	case TLV_PORT_MODE_1x4_2x1:			/* mode 6 */
199 	case TLV_PORT_MODE_2x1_1x4:			/* mode 7 */
200 		bandwidth = quad_lane + (2 * single_lane);
201 		break;
202 	case TLV_PORT_MODE_1x4_1x2:			/* mode 15 */
203 	case TLV_PORT_MODE_1x2_1x4:			/* mode 16 */
204 		bandwidth = quad_lane + dual_lane;
205 		break;
206 	case TLV_PORT_MODE_1x4_1x4:			/* mode 3 */
207 		bandwidth = quad_lane + quad_lane;
208 		break;
209 	default:
210 		rc = EINVAL;
211 		goto fail2;
212 	}
213 
214 	*bandwidth_mbpsp = bandwidth;
215 
216 	return (0);
217 
218 fail2:
219 	EFSYS_PROBE(fail2);
220 fail1:
221 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
222 
223 	return (rc);
224 }
225 
226 #endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
227 
228 #if EFX_OPTS_EF10()
229 
230 	__checkReturn		efx_rc_t
231 efx_mcdi_vadaptor_alloc(
232 	__in			efx_nic_t *enp,
233 	__in			uint32_t port_id)
234 {
235 	efx_mcdi_req_t req;
236 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN,
237 		MC_CMD_VADAPTOR_ALLOC_OUT_LEN);
238 	efx_rc_t rc;
239 
240 	req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
241 	req.emr_in_buf = payload;
242 	req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
243 	req.emr_out_buf = payload;
244 	req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
245 
246 	MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
247 	MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
248 	    VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
249 	    enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
250 
251 	efx_mcdi_execute(enp, &req);
252 
253 	if (req.emr_rc != 0) {
254 		rc = req.emr_rc;
255 		goto fail1;
256 	}
257 
258 	return (0);
259 
260 fail1:
261 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
262 
263 	return (rc);
264 }
265 
266 	__checkReturn		efx_rc_t
267 efx_mcdi_vadaptor_free(
268 	__in			efx_nic_t *enp,
269 	__in			uint32_t port_id)
270 {
271 	efx_mcdi_req_t req;
272 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN,
273 		MC_CMD_VADAPTOR_FREE_OUT_LEN);
274 	efx_rc_t rc;
275 
276 	req.emr_cmd = MC_CMD_VADAPTOR_FREE;
277 	req.emr_in_buf = payload;
278 	req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
279 	req.emr_out_buf = payload;
280 	req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
281 
282 	MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
283 
284 	efx_mcdi_execute(enp, &req);
285 
286 	if (req.emr_rc != 0) {
287 		rc = req.emr_rc;
288 		goto fail1;
289 	}
290 
291 	return (0);
292 
293 fail1:
294 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
295 
296 	return (rc);
297 }
298 
299 #endif	/* EFX_OPTS_EF10() */
300 
301 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
302 
303 	__checkReturn	efx_rc_t
304 efx_mcdi_get_mac_address_pf(
305 	__in			efx_nic_t *enp,
306 	__out_ecount_opt(6)	uint8_t mac_addrp[6])
307 {
308 	efx_mcdi_req_t req;
309 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
310 		MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
311 	efx_rc_t rc;
312 
313 	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
314 
315 	req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
316 	req.emr_in_buf = payload;
317 	req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
318 	req.emr_out_buf = payload;
319 	req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
320 
321 	efx_mcdi_execute(enp, &req);
322 
323 	if (req.emr_rc != 0) {
324 		rc = req.emr_rc;
325 		goto fail1;
326 	}
327 
328 	if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
329 		rc = EMSGSIZE;
330 		goto fail2;
331 	}
332 
333 	if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
334 		rc = ENOENT;
335 		goto fail3;
336 	}
337 
338 	if (mac_addrp != NULL) {
339 		uint8_t *addrp;
340 
341 		addrp = MCDI_OUT2(req, uint8_t,
342 		    GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
343 
344 		EFX_MAC_ADDR_COPY(mac_addrp, addrp);
345 	}
346 
347 	return (0);
348 
349 fail3:
350 	EFSYS_PROBE(fail3);
351 fail2:
352 	EFSYS_PROBE(fail2);
353 fail1:
354 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
355 
356 	return (rc);
357 }
358 
359 	__checkReturn	efx_rc_t
360 efx_mcdi_get_mac_address_vf(
361 	__in			efx_nic_t *enp,
362 	__out_ecount_opt(6)	uint8_t mac_addrp[6])
363 {
364 	efx_mcdi_req_t req;
365 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
366 		MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
367 	efx_rc_t rc;
368 
369 	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
370 
371 	req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
372 	req.emr_in_buf = payload;
373 	req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
374 	req.emr_out_buf = payload;
375 	req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
376 
377 	MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
378 	    EVB_PORT_ID_ASSIGNED);
379 
380 	efx_mcdi_execute(enp, &req);
381 
382 	if (req.emr_rc != 0) {
383 		rc = req.emr_rc;
384 		goto fail1;
385 	}
386 
387 	if (req.emr_out_length_used <
388 	    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
389 		rc = EMSGSIZE;
390 		goto fail2;
391 	}
392 
393 	if (MCDI_OUT_DWORD(req,
394 		VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
395 		rc = ENOENT;
396 		goto fail3;
397 	}
398 
399 	if (mac_addrp != NULL) {
400 		uint8_t *addrp;
401 
402 		addrp = MCDI_OUT2(req, uint8_t,
403 		    VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
404 
405 		EFX_MAC_ADDR_COPY(mac_addrp, addrp);
406 	}
407 
408 	return (0);
409 
410 fail3:
411 	EFSYS_PROBE(fail3);
412 fail2:
413 	EFSYS_PROBE(fail2);
414 fail1:
415 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
416 
417 	return (rc);
418 }
419 
420 	__checkReturn	efx_rc_t
421 efx_mcdi_get_clock(
422 	__in		efx_nic_t *enp,
423 	__out		uint32_t *sys_freqp,
424 	__out		uint32_t *dpcpu_freqp)
425 {
426 	efx_mcdi_req_t req;
427 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN,
428 		MC_CMD_GET_CLOCK_OUT_LEN);
429 	efx_rc_t rc;
430 
431 	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
432 
433 	req.emr_cmd = MC_CMD_GET_CLOCK;
434 	req.emr_in_buf = payload;
435 	req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
436 	req.emr_out_buf = payload;
437 	req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
438 
439 	efx_mcdi_execute(enp, &req);
440 
441 	if (req.emr_rc != 0) {
442 		rc = req.emr_rc;
443 		goto fail1;
444 	}
445 
446 	if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
447 		rc = EMSGSIZE;
448 		goto fail2;
449 	}
450 
451 	*sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
452 	if (*sys_freqp == 0) {
453 		rc = EINVAL;
454 		goto fail3;
455 	}
456 	*dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
457 	if (*dpcpu_freqp == 0) {
458 		rc = EINVAL;
459 		goto fail4;
460 	}
461 
462 	return (0);
463 
464 fail4:
465 	EFSYS_PROBE(fail4);
466 fail3:
467 	EFSYS_PROBE(fail3);
468 fail2:
469 	EFSYS_PROBE(fail2);
470 fail1:
471 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
472 
473 	return (rc);
474 }
475 
476 	__checkReturn	efx_rc_t
477 efx_mcdi_get_rxdp_config(
478 	__in		efx_nic_t *enp,
479 	__out		uint32_t *end_paddingp)
480 {
481 	efx_mcdi_req_t req;
482 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN,
483 		MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
484 	uint32_t end_padding;
485 	efx_rc_t rc;
486 
487 	req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
488 	req.emr_in_buf = payload;
489 	req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
490 	req.emr_out_buf = payload;
491 	req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
492 
493 	efx_mcdi_execute(enp, &req);
494 	if (req.emr_rc != 0) {
495 		rc = req.emr_rc;
496 		goto fail1;
497 	}
498 
499 	if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
500 				    GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
501 		/* RX DMA end padding is disabled */
502 		end_padding = 0;
503 	} else {
504 		switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
505 					    GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
506 		case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
507 			end_padding = 64;
508 			break;
509 		case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
510 			end_padding = 128;
511 			break;
512 		case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
513 			end_padding = 256;
514 			break;
515 		default:
516 			rc = ENOTSUP;
517 			goto fail2;
518 		}
519 	}
520 
521 	*end_paddingp = end_padding;
522 
523 	return (0);
524 
525 fail2:
526 	EFSYS_PROBE(fail2);
527 fail1:
528 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
529 
530 	return (rc);
531 }
532 
533 	__checkReturn	efx_rc_t
534 efx_mcdi_get_vector_cfg(
535 	__in		efx_nic_t *enp,
536 	__out_opt	uint32_t *vec_basep,
537 	__out_opt	uint32_t *pf_nvecp,
538 	__out_opt	uint32_t *vf_nvecp)
539 {
540 	efx_mcdi_req_t req;
541 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN,
542 		MC_CMD_GET_VECTOR_CFG_OUT_LEN);
543 	efx_rc_t rc;
544 
545 	req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
546 	req.emr_in_buf = payload;
547 	req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
548 	req.emr_out_buf = payload;
549 	req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
550 
551 	efx_mcdi_execute(enp, &req);
552 
553 	if (req.emr_rc != 0) {
554 		rc = req.emr_rc;
555 		goto fail1;
556 	}
557 
558 	if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
559 		rc = EMSGSIZE;
560 		goto fail2;
561 	}
562 
563 	if (vec_basep != NULL)
564 		*vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
565 	if (pf_nvecp != NULL)
566 		*pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
567 	if (vf_nvecp != NULL)
568 		*vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
569 
570 	return (0);
571 
572 fail2:
573 	EFSYS_PROBE(fail2);
574 fail1:
575 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
576 
577 	return (rc);
578 }
579 
580 	__checkReturn	efx_rc_t
581 efx_mcdi_alloc_vis(
582 	__in		efx_nic_t *enp,
583 	__in		uint32_t min_vi_count,
584 	__in		uint32_t max_vi_count,
585 	__out		uint32_t *vi_basep,
586 	__out		uint32_t *vi_countp,
587 	__out		uint32_t *vi_shiftp)
588 {
589 	efx_mcdi_req_t req;
590 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN,
591 		MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
592 	efx_rc_t rc;
593 
594 	if (vi_countp == NULL) {
595 		rc = EINVAL;
596 		goto fail1;
597 	}
598 
599 	req.emr_cmd = MC_CMD_ALLOC_VIS;
600 	req.emr_in_buf = payload;
601 	req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
602 	req.emr_out_buf = payload;
603 	req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
604 
605 	MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
606 	MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
607 
608 	efx_mcdi_execute(enp, &req);
609 
610 	if (req.emr_rc != 0) {
611 		rc = req.emr_rc;
612 		goto fail2;
613 	}
614 
615 	if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
616 		rc = EMSGSIZE;
617 		goto fail3;
618 	}
619 
620 	*vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
621 	*vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
622 
623 	/* Report VI_SHIFT if available (always zero for Huntington) */
624 	if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
625 		*vi_shiftp = 0;
626 	else
627 		*vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
628 
629 	return (0);
630 
631 fail3:
632 	EFSYS_PROBE(fail3);
633 fail2:
634 	EFSYS_PROBE(fail2);
635 fail1:
636 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
637 
638 	return (rc);
639 }
640 
641 
642 	__checkReturn	efx_rc_t
643 efx_mcdi_free_vis(
644 	__in		efx_nic_t *enp)
645 {
646 	efx_mcdi_req_t req;
647 	efx_rc_t rc;
648 
649 	EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
650 	EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
651 
652 	req.emr_cmd = MC_CMD_FREE_VIS;
653 	req.emr_in_buf = NULL;
654 	req.emr_in_length = 0;
655 	req.emr_out_buf = NULL;
656 	req.emr_out_length = 0;
657 
658 	efx_mcdi_execute_quiet(enp, &req);
659 
660 	/* Ignore EALREADY (no allocated VIs, so nothing to free) */
661 	if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
662 		rc = req.emr_rc;
663 		goto fail1;
664 	}
665 
666 	return (0);
667 
668 fail1:
669 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
670 
671 	return (rc);
672 }
673 
674 #endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
675 
676 #if EFX_OPTS_EF10()
677 
678 static	__checkReturn	efx_rc_t
679 efx_mcdi_alloc_piobuf(
680 	__in		efx_nic_t *enp,
681 	__out		efx_piobuf_handle_t *handlep)
682 {
683 	efx_mcdi_req_t req;
684 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN,
685 		MC_CMD_ALLOC_PIOBUF_OUT_LEN);
686 	efx_rc_t rc;
687 
688 	if (handlep == NULL) {
689 		rc = EINVAL;
690 		goto fail1;
691 	}
692 
693 	req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
694 	req.emr_in_buf = payload;
695 	req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
696 	req.emr_out_buf = payload;
697 	req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
698 
699 	efx_mcdi_execute_quiet(enp, &req);
700 
701 	if (req.emr_rc != 0) {
702 		rc = req.emr_rc;
703 		goto fail2;
704 	}
705 
706 	if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
707 		rc = EMSGSIZE;
708 		goto fail3;
709 	}
710 
711 	*handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
712 
713 	return (0);
714 
715 fail3:
716 	EFSYS_PROBE(fail3);
717 fail2:
718 	EFSYS_PROBE(fail2);
719 fail1:
720 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
721 
722 	return (rc);
723 }
724 
725 static	__checkReturn	efx_rc_t
726 efx_mcdi_free_piobuf(
727 	__in		efx_nic_t *enp,
728 	__in		efx_piobuf_handle_t handle)
729 {
730 	efx_mcdi_req_t req;
731 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN,
732 		MC_CMD_FREE_PIOBUF_OUT_LEN);
733 	efx_rc_t rc;
734 
735 	req.emr_cmd = MC_CMD_FREE_PIOBUF;
736 	req.emr_in_buf = payload;
737 	req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
738 	req.emr_out_buf = payload;
739 	req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
740 
741 	MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
742 
743 	efx_mcdi_execute_quiet(enp, &req);
744 
745 	if (req.emr_rc != 0) {
746 		rc = req.emr_rc;
747 		goto fail1;
748 	}
749 
750 	return (0);
751 
752 fail1:
753 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
754 
755 	return (rc);
756 }
757 
758 static	__checkReturn	efx_rc_t
759 efx_mcdi_link_piobuf(
760 	__in		efx_nic_t *enp,
761 	__in		uint32_t vi_index,
762 	__in		efx_piobuf_handle_t handle)
763 {
764 	efx_mcdi_req_t req;
765 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN,
766 		MC_CMD_LINK_PIOBUF_OUT_LEN);
767 	efx_rc_t rc;
768 
769 	req.emr_cmd = MC_CMD_LINK_PIOBUF;
770 	req.emr_in_buf = payload;
771 	req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
772 	req.emr_out_buf = payload;
773 	req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
774 
775 	MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
776 	MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
777 
778 	efx_mcdi_execute(enp, &req);
779 
780 	if (req.emr_rc != 0) {
781 		rc = req.emr_rc;
782 		goto fail1;
783 	}
784 
785 	return (0);
786 
787 fail1:
788 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
789 
790 	return (rc);
791 }
792 
793 static	__checkReturn	efx_rc_t
794 efx_mcdi_unlink_piobuf(
795 	__in		efx_nic_t *enp,
796 	__in		uint32_t vi_index)
797 {
798 	efx_mcdi_req_t req;
799 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN,
800 		MC_CMD_UNLINK_PIOBUF_OUT_LEN);
801 	efx_rc_t rc;
802 
803 	req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
804 	req.emr_in_buf = payload;
805 	req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
806 	req.emr_out_buf = payload;
807 	req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
808 
809 	MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
810 
811 	efx_mcdi_execute_quiet(enp, &req);
812 
813 	if (req.emr_rc != 0) {
814 		rc = req.emr_rc;
815 		goto fail1;
816 	}
817 
818 	return (0);
819 
820 fail1:
821 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
822 
823 	return (rc);
824 }
825 
826 static			void
827 ef10_nic_alloc_piobufs(
828 	__in		efx_nic_t *enp,
829 	__in		uint32_t max_piobuf_count)
830 {
831 	efx_piobuf_handle_t *handlep;
832 	unsigned int i;
833 
834 	EFSYS_ASSERT3U(max_piobuf_count, <=,
835 	    EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
836 
837 	enp->en_arch.ef10.ena_piobuf_count = 0;
838 
839 	for (i = 0; i < max_piobuf_count; i++) {
840 		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
841 
842 		if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
843 			goto fail1;
844 
845 		enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
846 		enp->en_arch.ef10.ena_piobuf_count++;
847 	}
848 
849 	return;
850 
851 fail1:
852 	for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
853 		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
854 
855 		(void) efx_mcdi_free_piobuf(enp, *handlep);
856 		*handlep = EFX_PIOBUF_HANDLE_INVALID;
857 	}
858 	enp->en_arch.ef10.ena_piobuf_count = 0;
859 }
860 
861 
862 static			void
863 ef10_nic_free_piobufs(
864 	__in		efx_nic_t *enp)
865 {
866 	efx_piobuf_handle_t *handlep;
867 	unsigned int i;
868 
869 	for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
870 		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
871 
872 		(void) efx_mcdi_free_piobuf(enp, *handlep);
873 		*handlep = EFX_PIOBUF_HANDLE_INVALID;
874 	}
875 	enp->en_arch.ef10.ena_piobuf_count = 0;
876 }
877 
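/*
 * PIO buffer sub-allocation: each PIO buffer (enc_piobuf_size bytes) is
 * divided into blocks of edc_pio_alloc_size bytes, and block usage is
 * tracked as one bit per block in ena_pio_alloc_map[]. An illustrative
 * (not prescriptive) caller sequence for the functions below might be:
 *
 *	efx_piobuf_handle_t handle;
 *	uint32_t bufnum, blknum, offset;
 *	size_t size;
 *
 *	rc = ef10_nic_pio_alloc(enp, &bufnum, &handle, &blknum,
 *	    &offset, &size);
 *	rc = ef10_nic_pio_link(enp, vi_index, handle);
 *	... use the PIO window at [offset, offset + size) ...
 *	rc = ef10_nic_pio_unlink(enp, vi_index);
 *	rc = ef10_nic_pio_free(enp, bufnum, blknum);
 *
 * where vi_index identifies the TXQ's virtual interface.
 */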
878 /* Sub-allocate a block from a piobuf */
879 	__checkReturn	efx_rc_t
880 ef10_nic_pio_alloc(
881 	__inout		efx_nic_t *enp,
882 	__out		uint32_t *bufnump,
883 	__out		efx_piobuf_handle_t *handlep,
884 	__out		uint32_t *blknump,
885 	__out		uint32_t *offsetp,
886 	__out		size_t *sizep)
887 {
888 	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
889 	efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
890 	uint32_t blk_per_buf;
891 	uint32_t buf, blk;
892 	efx_rc_t rc;
893 
894 	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
895 	EFSYS_ASSERT(bufnump);
896 	EFSYS_ASSERT(handlep);
897 	EFSYS_ASSERT(blknump);
898 	EFSYS_ASSERT(offsetp);
899 	EFSYS_ASSERT(sizep);
900 
901 	if ((edcp->edc_pio_alloc_size == 0) ||
902 	    (enp->en_arch.ef10.ena_piobuf_count == 0)) {
903 		rc = ENOMEM;
904 		goto fail1;
905 	}
906 	blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
907 
908 	for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
909 		uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
910 
911 		if (~(*map) == 0)
912 			continue;
913 
914 		EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
915 		for (blk = 0; blk < blk_per_buf; blk++) {
916 			if ((*map & (1u << blk)) == 0) {
917 				*map |= (1u << blk);
918 				goto done;
919 			}
920 		}
921 	}
922 	rc = ENOMEM;
923 	goto fail2;
924 
925 done:
926 	*handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
927 	*bufnump = buf;
928 	*blknump = blk;
929 	*sizep = edcp->edc_pio_alloc_size;
930 	*offsetp = blk * (*sizep);
931 
932 	return (0);
933 
934 fail2:
935 	EFSYS_PROBE(fail2);
936 fail1:
937 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
938 
939 	return (rc);
940 }
941 
942 /* Free a piobuf sub-allocated block */
943 	__checkReturn	efx_rc_t
944 ef10_nic_pio_free(
945 	__inout		efx_nic_t *enp,
946 	__in		uint32_t bufnum,
947 	__in		uint32_t blknum)
948 {
949 	uint32_t *map;
950 	efx_rc_t rc;
951 
952 	if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
953 	    (blknum >= (8 * sizeof (*map)))) {
954 		rc = EINVAL;
955 		goto fail1;
956 	}
957 
958 	map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
959 	if ((*map & (1u << blknum)) == 0) {
960 		rc = ENOENT;
961 		goto fail2;
962 	}
963 	*map &= ~(1u << blknum);
964 
965 	return (0);
966 
967 fail2:
968 	EFSYS_PROBE(fail2);
969 fail1:
970 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
971 
972 	return (rc);
973 }
974 
975 	__checkReturn	efx_rc_t
976 ef10_nic_pio_link(
977 	__inout		efx_nic_t *enp,
978 	__in		uint32_t vi_index,
979 	__in		efx_piobuf_handle_t handle)
980 {
981 	return (efx_mcdi_link_piobuf(enp, vi_index, handle));
982 }
983 
984 	__checkReturn	efx_rc_t
985 ef10_nic_pio_unlink(
986 	__inout		efx_nic_t *enp,
987 	__in		uint32_t vi_index)
988 {
989 	return (efx_mcdi_unlink_piobuf(enp, vi_index));
990 }
991 
992 #endif	/* EFX_OPTS_EF10() */
993 
994 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
995 
996 static	__checkReturn	efx_rc_t
997 ef10_mcdi_get_pf_count(
998 	__in		efx_nic_t *enp,
999 	__out		uint32_t *pf_countp)
1000 {
1001 	efx_mcdi_req_t req;
1002 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN,
1003 		MC_CMD_GET_PF_COUNT_OUT_LEN);
1004 	efx_rc_t rc;
1005 
1006 	req.emr_cmd = MC_CMD_GET_PF_COUNT;
1007 	req.emr_in_buf = payload;
1008 	req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
1009 	req.emr_out_buf = payload;
1010 	req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
1011 
1012 	efx_mcdi_execute(enp, &req);
1013 
1014 	if (req.emr_rc != 0) {
1015 		rc = req.emr_rc;
1016 		goto fail1;
1017 	}
1018 
1019 	if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
1020 		rc = EMSGSIZE;
1021 		goto fail2;
1022 	}
1023 
1024 	*pf_countp = *MCDI_OUT(req, uint8_t,
1025 				MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
1026 
1027 	EFSYS_ASSERT(*pf_countp != 0);
1028 
1029 	return (0);
1030 
1031 fail2:
1032 	EFSYS_PROBE(fail2);
1033 fail1:
1034 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
1035 
1036 	return (rc);
1037 }
1038 
1039 static	__checkReturn	efx_rc_t
1040 ef10_get_datapath_caps(
1041 	__in		efx_nic_t *enp)
1042 {
1043 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1044 	efx_mcdi_req_t req;
1045 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
1046 		MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
1047 	efx_rc_t rc;
1048 
1049 	req.emr_cmd = MC_CMD_GET_CAPABILITIES;
1050 	req.emr_in_buf = payload;
1051 	req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
1052 	req.emr_out_buf = payload;
1053 	req.emr_out_length = MC_CMD_GET_CAPABILITIES_V7_OUT_LEN;
1054 
1055 	efx_mcdi_execute_quiet(enp, &req);
1056 
1057 	if (req.emr_rc != 0) {
1058 		rc = req.emr_rc;
1059 		goto fail1;
1060 	}
1061 
1062 	if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
1063 		rc = EMSGSIZE;
1064 		goto fail2;
1065 	}
1066 
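	/*
	 * Helper macros for testing capability flag bits in the response.
	 * CAP_FLAGS1 reads FLAGS1, present in all response versions.
	 * CAP_FLAGS2 and CAP_FLAGS3 additionally check that the response is
	 * long enough to contain the V2 FLAGS2 / V7 FLAGS3 words, so a flag
	 * evaluates as not set when older firmware returns a shorter reply.
	 */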
1067 #define	CAP_FLAGS1(_req, _flag)						\
1068 	(MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) &		\
1069 	(1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
1070 
1071 #define	CAP_FLAGS2(_req, _flag)						\
1072 	(((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
1073 	    (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) &	\
1074 	    (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
1075 
1076 #define	CAP_FLAGS3(_req, _flag)						\
1077 	(((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V7_OUT_LEN) && \
1078 	    (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V7_OUT_FLAGS3) &	\
1079 	    (1u << (MC_CMD_GET_CAPABILITIES_V7_OUT_ ## _flag ## _LBN))))
1080 
1081 	/* Check if RXDP firmware inserts 14 byte prefix */
1082 	if (CAP_FLAGS1(req, RX_PREFIX_LEN_14))
1083 		encp->enc_rx_prefix_size = 14;
1084 	else
1085 		encp->enc_rx_prefix_size = 0;
1086 
1087 #if EFSYS_OPT_RX_SCALE
1088 	/* Check if the firmware supports additional RSS modes */
1089 	if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
1090 		encp->enc_rx_scale_additional_modes_supported = B_TRUE;
1091 	else
1092 		encp->enc_rx_scale_additional_modes_supported = B_FALSE;
1093 #endif /* EFSYS_OPT_RX_SCALE */
1094 
1095 	/* Check if the firmware supports TSO */
1096 	if (CAP_FLAGS1(req, TX_TSO))
1097 		encp->enc_fw_assisted_tso_enabled = B_TRUE;
1098 	else
1099 		encp->enc_fw_assisted_tso_enabled = B_FALSE;
1100 
1101 	/* Check if the firmware supports FATSOv2 */
1102 	if (CAP_FLAGS2(req, TX_TSO_V2)) {
1103 		encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
1104 		encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
1105 		    GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
1106 	} else {
1107 		encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
1108 		encp->enc_fw_assisted_tso_v2_n_contexts = 0;
1109 	}
1110 
1111 	/* Check if the firmware supports FATSOv2 encap */
1112 	if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
1113 		encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
1114 	else
1115 		encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
1116 
1117 	/* Check if TSOv3 is supported */
1118 	if (CAP_FLAGS2(req, TX_TSO_V3))
1119 		encp->enc_tso_v3_enabled = B_TRUE;
1120 	else
1121 		encp->enc_tso_v3_enabled = B_FALSE;
1122 
1123 	/* Check if the firmware has vadapter/vport/vswitch support */
1124 	if (CAP_FLAGS1(req, EVB))
1125 		encp->enc_datapath_cap_evb = B_TRUE;
1126 	else
1127 		encp->enc_datapath_cap_evb = B_FALSE;
1128 
1129 	/* Check if the firmware supports vport reconfiguration */
1130 	if (CAP_FLAGS1(req, VPORT_RECONFIGURE))
1131 		encp->enc_vport_reconfigure_supported = B_TRUE;
1132 	else
1133 		encp->enc_vport_reconfigure_supported = B_FALSE;
1134 
1135 	/* Check if the firmware supports VLAN insertion */
1136 	if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
1137 		encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
1138 	else
1139 		encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
1140 
1141 	/* Check if the firmware supports RX event batching */
1142 	if (CAP_FLAGS1(req, RX_BATCHING))
1143 		encp->enc_rx_batching_enabled = B_TRUE;
1144 	else
1145 		encp->enc_rx_batching_enabled = B_FALSE;
1146 
1147 	/*
1148 	 * Even if batching isn't reported as supported, we may still get
1149 	 * batched events (see bug61153).
1150 	 */
1151 	encp->enc_rx_batch_max = 16;
1152 
1153 	/* Check if the firmware supports disabling scatter on RXQs */
1154 	if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
1155 		encp->enc_rx_disable_scatter_supported = B_TRUE;
1156 	else
1157 		encp->enc_rx_disable_scatter_supported = B_FALSE;
1158 
1159 	/* No limit on maximum number of Rx scatter elements per packet. */
1160 	encp->enc_rx_scatter_max = -1;
1161 
1162 	/* Check if the firmware supports packed stream mode */
1163 	if (CAP_FLAGS1(req, RX_PACKED_STREAM))
1164 		encp->enc_rx_packed_stream_supported = B_TRUE;
1165 	else
1166 		encp->enc_rx_packed_stream_supported = B_FALSE;
1167 
1168 	/*
1169 	 * Check if the firmware supports configurable buffer sizes
1170 	 * for packed stream mode (otherwise buffer size is 1Mbyte)
1171 	 */
1172 	if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
1173 		encp->enc_rx_var_packed_stream_supported = B_TRUE;
1174 	else
1175 		encp->enc_rx_var_packed_stream_supported = B_FALSE;
1176 
1177 	/* Check if the firmware supports equal stride super-buffer mode */
1178 	if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
1179 		encp->enc_rx_es_super_buffer_supported = B_TRUE;
1180 	else
1181 		encp->enc_rx_es_super_buffer_supported = B_FALSE;
1182 
1183 	/* Check if the firmware supports FW subvariant w/o Tx checksumming */
1184 	if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
1185 		encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
1186 	else
1187 		encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
1188 
1189 	/* Check if the firmware supports set mac with running filters */
1190 	if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
1191 		encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
1192 	else
1193 		encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
1194 
1195 	/*
1196 	 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
1197 	 * specifying which parameters to configure.
1198 	 */
1199 	if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
1200 		encp->enc_enhanced_set_mac_supported = B_TRUE;
1201 	else
1202 		encp->enc_enhanced_set_mac_supported = B_FALSE;
1203 
1204 	/*
1205 	 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
1206 	 * us to let the firmware choose the settings to use on an EVQ.
1207 	 */
1208 	if (CAP_FLAGS2(req, INIT_EVQ_V2))
1209 		encp->enc_init_evq_v2_supported = B_TRUE;
1210 	else
1211 		encp->enc_init_evq_v2_supported = B_FALSE;
1212 
1213 	/*
1214 	 * Check if firmware supports extended width event queues, which have
1215 	 * a different event descriptor layout.
1216 	 */
1217 	if (CAP_FLAGS3(req, EXTENDED_WIDTH_EVQS_SUPPORTED))
1218 		encp->enc_init_evq_extended_width_supported = B_TRUE;
1219 	else
1220 		encp->enc_init_evq_extended_width_supported = B_FALSE;
1221 
1222 	/*
1223 	 * Check if the NO_CONT_EV mode for RX events is supported.
1224 	 */
1225 	if (CAP_FLAGS2(req, INIT_RXQ_NO_CONT_EV))
1226 		encp->enc_no_cont_ev_mode_supported = B_TRUE;
1227 	else
1228 		encp->enc_no_cont_ev_mode_supported = B_FALSE;
1229 
1230 	/*
1231 	 * Check whether buffer size may (and must) be specified on INIT_RXQ.
1232 	 * It may always be specified to efx_rx_qcreate(), but is kept
1233 	 * libefx-internal if MCDI does not support it.
1234 	 */
1235 	if (CAP_FLAGS2(req, INIT_RXQ_WITH_BUFFER_SIZE))
1236 		encp->enc_init_rxq_with_buffer_size = B_TRUE;
1237 	else
1238 		encp->enc_init_rxq_with_buffer_size = B_FALSE;
1239 
1240 	/*
1241 	 * Check if firmware-verified NVRAM updates must be used.
1242 	 *
1243 	 * The firmware trusted installer requires all NVRAM updates to use
1244 	 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
1245 	 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
1246 	 * partition and report the result).
1247 	 */
1248 	if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
1249 		encp->enc_nvram_update_verify_result_supported = B_TRUE;
1250 	else
1251 		encp->enc_nvram_update_verify_result_supported = B_FALSE;
1252 
1253 	if (CAP_FLAGS2(req, NVRAM_UPDATE_POLL_VERIFY_RESULT))
1254 		encp->enc_nvram_update_poll_verify_result_supported = B_TRUE;
1255 	else
1256 		encp->enc_nvram_update_poll_verify_result_supported = B_FALSE;
1257 
1258 	/*
1259 	 * Check if firmware update via the BUNDLE partition is supported
1260 	 */
1261 	if (CAP_FLAGS2(req, BUNDLE_UPDATE))
1262 		encp->enc_nvram_bundle_update_supported = B_TRUE;
1263 	else
1264 		encp->enc_nvram_bundle_update_supported = B_FALSE;
1265 
1266 	/*
1267 	 * Check if firmware provides packet memory and Rx datapath
1268 	 * counters.
1269 	 */
1270 	if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
1271 		encp->enc_pm_and_rxdp_counters = B_TRUE;
1272 	else
1273 		encp->enc_pm_and_rxdp_counters = B_FALSE;
1274 
1275 	/*
1276 	 * Check if the 40G MAC hardware is capable of reporting
1277 	 * statistics for Tx size bins.
1278 	 */
1279 	if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
1280 		encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
1281 	else
1282 		encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
1283 
1284 	/*
1285 	 * Check if firmware supports VXLAN and NVGRE tunnels.
1286 	 * The capability indicates Geneve protocol support as well.
1287 	 */
1288 	if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
1289 		encp->enc_tunnel_encapsulations_supported =
1290 		    (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
1291 		    (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
1292 		    (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
1293 
1294 		EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
1295 		    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
1296 		encp->enc_tunnel_config_udp_entries_max =
1297 		    EFX_TUNNEL_MAXNENTRIES;
1298 	} else {
1299 		encp->enc_tunnel_config_udp_entries_max = 0;
1300 	}
1301 
1302 	/*
1303 	 * Check if firmware reports the VI window mode.
1304 	 * Medford2 has a variable VI window size (8K, 16K or 64K).
1305 	 * Medford and Huntington have a fixed 8K VI window size.
1306 	 */
1307 	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
1308 		uint8_t mode =
1309 		    MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
1310 
1311 		switch (mode) {
1312 		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
1313 			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1314 			break;
1315 		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
1316 			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
1317 			break;
1318 		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
1319 			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
1320 			break;
1321 		default:
1322 			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1323 			break;
1324 		}
1325 	} else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
1326 		    (enp->en_family == EFX_FAMILY_MEDFORD)) {
1327 		/* Huntington and Medford have fixed 8K window size */
1328 		encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1329 	} else {
1330 		encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1331 	}
1332 
1333 	/* Check if firmware supports extended MAC stats. */
1334 	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
1335 		/* Extended stats buffer supported */
1336 		encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
1337 		    GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
1338 	} else {
1339 		/* Use Siena-compatible legacy MAC stats */
1340 		encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
1341 	}
1342 
1343 	if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
1344 		encp->enc_fec_counters = B_TRUE;
1345 	else
1346 		encp->enc_fec_counters = B_FALSE;
1347 
1348 	/* Check if the firmware provides head-of-line blocking counters */
1349 	if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
1350 		encp->enc_hlb_counters = B_TRUE;
1351 	else
1352 		encp->enc_hlb_counters = B_FALSE;
1353 
1354 #if EFSYS_OPT_RX_SCALE
1355 	if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
1356 		/* Only one exclusive RSS context is available per port. */
1357 		encp->enc_rx_scale_max_exclusive_contexts = 1;
1358 
1359 		switch (enp->en_family) {
1360 		case EFX_FAMILY_MEDFORD2:
1361 			encp->enc_rx_scale_hash_alg_mask =
1362 			    (1U << EFX_RX_HASHALG_TOEPLITZ);
1363 			break;
1364 
1365 		case EFX_FAMILY_MEDFORD:
1366 		case EFX_FAMILY_HUNTINGTON:
1367 			/*
1368 			 * The packed stream firmware variant uses a
1369 			 * non-standard algorithm for hash computation:
1370 			 * it XORs together the source and destination
1371 			 * IP addresses (or their last four bytes in the
1372 			 * case of IPv6) and uses the resulting value as
1373 			 * the input to a Toeplitz hash.
1374 			 */
1375 			encp->enc_rx_scale_hash_alg_mask =
1376 			    (1U << EFX_RX_HASHALG_PACKED_STREAM);
1377 			break;
1378 
1379 		default:
1380 			rc = EINVAL;
1381 			goto fail3;
1382 		}
1383 
1384 		/* Port numbers cannot contribute to the hash value */
1385 		encp->enc_rx_scale_l4_hash_supported = B_FALSE;
1386 	} else {
1387 		/*
1388 		 * Maximum number of exclusive RSS contexts.
1389 		 * EF10 hardware supports 64 in total, but 6 are reserved
1390 		 * for shared contexts. They are a global resource so
1391 		 * not all may be available.
1392 		 */
1393 		encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
1394 
1395 		encp->enc_rx_scale_hash_alg_mask =
1396 		    (1U << EFX_RX_HASHALG_TOEPLITZ);
1397 
1398 		/*
1399 		 * It is possible to use port numbers as
1400 		 * the input data for hash computation.
1401 		 */
1402 		encp->enc_rx_scale_l4_hash_supported = B_TRUE;
1403 	}
1404 #endif /* EFSYS_OPT_RX_SCALE */
1405 
1406 	/* Check if the firmware supports "FLAG" and "MARK" filter actions */
1407 	if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
1408 		encp->enc_filter_action_flag_supported = B_TRUE;
1409 	else
1410 		encp->enc_filter_action_flag_supported = B_FALSE;
1411 
1412 	if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
1413 		encp->enc_filter_action_mark_supported = B_TRUE;
1414 	else
1415 		encp->enc_filter_action_mark_supported = B_FALSE;
1416 
1417 	/* Get maximum supported value for "MARK" filter action */
1418 	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
1419 		encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
1420 		    GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
1421 	else
1422 		encp->enc_filter_action_mark_max = 0;
1423 
1424 #if EFSYS_OPT_MAE
1425 	/*
1426 	 * Check support for EF100 Match Action Engine (MAE).
1427 	 * MAE hardware is present on Riverhead boards (from R2),
1428 	 * and on Keystone, and requires support in firmware.
1429 	 *
1430 	 * MAE control operations require MAE control privilege,
1431 	 * which is not available for VFs.
1432 	 *
1433 	 * Privileges can change dynamically at runtime: we assume
1434 	 * MAE support requires the privilege to be granted initially,
1435 	 * and ignore later dynamic changes.
1436 	 */
1437 	if (CAP_FLAGS3(req, MAE_SUPPORTED) &&
1438 	    EFX_MCDI_HAVE_PRIVILEGE(encp->enc_privilege_mask, MAE))
1439 		encp->enc_mae_supported = B_TRUE;
1440 	else
1441 		encp->enc_mae_supported = B_FALSE;
1442 #else
1443 	encp->enc_mae_supported = B_FALSE;
1444 #endif /* EFSYS_OPT_MAE */
1445 
1446 #undef CAP_FLAGS1
1447 #undef CAP_FLAGS2
1448 #undef CAP_FLAGS3
1449 
1450 	return (0);
1451 
1452 #if EFSYS_OPT_RX_SCALE
1453 fail3:
1454 	EFSYS_PROBE(fail3);
1455 #endif /* EFSYS_OPT_RX_SCALE */
1456 fail2:
1457 	EFSYS_PROBE(fail2);
1458 fail1:
1459 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
1460 
1461 	return (rc);
1462 }
1463 
1464 
1465 #define	EF10_LEGACY_PF_PRIVILEGE_MASK					\
1466 	(MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN			|	\
1467 	MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK			|	\
1468 	MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD			|	\
1469 	MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP			|	\
1470 	MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS		|	\
1471 	MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING		|	\
1472 	MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST			|	\
1473 	MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST			|	\
1474 	MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST			|	\
1475 	MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST		|	\
1476 	MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
1477 
1478 #define	EF10_LEGACY_VF_PRIVILEGE_MASK	0
1479 
1480 
1481 	__checkReturn		efx_rc_t
1482 ef10_get_privilege_mask(
1483 	__in			efx_nic_t *enp,
1484 	__out			uint32_t *maskp)
1485 {
1486 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1487 	uint32_t mask;
1488 	efx_rc_t rc;
1489 
1490 	if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
1491 					    &mask)) != 0) {
1492 		if (rc != ENOTSUP)
1493 			goto fail1;
1494 
1495 		/* Fallback for old firmware without privilege mask support */
1496 		if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1497 			/* Assume PF has admin privilege */
1498 			mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
1499 		} else {
1500 			/* VF is always unprivileged by default */
1501 			mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
1502 		}
1503 	}
1504 
1505 	*maskp = mask;
1506 
1507 	return (0);
1508 
1509 fail1:
1510 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
1511 
1512 	return (rc);
1513 }
1514 
1515 
1516 #define	EFX_EXT_PORT_MAX	4
1517 #define	EFX_EXT_PORT_NA		0xFF
1518 
1519 /*
1520  * Table of mapping schemes from port number to external number.
1521  *
1522  * Each port number ultimately corresponds to a connector: either as part of
1523  * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
1524  * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
1525  * "Salina"). In general:
1526  *
1527  * Port number (0-based)
1528  *     |
1529  *   port mapping (n:1)
1530  *     |
1531  *     v
1532  * External port number (1-based)
1533  *     |
1534  *   fixed (1:1) or cable assembly (1:m)
1535  *     |
1536  *     v
1537  * Connector
1538  *
1539  * The external numbering refers to the cages or magjacks on the board,
1540  * as visibly annotated on the board or back panel. This table describes
1541  * how to determine which external cage/magjack corresponds to the port
1542  * numbers used by the driver.
1543  * The count of consecutive port numbers that map to each external number
1544  * The count of consecutive port numbers that map to each external number,
1545  * is determined by the chip family and the current port mode.
1546  *
1547  * For the Huntington family, the current port mode cannot be discovered,
1548  * but a single mapping is used by all modes for a given chip variant,
1549  * so the mapping used is instead the last match in the table to the full
1550  * set of port modes to which the NIC can be configured. Therefore the
1551  * ordering of entries in the mapping table is significant.
1552  */
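/*
 * For example (illustrative only): a Medford2 NIC reporting current port
 * mode TLV_PORT_MODE_2x1_2x1 matches the entry below with base_port
 * { 0, 2, NA, NA }, so ports 0 and 1 map to external port 1 (cage 1) and
 * ports 2 and 3 map to external port 2 (cage 2); the resolution itself is
 * performed by ef10_external_port_mapping() below.
 */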
1553 static struct ef10_external_port_map_s {
1554 	efx_family_t	family;
1555 	uint32_t	modes_mask;
1556 	uint8_t		base_port[EFX_EXT_PORT_MAX];
1557 }	__ef10_external_port_mappings[] = {
1558 	/*
1559 	 * Modes used by Huntington family controllers where each port
1560 	 * number maps to a separate cage.
1561 	 * SFN7x22F (Torino):
1562 	 *	port 0 -> cage 1
1563 	 *	port 1 -> cage 2
1564 	 * SFN7xx4F (Pavia):
1565 	 *	port 0 -> cage 1
1566 	 *	port 1 -> cage 2
1567 	 *	port 2 -> cage 3
1568 	 *	port 3 -> cage 4
1569 	 */
1570 	{
1571 		EFX_FAMILY_HUNTINGTON,
1572 		(1U << TLV_PORT_MODE_10G) |			/* mode 0 */
1573 		(1U << TLV_PORT_MODE_10G_10G) |			/* mode 2 */
1574 		(1U << TLV_PORT_MODE_10G_10G_10G_10G),		/* mode 4 */
1575 		{ 0, 1, 2, 3 }
1576 	},
1577 	/*
1578 	 * Modes which for Huntington identify a chip variant where 2
1579 	 * adjacent port numbers map to each cage.
1580 	 * SFN7x42Q (Monza):
1581 	 *	port 0 -> cage 1
1582 	 *	port 1 -> cage 1
1583 	 *	port 2 -> cage 2
1584 	 *	port 3 -> cage 2
1585 	 */
1586 	{
1587 		EFX_FAMILY_HUNTINGTON,
1588 		(1U << TLV_PORT_MODE_40G) |			/* mode 1 */
1589 		(1U << TLV_PORT_MODE_40G_40G) |			/* mode 3 */
1590 		(1U << TLV_PORT_MODE_40G_10G_10G) |		/* mode 6 */
1591 		(1U << TLV_PORT_MODE_10G_10G_40G),		/* mode 7 */
1592 		{ 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1593 	},
1594 	/*
1595 	 * Modes that on Medford allocate each port number to a separate
1596 	 * cage.
1597 	 *	port 0 -> cage 1
1598 	 *	port 1 -> cage 2
1599 	 *	port 2 -> cage 3
1600 	 *	port 3 -> cage 4
1601 	 */
1602 	{
1603 		EFX_FAMILY_MEDFORD,
1604 		(1U << TLV_PORT_MODE_1x1_NA) |			/* mode 0 */
1605 		(1U << TLV_PORT_MODE_1x4_NA) |			/* mode 1 */
1606 		(1U << TLV_PORT_MODE_1x1_1x1),			/* mode 2 */
1607 		{ 0, 1, 2, 3 }
1608 	},
1609 	/*
1610 	 * Modes that on Medford allocate 2 adjacent port numbers to each
1611 	 * cage.
1612 	 *	port 0 -> cage 1
1613 	 *	port 1 -> cage 1
1614 	 *	port 2 -> cage 2
1615 	 *	port 3 -> cage 2
1616 	 */
1617 	{
1618 		EFX_FAMILY_MEDFORD,
1619 		(1U << TLV_PORT_MODE_1x4_1x4) |			/* mode 3 */
1620 		(1U << TLV_PORT_MODE_2x1_2x1) |			/* mode 5 */
1621 		(1U << TLV_PORT_MODE_1x4_2x1) |			/* mode 6 */
1622 		(1U << TLV_PORT_MODE_2x1_1x4) |			/* mode 7 */
1623 		/* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
1624 		(1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),	/* mode 9 */
1625 		{ 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1626 	},
1627 	/*
1628 	 * Modes that on Medford allocate 4 adjacent port numbers to
1629 	 * cage 1.
1630 	 *	port 0 -> cage 1
1631 	 *	port 1 -> cage 1
1632 	 *	port 2 -> cage 1
1633 	 *	port 3 -> cage 1
1634 	 */
1635 	{
1636 		EFX_FAMILY_MEDFORD,
1637 		/* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
1638 		(1U << TLV_PORT_MODE_4x1_NA),			/* mode 4 */
1639 		{ 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1640 	},
1641 	/*
1642 	 * Modes that on Medford allocate 4 adjacent port numbers to
1643 	 * cage 2.
1644 	 *	port 0 -> cage 2
1645 	 *	port 1 -> cage 2
1646 	 *	port 2 -> cage 2
1647 	 *	port 3 -> cage 2
1648 	 */
1649 	{
1650 		EFX_FAMILY_MEDFORD,
1651 		(1U << TLV_PORT_MODE_NA_4x1),			/* mode 8 */
1652 		{ EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1653 	},
1654 	/*
1655 	 * Modes that on Medford2 allocate each port number to a separate
1656 	 * cage.
1657 	 *	port 0 -> cage 1
1658 	 *	port 1 -> cage 2
1659 	 *	port 2 -> cage 3
1660 	 *	port 3 -> cage 4
1661 	 */
1662 	{
1663 		EFX_FAMILY_MEDFORD2,
1664 		(1U << TLV_PORT_MODE_1x1_NA) |			/* mode 0 */
1665 		(1U << TLV_PORT_MODE_1x4_NA) |			/* mode 1 */
1666 		(1U << TLV_PORT_MODE_1x1_1x1) |			/* mode 2 */
1667 		(1U << TLV_PORT_MODE_1x4_1x4) |			/* mode 3 */
1668 		(1U << TLV_PORT_MODE_1x2_NA) |			/* mode 10 */
1669 		(1U << TLV_PORT_MODE_1x2_1x2) |			/* mode 12 */
1670 		(1U << TLV_PORT_MODE_1x4_1x2) |			/* mode 15 */
1671 		(1U << TLV_PORT_MODE_1x2_1x4),			/* mode 16 */
1672 		{ 0, 1, 2, 3 }
1673 	},
1674 	/*
1675 	 * Modes that on Medford2 allocate 1 port to cage 1 and the rest
1676 	 * to cage 2.
1677 	 *	port 0 -> cage 1
1678 	 *	port 1 -> cage 2
1679 	 *	port 2 -> cage 2
1680 	 */
1681 	{
1682 		EFX_FAMILY_MEDFORD2,
1683 		(1U << TLV_PORT_MODE_1x2_2x1) |			/* mode 17 */
1684 		(1U << TLV_PORT_MODE_1x4_2x1),			/* mode 6 */
1685 		{ 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1686 	},
1687 	/*
1688 	 * Modes that on Medford2 allocate 2 adjacent port numbers to cage 1
1689 	 * and the rest to cage 2.
1690 	 *	port 0 -> cage 1
1691 	 *	port 1 -> cage 1
1692 	 *	port 2 -> cage 2
1693 	 *	port 3 -> cage 2
1694 	 */
1695 	{
1696 		EFX_FAMILY_MEDFORD2,
1697 		(1U << TLV_PORT_MODE_2x1_2x1) |			/* mode 5 */
1698 		(1U << TLV_PORT_MODE_2x1_1x4) |			/* mode 7 */
1699 		(1U << TLV_PORT_MODE_2x2_NA) |			/* mode 13 */
1700 		(1U << TLV_PORT_MODE_2x1_1x2),			/* mode 18 */
1701 		{ 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1702 	},
1703 	/*
1704 	 * Modes that on Medford2 allocate up to 4 adjacent port numbers
1705 	 * to cage 1.
1706 	 *	port 0 -> cage 1
1707 	 *	port 1 -> cage 1
1708 	 *	port 2 -> cage 1
1709 	 *	port 3 -> cage 1
1710 	 */
1711 	{
1712 		EFX_FAMILY_MEDFORD2,
1713 		(1U << TLV_PORT_MODE_4x1_NA),			/* mode 4 */
1714 		{ 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1715 	},
1716 	/*
1717 	 * Modes that on Medford2 allocate up to 4 adjacent port numbers
1718 	 * to cage 2.
1719 	 *	port 0 -> cage 2
1720 	 *	port 1 -> cage 2
1721 	 *	port 2 -> cage 2
1722 	 *	port 3 -> cage 2
1723 	 */
1724 	{
1725 		EFX_FAMILY_MEDFORD2,
1726 		(1U << TLV_PORT_MODE_NA_4x1) |			/* mode 8 */
1727 		(1U << TLV_PORT_MODE_NA_1x2) |			/* mode 11 */
1728 		(1U << TLV_PORT_MODE_NA_2x2),			/* mode 14 */
1729 		{ EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1730 	},
1731 	/*
1732 	 * Modes that on Riverhead allocate each port number to a separate
1733 	 * cage.
1734 	 *	port 0 -> cage 1
1735 	 *	port 1 -> cage 2
1736 	 */
1737 	{
1738 		EFX_FAMILY_RIVERHEAD,
1739 		(1U << TLV_PORT_MODE_1x1_NA) |			/* mode 0 */
1740 		(1U << TLV_PORT_MODE_1x4_NA) |			/* mode 1 */
1741 		(1U << TLV_PORT_MODE_1x1_1x1),			/* mode 2 */
1742 		{ 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1743 	},
1744 };
1745 
1746 static	__checkReturn	efx_rc_t
1747 ef10_external_port_mapping(
1748 	__in		efx_nic_t *enp,
1749 	__in		uint32_t port,
1750 	__out		uint8_t *external_portp)
1751 {
1752 	efx_rc_t rc;
1753 	int i;
1754 	uint32_t port_modes;
1755 	uint32_t matches;
1756 	uint32_t current;
1757 	struct ef10_external_port_map_s *mapp = NULL;
1758 	int ext_index = port; /* Default 1-1 mapping */
1759 
1760 	if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current,
1761 		    NULL)) != 0) {
1762 		/*
1763 		 * No current port mode information (i.e. Huntington)
1764 		 * - infer mapping from available modes
1765 		 */
1766 		if ((rc = efx_mcdi_get_port_modes(enp,
1767 			    &port_modes, NULL, NULL)) != 0) {
1768 			/*
1769 			 * No port mode information available
1770 			 * - use default mapping
1771 			 */
1772 			goto out;
1773 		}
1774 	} else {
1775 		/* Only need to scan the current mode */
1776 		port_modes = 1 << current;
1777 	}
1778 
1779 	/*
1780 	 * Infer the internal port -> external number mapping from
1781 	 * the possible port modes for this NIC.
1782 	 */
1783 	for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
1784 		struct ef10_external_port_map_s *eepmp =
1785 		    &__ef10_external_port_mappings[i];
1786 		if (eepmp->family != enp->en_family)
1787 			continue;
1788 		matches = (eepmp->modes_mask & port_modes);
1789 		if (matches != 0) {
1790 			/*
1791 			 * Some modes match. For some Huntington boards
1792 			 * there will be multiple matches. The mapping on the
1793 			 * last match is used.
1794 			 */
1795 			mapp = eepmp;
1796 			port_modes &= ~matches;
1797 		}
1798 	}
1799 
1800 	if (port_modes != 0) {
1801 		/* Some advertised modes are not supported */
1802 		rc = ENOTSUP;
1803 		goto fail1;
1804 	}
1805 
1806 out:
1807 	if (mapp != NULL) {
1808 		/*
1809 		 * External ports are assigned a sequence of consecutive
1810 		 * port numbers, so find the one with the closest base_port.
1811 		 */
1812 		uint32_t delta = EFX_EXT_PORT_NA;
1813 
1814 		for (i = 0; i < EFX_EXT_PORT_MAX; i++) {
1815 			uint32_t base = mapp->base_port[i];
1816 			if ((base != EFX_EXT_PORT_NA) && (base <= port)) {
1817 				if ((port - base) < delta) {
1818 					delta = (port - base);
1819 					ext_index = i;
1820 				}
1821 			}
1822 		}
1823 	}
1824 	*external_portp = (uint8_t)(ext_index + 1);
1825 
1826 	return (0);
1827 
1828 fail1:
1829 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
1830 
1831 	return (rc);
1832 }
1833 
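/*
 * Gather static board and NIC configuration via MCDI: port assignment and
 * external port mapping, PCIe PF/VF numbers, MAC address, privilege mask,
 * board type, PHY capabilities, datapath capabilities and interrupt vector
 * limits. The results are stored in enp->en_nic_cfg and enp->en_port.
 */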
1834 	__checkReturn	efx_rc_t
1835 efx_mcdi_nic_board_cfg(
1836 	__in		efx_nic_t *enp)
1837 {
1838 	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
1839 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1840 	ef10_link_state_t els;
1841 	efx_port_t *epp = &(enp->en_port);
1842 	uint32_t board_type = 0;
1843 	uint32_t base, nvec;
1844 	uint32_t port;
1845 	uint32_t mask;
1846 	uint32_t pf;
1847 	uint32_t vf;
1848 	uint8_t mac_addr[6] = { 0 };
1849 	efx_rc_t rc;
1850 
1851 	/* Get the (zero-based) MCDI port number */
1852 	if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
1853 		goto fail1;
1854 
1855 	/* EFX MCDI interface uses one-based port numbers */
1856 	emip->emi_port = port + 1;
1857 
1858 	encp->enc_assigned_port = port;
1859 
1860 	if ((rc = ef10_external_port_mapping(enp, port,
1861 		    &encp->enc_external_port)) != 0)
1862 		goto fail2;
1863 
1864 	/*
1865 	 * Get PCIe function number from firmware (used for
1866 	 * per-function privilege and dynamic config info).
1867 	 *  - PCIe PF: pf = PF number, vf = 0xffff.
1868 	 *  - PCIe VF: pf = parent PF, vf = VF number.
1869 	 */
1870 	if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
1871 		goto fail3;
1872 
1873 	encp->enc_pf = pf;
1874 	encp->enc_vf = vf;
1875 
1876 	if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
1877 		goto fail4;
1878 
1879 	/* MAC address for this function */
1880 	if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1881 		rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
1882 #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
1883 		/*
1884 		 * Disable static config checking, ONLY for manufacturing test
1885 		 * and setup at the factory, to allow the static config to be
1886 		 * installed.
1887 		 */
1888 #else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
1889 		if ((rc == 0) && (mac_addr[0] & 0x02)) {
1890 			/*
1891 			 * If the static config does not include a global MAC
1892 			 * address pool then the board may return a locally
1893 			 * administered MAC address (this should only happen on
1894 			 * incorrectly programmed boards).
1895 			 */
1896 			rc = EINVAL;
1897 		}
1898 #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
1899 	} else {
1900 		rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
1901 	}
1902 	if (rc != 0)
1903 		goto fail5;
1904 
1905 	EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
1906 
1907 	/*
1908 	 * Get the current privilege mask. Note that this may be modified
1909 	 * dynamically, so for most cases the value is informational only.
1910 	 * If the privilege being discovered can't be granted dynamically,
1911 	 * it's fine to rely on the value. In all other cases, DO NOT use
1912 	 * the privilege mask to check for sufficient privileges, as that
1913 	 * can result in time-of-check/time-of-use bugs.
1914 	 */
1915 	if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
1916 		goto fail6;
1917 	encp->enc_privilege_mask = mask;
1918 
1919 	/* Board configuration (legacy) */
1920 	rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
1921 	if (rc != 0) {
1922 		/* Unprivileged functions may not be able to read board cfg */
1923 		if (rc == EACCES)
1924 			board_type = 0;
1925 		else
1926 			goto fail7;
1927 	}
1928 
1929 	encp->enc_board_type = board_type;
1930 
1931 	/* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
1932 	if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
1933 		goto fail8;
1934 
1935 	/*
1936 	 * Firmware with support for *_FEC capability bits does not
1937 	 * report that the corresponding *_FEC_REQUESTED bits are supported.
1938 	 * Add them here so that drivers understand that they are supported.
1939 	 */
1940 	if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC))
1941 		epp->ep_phy_cap_mask |=
1942 		    (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED);
1943 	if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC))
1944 		epp->ep_phy_cap_mask |=
1945 		    (1u << EFX_PHY_CAP_RS_FEC_REQUESTED);
1946 	if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC))
1947 		epp->ep_phy_cap_mask |=
1948 		    (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);
1949 
1950 	/* Obtain the default PHY advertised capabilities */
1951 	if ((rc = ef10_phy_get_link(enp, &els)) != 0)
1952 		goto fail9;
1953 	epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask;
1954 	epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask;
1955 
1956 	/* Check capabilities of running datapath firmware */
1957 	if ((rc = ef10_get_datapath_caps(enp)) != 0)
1958 		goto fail10;
1959 
1960 	/* Get interrupt vector limits */
1961 	if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
1962 		if (EFX_PCI_FUNCTION_IS_PF(encp))
1963 			goto fail11;
1964 
1965 		/* Ignore error (cannot query vector limits from a VF). */
1966 		base = 0;
1967 		nvec = 1024;
1968 	}
1969 	encp->enc_intr_vec_base = base;
1970 	encp->enc_intr_limit = nvec;
1971 
1972 	return (0);
1973 
1974 fail11:
1975 	EFSYS_PROBE(fail11);
1976 fail10:
1977 	EFSYS_PROBE(fail10);
1978 fail9:
1979 	EFSYS_PROBE(fail9);
1980 fail8:
1981 	EFSYS_PROBE(fail8);
1982 fail7:
1983 	EFSYS_PROBE(fail7);
1984 fail6:
1985 	EFSYS_PROBE(fail6);
1986 fail5:
1987 	EFSYS_PROBE(fail5);
1988 fail4:
1989 	EFSYS_PROBE(fail4);
1990 fail3:
1991 	EFSYS_PROBE(fail3);
1992 fail2:
1993 	EFSYS_PROBE(fail2);
1994 fail1:
1995 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
1996 
1997 	return (rc);
1998 }
1999 
2000 	__checkReturn	efx_rc_t
2001 efx_mcdi_entity_reset(
2002 	__in		efx_nic_t *enp)
2003 {
2004 	efx_mcdi_req_t req;
2005 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
2006 		MC_CMD_ENTITY_RESET_OUT_LEN);
2007 	efx_rc_t rc;
2008 
2009 	req.emr_cmd = MC_CMD_ENTITY_RESET;
2010 	req.emr_in_buf = payload;
2011 	req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
2012 	req.emr_out_buf = payload;
2013 	req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
2014 
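	/* Request a reset of the resources owned by this function. */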
2015 	MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
2016 	    ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
2017 
2018 	efx_mcdi_execute(enp, &req);
2019 
2020 	if (req.emr_rc != 0) {
2021 		rc = req.emr_rc;
2022 		goto fail1;
2023 	}
2024 
2025 	return (0);
2026 
2027 fail1:
2028 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2029 
2030 	return (rc);
2031 }
2032 
2033 #endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
2034 
2035 #if EFX_OPTS_EF10()
2036 
2037 static	__checkReturn	efx_rc_t
2038 ef10_set_workaround_bug26807(
2039 	__in		efx_nic_t *enp)
2040 {
2041 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2042 	uint32_t flags;
2043 	efx_rc_t rc;
2044 
2045 	/*
2046 	 * If the bug26807 workaround is enabled, then firmware has enabled
2047 	 * support for chained multicast filters. Firmware will reset (FLR)
2048 	 * functions which have filters in the hardware filter table when the
2049 	 * workaround is enabled/disabled.
2050 	 *
2051 	 * We must recheck if the workaround is enabled after inserting the
2052 	 * first hardware filter, in case it has been changed since this check.
2053 	 */
2054 	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
2055 	    B_TRUE, &flags);
2056 	if (rc == 0) {
2057 		encp->enc_bug26807_workaround = B_TRUE;
2058 		if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
2059 			/*
2060 			 * Other functions had installed filters before the
2061 			 * workaround was enabled, and they have been reset
2062 			 * by firmware.
2063 			 */
2064 			EFSYS_PROBE(bug26807_workaround_flr_done);
2065 			/* FIXME: bump MC warm boot count ? */
2066 		}
2067 	} else if (rc == EACCES) {
2068 		/*
2069 		 * Unprivileged functions cannot enable the workaround in older
2070 		 * firmware.
2071 		 */
2072 		encp->enc_bug26807_workaround = B_FALSE;
2073 	} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
2074 		encp->enc_bug26807_workaround = B_FALSE;
2075 	} else {
2076 		goto fail1;
2077 	}
2078 
2079 	return (0);
2080 
2081 fail1:
2082 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2083 
2084 	return (rc);
2085 }
2086 
2087 static	__checkReturn	efx_rc_t
2088 ef10_nic_board_cfg(
2089 	__in		efx_nic_t *enp)
2090 {
2091 	const efx_nic_ops_t *enop = enp->en_enop;
2092 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2093 	efx_rc_t rc;
2094 
2095 	if ((rc = efx_mcdi_nic_board_cfg(enp)) != 0)
2096 		goto fail1;
2097 
2098 	/*
2099 	 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
2100 	 * We only support the 14 byte prefix here.
2101 	 */
2102 	if (encp->enc_rx_prefix_size != 14) {
2103 		rc = ENOTSUP;
2104 		goto fail2;
2105 	}
2106 
2107 	encp->enc_clk_mult = 1; /* not used for EF10 */
2108 
2109 	/* Alignment for WPTR updates */
2110 	encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
2111 
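	/*
	 * Maximum byte count of one TX DMA descriptor. The RX byte count
	 * field mask is used here, assuming the TX field is the same width.
	 */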
2112 	encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
2113 	/* No boundary crossing limits */
2114 	encp->enc_tx_dma_desc_boundary = 0;
2115 
2116 	/*
2117 	 * Maximum offset into the frame at which the TCP header may start
2118 	 * for firmware-assisted TSO to work.
2119 	 */
2120 	encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
2121 
2122 	/* EF10 TSO engine demands that packet header be contiguous. */
2123 	encp->enc_tx_tso_max_header_ndescs = 1;
2124 
2125 	/* The overall TSO header length is not limited. */
2126 	encp->enc_tx_tso_max_header_length = UINT32_MAX;
2127 
2128 	/*
2129 	 * There are no specific limitations on the number of
2130 	 * TSO payload descriptors.
2131 	 */
2132 	encp->enc_tx_tso_max_payload_ndescs = UINT32_MAX;
2133 
2134 	/* TSO superframe payload length is not limited. */
2135 	encp->enc_tx_tso_max_payload_length = UINT32_MAX;
2136 
2137 	/*
2138 	 * Limitation on the maximum number of outgoing packets per
2139 	 * TSO transaction described in SF-108452-SW.
2140 	 */
2141 	encp->enc_tx_tso_max_nframes = 32767;
2142 
2143 	/*
2144 	 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
2145 	 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
2146 	 * resources (allocated to this PCIe function), which is zero until
2147 	 * after we have allocated VIs.
2148 	 */
2149 	encp->enc_evq_limit = 1024;
2150 	encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
2151 	encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
2152 
2153 	encp->enc_buftbl_limit = UINT32_MAX;
2154 
2155 	if ((rc = ef10_set_workaround_bug26807(enp)) != 0)
2156 		goto fail3;
2157 
2158 	/* Get remaining controller-specific board config */
2159 	if ((rc = enop->eno_board_cfg(enp)) != 0)
2160 		if (rc != EACCES)
2161 			goto fail4;
2162 
2163 	return (0);
2164 
2165 fail4:
2166 	EFSYS_PROBE(fail4);
2167 fail3:
2168 	EFSYS_PROBE(fail3);
2169 fail2:
2170 	EFSYS_PROBE(fail2);
2171 fail1:
2172 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2173 
2174 	return (rc);
2175 }
2176 
2177 	__checkReturn	efx_rc_t
2178 ef10_nic_probe(
2179 	__in		efx_nic_t *enp)
2180 {
2181 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2182 	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2183 	efx_rc_t rc;
2184 
2185 	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2186 
2187 	/* Read and clear any assertion state */
2188 	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
2189 		goto fail1;
2190 
2191 	/* Exit the assertion handler */
2192 	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
2193 		if (rc != EACCES)
2194 			goto fail2;
2195 
2196 	if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
2197 		goto fail3;
2198 
2199 	if ((rc = ef10_nic_board_cfg(enp)) != 0)
2200 		goto fail4;
2201 
2202 	/*
2203 	 * Set default driver config limits (based on board config).
2204 	 *
2205 	 * FIXME: For now allocate a fixed number of VIs which is likely to be
2206 	 * sufficient and small enough to allow multiple functions on the same
2207 	 * port.
2208 	 */
2209 	edcp->edc_min_vi_count = edcp->edc_max_vi_count =
2210 	    MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
2211 
2212 	/* The client driver must configure and enable PIO buffer support */
2213 	edcp->edc_max_piobuf_count = 0;
2214 	edcp->edc_pio_alloc_size = 0;
2215 
2216 #if EFSYS_OPT_MAC_STATS
2217 	/* Wipe the MAC statistics */
2218 	if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
2219 		goto fail5;
2220 #endif
2221 
2222 #if EFSYS_OPT_LOOPBACK
2223 	if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
2224 		goto fail6;
2225 #endif
2226 
2227 #if EFSYS_OPT_MON_STATS
2228 	if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
2229 		/* Unprivileged functions do not have access to sensors */
2230 		if (rc != EACCES)
2231 			goto fail7;
2232 	}
2233 #endif
2234 
2235 	return (0);
2236 
2237 #if EFSYS_OPT_MON_STATS
2238 fail7:
2239 	EFSYS_PROBE(fail7);
2240 #endif
2241 #if EFSYS_OPT_LOOPBACK
2242 fail6:
2243 	EFSYS_PROBE(fail6);
2244 #endif
2245 #if EFSYS_OPT_MAC_STATS
2246 fail5:
2247 	EFSYS_PROBE(fail5);
2248 #endif
2249 fail4:
2250 	EFSYS_PROBE(fail4);
2251 fail3:
2252 	EFSYS_PROBE(fail3);
2253 fail2:
2254 	EFSYS_PROBE(fail2);
2255 fail1:
2256 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2257 
2258 	return (rc);
2259 }
2260 
2261 	__checkReturn	efx_rc_t
2262 ef10_nic_set_drv_limits(
2263 	__inout		efx_nic_t *enp,
2264 	__in		efx_drv_limits_t *edlp)
2265 {
2266 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2267 	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2268 	uint32_t min_evq_count, max_evq_count;
2269 	uint32_t min_rxq_count, max_rxq_count;
2270 	uint32_t min_txq_count, max_txq_count;
2271 	efx_rc_t rc;
2272 
2273 	if (edlp == NULL) {
2274 		rc = EINVAL;
2275 		goto fail1;
2276 	}
2277 
2278 	/* Get minimum required and maximum usable VI limits */
2279 	min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
2280 	min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
2281 	min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
2282 
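	/*
	 * A VI provides one EVQ, one RXQ and one TXQ, so the VI count must
	 * cover the largest of the three per-queue-type requirements.
	 */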
2283 	edcp->edc_min_vi_count =
2284 	    MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
2285 
2286 	max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
2287 	max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
2288 	max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
2289 
2290 	edcp->edc_max_vi_count =
2291 	    MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
2292 
2293 	/*
2294 	 * Check limits for sub-allocated piobuf blocks.
2295 	 * PIO is optional, so don't fail if the limits are incorrect.
2296 	 */
2297 	if ((encp->enc_piobuf_size == 0) ||
2298 	    (encp->enc_piobuf_limit == 0) ||
2299 	    (edlp->edl_min_pio_alloc_size == 0) ||
2300 	    (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
2301 		/* Disable PIO */
2302 		edcp->edc_max_piobuf_count = 0;
2303 		edcp->edc_pio_alloc_size = 0;
2304 	} else {
2305 		uint32_t blk_size, blk_count, blks_per_piobuf;
2306 
2307 		blk_size =
2308 		    MAX(edlp->edl_min_pio_alloc_size,
2309 			    encp->enc_piobuf_min_alloc_size);
2310 
2311 		blks_per_piobuf = encp->enc_piobuf_size / blk_size;
2312 		EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
2313 
2314 		blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
2315 
2316 		/* A zero max pio alloc count means unlimited */
2317 		if ((edlp->edl_max_pio_alloc_count > 0) &&
2318 		    (edlp->edl_max_pio_alloc_count < blk_count)) {
2319 			blk_count = edlp->edl_max_pio_alloc_count;
2320 		}
2321 
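		/* Round up so that the block budget fits in whole piobufs. */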
2322 		edcp->edc_pio_alloc_size = blk_size;
2323 		edcp->edc_max_piobuf_count =
2324 		    (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
2325 	}
2326 
2327 	return (0);
2328 
2329 fail1:
2330 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2331 
2332 	return (rc);
2333 }
2334 
2335 
2336 	__checkReturn	efx_rc_t
2337 ef10_nic_reset(
2338 	__in		efx_nic_t *enp)
2339 {
2340 	efx_rc_t rc;
2341 
2342 	/* ef10_nic_reset() is called to recover from BADASSERT failures. */
2343 	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
2344 		goto fail1;
2345 	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
2346 		goto fail2;
2347 
2348 	if ((rc = efx_mcdi_entity_reset(enp)) != 0)
2349 		goto fail3;
2350 
2351 	/* Clear RX/TX DMA queue errors */
2352 	enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
2353 
2354 	return (0);
2355 
2356 fail3:
2357 	EFSYS_PROBE(fail3);
2358 fail2:
2359 	EFSYS_PROBE(fail2);
2360 fail1:
2361 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2362 
2363 	return (rc);
2364 }
2365 
2366 #endif	/* EFX_OPTS_EF10() */
2367 
2368 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
2369 
2370 	__checkReturn	efx_rc_t
2371 ef10_upstream_port_vadaptor_alloc(
2372 	__in		efx_nic_t *enp)
2373 {
2374 	uint32_t retry;
2375 	uint32_t delay_us;
2376 	efx_rc_t rc;
2377 
2378 	/*
2379 	 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
2380 	 * driver has yet to bring up the EVB port. See bug 56147. In this case,
2381 	 * retry the request several times after waiting a while. The wait time
2382 	 * between retries starts small (10ms) and exponentially increases.
2383 	 * Total wait time is a little over two seconds. Retry logic in the
2384 	 * client driver may mean this whole loop is repeated if it continues to
2385 	 * fail.
2386 	 */
2387 	retry = 0;
2388 	delay_us = 10000;
2389 	while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
2390 		if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
2391 		    (rc != ENOENT)) {
2392 			/*
2393 			 * Do not retry alloc for PF, or for other errors on
2394 			 * a VF.
2395 			 */
2396 			goto fail1;
2397 		}
2398 
2399 		/* VF startup before PF is ready. Retry allocation. */
2400 		if (retry > 5) {
2401 			/* Too many attempts */
2402 			rc = EINVAL;
2403 			goto fail2;
2404 		}
2405 		EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
2406 		EFSYS_SLEEP(delay_us);
2407 		retry++;
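		/* Exponential back-off, until the delay is at least 0.5 seconds. */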
2408 		if (delay_us < 500000)
2409 			delay_us <<= 2;
2410 	}
2411 
2412 	return (0);
2413 
2414 fail2:
2415 	EFSYS_PROBE(fail2);
2416 fail1:
2417 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2418 
2419 	return (rc);
2420 }
2421 
2422 #endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
2423 
2424 #if EFX_OPTS_EF10()
2425 
2426 	__checkReturn	efx_rc_t
2427 ef10_nic_init(
2428 	__in		efx_nic_t *enp)
2429 {
2430 	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2431 	uint32_t min_vi_count, max_vi_count;
2432 	uint32_t vi_count, vi_base, vi_shift;
2433 	uint32_t i;
2434 	uint32_t vi_window_size;
2435 	efx_rc_t rc;
2436 	boolean_t alloc_vadaptor = B_TRUE;
2437 
2438 	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2439 
2440 	/* Enable reporting of some events (e.g. link change) */
2441 	if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
2442 		goto fail1;
2443 
2444 	/* Allocate (optional) on-chip PIO buffers */
2445 	ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
2446 
2447 	/*
2448 	 * For best performance, PIO writes should use a write-combined
2449 	 * (WC) memory mapping. Using a separate WC mapping for the PIO
2450 	 * aperture of each VI would be a burden to drivers (and not
2451 	 * possible if the host page size is >4Kbyte).
2452 	 *
2453 	 * To avoid this we use a single uncached (UC) mapping for VI
2454 	 * register access, and a single WC mapping for extra VIs used
2455 	 * for PIO writes.
2456 	 *
2457 	 * Each piobuf must be linked to a VI in the WC mapping, and to
2458 	 * each VI that is using a sub-allocated block from the piobuf.
2459 	 */
2460 	min_vi_count = edcp->edc_min_vi_count;
2461 	max_vi_count =
2462 	    edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
2463 
2464 	/* Ensure that the previously attached driver's VIs are freed */
2465 	if ((rc = efx_mcdi_free_vis(enp)) != 0)
2466 		goto fail2;
2467 
2468 	/*
2469 	 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
2470 	 * fails then retrying the request for fewer VI resources may succeed.
2471 	 */
2472 	vi_count = 0;
2473 	if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
2474 		    &vi_base, &vi_count, &vi_shift)) != 0)
2475 		goto fail3;
2476 
2477 	EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
2478 
2479 	if (vi_count < min_vi_count) {
2480 		rc = ENOMEM;
2481 		goto fail4;
2482 	}
2483 
2484 	enp->en_arch.ef10.ena_vi_base = vi_base;
2485 	enp->en_arch.ef10.ena_vi_count = vi_count;
2486 	enp->en_arch.ef10.ena_vi_shift = vi_shift;
2487 
2488 	if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
2489 		/* Not enough extra VIs to map piobufs */
2490 		ef10_nic_free_piobufs(enp);
2491 	}
2492 
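	/*
	 * VIs at the top of the allocated range are dedicated to PIO writes
	 * (none if no piobufs remain allocated).
	 */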
2493 	enp->en_arch.ef10.ena_pio_write_vi_base =
2494 	    vi_count - enp->en_arch.ef10.ena_piobuf_count;
2495 
2496 	EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
2497 	    EFX_VI_WINDOW_SHIFT_INVALID);
2498 	EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
2499 	    EFX_VI_WINDOW_SHIFT_64K);
2500 	vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
2501 
2502 	/* Save UC memory mapping details */
2503 	enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
2504 	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2505 		enp->en_arch.ef10.ena_uc_mem_map_size =
2506 		    (vi_window_size *
2507 		    enp->en_arch.ef10.ena_pio_write_vi_base);
2508 	} else {
2509 		enp->en_arch.ef10.ena_uc_mem_map_size =
2510 		    (vi_window_size *
2511 		    enp->en_arch.ef10.ena_vi_count);
2512 	}
2513 
2514 	/* Save WC memory mapping details */
2515 	enp->en_arch.ef10.ena_wc_mem_map_offset =
2516 	    enp->en_arch.ef10.ena_uc_mem_map_offset +
2517 	    enp->en_arch.ef10.ena_uc_mem_map_size;
2518 
2519 	enp->en_arch.ef10.ena_wc_mem_map_size =
2520 	    (vi_window_size *
2521 	    enp->en_arch.ef10.ena_piobuf_count);
2522 
2523 	/* Link piobufs to extra VIs in WC mapping */
2524 	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2525 		for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2526 			rc = efx_mcdi_link_piobuf(enp,
2527 			    enp->en_arch.ef10.ena_pio_write_vi_base + i,
2528 			    enp->en_arch.ef10.ena_piobuf_handle[i]);
2529 			if (rc != 0)
2530 				break;
2531 		}
2532 	}
2533 
2534 	/*
2535 	 * In the SR-IOV use case, the vAdaptor for the PF and its VFs is
2536 	 * allocated during NIC initialization, when the vSwitch is created and
2537 	 * vPorts are allocated. Hence, skip vAdaptor allocation for EVB and set
2538 	 * the NIC's vPort ID to the one allocated for the PF.
2539 	 */
2540 
2541 	enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
2542 #if EFSYS_OPT_EVB
2543 	if ((enp->en_vswitchp != NULL) && (enp->en_vswitchp->ev_evcp != NULL)) {
2544 		/* For EVB use vport allocated on vswitch */
2545 		enp->en_vport_id = enp->en_vswitchp->ev_evcp->evc_vport_id;
2546 		alloc_vadaptor = B_FALSE;
2547 	}
2548 #endif
2549 	if (alloc_vadaptor != B_FALSE) {
2550 		/* Allocate a vAdaptor attached to our upstream vPort/pPort */
2551 		if ((rc = ef10_upstream_port_vadaptor_alloc(enp)) != 0)
2552 			goto fail5;
2553 	}
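	/* EF10 firmware speaks MCDI v2, so the larger v2 SDU limit applies. */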
2554 	enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
2555 
2556 	return (0);
2557 
2558 fail5:
2559 	EFSYS_PROBE(fail5);
2560 fail4:
2561 	EFSYS_PROBE(fail4);
2562 fail3:
2563 	EFSYS_PROBE(fail3);
2564 fail2:
2565 	EFSYS_PROBE(fail2);
2566 
2567 	ef10_nic_free_piobufs(enp);
2568 
2569 fail1:
2570 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2571 
2572 	return (rc);
2573 }
2574 
2575 	__checkReturn	efx_rc_t
2576 ef10_nic_get_vi_pool(
2577 	__in		efx_nic_t *enp,
2578 	__out		uint32_t *vi_countp)
2579 {
2580 	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2581 
2582 	/*
2583 	 * Report VIs that the client driver can use.
2584 	 * Do not include VIs used for PIO buffer writes.
2585 	 */
2586 	*vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
2587 
2588 	return (0);
2589 }
2590 
2591 	__checkReturn	efx_rc_t
2592 ef10_nic_get_bar_region(
2593 	__in		efx_nic_t *enp,
2594 	__in		efx_nic_region_t region,
2595 	__out		uint32_t *offsetp,
2596 	__out		size_t *sizep)
2597 {
2598 	efx_rc_t rc;
2599 
2600 	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2601 
2602 	/*
2603 	 * TODO: Specify host memory mapping alignment and granularity
2604 	 * in efx_drv_limits_t so that they can be taken into account
2605 	 * when allocating extra VIs for PIO writes.
2606 	 */
2607 	switch (region) {
2608 	case EFX_REGION_VI:
2609 		/* UC mapped memory BAR region for VI registers */
2610 		*offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
2611 		*sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
2612 		break;
2613 
2614 	case EFX_REGION_PIO_WRITE_VI:
2615 		/* WC mapped memory BAR region for piobuf writes */
2616 		*offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
2617 		*sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
2618 		break;
2619 
2620 	default:
2621 		rc = EINVAL;
2622 		goto fail1;
2623 	}
2624 
2625 	return (0);
2626 
2627 fail1:
2628 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2629 
2630 	return (rc);
2631 }
2632 
2633 	__checkReturn	boolean_t
2634 ef10_nic_hw_unavailable(
2635 	__in		efx_nic_t *enp)
2636 {
2637 	efx_dword_t dword;
2638 
2639 	if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
2640 		return (B_TRUE);
2641 
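	/* Reads from hardware that has gone away return all-ones. */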
2642 	EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE);
2643 	if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
2644 		goto unavail;
2645 
2646 	return (B_FALSE);
2647 
2648 unavail:
2649 	ef10_nic_set_hw_unavailable(enp);
2650 
2651 	return (B_TRUE);
2652 }
2653 
2654 			void
2655 ef10_nic_set_hw_unavailable(
2656 	__in		efx_nic_t *enp)
2657 {
2658 	EFSYS_PROBE(hw_unavail);
2659 	enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
2660 }
2661 
2662 
2663 			void
2664 ef10_nic_fini(
2665 	__in		efx_nic_t *enp)
2666 {
2667 	uint32_t i;
2668 	efx_rc_t rc;
2669 	boolean_t do_vadaptor_free = B_TRUE;
2670 
2671 #if EFSYS_OPT_EVB
2672 	if (enp->en_vswitchp != NULL) {
2673 		/*
2674 		 * For SR-IOV the vAdaptor is freed with the vswitch,
2675 		 * so do not free it here.
2676 		 */
2677 		do_vadaptor_free = B_FALSE;
2678 	}
2679 #endif
2680 	if (do_vadaptor_free != B_FALSE) {
2681 		(void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
2682 		enp->en_vport_id = EVB_PORT_ID_NULL;
2683 	}
2684 
2685 	/* Unlink piobufs from extra VIs in WC mapping */
2686 	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2687 		for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2688 			rc = efx_mcdi_unlink_piobuf(enp,
2689 			    enp->en_arch.ef10.ena_pio_write_vi_base + i);
2690 			if (rc != 0)
2691 				break;
2692 		}
2693 	}
2694 
2695 	ef10_nic_free_piobufs(enp);
2696 
2697 	(void) efx_mcdi_free_vis(enp);
2698 	enp->en_arch.ef10.ena_vi_count = 0;
2699 }
2700 
2701 			void
2702 ef10_nic_unprobe(
2703 	__in		efx_nic_t *enp)
2704 {
2705 #if EFSYS_OPT_MON_STATS
2706 	mcdi_mon_cfg_free(enp);
2707 #endif /* EFSYS_OPT_MON_STATS */
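	/* Detach the driver from the NIC; ignore failures on teardown. */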
2708 	(void) efx_mcdi_drv_attach(enp, B_FALSE);
2709 }
2710 
2711 #if EFSYS_OPT_DIAG
2712 
2713 	__checkReturn	efx_rc_t
2714 ef10_nic_register_test(
2715 	__in		efx_nic_t *enp)
2716 {
2717 	efx_rc_t rc;
2718 
2719 	/* FIXME */
2720 	_NOTE(ARGUNUSED(enp))
2721 	_NOTE(CONSTANTCONDITION)
2722 	if (B_FALSE) {
2723 		rc = ENOTSUP;
2724 		goto fail1;
2725 	}
2726 	/* FIXME */
2727 
2728 	return (0);
2729 
2730 fail1:
2731 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2732 
2733 	return (rc);
2734 }
2735 
2736 #endif	/* EFSYS_OPT_DIAG */
2737 
2738 #if EFSYS_OPT_FW_SUBVARIANT_AWARE
2739 
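/* Read a NIC-global key/value setting maintained by firmware. */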
2740 	__checkReturn	efx_rc_t
2741 efx_mcdi_get_nic_global(
2742 	__in		efx_nic_t *enp,
2743 	__in		uint32_t key,
2744 	__out		uint32_t *valuep)
2745 {
2746 	efx_mcdi_req_t req;
2747 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN,
2748 		MC_CMD_GET_NIC_GLOBAL_OUT_LEN);
2749 	efx_rc_t rc;
2750 
2751 	req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
2752 	req.emr_in_buf = payload;
2753 	req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
2754 	req.emr_out_buf = payload;
2755 	req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;
2756 
2757 	MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);
2758 
2759 	efx_mcdi_execute(enp, &req);
2760 
2761 	if (req.emr_rc != 0) {
2762 		rc = req.emr_rc;
2763 		goto fail1;
2764 	}
2765 
2766 	if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {
2767 		rc = EMSGSIZE;
2768 		goto fail2;
2769 	}
2770 
2771 	*valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);
2772 
2773 	return (0);
2774 
2775 fail2:
2776 	EFSYS_PROBE(fail2);
2777 fail1:
2778 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2779 
2780 	return (rc);
2781 }
2782 
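/* Write a NIC-global key/value setting maintained by firmware. */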
2783 	__checkReturn	efx_rc_t
2784 efx_mcdi_set_nic_global(
2785 	__in		efx_nic_t *enp,
2786 	__in		uint32_t key,
2787 	__in		uint32_t value)
2788 {
2789 	efx_mcdi_req_t req;
2790 	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0);
2791 	efx_rc_t rc;
2792 
2793 	req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
2794 	req.emr_in_buf = payload;
2795 	req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
2796 	req.emr_out_buf = NULL;
2797 	req.emr_out_length = 0;
2798 
2799 	MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
2800 	MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);
2801 
2802 	efx_mcdi_execute(enp, &req);
2803 
2804 	if (req.emr_rc != 0) {
2805 		rc = req.emr_rc;
2806 		goto fail1;
2807 	}
2808 
2809 	return (0);
2810 
2811 fail1:
2812 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
2813 
2814 	return (rc);
2815 }
2816 
2817 #endif	/* EFSYS_OPT_FW_SUBVARIANT_AWARE */
2818 
2819 #endif	/* EFX_OPTS_EF10() */
2820