1 /* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2008-2019 Solarflare Communications Inc.
5 */
6
7 #include "efx.h"
8 #include "efx_impl.h"
9
10 #if EFSYS_OPT_MCDI
11
12 /*
13 * There are three versions of the MCDI interface:
14 * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers.
15 * - MCDIv1: Siena firmware and Huntington BootROM.
16 * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM.
17 * Transport uses MCDIv2 headers.
18 *
19 * MCDIv2 Header NOT_EPOCH flag
20 * ----------------------------
21 * A new epoch begins at initial startup or after an MC reboot, and defines when
22 * the MC should reject stale MCDI requests.
23 *
24 * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all
25 * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1.
26 *
27 * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a
28 * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0.
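 *
 * For illustration, the header construction in efx_mcdi_request_start()
 * below maps the interface state onto this flag as follows:
 *
 *   emip->emi_new_epoch == B_TRUE   =>  request sent with NOT_EPOCH=0
 *   emip->emi_new_epoch == B_FALSE  =>  request sent with NOT_EPOCH=1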
29 */
30
31
32
33 #if EFSYS_OPT_SIENA
34
35 static const efx_mcdi_ops_t __efx_mcdi_siena_ops = {
36 siena_mcdi_init, /* emco_init */
37 siena_mcdi_send_request, /* emco_send_request */
38 siena_mcdi_poll_reboot, /* emco_poll_reboot */
39 siena_mcdi_poll_response, /* emco_poll_response */
40 siena_mcdi_read_response, /* emco_read_response */
41 siena_mcdi_fini, /* emco_fini */
42 siena_mcdi_feature_supported, /* emco_feature_supported */
43 siena_mcdi_get_timeout, /* emco_get_timeout */
44 };
45
46 #endif /* EFSYS_OPT_SIENA */
47
48 #if EFX_OPTS_EF10()
49
50 static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
51 ef10_mcdi_init, /* emco_init */
52 ef10_mcdi_send_request, /* emco_send_request */
53 ef10_mcdi_poll_reboot, /* emco_poll_reboot */
54 ef10_mcdi_poll_response, /* emco_poll_response */
55 ef10_mcdi_read_response, /* emco_read_response */
56 ef10_mcdi_fini, /* emco_fini */
57 ef10_mcdi_feature_supported, /* emco_feature_supported */
58 ef10_mcdi_get_timeout, /* emco_get_timeout */
59 };
60
61 #endif /* EFX_OPTS_EF10() */
62
63 #if EFSYS_OPT_RIVERHEAD
64
65 static const efx_mcdi_ops_t __efx_mcdi_rhead_ops = {
66 ef10_mcdi_init, /* emco_init */
67 ef10_mcdi_send_request, /* emco_send_request */
68 ef10_mcdi_poll_reboot, /* emco_poll_reboot */
69 ef10_mcdi_poll_response, /* emco_poll_response */
70 ef10_mcdi_read_response, /* emco_read_response */
71 ef10_mcdi_fini, /* emco_fini */
72 ef10_mcdi_feature_supported, /* emco_feature_supported */
73 ef10_mcdi_get_timeout, /* emco_get_timeout */
74 };
75
76 #endif /* EFSYS_OPT_RIVERHEAD */
77
78
79
80 __checkReturn efx_rc_t
81 efx_mcdi_init(
82 __in efx_nic_t *enp,
83 __in const efx_mcdi_transport_t *emtp)
84 {
85 const efx_mcdi_ops_t *emcop;
86 efx_rc_t rc;
87
88 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
89 EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
90
91 switch (enp->en_family) {
92 #if EFSYS_OPT_SIENA
93 case EFX_FAMILY_SIENA:
94 emcop = &__efx_mcdi_siena_ops;
95 break;
96 #endif /* EFSYS_OPT_SIENA */
97
98 #if EFSYS_OPT_HUNTINGTON
99 case EFX_FAMILY_HUNTINGTON:
100 emcop = &__efx_mcdi_ef10_ops;
101 break;
102 #endif /* EFSYS_OPT_HUNTINGTON */
103
104 #if EFSYS_OPT_MEDFORD
105 case EFX_FAMILY_MEDFORD:
106 emcop = &__efx_mcdi_ef10_ops;
107 break;
108 #endif /* EFSYS_OPT_MEDFORD */
109
110 #if EFSYS_OPT_MEDFORD2
111 case EFX_FAMILY_MEDFORD2:
112 emcop = &__efx_mcdi_ef10_ops;
113 break;
114 #endif /* EFSYS_OPT_MEDFORD2 */
115
116 #if EFSYS_OPT_RIVERHEAD
117 case EFX_FAMILY_RIVERHEAD:
118 emcop = &__efx_mcdi_rhead_ops;
119 break;
120 #endif /* EFSYS_OPT_RIVERHEAD */
121
122 default:
123 EFSYS_ASSERT(0);
124 rc = ENOTSUP;
125 goto fail1;
126 }
127
128 if (enp->en_features & EFX_FEATURE_MCDI_DMA) {
129 /* MCDI requires a DMA buffer in host memory */
130 if ((emtp == NULL) || (emtp->emt_dma_mem == NULL)) {
131 rc = EINVAL;
132 goto fail2;
133 }
134 }
135 enp->en_mcdi.em_emtp = emtp;
136
137 if (emcop != NULL && emcop->emco_init != NULL) {
138 if ((rc = emcop->emco_init(enp, emtp)) != 0)
139 goto fail3;
140 }
141
142 enp->en_mcdi.em_emcop = emcop;
143 enp->en_mod_flags |= EFX_MOD_MCDI;
144
145 return (0);
146
147 fail3:
148 EFSYS_PROBE(fail3);
149 fail2:
150 EFSYS_PROBE(fail2);
151 fail1:
152 EFSYS_PROBE1(fail1, efx_rc_t, rc);
153
154 enp->en_mcdi.em_emcop = NULL;
155 enp->en_mcdi.em_emtp = NULL;
156 enp->en_mod_flags &= ~EFX_MOD_MCDI;
157
158 return (rc);
159 }
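
/*
 * Illustrative sketch (hypothetical caller code, not part of this file):
 * a typical client fills in an efx_mcdi_transport_t before calling
 * efx_mcdi_init(). The callback names (my_mcdi_execute and friends) and
 * my_softc/my_mcdi_dma_buf are assumptions; the structure fields are the
 * ones referenced elsewhere in this file.
 *
 *	static efx_mcdi_transport_t my_emt;
 *
 *	my_emt.emt_context = my_softc;
 *	my_emt.emt_dma_mem = &my_mcdi_dma_buf;	(required if EFX_FEATURE_MCDI_DMA)
 *	my_emt.emt_execute = my_mcdi_execute;	(issues and completes a request)
 *	my_emt.emt_ev_cpl = my_mcdi_ev_cpl;	(wakes a waiter on event completion)
 *	my_emt.emt_exception = my_mcdi_exception;	(handles MC reboot/assertion)
 *	my_emt.emt_logger = NULL;		(optional MCDI logging hook)
 *
 *	if ((rc = efx_mcdi_init(enp, &my_emt)) != 0)
 *		goto fail;
 */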
160
161 void
162 efx_mcdi_fini(
163 __in efx_nic_t *enp)
164 {
165 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
166 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
167
168 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
169 EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
170
171 if (emcop != NULL && emcop->emco_fini != NULL)
172 emcop->emco_fini(enp);
173
174 emip->emi_port = 0;
175 emip->emi_aborted = 0;
176
177 enp->en_mcdi.em_emcop = NULL;
178 enp->en_mod_flags &= ~EFX_MOD_MCDI;
179 }
180
181 void
182 efx_mcdi_new_epoch(
183 __in efx_nic_t *enp)
184 {
185 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
186 efsys_lock_state_t state;
187
188 /* Start a new epoch (allow fresh MCDI requests to succeed) */
189 EFSYS_LOCK(enp->en_eslp, state);
190 emip->emi_new_epoch = B_TRUE;
191 EFSYS_UNLOCK(enp->en_eslp, state);
192 }
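
/*
 * Illustrative usage (hypothetical caller code): once an MC reboot has been
 * handled, the driver starts a new epoch before issuing any further MCDI,
 * so that the next request is sent with NOT_EPOCH=0 and is accepted by the
 * rebooted MC. my_handle_mc_reboot() is an assumption.
 *
 *	my_handle_mc_reboot(my_softc);
 *	efx_mcdi_new_epoch(enp);
 */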
193
194 static void
195 efx_mcdi_send_request(
196 __in efx_nic_t *enp,
197 __in void *hdrp,
198 __in size_t hdr_len,
199 __in void *sdup,
200 __in size_t sdu_len)
201 {
202 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
203
204 emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len);
205 }
206
207 static efx_rc_t
208 efx_mcdi_poll_reboot(
209 __in efx_nic_t *enp)
210 {
211 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
212 efx_rc_t rc;
213
214 rc = emcop->emco_poll_reboot(enp);
215 return (rc);
216 }
217
218 static boolean_t
219 efx_mcdi_poll_response(
220 __in efx_nic_t *enp)
221 {
222 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
223 boolean_t available;
224
225 available = emcop->emco_poll_response(enp);
226 return (available);
227 }
228
229 static void
230 efx_mcdi_read_response(
231 __in efx_nic_t *enp,
232 __out void *bufferp,
233 __in size_t offset,
234 __in size_t length)
235 {
236 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
237
238 emcop->emco_read_response(enp, bufferp, offset, length);
239 }
240
241 void
242 efx_mcdi_request_start(
243 __in efx_nic_t *enp,
244 __in efx_mcdi_req_t *emrp,
245 __in boolean_t ev_cpl)
246 {
247 #if EFSYS_OPT_MCDI_LOGGING
248 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
249 #endif
250 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
251 efx_dword_t hdr[2];
252 size_t hdr_len;
253 unsigned int max_version;
254 unsigned int seq;
255 unsigned int xflags;
256 boolean_t new_epoch;
257 efsys_lock_state_t state;
258
259 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
260 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
261 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
262
263 /*
264 * efx_mcdi_request_start() is naturally serialised against both
265 * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
266 * by virtue of there only being one outstanding MCDI request.
267 * Unfortunately, upper layers may also call efx_mcdi_request_abort()
268 * at any time, to time out a pending MCDI request. That request may
269 * then subsequently complete, meaning efx_mcdi_ev_cpl() or
270 * efx_mcdi_ev_death() may end up running in parallel with
271 * efx_mcdi_request_start(). This race is handled by ensuring that
272 * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
273 * en_eslp lock.
274 */
275 EFSYS_LOCK(enp->en_eslp, state);
276 EFSYS_ASSERT(emip->emi_pending_req == NULL);
277 emip->emi_pending_req = emrp;
278 emip->emi_ev_cpl = ev_cpl;
279 emip->emi_poll_cnt = 0;
280 seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
281 new_epoch = emip->emi_new_epoch;
282 max_version = emip->emi_max_version;
283 EFSYS_UNLOCK(enp->en_eslp, state);
284
285 xflags = 0;
286 if (ev_cpl)
287 xflags |= MCDI_HEADER_XFLAGS_EVREQ;
288
289 /*
290 * Huntington firmware supports MCDIv2, but the Huntington BootROM only
291 * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
292 * possible to support this.
293 */
294 if ((max_version >= 2) &&
295 ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
296 (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) ||
297 (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
298 /* Construct MCDI v2 header */
299 hdr_len = sizeof (hdr);
300 EFX_POPULATE_DWORD_8(hdr[0],
301 MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
302 MCDI_HEADER_RESYNC, 1,
303 MCDI_HEADER_DATALEN, 0,
304 MCDI_HEADER_SEQ, seq,
305 MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
306 MCDI_HEADER_ERROR, 0,
307 MCDI_HEADER_RESPONSE, 0,
308 MCDI_HEADER_XFLAGS, xflags);
309
310 EFX_POPULATE_DWORD_2(hdr[1],
311 MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
312 MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
313 } else {
314 /* Construct MCDI v1 header */
315 hdr_len = sizeof (hdr[0]);
316 EFX_POPULATE_DWORD_8(hdr[0],
317 MCDI_HEADER_CODE, emrp->emr_cmd,
318 MCDI_HEADER_RESYNC, 1,
319 MCDI_HEADER_DATALEN, emrp->emr_in_length,
320 MCDI_HEADER_SEQ, seq,
321 MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
322 MCDI_HEADER_ERROR, 0,
323 MCDI_HEADER_RESPONSE, 0,
324 MCDI_HEADER_XFLAGS, xflags);
325 }
326
327 #if EFSYS_OPT_MCDI_LOGGING
328 if (emtp->emt_logger != NULL) {
329 emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST,
330 &hdr[0], hdr_len,
331 emrp->emr_in_buf, emrp->emr_in_length);
332 }
333 #endif /* EFSYS_OPT_MCDI_LOGGING */
334
335 efx_mcdi_send_request(enp, &hdr[0], hdr_len,
336 emrp->emr_in_buf, emrp->emr_in_length);
337 }
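
/*
 * Illustrative polled-mode flow (hypothetical transport code): a transport's
 * emt_execute callback typically starts the request, polls for completion
 * and aborts on timeout. my_delay_us() is an assumption.
 *
 *	uint32_t timeout_us;
 *	uint32_t elapsed_us = 0;
 *
 *	efx_mcdi_request_start(enp, emrp, B_FALSE);
 *	efx_mcdi_get_timeout(enp, emrp, &timeout_us);
 *
 *	while (!efx_mcdi_request_poll(enp)) {
 *		if (elapsed_us >= timeout_us &&
 *		    efx_mcdi_request_abort(enp))
 *			break;			(emr_rc is set to ETIMEDOUT)
 *		my_delay_us(10);
 *		elapsed_us += 10;
 *	}
 */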
338
339
340 static void
341 efx_mcdi_read_response_header(
342 __in efx_nic_t *enp,
343 __inout efx_mcdi_req_t *emrp)
344 {
345 #if EFSYS_OPT_MCDI_LOGGING
346 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
347 #endif /* EFSYS_OPT_MCDI_LOGGING */
348 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
349 efx_dword_t hdr[2];
350 unsigned int hdr_len;
351 unsigned int data_len;
352 unsigned int seq;
353 unsigned int cmd;
354 unsigned int error;
355 efx_rc_t rc;
356
357 EFSYS_ASSERT(emrp != NULL);
358
359 efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0]));
360 hdr_len = sizeof (hdr[0]);
361
362 cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE);
363 seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ);
364 error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR);
365
366 if (cmd != MC_CMD_V2_EXTN) {
367 data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN);
368 } else {
369 efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
370 hdr_len += sizeof (hdr[1]);
371
372 cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
373 data_len =
374 EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
375 }
376
377 if (error && (data_len == 0)) {
378 /* The MC has rebooted since the request was sent. */
379 EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
380 efx_mcdi_poll_reboot(enp);
381 rc = EIO;
382 goto fail1;
383 }
384 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
385 if (((cmd != emrp->emr_cmd) && (emrp->emr_cmd != MC_CMD_PROXY_CMD)) ||
386 #else
387 if ((cmd != emrp->emr_cmd) ||
388 #endif
389 (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
390 /* Response is for a different request */
391 rc = EIO;
392 goto fail2;
393 }
394 if (error) {
395 efx_dword_t err[2];
396 unsigned int err_len = MIN(data_len, sizeof (err));
397 int err_code = MC_CMD_ERR_EPROTO;
398 int err_arg = 0;
399
400 /* Read error code (and arg num for MCDI v2 commands) */
401 efx_mcdi_read_response(enp, &err, hdr_len, err_len);
402
403 if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t)))
404 err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0);
405 #ifdef WITH_MCDI_V2
406 if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t)))
407 err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0);
408 #endif
409 emrp->emr_err_code = err_code;
410 emrp->emr_err_arg = err_arg;
411
412 #if EFSYS_OPT_MCDI_PROXY_AUTH
413 if ((err_code == MC_CMD_ERR_PROXY_PENDING) &&
414 (err_len == sizeof (err))) {
415 /*
416 * The MCDI request would normally fail with EPERM, but
417 * firmware has forwarded it to an authorization agent
418 * attached to a privileged PF.
419 *
420 * Save the authorization request handle. The client
421 * must wait for a PROXY_RESPONSE event, or timeout.
422 */
423 emrp->emr_proxy_handle = err_arg;
424 }
425 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
426
427 #if EFSYS_OPT_MCDI_LOGGING
428 if (emtp->emt_logger != NULL) {
429 emtp->emt_logger(emtp->emt_context,
430 EFX_LOG_MCDI_RESPONSE,
431 &hdr[0], hdr_len,
432 &err[0], err_len);
433 }
434 #endif /* EFSYS_OPT_MCDI_LOGGING */
435
436 if (!emrp->emr_quiet) {
437 EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd,
438 int, err_code, int, err_arg);
439 }
440
441 rc = efx_mcdi_request_errcode(err_code);
442 goto fail3;
443 }
444
445 emrp->emr_rc = 0;
446 emrp->emr_out_length_used = data_len;
447 #if EFSYS_OPT_MCDI_PROXY_AUTH
448 emrp->emr_proxy_handle = 0;
449 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
450 return;
451
452 fail3:
453 fail2:
454 fail1:
455 emrp->emr_rc = rc;
456 emrp->emr_out_length_used = 0;
457 }
458
459 static void
460 efx_mcdi_finish_response(
461 __in efx_nic_t *enp,
462 __in efx_mcdi_req_t *emrp)
463 {
464 #if EFSYS_OPT_MCDI_LOGGING
465 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
466 #endif /* EFSYS_OPT_MCDI_LOGGING */
467 efx_dword_t hdr[2];
468 unsigned int hdr_len;
469 size_t bytes;
470 unsigned int resp_off;
471 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
472 unsigned int resp_cmd;
473 boolean_t proxied_cmd_resp = B_FALSE;
474 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
475
476 if (emrp->emr_out_buf == NULL)
477 return;
478
479 /* Read the command header to detect MCDI response format */
480 hdr_len = sizeof (hdr[0]);
481 efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len);
482 if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) {
483 /*
484 * Read the actual payload length. The length given in the event
485 * is only correct for responses with the V1 format.
486 */
487 efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
488 hdr_len += sizeof (hdr[1]);
489 resp_off = hdr_len;
490
491 emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1],
492 MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
493 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
494 /*
495 * A proxy MCDI command is executed by the PF on behalf of
496 * one of its VFs. The command to be proxied follows
497 * immediately afterward in the host buffer.
498 * The complete response to the inner PROXY_CMD call should be
499 * copied to the output buffer so that it can be returned to the
500 * requesting function in the MC_CMD_PROXY_COMPLETE payload.
501 */
502 resp_cmd =
503 EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
504 proxied_cmd_resp = ((emrp->emr_cmd == MC_CMD_PROXY_CMD) &&
505 (resp_cmd != MC_CMD_PROXY_CMD));
506 if (proxied_cmd_resp) {
507 resp_off = 0;
508 emrp->emr_out_length_used += hdr_len;
509 }
510 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
511 } else {
512 resp_off = hdr_len;
513 }
514
515 /* Copy payload out into caller supplied buffer */
516 bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
517 efx_mcdi_read_response(enp, emrp->emr_out_buf, resp_off, bytes);
518
519 /* Report bytes copied to caller (response message may be larger) */
520 emrp->emr_out_length_used = bytes;
521
522 #if EFSYS_OPT_MCDI_LOGGING
523 if (emtp->emt_logger != NULL) {
524 emtp->emt_logger(emtp->emt_context,
525 EFX_LOG_MCDI_RESPONSE,
526 &hdr[0], hdr_len,
527 emrp->emr_out_buf, bytes);
528 }
529 #endif /* EFSYS_OPT_MCDI_LOGGING */
530 }
531
532
533 __checkReturn boolean_t
534 efx_mcdi_request_poll(
535 __in efx_nic_t *enp)
536 {
537 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
538 efx_mcdi_req_t *emrp;
539 efsys_lock_state_t state;
540 efx_rc_t rc;
541
542 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
543 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
544 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
545
546 /* Serialise against post-watchdog efx_mcdi_ev* */
547 EFSYS_LOCK(enp->en_eslp, state);
548
549 EFSYS_ASSERT(emip->emi_pending_req != NULL);
550 EFSYS_ASSERT(!emip->emi_ev_cpl);
551 emrp = emip->emi_pending_req;
552
553 /* Check if hardware is unavailable */
554 if (efx_nic_hw_unavailable(enp)) {
555 EFSYS_UNLOCK(enp->en_eslp, state);
556 return (B_FALSE);
557 }
558
559 /* Check for reboot atomically w.r.t efx_mcdi_request_start */
560 if (emip->emi_poll_cnt++ == 0) {
561 if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
562 emip->emi_pending_req = NULL;
563 EFSYS_UNLOCK(enp->en_eslp, state);
564
565 /* Reboot/Assertion */
566 if (rc == EIO || rc == EINTR)
567 efx_mcdi_raise_exception(enp, emrp, rc);
568
569 goto fail1;
570 }
571 }
572
573 /* Check if a response is available */
574 if (efx_mcdi_poll_response(enp) == B_FALSE) {
575 EFSYS_UNLOCK(enp->en_eslp, state);
576 return (B_FALSE);
577 }
578
579 /* Read the response header */
580 efx_mcdi_read_response_header(enp, emrp);
581
582 /* Request complete */
583 emip->emi_pending_req = NULL;
584
585 /* Ensure stale MCDI requests fail after an MC reboot. */
586 emip->emi_new_epoch = B_FALSE;
587
588 EFSYS_UNLOCK(enp->en_eslp, state);
589
590 if ((rc = emrp->emr_rc) != 0)
591 goto fail2;
592
593 efx_mcdi_finish_response(enp, emrp);
594 return (B_TRUE);
595
596 fail2:
597 if (!emrp->emr_quiet)
598 EFSYS_PROBE(fail2);
599 fail1:
600 if (!emrp->emr_quiet)
601 EFSYS_PROBE1(fail1, efx_rc_t, rc);
602
603 return (B_TRUE);
604 }
605
606 __checkReturn boolean_t
607 efx_mcdi_request_abort(
608 __in efx_nic_t *enp)
609 {
610 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
611 efx_mcdi_req_t *emrp;
612 boolean_t aborted;
613 efsys_lock_state_t state;
614
615 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
616 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
617 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
618
619 /*
620 * efx_mcdi_ev_* may have already completed this event, and be
621 * spinning/blocked on the upper layer lock. So it *is* legitimate
622 * for emi_pending_req to be NULL. If there is a pending event
623 * completed request, then provide a "credit" to allow
624 * efx_mcdi_ev_cpl() to accept a single spurious completion.
625 */
626 EFSYS_LOCK(enp->en_eslp, state);
627 emrp = emip->emi_pending_req;
628 aborted = (emrp != NULL);
629 if (aborted) {
630 emip->emi_pending_req = NULL;
631
632 /* Error the request */
633 emrp->emr_out_length_used = 0;
634 emrp->emr_rc = ETIMEDOUT;
635
636 /* Provide a credit for seqno/emr_pending_req mismatches */
637 if (emip->emi_ev_cpl)
638 ++emip->emi_aborted;
639
640 /*
641 * The upper layer has called us, so we don't
642 * need to complete the request.
643 */
644 }
645 EFSYS_UNLOCK(enp->en_eslp, state);
646
647 return (aborted);
648 }
649
650 __checkReturn efx_rc_t
651 efx_mcdi_get_client_handle(
652 __in efx_nic_t *enp,
653 __in efx_pcie_interface_t intf,
654 __in uint16_t pf,
655 __in uint16_t vf,
656 __out uint32_t *handle)
657 {
658 efx_mcdi_req_t req;
659 EFX_MCDI_DECLARE_BUF(payload,
660 MC_CMD_GET_CLIENT_HANDLE_IN_LEN,
661 MC_CMD_GET_CLIENT_HANDLE_OUT_LEN);
662 uint32_t pcie_intf;
663 efx_rc_t rc;
664
665 if (handle == NULL) {
666 rc = EINVAL;
667 goto fail1;
668 }
669
670 rc = efx_mcdi_intf_to_pcie(intf, &pcie_intf);
671 if (rc != 0)
672 goto fail2;
673
674 req.emr_cmd = MC_CMD_GET_CLIENT_HANDLE;
675 req.emr_in_buf = payload;
676 req.emr_in_length = MC_CMD_GET_CLIENT_HANDLE_IN_LEN;
677 req.emr_out_buf = payload;
678 req.emr_out_length = MC_CMD_GET_CLIENT_HANDLE_OUT_LEN;
679
680 MCDI_IN_SET_DWORD(req, GET_CLIENT_HANDLE_IN_TYPE,
681 MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC);
682 MCDI_IN_SET_WORD(req, GET_CLIENT_HANDLE_IN_FUNC_PF, pf);
683 MCDI_IN_SET_WORD(req, GET_CLIENT_HANDLE_IN_FUNC_VF, vf);
684 MCDI_IN_SET_DWORD(req, GET_CLIENT_HANDLE_IN_FUNC_INTF, pcie_intf);
685
686 efx_mcdi_execute(enp, &req);
687
688 if (req.emr_rc != 0) {
689 rc = req.emr_rc;
690 goto fail3;
691 }
692
693 if (req.emr_out_length_used < MC_CMD_GET_CLIENT_HANDLE_OUT_LEN) {
694 rc = EMSGSIZE;
695 goto fail4;
696 }
697
698 *handle = MCDI_OUT_DWORD(req, GET_CLIENT_HANDLE_OUT_HANDLE);
699
700 return (0);
701 fail4:
702 EFSYS_PROBE(fail4);
703 fail3:
704 EFSYS_PROBE(fail3);
705 fail2:
706 EFSYS_PROBE(fail2);
707 fail1:
708 EFSYS_PROBE1(fail1, efx_rc_t, rc);
709 return (rc);
710 }
711
712 __checkReturn efx_rc_t
713 efx_mcdi_get_own_client_handle(
714 __in efx_nic_t *enp,
715 __out uint32_t *handle)
716 {
717 efx_rc_t rc;
718
719 rc = efx_mcdi_get_client_handle(enp, EFX_PCIE_INTERFACE_CALLER,
720 PCIE_FUNCTION_PF_NULL, PCIE_FUNCTION_VF_NULL, handle);
721 if (rc != 0)
722 goto fail1;
723
724 return (0);
725 fail1:
726 EFSYS_PROBE1(fail1, efx_rc_t, rc);
727 return (rc);
728 }
729
730 __checkReturn efx_rc_t
731 efx_mcdi_client_mac_addr_get(
732 __in efx_nic_t *enp,
733 __in uint32_t client_handle,
734 __out uint8_t addr_bytes[EFX_MAC_ADDR_LEN])
735 {
736 efx_mcdi_req_t req;
737 EFX_MCDI_DECLARE_BUF(payload,
738 MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN,
739 MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1));
740 efx_rc_t rc;
741
742 req.emr_cmd = MC_CMD_GET_CLIENT_MAC_ADDRESSES;
743 req.emr_in_buf = payload;
744 req.emr_in_length = MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN;
745 req.emr_out_buf = payload;
746 req.emr_out_length = MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1);
747
748 MCDI_IN_SET_DWORD(req, GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE,
749 client_handle);
750
751 efx_mcdi_execute(enp, &req);
752
753 if (req.emr_rc != 0) {
754 rc = req.emr_rc;
755 goto fail1;
756 }
757
758 if (req.emr_out_length_used <
759 MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1)) {
760 rc = EMSGSIZE;
761 goto fail2;
762 }
763
764 memcpy(addr_bytes,
765 MCDI_OUT2(req, uint8_t, GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS),
766 EFX_MAC_ADDR_LEN);
767
768 return (0);
769
770 fail2:
771 EFSYS_PROBE(fail2);
772 fail1:
773 EFSYS_PROBE1(fail1, efx_rc_t, rc);
774 return (rc);
775 }
776
777 __checkReturn efx_rc_t
778 efx_mcdi_client_mac_addr_set(
779 __in efx_nic_t *enp,
780 __in uint32_t client_handle,
781 __in const uint8_t addr_bytes[EFX_MAC_ADDR_LEN])
782 {
783 efx_mcdi_req_t req;
784 EFX_MCDI_DECLARE_BUF(payload,
785 MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LEN(1),
786 MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT_LEN);
787 uint32_t oui;
788 efx_rc_t rc;
789
790 if (EFX_MAC_ADDR_IS_MULTICAST(addr_bytes)) {
791 rc = EINVAL;
792 goto fail1;
793 }
794
795 oui = addr_bytes[0] << 16 | addr_bytes[1] << 8 | addr_bytes[2];
796 if (oui == 0x000000) {
797 rc = EINVAL;
798 goto fail2;
799 }
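/*
 * Example: for the unicast address 00:0f:53:01:02:03 (00:0f:53 is the
 * Solarflare OUI) the value computed above is oui == 0x000f53, so both
 * checks pass; an all-zero OUI is rejected.
 */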
800
801 req.emr_cmd = MC_CMD_SET_CLIENT_MAC_ADDRESSES;
802 req.emr_in_buf = payload;
803 req.emr_in_length = MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LEN(1);
804 req.emr_out_buf = payload;
805 req.emr_out_length = MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT_LEN;
806
807 MCDI_IN_SET_DWORD(req, SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE,
808 client_handle);
809
810 memcpy(MCDI_IN2(req, uint8_t, SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS),
811 addr_bytes, EFX_MAC_ADDR_LEN);
812
813 efx_mcdi_execute(enp, &req);
814
815 if (req.emr_rc != 0) {
816 rc = req.emr_rc;
817 goto fail3;
818 }
819
820 return (0);
821
822 fail3:
823 EFSYS_PROBE(fail3);
824 fail2:
825 EFSYS_PROBE(fail2);
826 fail1:
827 EFSYS_PROBE1(fail1, efx_rc_t, rc);
828 return (rc);
829 }
830
831 void
832 efx_mcdi_get_timeout(
833 __in efx_nic_t *enp,
834 __in efx_mcdi_req_t *emrp,
835 __out uint32_t *timeoutp)
836 {
837 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
838
839 emcop->emco_get_timeout(enp, emrp, timeoutp);
840 }
841
842 __checkReturn efx_rc_t
843 efx_mcdi_request_errcode(
844 __in unsigned int err)
845 {
846
847 switch (err) {
848 /* MCDI v1 */
849 case MC_CMD_ERR_EPERM:
850 return (EACCES);
851 case MC_CMD_ERR_ENOENT:
852 return (ENOENT);
853 case MC_CMD_ERR_EINTR:
854 return (EINTR);
855 case MC_CMD_ERR_EACCES:
856 return (EACCES);
857 case MC_CMD_ERR_EBUSY:
858 return (EBUSY);
859 case MC_CMD_ERR_EINVAL:
860 return (EINVAL);
861 case MC_CMD_ERR_EDEADLK:
862 return (EDEADLK);
863 case MC_CMD_ERR_ENOSYS:
864 return (ENOTSUP);
865 case MC_CMD_ERR_ETIME:
866 return (ETIMEDOUT);
867 case MC_CMD_ERR_ENOTSUP:
868 return (ENOTSUP);
869 case MC_CMD_ERR_EALREADY:
870 return (EALREADY);
871
872 /* MCDI v2 */
873 case MC_CMD_ERR_EEXIST:
874 return (EEXIST);
875 #ifdef MC_CMD_ERR_EAGAIN
876 case MC_CMD_ERR_EAGAIN:
877 return (EAGAIN);
878 #endif
879 #ifdef MC_CMD_ERR_ENOSPC
880 case MC_CMD_ERR_ENOSPC:
881 return (ENOSPC);
882 #endif
883 case MC_CMD_ERR_ERANGE:
884 return (ERANGE);
885
886 case MC_CMD_ERR_ALLOC_FAIL:
887 return (ENOMEM);
888 case MC_CMD_ERR_NO_VADAPTOR:
889 return (ENOENT);
890 case MC_CMD_ERR_NO_EVB_PORT:
891 return (ENOENT);
892 case MC_CMD_ERR_NO_VSWITCH:
893 return (ENODEV);
894 case MC_CMD_ERR_VLAN_LIMIT:
895 return (EINVAL);
896 case MC_CMD_ERR_BAD_PCI_FUNC:
897 return (ENODEV);
898 case MC_CMD_ERR_BAD_VLAN_MODE:
899 return (EINVAL);
900 case MC_CMD_ERR_BAD_VSWITCH_TYPE:
901 return (EINVAL);
902 case MC_CMD_ERR_BAD_VPORT_TYPE:
903 return (EINVAL);
904 case MC_CMD_ERR_MAC_EXIST:
905 return (EEXIST);
906
907 case MC_CMD_ERR_PROXY_PENDING:
908 return (EAGAIN);
909
910 default:
911 EFSYS_PROBE1(mc_pcol_error, int, err);
912 return (EIO);
913 }
914 }
915
916 void
917 efx_mcdi_raise_exception(
918 __in efx_nic_t *enp,
919 __in_opt efx_mcdi_req_t *emrp,
920 __in int rc)
921 {
922 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
923 efx_mcdi_exception_t exception;
924
925 /* Reboot or Assertion failure only */
926 EFSYS_ASSERT(rc == EIO || rc == EINTR);
927
928 /*
929 * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
930 * then the EIO is not worthy of an exception.
931 */
932 if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
933 return;
934
935 exception = (rc == EIO)
936 ? EFX_MCDI_EXCEPTION_MC_REBOOT
937 : EFX_MCDI_EXCEPTION_MC_BADASSERT;
938
939 emtp->emt_exception(emtp->emt_context, exception);
940 }
941
942 void
943 efx_mcdi_execute(
944 __in efx_nic_t *enp,
945 __inout efx_mcdi_req_t *emrp)
946 {
947 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
948
949 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
950 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
951
952 emrp->emr_quiet = B_FALSE;
953 emtp->emt_execute(emtp->emt_context, emrp);
954 }
955
956 void
957 efx_mcdi_execute_quiet(
958 __in efx_nic_t *enp,
959 __inout efx_mcdi_req_t *emrp)
960 {
961 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
962
963 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
964 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
965
966 emrp->emr_quiet = B_TRUE;
967 emtp->emt_execute(emtp->emt_context, emrp);
968 }
969
970 void
971 efx_mcdi_ev_cpl(
972 __in efx_nic_t *enp,
973 __in unsigned int seq,
974 __in unsigned int outlen,
975 __in int errcode)
976 {
977 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
978 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
979 efx_mcdi_req_t *emrp;
980 efsys_lock_state_t state;
981
982 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
983 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
984
985 /*
986 * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
987 * when we're completing an aborted request.
988 */
989 EFSYS_LOCK(enp->en_eslp, state);
990 if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
991 (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
992 EFSYS_ASSERT(emip->emi_aborted > 0);
993 if (emip->emi_aborted > 0)
994 --emip->emi_aborted;
995 EFSYS_UNLOCK(enp->en_eslp, state);
996 return;
997 }
998
999 emrp = emip->emi_pending_req;
1000 emip->emi_pending_req = NULL;
1001 EFSYS_UNLOCK(enp->en_eslp, state);
1002
1003 if (emip->emi_max_version >= 2) {
1004 /* MCDIv2 response details do not fit into an event. */
1005 efx_mcdi_read_response_header(enp, emrp);
1006 } else {
1007 if (errcode != 0) {
1008 if (!emrp->emr_quiet) {
1009 EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
1010 int, errcode);
1011 }
1012 emrp->emr_out_length_used = 0;
1013 emrp->emr_rc = efx_mcdi_request_errcode(errcode);
1014 } else {
1015 emrp->emr_out_length_used = outlen;
1016 emrp->emr_rc = 0;
1017 }
1018 }
1019 if (emrp->emr_rc == 0)
1020 efx_mcdi_finish_response(enp, emrp);
1021
1022 emtp->emt_ev_cpl(emtp->emt_context);
1023 }
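
/*
 * Note: efx_mcdi_ev_cpl() is typically invoked from the event decode path
 * when an MCDI completion event is seen for a request started with
 * ev_cpl == B_TRUE. It recovers the result (from the response buffer header
 * when the interface supports MCDIv2, or from the event fields otherwise)
 * and then calls the transport's emt_ev_cpl callback to wake the waiting
 * client.
 */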
1024
1025 #if EFSYS_OPT_MCDI_PROXY_AUTH
1026
1027 __checkReturn efx_rc_t
1028 efx_mcdi_get_proxy_handle(
1029 __in efx_nic_t *enp,
1030 __in efx_mcdi_req_t *emrp,
1031 __out uint32_t *handlep)
1032 {
1033 efx_rc_t rc;
1034
1035 _NOTE(ARGUNUSED(enp))
1036
1037 /*
1038 * Return proxy handle from MCDI request that returned with error
1039 * MC_MCD_ERR_PROXY_PENDING. This handle is used to wait for a matching
1040 * PROXY_RESPONSE event.
1041 */
1042 if ((emrp == NULL) || (handlep == NULL)) {
1043 rc = EINVAL;
1044 goto fail1;
1045 }
1046 if ((emrp->emr_rc != 0) &&
1047 (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) {
1048 *handlep = emrp->emr_proxy_handle;
1049 rc = 0;
1050 } else {
1051 *handlep = 0;
1052 rc = ENOENT;
1053 }
1054 return (rc);
1055
1056 fail1:
1057 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1058 return (rc);
1059 }
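
/*
 * Illustrative usage (hypothetical caller code): when a request fails with
 * MC_CMD_ERR_PROXY_PENDING, the caller retrieves the proxy handle and waits
 * for the matching PROXY_RESPONSE event before retrying or failing the
 * request. my_wait_for_proxy_response() is an assumption.
 *
 *	uint32_t handle;
 *
 *	efx_mcdi_execute(enp, &req);
 *	if (req.emr_rc != 0 &&
 *	    efx_mcdi_get_proxy_handle(enp, &req, &handle) == 0)
 *		my_wait_for_proxy_response(my_softc, handle);
 */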
1060
1061 void
1062 efx_mcdi_ev_proxy_response(
1063 __in efx_nic_t *enp,
1064 __in unsigned int handle,
1065 __in unsigned int status)
1066 {
1067 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
1068 efx_rc_t rc;
1069
1070 /*
1071 * Handle results of an authorization request for a privileged MCDI
1072 * command. If authorization was granted then we must re-issue the
1073 * original MCDI request. If authorization failed or timed out,
1074 * then the original MCDI request should be completed with the
1075 * result code from this event.
1076 */
1077 rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status);
1078
1079 emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc);
1080 }
1081 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
1082
1083 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
1084 void
1085 efx_mcdi_ev_proxy_request(
1086 __in efx_nic_t *enp,
1087 __in unsigned int index)
1088 {
1089 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
1090
1091 if (emtp->emt_ev_proxy_request != NULL)
1092 emtp->emt_ev_proxy_request(emtp->emt_context, index);
1093 }
1094 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
1095 void
1096 efx_mcdi_ev_death(
1097 __in efx_nic_t *enp,
1098 __in int rc)
1099 {
1100 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
1101 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
1102 efx_mcdi_req_t *emrp = NULL;
1103 boolean_t ev_cpl;
1104 efsys_lock_state_t state;
1105
1106 /*
1107 * The MCDI request (if there is one) has been terminated, either
1108 * by a BADASSERT or REBOOT event.
1109 *
1110 * If there is an outstanding event-completed MCDI operation, then we
1111 * will never receive the completion event (because both MCDI
1112 * completions and BADASSERT events are sent to the same evq). So
1113 * complete this MCDI op.
1114 *
1115 * This function might run in parallel with efx_mcdi_request_poll()
1116 * for poll-completed MCDI requests, and also with
1117 * efx_mcdi_request_start() for post-watchdog completions.
1118 */
1119 EFSYS_LOCK(enp->en_eslp, state);
1120 emrp = emip->emi_pending_req;
1121 ev_cpl = emip->emi_ev_cpl;
1122 if (emrp != NULL && emip->emi_ev_cpl) {
1123 emip->emi_pending_req = NULL;
1124
1125 emrp->emr_out_length_used = 0;
1126 emrp->emr_rc = rc;
1127 ++emip->emi_aborted;
1128 }
1129
1130 /*
1131 * Since we're running in parallel with a request, consume the
1132 * status word before dropping the lock.
1133 */
1134 if (rc == EIO || rc == EINTR) {
1135 EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
1136 (void) efx_mcdi_poll_reboot(enp);
1137 emip->emi_new_epoch = B_TRUE;
1138 }
1139
1140 EFSYS_UNLOCK(enp->en_eslp, state);
1141
1142 efx_mcdi_raise_exception(enp, emrp, rc);
1143
1144 if (emrp != NULL && ev_cpl)
1145 emtp->emt_ev_cpl(emtp->emt_context);
1146 }
1147
1148 __checkReturn efx_rc_t
1149 efx_mcdi_get_version(
1150 __in efx_nic_t *enp,
1151 __in uint32_t flags,
1152 __out efx_mcdi_version_t *verp)
1153 {
1154 efx_nic_board_info_t *board_infop = &verp->emv_board_info;
1155 EFX_MCDI_DECLARE_BUF(payload,
1156 MC_CMD_GET_VERSION_EXT_IN_LEN,
1157 MC_CMD_GET_VERSION_V2_OUT_LEN);
1158 efx_word_t *ver_words;
1159 uint16_t version[4];
1160 efx_mcdi_req_t req;
1161 uint32_t firmware;
1162 efx_rc_t rc;
1163
1164 EFX_STATIC_ASSERT(sizeof (verp->emv_version) ==
1165 MC_CMD_GET_VERSION_OUT_VERSION_LEN);
1166 EFX_STATIC_ASSERT(sizeof (verp->emv_firmware) ==
1167 MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN);
1168
1169 EFX_STATIC_ASSERT(EFX_MCDI_VERSION_BOARD_INFO ==
1170 (1U << MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN));
1171
1172 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_serial) ==
1173 MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN);
1174 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_name) ==
1175 MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN);
1176 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_revision) ==
1177 MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN);
1178
1179 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
1180
1181 req.emr_cmd = MC_CMD_GET_VERSION;
1182 req.emr_in_buf = payload;
1183 req.emr_out_buf = payload;
1184
1185 if ((flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) {
1186 /* Request basic + extended version information. */
1187 req.emr_in_length = MC_CMD_GET_VERSION_EXT_IN_LEN;
1188 req.emr_out_length = MC_CMD_GET_VERSION_V2_OUT_LEN;
1189 } else {
1190 /* Request only basic version information. */
1191 req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
1192 req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;
1193 }
1194
1195 efx_mcdi_execute(enp, &req);
1196
1197 if (req.emr_rc != 0) {
1198 rc = req.emr_rc;
1199 goto fail1;
1200 }
1201
1202 /* bootrom support */
1203 if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
1204 version[0] = version[1] = version[2] = version[3] = 0;
1205 firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
1206 goto out;
1207 }
1208
1209 if (req.emr_out_length_used < req.emr_out_length) {
1210 rc = EMSGSIZE;
1211 goto fail2;
1212 }
1213
1214 ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
1215 version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
1216 version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
1217 version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
1218 version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
1219 firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
1220
1221 out:
1222 memset(verp, 0, sizeof (*verp));
1223
1224 verp->emv_version[0] = version[0];
1225 verp->emv_version[1] = version[1];
1226 verp->emv_version[2] = version[2];
1227 verp->emv_version[3] = version[3];
1228 verp->emv_firmware = firmware;
1229
1230 verp->emv_flags = MCDI_OUT_DWORD(req, GET_VERSION_V2_OUT_FLAGS);
1231 verp->emv_flags &= flags;
1232
1233 if ((verp->emv_flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) {
1234 memcpy(board_infop->enbi_serial,
1235 MCDI_OUT2(req, char, GET_VERSION_V2_OUT_BOARD_SERIAL),
1236 sizeof (board_infop->enbi_serial));
1237 memcpy(board_infop->enbi_name,
1238 MCDI_OUT2(req, char, GET_VERSION_V2_OUT_BOARD_NAME),
1239 sizeof (board_infop->enbi_name));
1240 board_infop->enbi_revision =
1241 MCDI_OUT_DWORD(req, GET_VERSION_V2_OUT_BOARD_REVISION);
1242 }
1243
1244 return (0);
1245
1246 fail2:
1247 EFSYS_PROBE(fail2);
1248 fail1:
1249 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1250
1251 return (rc);
1252 }
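
/*
 * Illustrative usage (hypothetical caller code): request extended version
 * information and use the board info only if the firmware reported it in
 * emv_flags. my_log() is an assumption.
 *
 *	efx_mcdi_version_t ver;
 *
 *	if (efx_mcdi_get_version(enp, EFX_MCDI_VERSION_BOARD_INFO, &ver) == 0 &&
 *	    (ver.emv_flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) {
 *		my_log("board %s rev %u", ver.emv_board_info.enbi_name,
 *		    ver.emv_board_info.enbi_revision);
 *	}
 */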
1253
1254 static __checkReturn efx_rc_t
1255 efx_mcdi_get_boot_status(
1256 __in efx_nic_t *enp,
1257 __out efx_mcdi_boot_t *statusp)
1258 {
1259 EFX_MCDI_DECLARE_BUF(payload,
1260 MC_CMD_GET_BOOT_STATUS_IN_LEN,
1261 MC_CMD_GET_BOOT_STATUS_OUT_LEN);
1262 efx_mcdi_req_t req;
1263 efx_rc_t rc;
1264
1265 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
1266
1267 req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
1268 req.emr_in_buf = payload;
1269 req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN;
1270 req.emr_out_buf = payload;
1271 req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;
1272
1273 efx_mcdi_execute_quiet(enp, &req);
1274
1275 /*
1276 * NOTE: Unprivileged functions cannot access boot status,
1277 * so the MCDI request will return EACCES. This is
1278 * also checked in efx_mcdi_version.
1279 */
1280
1281 if (req.emr_rc != 0) {
1282 rc = req.emr_rc;
1283 goto fail1;
1284 }
1285
1286 if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
1287 rc = EMSGSIZE;
1288 goto fail2;
1289 }
1290
1291 if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
1292 GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
1293 *statusp = EFX_MCDI_BOOT_PRIMARY;
1294 else
1295 *statusp = EFX_MCDI_BOOT_SECONDARY;
1296
1297 return (0);
1298
1299 fail2:
1300 EFSYS_PROBE(fail2);
1301 fail1:
1302 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1303
1304 return (rc);
1305 }
1306
1307 __checkReturn efx_rc_t
1308 efx_mcdi_version(
1309 __in efx_nic_t *enp,
1310 __out_ecount_opt(4) uint16_t versionp[4],
1311 __out_opt uint32_t *buildp,
1312 __out_opt efx_mcdi_boot_t *statusp)
1313 {
1314 efx_mcdi_version_t ver;
1315 efx_mcdi_boot_t status;
1316 efx_rc_t rc;
1317
1318 rc = efx_mcdi_get_version(enp, 0, &ver);
1319 if (rc != 0)
1320 goto fail1;
1321
1322 /* The bootrom doesn't understand BOOT_STATUS */
1323 if (MC_FW_VERSION_IS_BOOTLOADER(ver.emv_firmware)) {
1324 status = EFX_MCDI_BOOT_ROM;
1325 goto out;
1326 }
1327
1328 rc = efx_mcdi_get_boot_status(enp, &status);
1329 if (rc == EACCES) {
1330 /* Unprivileged functions cannot access BOOT_STATUS */
1331 status = EFX_MCDI_BOOT_PRIMARY;
1332 memset(ver.emv_version, 0, sizeof (ver.emv_version));
1333 ver.emv_firmware = 0;
1334 } else if (rc != 0) {
1335 goto fail2;
1336 }
1337
1338 out:
1339 if (versionp != NULL)
1340 memcpy(versionp, ver.emv_version, sizeof (ver.emv_version));
1341 if (buildp != NULL)
1342 *buildp = ver.emv_firmware;
1343 if (statusp != NULL)
1344 *statusp = status;
1345
1346 return (0);
1347
1348 fail2:
1349 EFSYS_PROBE(fail2);
1350 fail1:
1351 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1352
1353 return (rc);
1354 }
1355
1356 __checkReturn efx_rc_t
1357 efx_mcdi_get_capabilities(
1358 __in efx_nic_t *enp,
1359 __out_opt uint32_t *flagsp,
1360 __out_opt uint16_t *rx_dpcpu_fw_idp,
1361 __out_opt uint16_t *tx_dpcpu_fw_idp,
1362 __out_opt uint32_t *flags2p,
1363 __out_opt uint32_t *tso2ncp)
1364 {
1365 efx_mcdi_req_t req;
1366 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
1367 MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
1368 boolean_t v2_capable;
1369 efx_rc_t rc;
1370
1371 req.emr_cmd = MC_CMD_GET_CAPABILITIES;
1372 req.emr_in_buf = payload;
1373 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
1374 req.emr_out_buf = payload;
1375 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
1376
1377 efx_mcdi_execute_quiet(enp, &req);
1378
1379 if (req.emr_rc != 0) {
1380 rc = req.emr_rc;
1381 goto fail1;
1382 }
1383
1384 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
1385 rc = EMSGSIZE;
1386 goto fail2;
1387 }
1388
1389 if (flagsp != NULL)
1390 *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
1391
1392 if (rx_dpcpu_fw_idp != NULL)
1393 *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
1394 GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
1395
1396 if (tx_dpcpu_fw_idp != NULL)
1397 *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
1398 GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
1399
1400 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
1401 v2_capable = B_FALSE;
1402 else
1403 v2_capable = B_TRUE;
1404
1405 if (flags2p != NULL) {
1406 *flags2p = (v2_capable) ?
1407 MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) :
1408 0;
1409 }
1410
1411 if (tso2ncp != NULL) {
1412 *tso2ncp = (v2_capable) ?
1413 MCDI_OUT_WORD(req,
1414 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) :
1415 0;
1416 }
1417
1418 return (0);
1419
1420 fail2:
1421 EFSYS_PROBE(fail2);
1422 fail1:
1423 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1424
1425 return (rc);
1426 }
1427
1428 static __checkReturn efx_rc_t
1429 efx_mcdi_do_reboot(
1430 __in efx_nic_t *enp,
1431 __in boolean_t after_assertion)
1432 {
1433 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_REBOOT_IN_LEN,
1434 MC_CMD_REBOOT_OUT_LEN);
1435 efx_mcdi_req_t req;
1436 efx_rc_t rc;
1437
1438 /*
1439 * We could require the caller to have caused en_mod_flags=0 to
1440 * call this function. This doesn't help the other port though,
1441 * who's about to get the MC ripped out from underneath them.
1442 * Since they have to cope with the subsequent fallout of MCDI
1443 * failures, we should as well.
1444 */
1445 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1446
1447 req.emr_cmd = MC_CMD_REBOOT;
1448 req.emr_in_buf = payload;
1449 req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
1450 req.emr_out_buf = payload;
1451 req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;
1452
1453 MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
1454 (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));
1455
1456 efx_mcdi_execute_quiet(enp, &req);
1457
1458 if (req.emr_rc == EACCES) {
1459 /* Unprivileged functions cannot reboot the MC. */
1460 goto out;
1461 }
1462
1463 /* A successful reboot request returns EIO. */
1464 if (req.emr_rc != 0 && req.emr_rc != EIO) {
1465 rc = req.emr_rc;
1466 goto fail1;
1467 }
1468
1469 out:
1470 return (0);
1471
1472 fail1:
1473 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1474
1475 return (rc);
1476 }
1477
1478 __checkReturn efx_rc_t
1479 efx_mcdi_reboot(
1480 __in efx_nic_t *enp)
1481 {
1482 return (efx_mcdi_do_reboot(enp, B_FALSE));
1483 }
1484
1485 __checkReturn efx_rc_t
1486 efx_mcdi_exit_assertion_handler(
1487 __in efx_nic_t *enp)
1488 {
1489 return (efx_mcdi_do_reboot(enp, B_TRUE));
1490 }
1491
1492 __checkReturn efx_rc_t
1493 efx_mcdi_read_assertion(
1494 __in efx_nic_t *enp)
1495 {
1496 efx_mcdi_req_t req;
1497 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_ASSERTS_IN_LEN,
1498 MC_CMD_GET_ASSERTS_OUT_LEN);
1499 const char *reason;
1500 unsigned int flags;
1501 unsigned int index;
1502 unsigned int ofst;
1503 int retry;
1504 efx_rc_t rc;
1505
1506 /*
1507 * Before we attempt to chat to the MC, we should verify that the MC
1508 * isn't in its assertion handler, either due to a previous reboot,
1509 * or because we're reinitializing due to an eec_exception().
1510 *
1511 * Use GET_ASSERTS to read any assertion state that may be present.
1512 * Retry this command twice. Once because a boot-time assertion failure
1513 * might cause the 1st MCDI request to fail. And once again because
1514 * we might race with efx_mcdi_exit_assertion_handler() running on
1515 * partner port(s) on the same NIC.
1516 */
1517 retry = 2;
1518 do {
1519 (void) memset(payload, 0, sizeof (payload));
1520 req.emr_cmd = MC_CMD_GET_ASSERTS;
1521 req.emr_in_buf = payload;
1522 req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
1523 req.emr_out_buf = payload;
1524 req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;
1525
1526 MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
1527 efx_mcdi_execute_quiet(enp, &req);
1528
1529 } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);
1530
1531 if (req.emr_rc != 0) {
1532 if (req.emr_rc == EACCES) {
1533 /* Unprivileged functions cannot clear assertions. */
1534 goto out;
1535 }
1536 rc = req.emr_rc;
1537 goto fail1;
1538 }
1539
1540 if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
1541 rc = EMSGSIZE;
1542 goto fail2;
1543 }
1544
1545 /* Print out any assertion state recorded */
1546 flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
1547 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
1548 return (0);
1549
1550 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
1551 ? "system-level assertion"
1552 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
1553 ? "thread-level assertion"
1554 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
1555 ? "watchdog reset"
1556 : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
1557 ? "illegal address trap"
1558 : "unknown assertion";
1559 EFSYS_PROBE3(mcpu_assertion,
1560 const char *, reason, unsigned int,
1561 MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
1562 unsigned int,
1563 MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));
1564
1565 /* Print out the registers (r1 ... r31) */
1566 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
1567 for (index = 1;
1568 index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
1569 index++) {
1570 EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
1571 EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
1572 EFX_DWORD_0));
1573 ofst += sizeof (efx_dword_t);
1574 }
1575 EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);
1576
1577 out:
1578 return (0);
1579
1580 fail2:
1581 EFSYS_PROBE(fail2);
1582 fail1:
1583 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1584
1585 return (rc);
1586 }
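
/*
 * Illustrative recovery sequence (hypothetical caller code): during NIC
 * reset a driver typically reads and clears any assertion state and then
 * asks the MC to leave its assertion handler before continuing with MCDI.
 *
 *	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
 *		goto fail;
 *	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
 *		goto fail;
 */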
1587
1588
1589 /*
1590 * Internal routines for specific MCDI requests.
1591 */
1592
1593 __checkReturn efx_rc_t
1594 efx_mcdi_drv_attach(
1595 __in efx_nic_t *enp,
1596 __in boolean_t attach)
1597 {
1598 efx_mcdi_req_t req;
1599 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRV_ATTACH_IN_V2_LEN,
1600 MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
1601 efx_rc_t rc;
1602
1603 req.emr_cmd = MC_CMD_DRV_ATTACH;
1604 req.emr_in_buf = payload;
1605 if (enp->en_drv_version[0] == '\0') {
1606 req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
1607 } else {
1608 req.emr_in_length = MC_CMD_DRV_ATTACH_IN_V2_LEN;
1609 }
1610 req.emr_out_buf = payload;
1611 req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;
1612
1613 /*
1614 * Typically, client drivers use DONT_CARE for the datapath firmware
1615 * type to ensure that the driver can attach to an unprivileged
1616 * function. The datapath firmware type to use is controlled by the
1617 * 'sfboot' utility.
1618 * If a client driver wishes to attach with a specific datapath firmware
1619 * type, it can be passed as the second argument of the efx_nic_probe API.
1620 * One such example is the ESXi native driver, which attempts to attach with
1621 * the FULL_FEATURED datapath firmware type first and falls back to the
1622 * DONT_CARE datapath firmware type if MC_CMD_DRV_ATTACH fails.
1623 *
1624 * Always set WANT_V2_LINKCHANGES to 1. Old firmware that only supports
1625 * v1 will ignore it, and for newer firmware it ensures that it always
1626 * sends v2 if possible. While EF100 always uses v2, there are some
1627 * older EF10 firmwares that only send v2 if it is requested.
1628 */
1629 MCDI_IN_POPULATE_DWORD_3(req, DRV_ATTACH_IN_NEW_STATE,
1630 DRV_ATTACH_IN_ATTACH, attach ? 1 : 0,
1631 DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE,
1632 DRV_ATTACH_IN_WANT_V2_LINKCHANGES, 1);
1633 MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
1634 MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv);
1635
1636 if (req.emr_in_length >= MC_CMD_DRV_ATTACH_IN_V2_LEN) {
1637 EFX_STATIC_ASSERT(sizeof (enp->en_drv_version) ==
1638 MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN);
1639 memcpy(MCDI_IN2(req, char, DRV_ATTACH_IN_V2_DRIVER_VERSION),
1640 enp->en_drv_version,
1641 MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN);
1642 }
1643
1644 efx_mcdi_execute(enp, &req);
1645
1646 if (req.emr_rc != 0) {
1647 rc = req.emr_rc;
1648 goto fail1;
1649 }
1650
1651 if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
1652 rc = EMSGSIZE;
1653 goto fail2;
1654 }
1655
1656 return (0);
1657
1658 fail2:
1659 EFSYS_PROBE(fail2);
1660 fail1:
1661 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1662
1663 return (rc);
1664 }
1665
1666 __checkReturn efx_rc_t
1667 efx_mcdi_get_board_cfg(
1668 __in efx_nic_t *enp,
1669 __out_opt uint32_t *board_typep,
1670 __out_opt efx_dword_t *capabilitiesp,
1671 __out_ecount_opt(6) uint8_t mac_addrp[6])
1672 {
1673 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
1674 efx_mcdi_req_t req;
1675 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN,
1676 MC_CMD_GET_BOARD_CFG_OUT_LENMIN);
1677 efx_rc_t rc;
1678
1679 req.emr_cmd = MC_CMD_GET_BOARD_CFG;
1680 req.emr_in_buf = payload;
1681 req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
1682 req.emr_out_buf = payload;
1683 req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN;
1684
1685 efx_mcdi_execute(enp, &req);
1686
1687 if (req.emr_rc != 0) {
1688 rc = req.emr_rc;
1689 goto fail1;
1690 }
1691
1692 if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
1693 rc = EMSGSIZE;
1694 goto fail2;
1695 }
1696
1697 if (mac_addrp != NULL) {
1698 uint8_t *addrp;
1699
1700 if (emip->emi_port == 1) {
1701 addrp = MCDI_OUT2(req, uint8_t,
1702 GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
1703 } else if (emip->emi_port == 2) {
1704 addrp = MCDI_OUT2(req, uint8_t,
1705 GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
1706 } else {
1707 rc = EINVAL;
1708 goto fail3;
1709 }
1710
1711 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
1712 }
1713
1714 if (capabilitiesp != NULL) {
1715 if (emip->emi_port == 1) {
1716 *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
1717 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
1718 } else if (emip->emi_port == 2) {
1719 *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
1720 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
1721 } else {
1722 rc = EINVAL;
1723 goto fail4;
1724 }
1725 }
1726
1727 if (board_typep != NULL) {
1728 *board_typep = MCDI_OUT_DWORD(req,
1729 GET_BOARD_CFG_OUT_BOARD_TYPE);
1730 }
1731
1732 return (0);
1733
1734 fail4:
1735 EFSYS_PROBE(fail4);
1736 fail3:
1737 EFSYS_PROBE(fail3);
1738 fail2:
1739 EFSYS_PROBE(fail2);
1740 fail1:
1741 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1742
1743 return (rc);
1744 }
1745
1746 __checkReturn efx_rc_t
1747 efx_mcdi_get_resource_limits(
1748 __in efx_nic_t *enp,
1749 __out_opt uint32_t *nevqp,
1750 __out_opt uint32_t *nrxqp,
1751 __out_opt uint32_t *ntxqp)
1752 {
1753 efx_mcdi_req_t req;
1754 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
1755 MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN);
1756 efx_rc_t rc;
1757
1758 req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
1759 req.emr_in_buf = payload;
1760 req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
1761 req.emr_out_buf = payload;
1762 req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;
1763
1764 efx_mcdi_execute(enp, &req);
1765
1766 if (req.emr_rc != 0) {
1767 rc = req.emr_rc;
1768 goto fail1;
1769 }
1770
1771 if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
1772 rc = EMSGSIZE;
1773 goto fail2;
1774 }
1775
1776 if (nevqp != NULL)
1777 *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ);
1778 if (nrxqp != NULL)
1779 *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ);
1780 if (ntxqp != NULL)
1781 *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ);
1782
1783 return (0);
1784
1785 fail2:
1786 EFSYS_PROBE(fail2);
1787 fail1:
1788 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1789
1790 return (rc);
1791 }
1792
1793 __checkReturn efx_rc_t
1794 efx_mcdi_get_phy_cfg(
1795 __in efx_nic_t *enp)
1796 {
1797 efx_port_t *epp = &(enp->en_port);
1798 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1799 efx_mcdi_req_t req;
1800 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_CFG_IN_LEN,
1801 MC_CMD_GET_PHY_CFG_OUT_LEN);
1802 #if EFSYS_OPT_NAMES
1803 const char *namep;
1804 size_t namelen;
1805 #endif
1806 uint32_t phy_media_type;
1807 efx_rc_t rc;
1808
1809 req.emr_cmd = MC_CMD_GET_PHY_CFG;
1810 req.emr_in_buf = payload;
1811 req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
1812 req.emr_out_buf = payload;
1813 req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN;
1814
1815 efx_mcdi_execute(enp, &req);
1816
1817 if (req.emr_rc != 0) {
1818 rc = req.emr_rc;
1819 goto fail1;
1820 }
1821
1822 if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
1823 rc = EMSGSIZE;
1824 goto fail2;
1825 }
1826
1827 encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
1828 #if EFSYS_OPT_NAMES
1829 namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME);
1830 namelen = MIN(sizeof (encp->enc_phy_name) - 1,
1831 strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
1832 (void) memset(encp->enc_phy_name, 0,
1833 sizeof (encp->enc_phy_name));
1834 memcpy(encp->enc_phy_name, namep, namelen);
1835 #endif /* EFSYS_OPT_NAMES */
1836 (void) memset(encp->enc_phy_revision, 0,
1837 sizeof (encp->enc_phy_revision));
1838 memcpy(encp->enc_phy_revision,
1839 MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
1840 MIN(sizeof (encp->enc_phy_revision) - 1,
1841 MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
1842 #if EFSYS_OPT_PHY_LED_CONTROL
1843 encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
1844 (1 << EFX_PHY_LED_OFF) |
1845 (1 << EFX_PHY_LED_ON));
1846 #endif /* EFSYS_OPT_PHY_LED_CONTROL */
1847
1848 /* Get the media type of the fixed port, if recognised. */
1849 EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
1850 EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
1851 EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
1852 EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
1853 EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
1854 EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
1855 EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
1856 phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
1857 epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type;
1858 if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
1859 epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
1860
1861 epp->ep_phy_cap_mask =
1862 MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
1863 #if EFSYS_OPT_PHY_FLAGS
1864 encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
1865 #endif /* EFSYS_OPT_PHY_FLAGS */
1866
1867 encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);
1868
1869 /* Populate internal state */
1870 encp->enc_mcdi_mdio_channel =
1871 (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);
1872
1873 #if EFSYS_OPT_PHY_STATS
1874 encp->enc_mcdi_phy_stat_mask =
1875 MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);
1876 #endif /* EFSYS_OPT_PHY_STATS */
1877
1878 #if EFSYS_OPT_BIST
1879 encp->enc_bist_mask = 0;
1880 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
1881 GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
1882 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT);
1883 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
1884 GET_PHY_CFG_OUT_BIST_CABLE_LONG))
1885 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG);
1886 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
1887 GET_PHY_CFG_OUT_BIST))
1888 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL);
1889 #endif /* EFSYS_OPT_BIST */
1890
1891 return (0);
1892
1893 fail2:
1894 EFSYS_PROBE(fail2);
1895 fail1:
1896 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1897
1898 return (rc);
1899 }
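
/*
 * Example usage (a minimal sketch, not a complete driver path): the BIST
 * mask derived above can gate which tests a caller asks for.  This assumes
 * EFSYS_OPT_BIST is enabled so that efx_mcdi_bist_start() below is built.
 *
 *	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
 *	efx_rc_t rc;
 *
 *	if (encp->enc_bist_mask & (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT))
 *		rc = efx_mcdi_bist_start(enp, EFX_BIST_TYPE_PHY_CABLE_SHORT);
 */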
1900
1901 __checkReturn efx_rc_t
1902 efx_mcdi_firmware_update_supported(
1903 __in efx_nic_t *enp,
1904 __out boolean_t *supportedp)
1905 {
1906 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
1907 efx_rc_t rc;
1908
1909 if (emcop != NULL) {
1910 if ((rc = emcop->emco_feature_supported(enp,
1911 EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0)
1912 goto fail1;
1913 } else {
1914 /* Earlier devices always supported updates */
1915 *supportedp = B_TRUE;
1916 }
1917
1918 return (0);
1919
1920 fail1:
1921 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1922
1923 return (rc);
1924 }
1925
1926 __checkReturn efx_rc_t
1927 efx_mcdi_macaddr_change_supported(
1928 __in efx_nic_t *enp,
1929 __out boolean_t *supportedp)
1930 {
1931 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
1932 efx_rc_t rc;
1933
1934 if (emcop != NULL) {
1935 if ((rc = emcop->emco_feature_supported(enp,
1936 EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0)
1937 goto fail1;
1938 } else {
1939 /* Earlier devices always supported MAC changes */
1940 *supportedp = B_TRUE;
1941 }
1942
1943 return (0);
1944
1945 fail1:
1946 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1947
1948 return (rc);
1949 }
1950
1951 __checkReturn efx_rc_t
1952 efx_mcdi_link_control_supported(
1953 __in efx_nic_t *enp,
1954 __out boolean_t *supportedp)
1955 {
1956 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
1957 efx_rc_t rc;
1958
1959 if (emcop != NULL) {
1960 if ((rc = emcop->emco_feature_supported(enp,
1961 EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0)
1962 goto fail1;
1963 } else {
1964 /* Earlier devices always supported link control */
1965 *supportedp = B_TRUE;
1966 }
1967
1968 return (0);
1969
1970 fail1:
1971 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1972
1973 return (rc);
1974 }
1975
1976 __checkReturn efx_rc_t
1977 efx_mcdi_mac_spoofing_supported(
1978 __in efx_nic_t *enp,
1979 __out boolean_t *supportedp)
1980 {
1981 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
1982 efx_rc_t rc;
1983
1984 if (emcop != NULL) {
1985 if ((rc = emcop->emco_feature_supported(enp,
1986 EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0)
1987 goto fail1;
1988 } else {
1989 /* Earlier devices always supported MAC spoofing */
1990 *supportedp = B_TRUE;
1991 }
1992
1993 return (0);
1994
1995 fail1:
1996 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1997
1998 return (rc);
1999 }
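
/*
 * Example usage (a minimal sketch): the feature queries above are normally
 * used to gate the corresponding operation.  The MAC address update call
 * itself is a placeholder here, not something defined in this file.
 *
 *	boolean_t supported;
 *	efx_rc_t rc;
 *
 *	if ((rc = efx_mcdi_macaddr_change_supported(enp, &supported)) != 0)
 *		return (rc);
 *	if (supported == B_FALSE)
 *		return (ENOTSUP);
 *	... issue the MAC address change ...
 */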
2000
2001 #if EFSYS_OPT_BIST
2002
2003 #if EFX_OPTS_EF10()
2004 /*
2005 * Enter bist offline mode. This is a fw mode which puts the NIC into a state
2006 * where memory BIST tests can be run and not much else can interfere or happen.
2007 * A reboot is required to exit this mode.
2008 */
2009 __checkReturn efx_rc_t
2010 efx_mcdi_bist_enable_offline(
2011 __in efx_nic_t *enp)
2012 {
2013 efx_mcdi_req_t req;
2014 efx_rc_t rc;
2015
2016 EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0);
2017 EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0);
2018
2019 req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST;
2020 req.emr_in_buf = NULL;
2021 req.emr_in_length = 0;
2022 req.emr_out_buf = NULL;
2023 req.emr_out_length = 0;
2024
2025 efx_mcdi_execute(enp, &req);
2026
2027 if (req.emr_rc != 0) {
2028 rc = req.emr_rc;
2029 goto fail1;
2030 }
2031
2032 return (0);
2033
2034 fail1:
2035 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2036
2037 return (rc);
2038 }
2039 #endif /* EFX_OPTS_EF10() */
2040
2041 __checkReturn efx_rc_t
2042 efx_mcdi_bist_start(
2043 __in efx_nic_t *enp,
2044 __in efx_bist_type_t type)
2045 {
2046 efx_mcdi_req_t req;
2047 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_START_BIST_IN_LEN,
2048 MC_CMD_START_BIST_OUT_LEN);
2049 efx_rc_t rc;
2050
2051 req.emr_cmd = MC_CMD_START_BIST;
2052 req.emr_in_buf = payload;
2053 req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
2054 req.emr_out_buf = payload;
2055 req.emr_out_length = MC_CMD_START_BIST_OUT_LEN;
2056
2057 switch (type) {
2058 case EFX_BIST_TYPE_PHY_NORMAL:
2059 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
2060 break;
2061 case EFX_BIST_TYPE_PHY_CABLE_SHORT:
2062 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
2063 MC_CMD_PHY_BIST_CABLE_SHORT);
2064 break;
2065 case EFX_BIST_TYPE_PHY_CABLE_LONG:
2066 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
2067 MC_CMD_PHY_BIST_CABLE_LONG);
2068 break;
2069 case EFX_BIST_TYPE_MC_MEM:
2070 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
2071 MC_CMD_MC_MEM_BIST);
2072 break;
2073 case EFX_BIST_TYPE_SAT_MEM:
2074 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
2075 MC_CMD_PORT_MEM_BIST);
2076 break;
2077 case EFX_BIST_TYPE_REG:
2078 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
2079 MC_CMD_REG_BIST);
2080 break;
2081 default:
2082 EFSYS_ASSERT(0);
2083 }
2084
2085 efx_mcdi_execute(enp, &req);
2086
2087 if (req.emr_rc != 0) {
2088 rc = req.emr_rc;
2089 goto fail1;
2090 }
2091
2092 return (0);
2093
2094 fail1:
2095 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2096
2097 return (rc);
2098 }
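
/*
 * Example usage (a minimal sketch): an MC memory BIST on EF10 is a two-step
 * flow - enter the offline BIST firmware mode, then start the test.  The
 * completion polling and the reboot needed to leave offline mode are left
 * to the caller and are only indicated here.
 *
 *	efx_rc_t rc;
 *
 *	if ((rc = efx_mcdi_bist_enable_offline(enp)) != 0)
 *		return (rc);
 *	if ((rc = efx_mcdi_bist_start(enp, EFX_BIST_TYPE_MC_MEM)) != 0)
 *		return (rc);
 *	... poll for completion, then reboot the MC to exit offline mode ...
 */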
2099
2100 #endif /* EFSYS_OPT_BIST */
2101
2102
2103 /* Enable logging of some events (e.g. link state changes) */
2104 __checkReturn efx_rc_t
2105 efx_mcdi_log_ctrl(
2106 __in efx_nic_t *enp)
2107 {
2108 efx_mcdi_req_t req;
2109 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LOG_CTRL_IN_LEN,
2110 MC_CMD_LOG_CTRL_OUT_LEN);
2111 efx_rc_t rc;
2112
2113 req.emr_cmd = MC_CMD_LOG_CTRL;
2114 req.emr_in_buf = payload;
2115 req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
2116 req.emr_out_buf = payload;
2117 req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN;
2118
2119 MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
2120 MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
2121 MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);
2122
2123 efx_mcdi_execute(enp, &req);
2124
2125 if (req.emr_rc != 0) {
2126 rc = req.emr_rc;
2127 goto fail1;
2128 }
2129
2130 return (0);
2131
2132 fail1:
2133 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2134
2135 return (rc);
2136 }
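
/*
 * Example usage (a minimal sketch): since the command above directs MC log
 * events to event queue 0, callers would normally only issue it once event
 * queue 0 exists.
 *
 *	... create event queue 0 ...
 *	if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
 *		goto fail;
 */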
2137
2138
2139 #if EFSYS_OPT_MAC_STATS
2140
2141 __checkReturn efx_rc_t
2142 efx_mcdi_mac_stats(
2143 __in efx_nic_t *enp,
2144 __in uint32_t vport_id,
2145 __in_opt efsys_mem_t *esmp,
2146 __in efx_stats_action_t action,
2147 __in uint16_t period_ms)
2148 {
2149 efx_mcdi_req_t req;
2150 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_MAC_STATS_IN_LEN,
2151 MC_CMD_MAC_STATS_V2_OUT_DMA_LEN);
2152 int clear = (action == EFX_STATS_CLEAR);
2153 int upload = (action == EFX_STATS_UPLOAD);
2154 int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
2155 int events = (action == EFX_STATS_ENABLE_EVENTS);
2156 int disable = (action == EFX_STATS_DISABLE);
2157 efx_rc_t rc;
2158
2159 req.emr_cmd = MC_CMD_MAC_STATS;
2160 req.emr_in_buf = payload;
2161 req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
2162 req.emr_out_buf = payload;
2163 req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN;
2164
2165 MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
2166 MAC_STATS_IN_DMA, upload,
2167 MAC_STATS_IN_CLEAR, clear,
2168 MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
2169 MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
2170 MAC_STATS_IN_PERIODIC_NOEVENT, !events,
2171 MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
2172
2173 if (enable || events || upload) {
2174 const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
2175 uint32_t bytes;
2176
2177 /* Periodic stats or stats upload require a DMA buffer */
2178 if (esmp == NULL) {
2179 rc = EINVAL;
2180 goto fail1;
2181 }
2182
2183 if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
2184 /* MAC stats count too small for legacy MAC stats */
2185 rc = ENOSPC;
2186 goto fail2;
2187 }
2188
2189 bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t);
2190
2191 if (EFSYS_MEM_SIZE(esmp) < bytes) {
2192 /* DMA buffer too small */
2193 rc = ENOSPC;
2194 goto fail3;
2195 }
2196
2197 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
2198 EFSYS_MEM_ADDR(esmp) & 0xffffffff);
2199 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
2200 EFSYS_MEM_ADDR(esmp) >> 32);
2201 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
2202 }
2203
2204 /*
2205 * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats,
2206 * as this may fail (and leave periodic DMA enabled) if the
2207 * vadapter has already been deleted.
2208 */
2209 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID,
2210 (disable ? EVB_PORT_ID_NULL : vport_id));
2211
2212 efx_mcdi_execute(enp, &req);
2213
2214 if (req.emr_rc != 0) {
2215 /* EF10: Expect ENOENT if no DMA queues are initialised */
2216 if ((req.emr_rc != ENOENT) ||
2217 (enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
2218 rc = req.emr_rc;
2219 goto fail4;
2220 }
2221 }
2222
2223 return (0);
2224
2225 fail4:
2226 EFSYS_PROBE(fail4);
2227 fail3:
2228 EFSYS_PROBE(fail3);
2229 fail2:
2230 EFSYS_PROBE(fail2);
2231 fail1:
2232 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2233
2234 return (rc);
2235 }
2236
2237 __checkReturn efx_rc_t
2238 efx_mcdi_mac_stats_clear(
2239 __in efx_nic_t *enp)
2240 {
2241 efx_rc_t rc;
2242
2243 if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL,
2244 EFX_STATS_CLEAR, 0)) != 0)
2245 goto fail1;
2246
2247 return (0);
2248
2249 fail1:
2250 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2251
2252 return (rc);
2253 }
2254
2255 __checkReturn efx_rc_t
2256 efx_mcdi_mac_stats_upload(
2257 __in efx_nic_t *enp,
2258 __in efsys_mem_t *esmp)
2259 {
2260 efx_rc_t rc;
2261
2262 /*
2263 * The MC DMAs aggregate statistics for our convenience, so we can
2264 * avoid having to pull the statistics buffer into the cache to
2265 * maintain cumulative statistics.
2266 */
2267 if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
2268 EFX_STATS_UPLOAD, 0)) != 0)
2269 goto fail1;
2270
2271 return (0);
2272
2273 fail1:
2274 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2275
2276 return (rc);
2277 }
2278
2279 __checkReturn efx_rc_t
2280 efx_mcdi_mac_stats_periodic(
2281 __in efx_nic_t *enp,
2282 __in efsys_mem_t *esmp,
2283 __in uint16_t period_ms,
2284 __in boolean_t events)
2285 {
2286 efx_rc_t rc;
2287
2288 /*
2289 * The MC DMAs aggregate statistics for our convenience, so we can
2290 * avoid having to pull the statistics buffer into the cache to
2291 * maintain cumulative statistics.
2292 * Huntington uses a fixed 1sec period.
2293 * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
2294 */
2295 if (period_ms == 0)
2296 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL,
2297 EFX_STATS_DISABLE, 0);
2298 else if (events)
2299 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
2300 EFX_STATS_ENABLE_EVENTS, period_ms);
2301 else
2302 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
2303 EFX_STATS_ENABLE_NOEVENTS, period_ms);
2304
2305 if (rc != 0)
2306 goto fail1;
2307
2308 return (0);
2309
2310 fail1:
2311 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2312
2313 return (rc);
2314 }
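
/*
 * Example usage (a minimal sketch): the DMA buffer handed to the upload and
 * periodic variants must hold enc_mac_stats_nstats 64-bit counters, as
 * checked in efx_mcdi_mac_stats() above.  The buffer allocation itself is
 * platform specific and assumed to have been done already.
 *
 *	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
 *	size_t stats_size = encp->enc_mac_stats_nstats * sizeof (efx_qword_t);
 *	efx_rc_t rc;
 *
 *	EFSYS_ASSERT3U(EFSYS_MEM_SIZE(esmp), >=, stats_size);
 *
 *	rc = efx_mcdi_mac_stats_periodic(enp, esmp, 1000, B_FALSE);
 */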
2315
2316 #endif /* EFSYS_OPT_MAC_STATS */
2317
2318 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
2319
2320 __checkReturn efx_rc_t
2321 efx_mcdi_intf_from_pcie(
2322 __in uint32_t pcie_intf,
2323 __out efx_pcie_interface_t *efx_intf)
2324 {
2325 efx_rc_t rc;
2326
2327 switch (pcie_intf) {
2328 case PCIE_INTERFACE_CALLER:
2329 *efx_intf = EFX_PCIE_INTERFACE_CALLER;
2330 break;
2331 case PCIE_INTERFACE_HOST_PRIMARY:
2332 *efx_intf = EFX_PCIE_INTERFACE_HOST_PRIMARY;
2333 break;
2334 case PCIE_INTERFACE_NIC_EMBEDDED:
2335 *efx_intf = EFX_PCIE_INTERFACE_NIC_EMBEDDED;
2336 break;
2337 default:
2338 rc = EINVAL;
2339 goto fail1;
2340 }
2341
2342 return (0);
2343
2344 fail1:
2345 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2346
2347 return (rc);
2348 }
2349
2350 __checkReturn efx_rc_t
2351 efx_mcdi_intf_to_pcie(
2352 __in efx_pcie_interface_t efx_intf,
2353 __out uint32_t *pcie_intf)
2354 {
2355 efx_rc_t rc;
2356
2357 switch (efx_intf) {
2358 case EFX_PCIE_INTERFACE_CALLER:
2359 *pcie_intf = PCIE_INTERFACE_CALLER;
2360 break;
2361 case EFX_PCIE_INTERFACE_HOST_PRIMARY:
2362 *pcie_intf = PCIE_INTERFACE_HOST_PRIMARY;
2363 break;
2364 case EFX_PCIE_INTERFACE_NIC_EMBEDDED:
2365 *pcie_intf = PCIE_INTERFACE_NIC_EMBEDDED;
2366 break;
2367 default:
2368 rc = EINVAL;
2369 goto fail1;
2370 }
2371
2372 return (0);
2373
2374 fail1:
2375 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2376 return (rc);
2377 }
2378
2379 /*
2380 * This function returns the pf and vf number of a function. If it is a pf the
2381 * vf number is 0xffff. The vf number is the index of the vf on that
2382 * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0),
2383 * (pf=0,vf=1), (pf=0,vf=2) and the pf will return (pf=0, vf=0xffff).
2384 */
2385 __checkReturn efx_rc_t
2386 efx_mcdi_get_function_info(
2387 __in efx_nic_t *enp,
2388 __out uint32_t *pfp,
2389 __out_opt uint32_t *vfp,
2390 __out_opt efx_pcie_interface_t *intfp)
2391 {
2392 efx_pcie_interface_t intf;
2393 efx_mcdi_req_t req;
2394 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_FUNCTION_INFO_IN_LEN,
2395 MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN);
2396 uint32_t pcie_intf;
2397 efx_rc_t rc;
2398
2399 req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
2400 req.emr_in_buf = payload;
2401 req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
2402 req.emr_out_buf = payload;
2403 req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN;
2404
2405 efx_mcdi_execute(enp, &req);
2406
2407 if (req.emr_rc != 0) {
2408 rc = req.emr_rc;
2409 goto fail1;
2410 }
2411
2412 if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {
2413 rc = EMSGSIZE;
2414 goto fail2;
2415 }
2416
2417 *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);
2418 if (vfp != NULL)
2419 *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);
2420
2421 if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN) {
2422 intf = EFX_PCIE_INTERFACE_HOST_PRIMARY;
2423 } else {
2424 pcie_intf = MCDI_OUT_DWORD(req,
2425 GET_FUNCTION_INFO_OUT_V2_INTF);
2426
2427 rc = efx_mcdi_intf_from_pcie(pcie_intf, &intf);
2428 if (rc != 0)
2429 goto fail3;
2430 }
2431
2432 if (intfp != NULL)
2433 *intfp = intf;
2434
2435 return (0);
2436
2437 fail3:
2438 EFSYS_PROBE(fail3);
2439 fail2:
2440 EFSYS_PROBE(fail2);
2441 fail1:
2442 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2443
2444 return (rc);
2445 }
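
/*
 * Example usage (a minimal sketch): distinguish a PF from a VF using the
 * convention described above (vf == 0xffff on a PF).
 *
 *	uint32_t pf, vf;
 *	efx_pcie_interface_t intf;
 *	boolean_t is_vf;
 *	efx_rc_t rc;
 *
 *	if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf, &intf)) != 0)
 *		return (rc);
 *	is_vf = (vf != 0xffff) ? B_TRUE : B_FALSE;
 */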
2446
2447 __checkReturn efx_rc_t
2448 efx_mcdi_privilege_mask(
2449 __in efx_nic_t *enp,
2450 __in uint32_t pf,
2451 __in uint32_t vf,
2452 __out uint32_t *maskp)
2453 {
2454 efx_mcdi_req_t req;
2455 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MASK_IN_LEN,
2456 MC_CMD_PRIVILEGE_MASK_OUT_LEN);
2457 efx_rc_t rc;
2458
2459 req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
2460 req.emr_in_buf = payload;
2461 req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
2462 req.emr_out_buf = payload;
2463 req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;
2464
2465 MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,
2466 PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
2467 PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
2468
2469 efx_mcdi_execute(enp, &req);
2470
2471 if (req.emr_rc != 0) {
2472 rc = req.emr_rc;
2473 goto fail1;
2474 }
2475
2476 if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {
2477 rc = EMSGSIZE;
2478 goto fail2;
2479 }
2480
2481 *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);
2482
2483 return (0);
2484
2485 fail2:
2486 EFSYS_PROBE(fail2);
2487 fail1:
2488 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2489
2490 return (rc);
2491 }
2492
2493 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
2494
2495 __checkReturn efx_rc_t
2496 efx_mcdi_set_workaround(
2497 __in efx_nic_t *enp,
2498 __in uint32_t type,
2499 __in boolean_t enabled,
2500 __out_opt uint32_t *flagsp)
2501 {
2502 efx_mcdi_req_t req;
2503 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_WORKAROUND_IN_LEN,
2504 MC_CMD_WORKAROUND_EXT_OUT_LEN);
2505 efx_rc_t rc;
2506
2507 req.emr_cmd = MC_CMD_WORKAROUND;
2508 req.emr_in_buf = payload;
2509 req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
2510 req.emr_out_buf = payload;
2511 req.emr_out_length = MC_CMD_WORKAROUND_EXT_OUT_LEN;
2512
2513 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type);
2514 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 1 : 0);
2515
2516 efx_mcdi_execute_quiet(enp, &req);
2517
2518 if (req.emr_rc != 0) {
2519 rc = req.emr_rc;
2520 goto fail1;
2521 }
2522
2523 if (flagsp != NULL) {
2524 if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
2525 *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS);
2526 else
2527 *flagsp = 0;
2528 }
2529
2530 return (0);
2531
2532 fail1:
2533 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2534
2535 return (rc);
2536 }
2537
2538
2539 __checkReturn efx_rc_t
2540 efx_mcdi_get_workarounds(
2541 __in efx_nic_t *enp,
2542 __out_opt uint32_t *implementedp,
2543 __out_opt uint32_t *enabledp)
2544 {
2545 efx_mcdi_req_t req;
2546 EFX_MCDI_DECLARE_BUF(payload, 0, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
2547 efx_rc_t rc;
2548
2549 req.emr_cmd = MC_CMD_GET_WORKAROUNDS;
2550 req.emr_in_buf = NULL;
2551 req.emr_in_length = 0;
2552 req.emr_out_buf = payload;
2553 req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN;
2554
2555 efx_mcdi_execute(enp, &req);
2556
2557 if (req.emr_rc != 0) {
2558 rc = req.emr_rc;
2559 goto fail1;
2560 }
2561
2562 if (req.emr_out_length_used < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
2563 rc = EMSGSIZE;
2564 goto fail2;
2565 }
2566
2567 if (implementedp != NULL) {
2568 *implementedp =
2569 MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED);
2570 }
2571
2572 if (enabledp != NULL) {
2573 *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED);
2574 }
2575
2576 return (0);
2577
2578 fail2:
2579 EFSYS_PROBE(fail2);
2580 fail1:
2581 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2582
2583 return (rc);
2584 }
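
/*
 * Example usage (a minimal sketch): query which workarounds the firmware
 * implements before trying to enable one.  WORKAROUND_TYPE and
 * WORKAROUND_TYPE_BIT are placeholders for a real MC_CMD_WORKAROUND_* type
 * and its GET_WORKAROUNDS bit, not constants defined by the MCDI headers.
 *
 *	uint32_t implemented, enabled;
 *	efx_rc_t rc;
 *
 *	if ((rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled)) != 0)
 *		return (rc);
 *	if ((implemented & WORKAROUND_TYPE_BIT) &&
 *	    !(enabled & WORKAROUND_TYPE_BIT))
 *		rc = efx_mcdi_set_workaround(enp, WORKAROUND_TYPE,
 *		    B_TRUE, NULL);
 */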
2585
2586 /*
2587 * Size of media information page in accordance with SFF-8472 and SFF-8436.
2588 * It is used in the MCDI interface as well.
2589 */
2590 #define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80
2591
2592 /*
2593 * Transceiver identifiers from SFF-8024 Table 4-1.
2594 */
2595 #define EFX_SFF_TRANSCEIVER_ID_SFP 0x03 /* SFP/SFP+/SFP28 */
2596 #define EFX_SFF_TRANSCEIVER_ID_QSFP 0x0c /* QSFP */
2597 #define EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS 0x0d /* QSFP+ or later */
2598 #define EFX_SFF_TRANSCEIVER_ID_QSFP28 0x11 /* QSFP28 or later */
2599
2600 static __checkReturn efx_rc_t
2601 efx_mcdi_get_phy_media_info(
2602 __in efx_nic_t *enp,
2603 __in uint32_t mcdi_page,
2604 __in uint8_t offset,
2605 __in uint8_t len,
2606 __out_bcount(len) uint8_t *data)
2607 {
2608 efx_mcdi_req_t req;
2609 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
2610 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
2611 EFX_PHY_MEDIA_INFO_PAGE_SIZE));
2612 efx_rc_t rc;
2613
2614 EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2615
2616 req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
2617 req.emr_in_buf = payload;
2618 req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
2619 req.emr_out_buf = payload;
2620 req.emr_out_length =
2621 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2622
2623 MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page);
2624
2625 efx_mcdi_execute(enp, &req);
2626
2627 if (req.emr_rc != 0) {
2628 rc = req.emr_rc;
2629 goto fail1;
2630 }
2631
2632 if (req.emr_out_length_used !=
2633 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) {
2634 rc = EMSGSIZE;
2635 goto fail2;
2636 }
2637
2638 if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) !=
2639 EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
2640 rc = EIO;
2641 goto fail3;
2642 }
2643
2644 memcpy(data,
2645 MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
2646 len);
2647
2648 return (0);
2649
2650 fail3:
2651 EFSYS_PROBE(fail3);
2652 fail2:
2653 EFSYS_PROBE(fail2);
2654 fail1:
2655 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2656
2657 return (rc);
2658 }
2659
2660 __checkReturn efx_rc_t
2661 efx_mcdi_phy_module_get_info(
2662 __in efx_nic_t *enp,
2663 __in uint8_t dev_addr,
2664 __in size_t offset,
2665 __in size_t len,
2666 __out_bcount(len) uint8_t *data)
2667 {
2668 efx_port_t *epp = &(enp->en_port);
2669 efx_rc_t rc;
2670 uint32_t mcdi_lower_page;
2671 uint32_t mcdi_upper_page;
2672 uint8_t id;
2673
2674 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
2675
2676 /*
2677 * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages.
2678 * The offset plus length interface allows access to page 0 only,
2679 * i.e. non-zero upper pages are not accessible.
2680 * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6
2681 * QSFP+ Memory Map for details on how information is structured
2682 * and accessible.
2683 */
2684 switch (epp->ep_fixed_port_type) {
2685 case EFX_PHY_MEDIA_SFP_PLUS:
2686 case EFX_PHY_MEDIA_QSFP_PLUS:
2687 /* Port type supports modules */
2688 break;
2689 default:
2690 rc = ENOTSUP;
2691 goto fail1;
2692 }
2693
2694 /*
2695 * For all supported port types, MCDI page 0 offset 0 holds the
2696 * transceiver identifier. Probe to determine the data layout.
2697 * Definitions from SFF-8024 Table 4-1.
2698 */
2699 rc = efx_mcdi_get_phy_media_info(enp,
2700 0, 0, sizeof(id), &id);
2701 if (rc != 0)
2702 goto fail2;
2703
2704 switch (id) {
2705 case EFX_SFF_TRANSCEIVER_ID_SFP:
2706 /*
2707 * In accordance with SFF-8472 Diagnostic Monitoring
2708 * Interface for Optical Transceivers section 4 Memory
2709 * Organization two 2-wire addresses are defined.
2710 */
2711 switch (dev_addr) {
2712 /* Base information */
2713 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE:
2714 /*
2715 * MCDI page 0 should be used to access lower
2716 * page 0 (0x00 - 0x7f) at the device address 0xA0.
2717 */
2718 mcdi_lower_page = 0;
2719 /*
2720 * MCDI page 1 should be used to access upper
2721 * page 0 (0x80 - 0xff) at the device address 0xA0.
2722 */
2723 mcdi_upper_page = 1;
2724 break;
2725 /* Diagnostics */
2726 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM:
2727 /*
2728 * MCDI page 2 should be used to access lower
2729 * page 0 (0x00 - 0x7f) at the device address 0xA2.
2730 */
2731 mcdi_lower_page = 2;
2732 /*
2733 * MCDI page 3 should be used to access upper
2734 * page 0 (0x80 - 0xff) at the device address 0xA2.
2735 */
2736 mcdi_upper_page = 3;
2737 break;
2738 default:
2739 rc = ENOTSUP;
2740 goto fail3;
2741 }
2742 break;
2743 case EFX_SFF_TRANSCEIVER_ID_QSFP:
2744 case EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS:
2745 case EFX_SFF_TRANSCEIVER_ID_QSFP28:
2746 switch (dev_addr) {
2747 case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
2748 /*
2749 * MCDI page -1 should be used to access lower page 0
2750 * (0x00 - 0x7f).
2751 */
2752 mcdi_lower_page = (uint32_t)-1;
2753 /*
2754 * MCDI page 0 should be used to access upper page 0
2755 * (0x80 - 0xff).
2756 */
2757 mcdi_upper_page = 0;
2758 break;
2759 default:
2760 rc = ENOTSUP;
2761 goto fail3;
2762 }
2763 break;
2764 default:
2765 rc = ENOTSUP;
2766 goto fail3;
2767 }
2768
2769 EFX_STATIC_ASSERT(EFX_PHY_MEDIA_INFO_PAGE_SIZE <= 0xFF);
2770
2771 if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
2772 size_t read_len =
2773 MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
2774
2775 rc = efx_mcdi_get_phy_media_info(enp,
2776 mcdi_lower_page, (uint8_t)offset, (uint8_t)read_len, data);
2777 if (rc != 0)
2778 goto fail4;
2779
2780 data += read_len;
2781 len -= read_len;
2782
2783 offset = 0;
2784 } else {
2785 offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE;
2786 }
2787
2788 if (len > 0) {
2789 EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2790 EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2791
2792 rc = efx_mcdi_get_phy_media_info(enp,
2793 mcdi_upper_page, (uint8_t)offset, (uint8_t)len, data);
2794 if (rc != 0)
2795 goto fail5;
2796 }
2797
2798 return (0);
2799
2800 fail5:
2801 EFSYS_PROBE(fail5);
2802 fail4:
2803 EFSYS_PROBE(fail4);
2804 fail3:
2805 EFSYS_PROBE(fail3);
2806 fail2:
2807 EFSYS_PROBE(fail2);
2808 fail1:
2809 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2810
2811 return (rc);
2812 }
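
/*
 * Example usage (a minimal sketch): read the module vendor name of an SFP
 * class module.  Under SFF-8472 the vendor name occupies bytes 20..35 of
 * the lower page at device address 0xA0; that offset comes from the
 * standard, not from anything defined in this file.
 *
 *	uint8_t vendor_name[16];
 *	efx_rc_t rc;
 *
 *	rc = efx_mcdi_phy_module_get_info(enp,
 *	    EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE, 20,
 *	    sizeof (vendor_name), vendor_name);
 */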
2813
2814 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
2815
2816 #define INIT_EVQ_MAXNBUFS MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM
2817
2818 #if EFX_OPTS_EF10()
2819 # if (INIT_EVQ_MAXNBUFS < EF10_EVQ_MAXNBUFS)
2820 # error "INIT_EVQ_MAXNBUFS too small"
2821 # endif
2822 #endif /* EFX_OPTS_EF10 */
2823 #if EFSYS_OPT_RIVERHEAD
2824 # if (INIT_EVQ_MAXNBUFS < RHEAD_EVQ_MAXNBUFS)
2825 # error "INIT_EVQ_MAXNBUFS too small"
2826 # endif
2827 #endif /* EFSYS_OPT_RIVERHEAD */
2828
2829 __checkReturn efx_rc_t
2830 efx_mcdi_init_evq(
2831 __in efx_nic_t *enp,
2832 __in unsigned int instance,
2833 __in efsys_mem_t *esmp,
2834 __in size_t nevs,
2835 __in uint32_t irq,
2836 __in uint32_t target_evq,
2837 __in uint32_t us,
2838 __in uint32_t flags,
2839 __in boolean_t low_latency)
2840 {
2841 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
2842 efx_mcdi_req_t req;
2843 EFX_MCDI_DECLARE_BUF(payload,
2844 MC_CMD_INIT_EVQ_V2_IN_LEN(INIT_EVQ_MAXNBUFS),
2845 MC_CMD_INIT_EVQ_V2_OUT_LEN);
2846 boolean_t interrupting;
2847 int ev_extended_width;
2848 int ev_cut_through;
2849 int ev_merge;
2850 unsigned int evq_type;
2851 efx_qword_t *dma_addr;
2852 uint64_t addr;
2853 int npages;
2854 int i;
2855 efx_rc_t rc;
2856
2857 npages = efx_evq_nbufs(enp, nevs, flags);
2858 if (npages > INIT_EVQ_MAXNBUFS) {
2859 rc = EINVAL;
2860 goto fail1;
2861 }
2862
2863 req.emr_cmd = MC_CMD_INIT_EVQ;
2864 req.emr_in_buf = payload;
2865 req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
2866 req.emr_out_buf = payload;
2867 req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;
2868
2869 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
2870 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
2871
2872 interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
2873 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
2874
2875 if (interrupting)
2876 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);
2877 else
2878 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TARGET_EVQ, target_evq);
2879
2880 if (encp->enc_init_evq_v2_supported) {
2881 /*
2882 * On Medford the low latency license is required to enable RX
2883 * and event cut through and to disable RX batching. If event
2884 * queue type in flags is auto, we let the firmware decide the
2885 * settings to use. If the adapter has a low latency license,
2886 * it will choose the best settings for low latency, otherwise
2887 * it will choose the best settings for throughput.
2888 */
2889 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
2890 case EFX_EVQ_FLAGS_TYPE_AUTO:
2891 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
2892 break;
2893 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
2894 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
2895 break;
2896 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
2897 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
2898 break;
2899 default:
2900 rc = EINVAL;
2901 goto fail2;
2902 }
2903 /* EvQ type controls merging, no manual settings */
2904 ev_merge = 0;
2905 ev_cut_through = 0;
2906 } else {
2907 /* EvQ types other than manual are not supported */
2908 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL;
2909 /*
2910 * On Huntington RX and TX event batching can only be requested
2911 * together (even if the datapath firmware doesn't actually
2912 * support RX batching). If event cut through is enabled no RX
2913 * batching will occur.
2914 *
2915 * So always enable RX and TX event batching, and enable event
2916 * cut through if we want low latency operation.
2917 */
2918 ev_merge = 1;
2919 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
2920 case EFX_EVQ_FLAGS_TYPE_AUTO:
2921 ev_cut_through = low_latency ? 1 : 0;
2922 break;
2923 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
2924 ev_cut_through = 0;
2925 break;
2926 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
2927 ev_cut_through = 1;
2928 break;
2929 default:
2930 rc = EINVAL;
2931 goto fail2;
2932 }
2933 }
2934
2935 /*
2936 * On EF100, extended width event queues have a different event
2937 * descriptor layout and are used to support descriptor proxy queues.
2938 */
2939 ev_extended_width = 0;
2940 #if EFSYS_OPT_EV_EXTENDED_WIDTH
2941 if (encp->enc_init_evq_extended_width_supported) {
2942 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
2943 ev_extended_width = 1;
2944 }
2945 #endif
2946
2947 MCDI_IN_POPULATE_DWORD_8(req, INIT_EVQ_V2_IN_FLAGS,
2948 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
2949 INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
2950 INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
2951 INIT_EVQ_V2_IN_FLAG_CUT_THRU, ev_cut_through,
2952 INIT_EVQ_V2_IN_FLAG_RX_MERGE, ev_merge,
2953 INIT_EVQ_V2_IN_FLAG_TX_MERGE, ev_merge,
2954 INIT_EVQ_V2_IN_FLAG_TYPE, evq_type,
2955 INIT_EVQ_V2_IN_FLAG_EXT_WIDTH, ev_extended_width);
2956
2957 /* If the value is zero then disable the timer */
2958 if (us == 0) {
2959 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
2960 MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
2961 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
2962 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
2963 } else {
2964 unsigned int ticks;
2965
2966 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
2967 goto fail3;
2968
2969 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
2970 MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
2971 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
2972 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
2973 }
2974
2975 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
2976 MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
2977 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);
2978
2979 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
2980 addr = EFSYS_MEM_ADDR(esmp);
2981
2982 for (i = 0; i < npages; i++) {
2983 EFX_POPULATE_QWORD_2(*dma_addr,
2984 EFX_DWORD_1, (uint32_t)(addr >> 32),
2985 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
2986
2987 dma_addr++;
2988 addr += EFX_BUF_SIZE;
2989 }
2990
2991 efx_mcdi_execute(enp, &req);
2992
2993 if (req.emr_rc != 0) {
2994 rc = req.emr_rc;
2995 goto fail4;
2996 }
2997
2998 if (encp->enc_init_evq_v2_supported) {
2999 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
3000 rc = EMSGSIZE;
3001 goto fail5;
3002 }
3003 EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
3004 MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
3005 } else {
3006 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
3007 rc = EMSGSIZE;
3008 goto fail6;
3009 }
3010 }
3011
3012 /* NOTE: ignore the returned IRQ param as firmware does not set it. */
3013
3014 return (0);
3015
3016 fail6:
3017 EFSYS_PROBE(fail6);
3018 fail5:
3019 EFSYS_PROBE(fail5);
3020 fail4:
3021 EFSYS_PROBE(fail4);
3022 fail3:
3023 EFSYS_PROBE(fail3);
3024 fail2:
3025 EFSYS_PROBE(fail2);
3026 fail1:
3027 EFSYS_PROBE1(fail1, efx_rc_t, rc);
3028
3029 return (rc);
3030 }
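
/*
 * Example usage (a minimal sketch): an interrupting event queue with the
 * firmware-chosen (AUTO) type and a 50us moderation timer.  The esmp buffer
 * is assumed to already be allocated and large enough to cover
 * efx_evq_nbufs(enp, nevs, flags) buffers; drivers normally reach this code
 * through efx_ev_qcreate() rather than calling the MCDI helper directly.
 *
 *	uint32_t flags = EFX_EVQ_FLAGS_TYPE_AUTO |
 *	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
 *	efx_rc_t rc;
 *
 *	rc = efx_mcdi_init_evq(enp, index, esmp, nevs, irq,
 *	    0, 50, flags, B_FALSE);
 */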
3031
3032 __checkReturn efx_rc_t
3033 efx_mcdi_fini_evq(
3034 __in efx_nic_t *enp,
3035 __in uint32_t instance)
3036 {
3037 efx_mcdi_req_t req;
3038 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
3039 MC_CMD_FINI_EVQ_OUT_LEN);
3040 efx_rc_t rc;
3041
3042 req.emr_cmd = MC_CMD_FINI_EVQ;
3043 req.emr_in_buf = payload;
3044 req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
3045 req.emr_out_buf = payload;
3046 req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
3047
3048 MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
3049
3050 efx_mcdi_execute_quiet(enp, &req);
3051
3052 if (req.emr_rc != 0) {
3053 rc = req.emr_rc;
3054 goto fail1;
3055 }
3056
3057 return (0);
3058
3059 fail1:
3060 /*
3061 * EALREADY is not an error, but indicates that the MC has rebooted and
3062 * that the EVQ has already been destroyed.
3063 */
3064 if (rc != EALREADY)
3065 EFSYS_PROBE1(fail1, efx_rc_t, rc);
3066
3067 return (rc);
3068 }
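
/*
 * Example usage (a minimal sketch): because EALREADY only indicates that
 * the MC has rebooted and already torn the queue down, callers can treat it
 * as success.
 *
 *	rc = efx_mcdi_fini_evq(enp, index);
 *	if ((rc != 0) && (rc != EALREADY))
 *		goto fail;
 */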
3069
3070 __checkReturn efx_rc_t
3071 efx_mcdi_init_rxq(
3072 __in efx_nic_t *enp,
3073 __in uint32_t ndescs,
3074 __in efx_evq_t *eep,
3075 __in uint32_t label,
3076 __in uint32_t instance,
3077 __in efsys_mem_t *esmp,
3078 __in const efx_mcdi_init_rxq_params_t *params)
3079 {
3080 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
3081 efx_mcdi_req_t req;
3082 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V5_IN_LEN,
3083 MC_CMD_INIT_RXQ_V5_OUT_LEN);
3084 int npages = efx_rxq_nbufs(enp, ndescs);
3085 int i;
3086 efx_qword_t *dma_addr;
3087 uint64_t addr;
3088 efx_rc_t rc;
3089 uint32_t dma_mode;
3090 boolean_t want_outer_classes;
3091 boolean_t no_cont_ev;
3092
3093 EFSYS_ASSERT3U(ndescs, <=, encp->enc_rxq_max_ndescs);
3094
3095 if ((esmp == NULL) ||
3096 (EFSYS_MEM_SIZE(esmp) < efx_rxq_size(enp, ndescs))) {
3097 rc = EINVAL;
3098 goto fail1;
3099 }
3100
3101 no_cont_ev = (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV);
3102 if ((no_cont_ev == B_TRUE) && (params->disable_scatter == B_FALSE)) {
3103 /* TODO: Support scatter in NO_CONT_EV mode */
3104 rc = EINVAL;
3105 goto fail2;
3106 }
3107
3108 if (params->ps_buf_size > 0)
3109 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
3110 else if (params->es_bufs_per_desc > 0)
3111 dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
3112 else
3113 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
3114
3115 if (encp->enc_tunnel_encapsulations_supported != 0 &&
3116 !params->want_inner_classes) {
3117 /*
3118 * WANT_OUTER_CLASSES can only be specified on hardware which
3119 * supports tunnel encapsulation offloads, even though it is
3120 * effectively the behaviour the hardware gives.
3121 *
3122 * Also, on hardware which does support such offloads, older
3123 * firmware rejects the flag if the offloads are not supported
3124 * by the current firmware variant, which means this may fail if
3125 * the capabilities are not updated when the firmware variant
3126 * changes. This is not an issue on newer firmware, as it was
3127 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be
3128 * specified on all firmware variants.
3129 */
3130 want_outer_classes = B_TRUE;
3131 } else {
3132 want_outer_classes = B_FALSE;
3133 }
3134
3135 req.emr_cmd = MC_CMD_INIT_RXQ;
3136 req.emr_in_buf = payload;
3137 req.emr_in_length = MC_CMD_INIT_RXQ_V5_IN_LEN;
3138 req.emr_out_buf = payload;
3139 req.emr_out_length = MC_CMD_INIT_RXQ_V5_OUT_LEN;
3140
3141 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
3142 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, eep->ee_index);
3143 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
3144 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
3145 MCDI_IN_POPULATE_DWORD_10(req, INIT_RXQ_EXT_IN_FLAGS,
3146 INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
3147 INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
3148 INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
3149 INIT_RXQ_EXT_IN_CRC_MODE, 0,
3150 INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
3151 INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, params->disable_scatter,
3152 INIT_RXQ_EXT_IN_DMA_MODE,
3153 dma_mode,
3154 INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, params->ps_buf_size,
3155 INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes,
3156 INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV, no_cont_ev);
3157 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
3158 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, enp->en_vport_id);
3159
3160 if (params->es_bufs_per_desc > 0) {
3161 MCDI_IN_SET_DWORD(req,
3162 INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
3163 params->es_bufs_per_desc);
3164 MCDI_IN_SET_DWORD(req,
3165 INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, params->es_max_dma_len);
3166 MCDI_IN_SET_DWORD(req,
3167 INIT_RXQ_V3_IN_ES_PACKET_STRIDE, params->es_buf_stride);
3168 MCDI_IN_SET_DWORD(req,
3169 INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
3170 params->hol_block_timeout);
3171 }
3172
3173 if (encp->enc_init_rxq_with_buffer_size)
3174 MCDI_IN_SET_DWORD(req, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES,
3175 params->buf_size);
3176
3177 MCDI_IN_SET_DWORD(req, INIT_RXQ_V5_IN_RX_PREFIX_ID, params->prefix_id);
3178
3179 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
3180 addr = EFSYS_MEM_ADDR(esmp);
3181
3182 for (i = 0; i < npages; i++) {
3183 EFX_POPULATE_QWORD_2(*dma_addr,
3184 EFX_DWORD_1, (uint32_t)(addr >> 32),
3185 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
3186
3187 dma_addr++;
3188 addr += EFX_BUF_SIZE;
3189 }
3190
3191 efx_mcdi_execute(enp, &req);
3192
3193 if (req.emr_rc != 0) {
3194 rc = req.emr_rc;
3195 goto fail3;
3196 }
3197
3198 return (0);
3199
3200 fail3:
3201 EFSYS_PROBE(fail3);
3202 fail2:
3203 EFSYS_PROBE(fail2);
3204 fail1:
3205 EFSYS_PROBE1(fail1, efx_rc_t, rc);
3206
3207 return (rc);
3208 }
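
/*
 * Example usage (a minimal sketch): a plain single-packet DMA mode receive
 * queue.  Zeroing the parameter block leaves packed stream and equal stride
 * super-buffer modes disabled; buf_size is honoured only when the NIC
 * reports enc_init_rxq_with_buffer_size.  The descriptor ring memory (esmp)
 * is assumed to be allocated already.
 *
 *	efx_mcdi_init_rxq_params_t params;
 *	efx_rc_t rc;
 *
 *	memset(&params, 0, sizeof (params));
 *	params.disable_scatter = B_TRUE;
 *	params.buf_size = 2048;
 *
 *	rc = efx_mcdi_init_rxq(enp, ndescs, eep, label, instance,
 *	    esmp, &params);
 */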
3209
3210 __checkReturn efx_rc_t
3211 efx_mcdi_fini_rxq(
3212 __in efx_nic_t *enp,
3213 __in uint32_t instance)
3214 {
3215 efx_mcdi_req_t req;
3216 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN,
3217 MC_CMD_FINI_RXQ_OUT_LEN);
3218 efx_rc_t rc;
3219
3220 req.emr_cmd = MC_CMD_FINI_RXQ;
3221 req.emr_in_buf = payload;
3222 req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
3223 req.emr_out_buf = payload;
3224 req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
3225
3226 MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
3227
3228 efx_mcdi_execute_quiet(enp, &req);
3229
3230 if (req.emr_rc != 0) {
3231 rc = req.emr_rc;
3232 goto fail1;
3233 }
3234
3235 return (0);
3236
3237 fail1:
3238 /*
3239 * EALREADY is not an error, but indicates that the MC has rebooted and
3240 * that the RXQ has already been destroyed.
3241 */
3242 if (rc != EALREADY)
3243 EFSYS_PROBE1(fail1, efx_rc_t, rc);
3244
3245 return (rc);
3246 }
3247
3248 __checkReturn efx_rc_t
3249 efx_mcdi_init_txq(
3250 __in efx_nic_t *enp,
3251 __in uint32_t ndescs,
3252 __in uint32_t target_evq,
3253 __in uint32_t label,
3254 __in uint32_t instance,
3255 __in uint16_t flags,
3256 __in efsys_mem_t *esmp)
3257 {
3258 efx_mcdi_req_t req;
3259 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_EXT_IN_LEN,
3260 MC_CMD_INIT_TXQ_OUT_LEN);
3261 efx_qword_t *dma_addr;
3262 uint64_t addr;
3263 int npages;
3264 int i;
3265 efx_rc_t rc;
3266
3267 EFSYS_ASSERT(MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM >=
3268 efx_txq_nbufs(enp, enp->en_nic_cfg.enc_txq_max_ndescs));
3269
3270 if ((esmp == NULL) ||
3271 (EFSYS_MEM_SIZE(esmp) < efx_txq_size(enp, ndescs))) {
3272 rc = EINVAL;
3273 goto fail1;
3274 }
3275
3276 npages = efx_txq_nbufs(enp, ndescs);
3277 if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
3278 rc = EINVAL;
3279 goto fail2;
3280 }
3281
3282 req.emr_cmd = MC_CMD_INIT_TXQ;
3283 req.emr_in_buf = payload;
3284 req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
3285 req.emr_out_buf = payload;
3286 req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
3287
3288 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs);
3289 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
3290 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
3291 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
3292
3293 MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS,
3294 INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
3295 INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
3296 (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
3297 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
3298 (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
3299 INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN,
3300 (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0,
3301 INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN,
3302 (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
3303 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
3304 INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
3305 INIT_TXQ_IN_CRC_MODE, 0,
3306 INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
3307
3308 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
3309 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, enp->en_vport_id);
3310
3311 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
3312 addr = EFSYS_MEM_ADDR(esmp);
3313
3314 for (i = 0; i < npages; i++) {
3315 EFX_POPULATE_QWORD_2(*dma_addr,
3316 EFX_DWORD_1, (uint32_t)(addr >> 32),
3317 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
3318
3319 dma_addr++;
3320 addr += EFX_BUF_SIZE;
3321 }
3322
3323 efx_mcdi_execute(enp, &req);
3324
3325 if (req.emr_rc != 0) {
3326 rc = req.emr_rc;
3327 goto fail3;
3328 }
3329
3330 return (0);
3331
3332 fail3:
3333 EFSYS_PROBE(fail3);
3334 fail2:
3335 EFSYS_PROBE(fail2);
3336 fail1:
3337 EFSYS_PROBE1(fail1, efx_rc_t, rc);
3338
3339 return (rc);
3340 }
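
/*
 * Example usage (a minimal sketch): the queue flags map directly onto the
 * INIT_TXQ checksum and TSO flags populated above.  A queue for plain
 * (non-encapsulated) traffic with full checksum offload could be created
 * with:
 *
 *	uint16_t flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
 *	efx_rc_t rc;
 *
 *	rc = efx_mcdi_init_txq(enp, ndescs, target_evq, label, instance,
 *	    flags, esmp);
 */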
3341
3342 __checkReturn efx_rc_t
3343 efx_mcdi_fini_txq(
3344 __in efx_nic_t *enp,
3345 __in uint32_t instance)
3346 {
3347 efx_mcdi_req_t req;
3348 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN,
3349 MC_CMD_FINI_TXQ_OUT_LEN);
3350 efx_rc_t rc;
3351
3352 req.emr_cmd = MC_CMD_FINI_TXQ;
3353 req.emr_in_buf = payload;
3354 req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
3355 req.emr_out_buf = payload;
3356 req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
3357
3358 MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
3359
3360 efx_mcdi_execute_quiet(enp, &req);
3361
3362 if (req.emr_rc != 0) {
3363 rc = req.emr_rc;
3364 goto fail1;
3365 }
3366
3367 return (0);
3368
3369 fail1:
3370 /*
3371 * EALREADY is not an error, but indicates that the MC has rebooted and
3372 * that the TXQ has already been destroyed.
3373 */
3374 if (rc != EALREADY)
3375 EFSYS_PROBE1(fail1, efx_rc_t, rc);
3376
3377 return (rc);
3378 }
3379
3380 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
3381
3382 __checkReturn efx_rc_t
3383 efx_mcdi_get_nic_addr_info(
3384 __in efx_nic_t *enp,
3385 __out uint32_t *mapping_typep)
3386 {
3387 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_DESC_ADDR_INFO_IN_LEN,
3388 MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN);
3389 efx_mcdi_req_t req;
3390 efx_rc_t rc;
3391
3392 req.emr_cmd = MC_CMD_GET_DESC_ADDR_INFO;
3393 req.emr_in_buf = payload;
3394 req.emr_in_length = MC_CMD_GET_DESC_ADDR_INFO_IN_LEN;
3395 req.emr_out_buf = payload;
3396 req.emr_out_length = MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN;
3397
3398 efx_mcdi_execute_quiet(enp, &req);
3399
3400 if (req.emr_rc != 0) {
3401 rc = req.emr_rc;
3402 goto fail1;
3403 }
3404
3405 if (req.emr_out_length_used < MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN) {
3406 rc = EMSGSIZE;
3407 goto fail2;
3408 }
3409
3410 *mapping_typep =
3411 MCDI_OUT_DWORD(req, GET_DESC_ADDR_INFO_OUT_MAPPING_TYPE);
3412
3413 return (0);
3414
3415 fail2:
3416 EFSYS_PROBE(fail2);
3417 fail1:
3418 EFSYS_PROBE1(fail1, efx_rc_t, rc);
3419
3420 return (rc);
3421 }
3422
3423 __checkReturn efx_rc_t
3424 efx_mcdi_get_nic_addr_regions(
3425 __in efx_nic_t *enp,
3426 __out efx_nic_dma_region_info_t *endrip)
3427 {
3428 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_DESC_ADDR_REGIONS_IN_LEN,
3429 MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX_MCDI2);
3430 efx_xword_t *regions;
3431 efx_mcdi_req_t req;
3432 efx_rc_t rc;
3433 size_t alloc_size;
3434 unsigned int nregions;
3435 unsigned int i;
3436
3437 req.emr_cmd = MC_CMD_GET_DESC_ADDR_REGIONS;
3438 req.emr_in_buf = payload;
3439 req.emr_in_length = MC_CMD_GET_DESC_ADDR_REGIONS_IN_LEN;
3440 req.emr_out_buf = payload;
3441 req.emr_out_length = MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX_MCDI2;
3442
3443 efx_mcdi_execute_quiet(enp, &req);
3444
3445 if (req.emr_rc != 0) {
3446 rc = req.emr_rc;
3447 goto fail1;
3448 }
3449
3450 if (req.emr_out_length_used <
3451 MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMIN) {
3452 rc = EMSGSIZE;
3453 goto fail2;
3454 }
3455
3456 nregions = MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_NUM(
3457 req.emr_out_length_used);
3458
3459 EFX_STATIC_ASSERT(sizeof (*regions) == DESC_ADDR_REGION_LEN);
3460 regions = MCDI_OUT2(req, efx_xword_t,
3461 GET_DESC_ADDR_REGIONS_OUT_REGIONS);
3462
3463 alloc_size = nregions * sizeof (endrip->endri_regions[0]);
3464 if (alloc_size / sizeof (endrip->endri_regions[0]) != nregions) {
3465 rc = ENOMEM;
3466 goto fail3;
3467 }
3468
3469 EFSYS_KMEM_ALLOC(enp->en_esip,
3470 alloc_size,
3471 endrip->endri_regions);
3472 if (endrip->endri_regions == NULL) {
3473 rc = ENOMEM;
3474 goto fail4;
3475 }
3476
3477 endrip->endri_count = nregions;
3478 for (i = 0; i < nregions; ++i) {
3479 efx_nic_dma_region_t *region_info;
3480
3481 region_info = &endrip->endri_regions[i];
3482
3483 region_info->endr_inuse = B_FALSE;
3484
3485 region_info->endr_nic_base =
3486 MCDI_OUT_INDEXED_MEMBER_QWORD(req,
3487 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i,
3488 DESC_ADDR_REGION_DESC_ADDR_BASE);
3489
3490 region_info->endr_trgt_base =
3491 MCDI_OUT_INDEXED_MEMBER_QWORD(req,
3492 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i,
3493 DESC_ADDR_REGION_TRGT_ADDR_BASE);
3494
3495 region_info->endr_window_log2 =
3496 MCDI_OUT_INDEXED_MEMBER_DWORD(req,
3497 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i,
3498 DESC_ADDR_REGION_WINDOW_SIZE_LOG2);
3499
3500 region_info->endr_align_log2 =
3501 MCDI_OUT_INDEXED_MEMBER_DWORD(req,
3502 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i,
3503 DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2);
3504 }
3505
3506 return (0);
3507
3508 fail4:
3509 EFSYS_PROBE(fail4);
3510 fail3:
3511 EFSYS_PROBE(fail3);
3512 fail2:
3513 EFSYS_PROBE(fail2);
3514 fail1:
3515 EFSYS_PROBE1(fail1, efx_rc_t, rc);
3516
3517 return (rc);
3518 }
3519
3520 __checkReturn efx_rc_t
3521 efx_mcdi_set_nic_addr_regions(
3522 __in efx_nic_t *enp,
3523 __in const efx_nic_dma_region_info_t *endrip)
3524 {
3525 EFX_MCDI_DECLARE_BUF(payload,
3526 MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX_MCDI2,
3527 MC_CMD_SET_DESC_ADDR_REGIONS_OUT_LEN);
3528 efx_qword_t *trgt_addr_base;
3529 efx_mcdi_req_t req;
3530 unsigned int i;
3531 efx_rc_t rc;
3532
3533 if (endrip->endri_count >
3534 MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM) {
3535 rc = EINVAL;
3536 goto fail1;
3537 }
3538
3539 req.emr_cmd = MC_CMD_SET_DESC_ADDR_REGIONS;
3540 req.emr_in_buf = payload;
3541 req.emr_in_length =
3542 MC_CMD_SET_DESC_ADDR_REGIONS_IN_LEN(endrip->endri_count);
3543 req.emr_out_buf = payload;
3544 req.emr_out_length = MC_CMD_SET_DESC_ADDR_REGIONS_OUT_LEN;
3545
3546 EFX_STATIC_ASSERT(sizeof (*trgt_addr_base) ==
3547 MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LEN);
3548 trgt_addr_base = MCDI_OUT2(req, efx_qword_t,
3549 SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE);
3550
3551 for (i = 0; i < endrip->endri_count; ++i) {
3552 const efx_nic_dma_region_t *region_info;
3553
3554 region_info = &endrip->endri_regions[i];
3555
3556 if (region_info->endr_inuse != B_TRUE)
3557 continue;
3558
3559 EFX_STATIC_ASSERT(sizeof (1U) * 8 >=
3560 MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM);
3561 MCDI_IN_SET_DWORD(req,
3562 SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK, 1U << i);
3563
3564 MCDI_IN_SET_INDEXED_QWORD(req,
3565 SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE, i,
3566 region_info->endr_trgt_base);
3567 }
3568
3569 efx_mcdi_execute_quiet(enp, &req);
3570
3571 if (req.emr_rc != 0) {
3572 rc = req.emr_rc;
3573 goto fail2;
3574 }
3575
3576 return (0);
3577
3578 fail2:
3579 EFSYS_PROBE(fail2);
3580 fail1:
3581 EFSYS_PROBE1(fail1, efx_rc_t, rc);
3582
3583 return (rc);
3584 }
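
/*
 * Example usage (a minimal sketch): the usual flow for the two calls above
 * is fetch, claim, then program - read the regions the NIC advertises, mark
 * one in use with a caller-chosen target base address, and push the update
 * back to the firmware.  Region selection and trgt_base are caller specific
 * placeholders here.
 *
 *	efx_nic_dma_region_info_t info;
 *	efx_rc_t rc;
 *
 *	if ((rc = efx_mcdi_get_nic_addr_regions(enp, &info)) != 0)
 *		return (rc);
 *
 *	info.endri_regions[0].endr_trgt_base = trgt_base;
 *	info.endri_regions[0].endr_inuse = B_TRUE;
 *
 *	rc = efx_mcdi_set_nic_addr_regions(enp, &info);
 */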
3585
3586 #endif /* EFSYS_OPT_MCDI */
3587