1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/asm_linkage.h>
28 #include <sys/bootconf.h>
29 #include <sys/cpuvar.h>
30 #include <sys/cmn_err.h>
31 #include <sys/controlregs.h>
32 #include <sys/debug.h>
33 #include <sys/kobj.h>
34 #include <sys/kobj_impl.h>
35 #include <sys/machsystm.h>
36 #include <sys/param.h>
37 #include <sys/machparam.h>
38 #include <sys/promif.h>
39 #include <sys/sysmacros.h>
40 #include <sys/systm.h>
41 #include <sys/types.h>
42 #include <sys/thread.h>
43 #include <sys/ucode.h>
44 #include <sys/x86_archext.h>
45 #include <sys/x_call.h>
46 #ifdef __xpv
47 #include <sys/hypervisor.h>
48 #endif
49
50 /*
51 * AMD-specific equivalence table
52 */
53 static ucode_eqtbl_amd_t *ucode_eqtbl_amd;
54
55 /*
56 * mcpu_ucode_info for the boot CPU. Statically allocated.
57 */
58 static struct cpu_ucode_info cpu_ucode_info0;
59
60 static ucode_file_t ucodefile;
61
62 static void* ucode_zalloc(processorid_t, size_t);
63 static void ucode_free(processorid_t, void *, size_t);
64
65 static int ucode_capable_amd(cpu_t *);
66 static int ucode_capable_intel(cpu_t *);
67
68 static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
69 static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
70 int);
71
72 static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
73 static void ucode_file_reset_intel(ucode_file_t *, processorid_t);
74
75 static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
76 static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
77
78 #ifdef __xpv
79 static void ucode_load_xpv(ucode_update_t *);
80 static void ucode_chipset_amd(uint8_t *, int);
81 #endif
82
83 static int ucode_equiv_cpu_amd(cpu_t *, uint16_t *);
84
85 static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
86 ucode_file_t *);
87 static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
88 ucode_file_t *);
89
90 #ifndef __xpv
91 static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
92 ucode_file_amd_t *, int);
93 #endif
94 static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
95 ucode_header_intel_t *, ucode_ext_table_intel_t *);
96
97 static void ucode_read_rev_amd(cpu_ucode_info_t *);
98 static void ucode_read_rev_intel(cpu_ucode_info_t *);
99
100 static const struct ucode_ops ucode_amd = {
101 MSR_AMD_PATCHLOADER,
102 ucode_capable_amd,
103 ucode_file_reset_amd,
104 ucode_read_rev_amd,
105 ucode_load_amd,
106 ucode_validate_amd,
107 ucode_extract_amd,
108 ucode_locate_amd
109 };
110
111 static const struct ucode_ops ucode_intel = {
112 MSR_INTC_UCODE_WRITE,
113 ucode_capable_intel,
114 ucode_file_reset_intel,
115 ucode_read_rev_intel,
116 ucode_load_intel,
117 ucode_validate_intel,
118 ucode_extract_intel,
119 ucode_locate_intel
120 };
121
122 const struct ucode_ops *ucode;
123
124 static const char ucode_failure_fmt[] =
125 "cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
126 static const char ucode_success_fmt[] =
127 "?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";
128
129 /*
130 * Force flag. If set, the first microcode binary that matches
131 * signature and platform id will be used for microcode update,
132 * regardless of version. Should only be used for debugging.
133 */
134 int ucode_force_update = 0;
135
136 /*
137 * Allocate space for mcpu_ucode_info in the machcpu structure
138 * for all non-boot CPUs.
139 */
140 void
ucode_alloc_space(cpu_t * cp)141 ucode_alloc_space(cpu_t *cp)
142 {
143 ASSERT(cp->cpu_id != 0);
144 ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
145 cp->cpu_m.mcpu_ucode_info =
146 kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
147 }
148
149 void
ucode_free_space(cpu_t * cp)150 ucode_free_space(cpu_t *cp)
151 {
152 ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
153 ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
154 kmem_free(cp->cpu_m.mcpu_ucode_info,
155 sizeof (*cp->cpu_m.mcpu_ucode_info));
156 cp->cpu_m.mcpu_ucode_info = NULL;
157 }
158
159 /*
160 * Called when we are done with microcode update on all processors to free up
161 * space allocated for the microcode file.
162 */
163 void
ucode_cleanup()164 ucode_cleanup()
165 {
166 if (ucode == NULL)
167 return;
168
169 ucode->file_reset(&ucodefile, -1);
170 }
171
172 /*
173 * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
174 * allocated with BOP_ALLOC() and does not require a free.
175 */
176 static void*
ucode_zalloc(processorid_t id,size_t size)177 ucode_zalloc(processorid_t id, size_t size)
178 {
179 if (id)
180 return (kmem_zalloc(size, KM_NOSLEEP));
181
182 /* BOP_ALLOC() failure results in panic */
183 return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
184 }
185
186 static void
ucode_free(processorid_t id,void * buf,size_t size)187 ucode_free(processorid_t id, void* buf, size_t size)
188 {
189 if (id)
190 kmem_free(buf, size);
191 }
192
193 /*
194 * Check whether or not a processor is capable of microcode operations
195 * Returns 1 if it is capable, 0 if not.
196 *
197 * At this point we only support microcode update for:
198 * - Intel processors family 6 and above, and
199 * - AMD processors family 0x10 and above.
200 *
201 * We also assume that we don't support a mix of Intel and
202 * AMD processors in the same box.
203 *
204 * An i86xpv guest domain can't update the microcode.
205 */
206 /*ARGSUSED*/
207 static int
ucode_capable_amd(cpu_t * cp)208 ucode_capable_amd(cpu_t *cp)
209 {
210 int hwenv = get_hwenv();
211
212 if (hwenv == HW_XEN_HVM || (hwenv == HW_XEN_PV && !is_controldom())) {
213 return (0);
214 }
215 return (cpuid_getfamily(cp) >= 0x10);
216 }
217
218 static int
ucode_capable_intel(cpu_t * cp)219 ucode_capable_intel(cpu_t *cp)
220 {
221 int hwenv = get_hwenv();
222
223 if (hwenv == HW_XEN_HVM || (hwenv == HW_XEN_PV && !is_controldom())) {
224 return (0);
225 }
226 return (cpuid_getfamily(cp) >= 6);
227 }
228
229 /*
230 * Called when it is no longer necessary to keep the microcode around,
231 * or when the cached microcode doesn't match the CPU being processed.
232 */
233 static void
ucode_file_reset_amd(ucode_file_t * ufp,processorid_t id)234 ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
235 {
236 ucode_file_amd_t *ucodefp = ufp->amd;
237
238 if (ucodefp == NULL)
239 return;
240
241 ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
242 ufp->amd = NULL;
243 }
244
245 static void
ucode_file_reset_intel(ucode_file_t * ufp,processorid_t id)246 ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
247 {
248 ucode_file_intel_t *ucodefp = &ufp->intel;
249 int total_size, body_size;
250
251 if (ucodefp == NULL || ucodefp->uf_header == NULL)
252 return;
253
254 total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
255 body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
256 if (ucodefp->uf_body) {
257 ucode_free(id, ucodefp->uf_body, body_size);
258 ucodefp->uf_body = NULL;
259 }
260
261 if (ucodefp->uf_ext_table) {
262 int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;
263
264 ucode_free(id, ucodefp->uf_ext_table, size);
265 ucodefp->uf_ext_table = NULL;
266 }
267
268 ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
269 ucodefp->uf_header = NULL;
270 }
271
/*
 * Find the equivalent CPU id in the equivalence table.
 *
 * On success the equivalent id is stored in *eq_sig and EM_OK is returned.
 * May return EM_OPENFILE, EM_NOMEM, EM_FILESIZE, or EM_HIGHERREV (the
 * latter when no table entry matches, which is assumed to mean the
 * microcode file on disk is outdated).
 */
static int
ucode_equiv_cpu_amd(cpu_t *cp, uint16_t *eq_sig)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int offset = 0, cpi_sig = cpuid_getsig(cp);
	ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	/*
	 * No kmem_zalloc() etc. available on boot cpu.
	 */
	if (cp->cpu_id == 0) {
		if ((fd = kobj_open(name)) == -1)
			return (EM_OPENFILE);
		/* ucode_zalloc() cannot fail on boot cpu */
		eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
		ASSERT(eqtbl);
		/*
		 * Read one table entry at a time until we either find an
		 * entry for this CPU's signature or hit the all-zero
		 * terminator entry.
		 */
		do {
			count = kobj_read(fd, (int8_t *)eqtbl,
			    sizeof (*eqtbl), offset);
			if (count != sizeof (*eqtbl)) {
				(void) kobj_close(fd);
				return (EM_HIGHERREV);
			}
			offset += count;
		} while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
		(void) kobj_close(fd);
	}

	/*
	 * If not already done, load the equivalence table.
	 * Not done on boot CPU.
	 */
	if (eqtbl == NULL) {
		struct _buf *eq;
		uint64_t size;

		if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
			return (EM_OPENFILE);

		if (kobj_get_filesize(eq, &size) < 0) {
			kobj_close_file(eq);
			return (EM_OPENFILE);
		}

		/* Cache the whole table for use by subsequent CPUs. */
		ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
		if (ucode_eqtbl_amd == NULL) {
			kobj_close_file(eq);
			return (EM_NOMEM);
		}

		count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
		kobj_close_file(eq);

		if (count != size)
			return (EM_FILESIZE);
	}

	/* Get the equivalent CPU id. */
	if (cp->cpu_id)
		for (eqtbl = ucode_eqtbl_amd;
		    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
		    eqtbl++)
			;

	*eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (*eq_sig == 0)
		return (EM_HIGHERREV);

	return (EM_OK);
}
352
/*
 * xVM cannot check for the presence of PCI devices. Look for chipset-
 * specific microcode patches in the container file and disable them
 * by setting their CPU revision to an invalid value.
 */
#ifdef __xpv
static void
ucode_chipset_amd(uint8_t *buf, int size)
{
	ucode_header_amd_t *uh;
	uint32_t *ptr = (uint32_t *)buf;
	int len = 0;

	/* skip to first microcode patch */
	ptr += 2; len = *ptr++; ptr += len >> 2; size -= len;

	/*
	 * Walk the remaining patches; each patch is preceded by one
	 * 32-bit word (skipped) and a 32-bit byte length.
	 */
	while (size >= sizeof (ucode_header_amd_t) + 8) {
		ptr++; len = *ptr++;
		uh = (ucode_header_amd_t *)ptr;
		ptr += len >> 2; size -= len;

		if (uh->uh_nb_id) {
			cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_nb_id, uh->uh_nb_rev);
			/* invalid revision so this patch can never match */
			uh->uh_cpu_rev = 0xffff;
		}

		if (uh->uh_sb_id) {
			cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
			    "chipset id %x, revision %x",
			    uh->uh_sb_id, uh->uh_sb_rev);
			uh->uh_cpu_rev = 0xffff;
		}
	}
}
#endif
390
/*
 * Populate the ucode file structure from microcode file corresponding to
 * this CPU, if exists.
 *
 * Return EM_OK on success, corresponding error code on failure.
 */
/*ARGSUSED*/
static ucode_errno_t
ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count, rc;
	ucode_file_amd_t *ucodefp = ufp->amd;

#ifndef __xpv
	uint16_t eq_sig = 0;
	int i;

	/* get equivalent CPU id */
	if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
		return (rc);

	/*
	 * Allocate a buffer for the microcode patch. If the buffer has been
	 * allocated before, check for a matching microcode to avoid loading
	 * the file again.
	 */
	if (ucodefp == NULL)
		ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
	    == EM_OK)
		return (EM_OK);

	if (ucodefp == NULL)
		return (EM_NOMEM);

	ufp->amd = ucodefp;

	/*
	 * Find the patch for this CPU. The patch files are named XXXX-YY, where
	 * XXXX is the equivalent CPU id and YY is the running patch number.
	 * Patches specific to certain chipsets are guaranteed to have lower
	 * numbers than less specific patches, so we can just load the first
	 * patch that matches.
	 */

	for (i = 0; i < 0xff; i++) {
		(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
		    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
		/* a missing file number means we have run out of patches */
		if ((fd = kobj_open(name)) == -1)
			return (EM_NOMATCH);
		count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
		(void) kobj_close(fd);

		if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
			return (EM_OK);
	}
	return (EM_NOMATCH);
#else
	int size = 0;
	char c;

	/*
	 * The xVM case is special. To support mixed-revision systems, the
	 * hypervisor will choose which patch to load for which CPU, so the
	 * whole microcode patch container file will have to be loaded.
	 *
	 * Since this code is only run on the boot cpu, we don't have to care
	 * about failing ucode_zalloc() or freeing allocated memory.
	 */
	if (cp->cpu_id != 0)
		return (EM_INVALIDARG);

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/container",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	if ((fd = kobj_open(name)) == -1)
		return (EM_OPENFILE);

	/* get the file size by counting bytes */
	do {
		count = kobj_read(fd, &c, 1, size);
		size += count;
	} while (count);

	ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	ASSERT(ucodefp);
	ufp->amd = ucodefp;

	ucodefp->usize = size;
	ucodefp->ucodep = ucode_zalloc(cp->cpu_id, size);
	ASSERT(ucodefp->ucodep);

	/* load the microcode patch container file */
	count = kobj_read(fd, (char *)ucodefp->ucodep, size, 0);
	(void) kobj_close(fd);

	if (count != size)
		return (EM_FILESIZE);

	/* make sure the container file is valid */
	rc = ucode->validate(ucodefp->ucodep, ucodefp->usize);

	if (rc != EM_OK)
		return (rc);

	/* disable chipset-specific patches */
	ucode_chipset_amd(ucodefp->ucodep, ucodefp->usize);

	return (EM_OK);
#endif
}
504
/*
 * Populate the ucode file structure from the Intel microcode file
 * corresponding to this CPU, if one exists.  Reads the header, body, and
 * optional extended signature table, verifying checksums along the way.
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int header_size = UCODE_HEADER_SIZE_INTEL;
	int cpi_sig = cpuid_getsig(cp);
	ucode_errno_t rc = EM_OK;
	ucode_file_intel_t *ucodefp = &ufp->intel;

	ASSERT(ucode);

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode->file_reset(ufp, cp->cpu_id);

	ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
	if (ucodefp->uf_header == NULL)
		return (EM_NOMEM);

	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE_INTEL: {

		ucode_header_intel_t *uhp = ucodefp->uf_header;
		uint32_t offset = header_size;
		int total_size, body_size, ext_size;
		uint32_t sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
			ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
			if (ucodefp->uf_body == NULL) {
				rc = EM_NOMEM;
				break;
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		/* header plus body must checksum to zero */
		sum = ucode_checksum_intel(0, header_size,
		    (uint8_t *)ucodefp->uf_header);
		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
		if (ucodefp->uf_ext_table == NULL) {
			rc = EM_NOMEM;
			break;
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum_intel(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			/* also verify each individual extended signature */
			ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum_intel(0,
				    UCODE_EXT_SIG_SIZE_INTEL,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		/* short read of the header */
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	/* confirm the newly loaded file actually matches this CPU */
	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}
638
#ifndef __xpv
/*
 * Check whether an AMD microcode patch of 'size' bytes is applicable.
 *
 * Returns EM_OK if the patch targets eq_sig and is newer than the running
 * revision, EM_HIGHERREV if it targets eq_sig but is not newer, and
 * EM_NOMATCH otherwise (including any chipset-specific patch, which is
 * never loaded here).
 */
static ucode_errno_t
ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
    ucode_file_amd_t *ucodefp, int size)
{
	ucode_header_amd_t *uh;

	if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
		return (EM_NOMATCH);

	/*
	 * Don't even think about loading patches that would require code
	 * execution.
	 */
	if (size > offsetof(ucode_file_amd_t, uf_code_present) &&
	    ucodefp->uf_code_present)
		return (EM_NOMATCH);

	uh = &ucodefp->uf_header;

	if (eq_sig != uh->uh_cpu_rev)
		return (EM_NOMATCH);

	if (uh->uh_nb_id) {
		cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_sb_id) {
		cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_patch_id <= uinfop->cui_rev)
		return (EM_HIGHERREV);

	return (EM_OK);
}
#endif
680
/*
 * Check whether the Intel microcode (header plus optional extended
 * signature table) matches this processor's signature and platform id.
 *
 * NOTE: despite what an older comment claimed, this does not return 1/0;
 * it returns EM_OK on a usable match, EM_HIGHERREV if the microcode
 * matches but the running revision is already >= the file's revision
 * (unless ucode_force_update is set), and EM_NOMATCH otherwise.
 */
static ucode_errno_t
ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
    ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
{
	if (uhp == NULL)
		return (EM_NOMATCH);

	/* first try the primary signature in the header */
	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
	    uinfop->cui_platid, uhp->uh_proc_flags)) {

		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
			return (EM_HIGHERREV);

		return (EM_OK);
	}

	/* then try each entry of the extended signature table, if any */
	if (uetp != NULL) {
		int i;

		for (i = 0; i < uetp->uet_count; i++) {
			ucode_ext_sig_intel_t *uesp;

			uesp = &uetp->uet_ext_sig[i];

			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
			    uinfop->cui_platid, uesp->ues_proc_flags)) {

				if (uinfop->cui_rev >= uhp->uh_rev &&
				    !ucode_force_update)
					return (EM_HIGHERREV);

				return (EM_OK);
			}
		}
	}

	return (EM_NOMATCH);
}
722
/*
 * Cross-call handler: apply the microcode update described by arg1 (a
 * ucode_update_t *) on the CPU this runs on, then refresh that CPU's
 * cached revision.  The other two xcall arguments are unused.
 */
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;

	ASSERT(ucode);
	ASSERT(uusp->ucodep);

#ifndef __xpv
	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	/* hand the update image to the vendor-specific update MSR */
	wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);
#endif
	/* record the revision reported after the update attempt */
	ucode->read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}
753
/*
 * Load the cached AMD microcode patch on the current CPU (or, under xVM,
 * hand the whole container to the hypervisor) and return the resulting
 * patch revision.
 */
/*ARGSUSED*/
static uint32_t
ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_amd_t *ucodefp = ufp->amd;
#ifdef __xpv
	ucode_update_t uus;
#endif

	ASSERT(ucode);
	ASSERT(ucodefp);

#ifndef __xpv
	/* no preemption while writing the patch-loader MSR */
	kpreempt_disable();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
	ucode->read_rev(uinfop);
	kpreempt_enable();

	return (ucodefp->uf_header.uh_patch_id);
#else
	uus.ucodep = ucodefp->ucodep;
	uus.usize = ucodefp->usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;

	return (uus.new_rev);
#endif
}
783
/*
 * Load the cached Intel microcode on the current CPU and return the
 * revision from the file header.  Under xVM the header, body, and
 * extended signature table are copied into one contiguous buffer and
 * handed to the hypervisor instead.
 */
/*ARGSUSED2*/
static uint32_t
ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
#ifdef __xpv
	uint32_t ext_offset;
	uint32_t body_size;
	uint32_t ext_size;
	uint8_t *ustart;
	uint32_t usize;
	ucode_update_t uus;
#endif

	ASSERT(ucode);

#ifdef __xpv
	/*
	 * the hypervisor wants the header, data, and extended
	 * signature tables. We can only get here from the boot
	 * CPU (cpu #0), we don't need to free as ucode_zalloc() will
	 * use BOP_ALLOC().
	 */
	usize = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	ustart = ucode_zalloc(cp->cpu_id, usize);
	ASSERT(ustart);

	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	ext_offset = body_size + UCODE_HEADER_SIZE_INTEL;
	ext_size = usize - ext_offset;
	ASSERT(ext_size >= 0);

	/* assemble header + body (+ ext table) contiguously */
	(void) memcpy(ustart, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	(void) memcpy(&ustart[UCODE_HEADER_SIZE_INTEL], ucodefp->uf_body,
	    body_size);
	if (ext_size > 0) {
		(void) memcpy(&ustart[ext_offset],
		    ucodefp->uf_ext_table, ext_size);
	}
	uus.ucodep = ustart;
	uus.usize = usize;
	ucode_load_xpv(&uus);
	ucode->read_rev(uinfop);
	uus.new_rev = uinfop->cui_rev;
#else
	/* no preemption while writing the update MSR */
	kpreempt_disable();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
	ucode->read_rev(uinfop);
	kpreempt_enable();
#endif

	return (ucodefp->uf_header->uh_rev);
}
837
838
#ifdef __xpv
/*
 * Hand a microcode update image to the Xen hypervisor via the platform
 * hypercall; the hypervisor decides which CPUs to update.  Failure is
 * reported with a warning only.
 */
static void
ucode_load_xpv(ucode_update_t *uusp)
{
	xen_platform_op_t op;
	int e;

	/* only the control domain may update microcode */
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

	kpreempt_disable();
	op.cmd = XENPF_microcode_update;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(op.u.microcode.data, uusp->ucodep);
	op.u.microcode.length = uusp->usize;
	e = HYPERVISOR_platform_op(&op);
	if (e != 0) {
		cmn_err(CE_WARN, "hypervisor failed to accept uCode update");
	}
	kpreempt_enable();
}
#endif /* __xpv */
861
/*
 * Read the current microcode revision from the AMD patch-level MSR into
 * the per-CPU ucode info.
 */
static void
ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
{
	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
}
867
/*
 * Read the current microcode revision on an Intel CPU into the per-CPU
 * ucode info.
 */
static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
	 * execute cpuid to guarantee the correct reading of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}
882
/*
 * Scan an AMD microcode container image for the first patch that matches
 * the CPU described in uusp, recording the patch pointer, size, and
 * expected revision in uusp.  Under xVM the whole container is passed
 * through unmodified (except for disabling chipset-specific patches).
 *
 * Returns EM_OK on success, EM_HIGHERREV or EM_NOMATCH otherwise.
 */
static ucode_errno_t
ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
#ifndef __xpv
	uint32_t *ptr = (uint32_t *)ucodep;
	ucode_eqtbl_amd_t *eqtbl;
	ucode_file_amd_t *ufp;
	int count;
	int higher = 0;
	ucode_errno_t rc = EM_NOMATCH;
	uint16_t eq_sig;

	/* skip over magic number & equivalence table header */
	ptr += 2; size -= 8;

	/* equivalence table length, then the zero-terminated table itself */
	count = *ptr++; size -= 4;
	for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
	    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
	    eqtbl++)
		;

	eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (eq_sig == 0)
		return (EM_HIGHERREV);

	/* Use the first microcode patch that matches. */
	do {
		/* advance past the previous section */
		ptr += count >> 2; size -= count;

		if (!size)
			return (higher ? EM_HIGHERREV : EM_NOMATCH);

		/* skip one word, then read this patch's byte length */
		ptr++; size -= 4;
		count = *ptr++; size -= 4;
		ufp = (ucode_file_amd_t *)ptr;

		rc = ucode_match_amd(eq_sig, &uusp->info, ufp, count);
		if (rc == EM_HIGHERREV)
			higher = 1;
	} while (rc != EM_OK);

	uusp->ucodep = (uint8_t *)ufp;
	uusp->usize = count;
	uusp->expected_rev = ufp->uf_header.uh_patch_id;
#else
	/*
	 * The hypervisor will choose the patch to load, so there is no way to
	 * know the "expected revision" in advance. This is especially true on
	 * mixed-revision systems where more than one patch will be loaded.
	 */
	uusp->expected_rev = 0;
	uusp->ucodep = ucodep;
	uusp->usize = size;

	ucode_chipset_amd(ucodep, size);
#endif

	return (EM_OK);
}
944
/*
 * Scan an Intel microcode image (possibly containing multiple updates)
 * for the highest-revision update matching the CPU described in uusp,
 * recording its pointer, size, and expected revision in uusp.
 *
 * Returns EM_OK if a match was found, otherwise the best search result
 * (EM_HIGHERREV or EM_NOMATCH).
 */
static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t header_size = UCODE_HEADER_SIZE_INTEL;
	int remaining;
	int found = 0;
	ucode_errno_t search_rc = EM_NOMATCH; /* search result */

	/*
	 * Go through the whole buffer in case there are
	 * multiple versions of matching microcode for this
	 * processor.
	 */
	for (remaining = size; remaining > 0; ) {
		int total_size, body_size, ext_size;
		uint8_t *curbuf = &ucodep[size - remaining];
		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
		ucode_ext_table_intel_t *uetp = NULL;
		ucode_errno_t tmprc;

		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
		ext_size = total_size - (header_size + body_size);

		if (ext_size > 0)
			uetp = (ucode_ext_table_intel_t *)
			    &curbuf[header_size + body_size];

		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);

		/*
		 * Since we are searching through a big file
		 * containing microcode for pretty much all the
		 * processors, we are bound to get EM_NOMATCH
		 * at one point. However, if we return
		 * EM_NOMATCH to users, it will really confuse
		 * them. Therefore, if we ever find a match of
		 * a lower rev, we will set return code to
		 * EM_HIGHERREV.
		 */
		if (tmprc == EM_HIGHERREV)
			search_rc = EM_HIGHERREV;

		if (tmprc == EM_OK &&
		    uusp->expected_rev < uhp->uh_rev) {
#ifndef __xpv
			/* bare metal wants just the body... */
			uusp->ucodep = (uint8_t *)&curbuf[header_size];
#else
			/* ...but the hypervisor wants the whole update */
			uusp->ucodep = (uint8_t *)curbuf;
#endif
			uusp->usize =
			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			uusp->expected_rev = uhp->uh_rev;
			found = 1;
		}

		remaining -= total_size;
	}

	if (!found)
		return (search_rc);

	return (EM_OK);
}
/*
 * Entry point to microcode update from the ucode_drv driver.
 *
 * Extracts the applicable update from the caller-supplied image and
 * applies it to every xcall-ready CPU via ucode_write().
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int found = 0;
	processorid_t id;
	ucode_update_t cached = { 0 };
	ucode_update_t *cachedp = NULL;
	ucode_errno_t rc = EM_OK;
	ucode_errno_t search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucode);
	ASSERT(ucodep);
	CPUSET_ZERO(cpuset);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is. But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

#ifdef __xpv
		/*
		 * for i86xpv, the hypervisor will update all the CPUs.
		 * the hypervisor wants the header, data, and extended
		 * signature tables. ucode_write will just read in the
		 * updated version on all the CPUs after the update has
		 * completed.
		 */
		if (id == 0) {
			ucode_load_xpv(uusp);
		}
#endif

		/* run ucode_write() on the target CPU, one at a time */
		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);

		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev) {
			/* revision unchanged: CPU was already current */
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	if (!found)
		rc = search_rc;

	return (rc);
}
1116
/*
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * This is the entry point from boot path where pointer to CPU structure
 * is available.
 *
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc = EM_OK;
	uint32_t new_rev = 0;

	ASSERT(cp);
	/*
	 * Space statically allocated for BSP, ensure pointer is set
	 */
	if (cp->cpu_id == 0 && cp->cpu_m.mcpu_ucode_info == NULL)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	/* set up function pointers if not already done */
	if (!ucode)
		switch (cpuid_getvendor(cp)) {
		case X86_VENDOR_AMD:
			ucode = &ucode_amd;
			break;
		case X86_VENDOR_Intel:
			ucode = &ucode_intel;
			break;
		default:
			/* unsupported vendor: no microcode operations */
			ucode = NULL;
			return;
		}

	if (!ucode->capable(cp))
		return;

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
	 */
	if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
	    ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode->read_rev(uinfop);

#ifdef __xpv
	/*
	 * for i86xpv, the hypervisor will update all the CPUs. We only need
	 * do do this on one of the CPUs (and there always is a CPU 0).
	 */
	if (cp->cpu_id != 0) {
		return;
	}
#endif

	/*
	 * Check to see if we need ucode update
	 */
	if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
		new_rev = ucode->load(&ucodefile, uinfop, cp);

		/* load() should leave the CPU at the file's revision */
		if (uinfop->cui_rev != new_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, new_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory. We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode->file_reset(&ucodefile, cp->cpu_id);
}
1204
1205 /*
1206 * Returns microcode revision from the machcpu structure.
1207 */
1208 ucode_errno_t
ucode_get_rev(uint32_t * revp)1209 ucode_get_rev(uint32_t *revp)
1210 {
1211 int i;
1212
1213 ASSERT(ucode);
1214 ASSERT(revp);
1215
1216 if (!ucode->capable(CPU))
1217 return (EM_NOTSUP);
1218
1219 mutex_enter(&cpu_lock);
1220 for (i = 0; i < max_ncpus; i++) {
1221 cpu_t *cpu;
1222
1223 if ((cpu = cpu_get(i)) == NULL)
1224 continue;
1225
1226 revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
1227 }
1228 mutex_exit(&cpu_lock);
1229
1230 return (EM_OK);
1231 }
1232