1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * Fault Management Architecture (FMA) Resource and Protocol Support
27 *
28 * The routines contained herein provide services to support kernel subsystems
29 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
30 *
31 * Name-Value Pair Lists
32 *
33 * The embodiment of an FMA protocol element (event, fmri or authority) is a
34 * name-value pair list (nvlist_t). FMA-specific nvlist constructor and
35 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
36 * to create an nvpair list using custom allocators. Callers may choose to
37 * allocate either from the kernel memory allocator, or from a preallocated
38 * buffer, useful in constrained contexts like high-level interrupt routines.
39 *
40 * Protocol Event and FMRI Construction
41 *
42 * Convenience routines are provided to construct nvlist events according to
43 * the FMA Event Protocol and Naming Schema specification for ereports and
44 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
45 *
46 * ENA Manipulation
47 *
48 * Routines to generate ENA formats 0, 1 and 2 are available as well as
49 * routines to increment formats 1 and 2. Individual fields within the
50 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
51 * fm_ena_format_get() and fm_ena_gen_get().
52 */
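/*
 * The fragment below is a purely illustrative sketch (not part of this
 * file's interfaces) of how a kernel subsystem might use these services
 * end to end: allocate the protocol nvlists, build a detector FMRI and an
 * ereport, post the ereport, and release the lists.  The device path,
 * class string and payload member name are hypothetical placeholders.
 *
 *	nvlist_t *detector, *ereport;
 *	uint64_t ena;
 *
 *	detector = fm_nvlist_create(NULL);
 *	ereport = fm_nvlist_create(NULL);
 *	if (detector != NULL && ereport != NULL) {
 *		fm_fmri_dev_set(detector, DEV_SCHEME_VERSION0, NULL,
 *		    "/pci@0,0/pci1022,7450@2", NULL, NULL);
 *		ena = fm_ena_generate(0, FM_ENA_FMT1);
 *		fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example.fault",
 *		    ena, detector, "error-count", DATA_TYPE_UINT32, 1, NULL);
 *		fm_ereport_post(ereport, EVCH_TRYHARD);
 *	}
 *	if (ereport != NULL)
 *		fm_nvlist_destroy(ereport, FM_NVA_FREE);
 *	if (detector != NULL)
 *		fm_nvlist_destroy(detector, FM_NVA_FREE);
 */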
53
54 #include <sys/types.h>
55 #include <sys/time.h>
56 #include <sys/sysevent.h>
57 #include <sys/nvpair.h>
58 #include <sys/cmn_err.h>
59 #include <sys/cpuvar.h>
60 #include <sys/sysmacros.h>
61 #include <sys/systm.h>
62 #include <sys/compress.h>
63 #include <sys/cpuvar.h>
64 #include <sys/kobj.h>
65 #include <sys/kstat.h>
66 #include <sys/processor.h>
67 #ifdef __NetBSD__
68 #include <sys/atomic.h>
69 #include <sys/cpu.h>
70 #else
71 #include <sys/pcpu.h>
72 #endif
73 #include <sys/sunddi.h>
74 #include <sys/systeminfo.h>
75 #include <sys/sysevent/eventdefs.h>
76 #include <sys/fm/util.h>
77 #include <sys/fm/protocol.h>
78
79 /*
80 * URL and SUNW-MSG-ID value to display for fm_panic(), defined below. These
81 * values must be kept in sync with the FMA source code in usr/src/cmd/fm.
82 */
83 static const char *fm_url = "http://www.sun.com/msg";
84 static const char *fm_msgid = "SUNOS-8000-0G";
85 static char *volatile fm_panicstr = NULL;
86
87 #ifdef illumos
88 errorq_t *ereport_errorq;
89 #endif
90 void *ereport_dumpbuf;
91 size_t ereport_dumplen;
92
93 static uint_t ereport_chanlen = ERPT_EVCH_MAX;
94 static evchan_t *ereport_chan = NULL;
95 static ulong_t ereport_qlen = 0;
96 static size_t ereport_size = 0;
97 static int ereport_cols = 80;
98
99 extern void fastreboot_disable_highpil(void);
100
101 /*
102 * Common fault management kstats to record ereport generation
103 * failures
104 */
105
106 struct erpt_kstat {
107 kstat_named_t erpt_dropped; /* num erpts dropped on post */
108 kstat_named_t erpt_set_failed; /* num erpt set failures */
109 kstat_named_t fmri_set_failed; /* num fmri set failures */
110 kstat_named_t payload_set_failed; /* num payload set failures */
111 };
112
113 static struct erpt_kstat erpt_kstat_data = {
114 { "erpt-dropped", KSTAT_DATA_UINT64 },
115 { "erpt-set-failed", KSTAT_DATA_UINT64 },
116 { "fmri-set-failed", KSTAT_DATA_UINT64 },
117 { "payload-set-failed", KSTAT_DATA_UINT64 }
118 };
119
120 #ifdef illumos
121 /*ARGSUSED*/
122 static void
123 fm_drain(void *private, void *data, errorq_elem_t *eep)
124 {
125 nvlist_t *nvl = errorq_elem_nvl(ereport_errorq, eep);
126
127 if (!panicstr)
128 (void) fm_ereport_post(nvl, EVCH_TRYHARD);
129 else
130 fm_nvprint(nvl);
131 }
132 #endif
133
134 void
135 fm_init(void)
136 {
137 kstat_t *ksp;
138
139 #ifdef illumos
140 (void) sysevent_evc_bind(FM_ERROR_CHAN,
141 &ereport_chan, EVCH_CREAT | EVCH_HOLD_PEND);
142
143 (void) sysevent_evc_control(ereport_chan,
144 EVCH_SET_CHAN_LEN, &ereport_chanlen);
145 #endif
146
147 if (ereport_qlen == 0)
148 ereport_qlen = ERPT_MAX_ERRS * MAX(max_ncpus, 4);
149
150 if (ereport_size == 0)
151 ereport_size = ERPT_DATA_SZ;
152
153 #ifdef illumos
154 ereport_errorq = errorq_nvcreate("fm_ereport_queue",
155 (errorq_func_t)fm_drain, NULL, ereport_qlen, ereport_size,
156 FM_ERR_PIL, ERRORQ_VITAL);
157 if (ereport_errorq == NULL)
158 panic("failed to create required ereport error queue");
159 #endif
160
161 ereport_dumpbuf = kmem_alloc(ereport_size, KM_SLEEP);
162 ereport_dumplen = ereport_size;
163
164 /* Initialize ereport allocation and generation kstats */
165 ksp = kstat_create("unix", 0, "fm", "misc", KSTAT_TYPE_NAMED,
166 sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
167 KSTAT_FLAG_VIRTUAL);
168
169 if (ksp != NULL) {
170 ksp->ks_data = &erpt_kstat_data;
171 kstat_install(ksp);
172 } else {
173 cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
174
175 }
176 }
177
178 #ifdef illumos
179 /*
180 * Formatting utility function for fm_nvprintr. We attempt to wrap chunks of
181 * output so they aren't split across console lines, and return the end column.
182 */
183 /*PRINTFLIKE4*/
184 static int
185 fm_printf(int depth, int c, int cols, const char *format, ...)
186 {
187 va_list ap;
188 int width;
189 char c1;
190
191 va_start(ap, format);
192 width = vsnprintf(&c1, sizeof (c1), format, ap);
193 va_end(ap);
194
195 if (c + width >= cols) {
196 console_printf("\n\r");
197 c = 0;
198 if (format[0] != ' ' && depth > 0) {
199 console_printf(" ");
200 c++;
201 }
202 }
203
204 va_start(ap, format);
205 console_vprintf(format, ap);
206 va_end(ap);
207
208 return ((c + width) % cols);
209 }
210
211 /*
212 * Recursively print a nvlist in the specified column width and return the
213 * column we end up in. This function is called by fm_nvprint(), below, and
214 * calls itself recursively. We format the entire nvpair using hexadecimal
215 * integers and strings, and elide any integer arrays. Arrays are basically
216 * used for cache dumps right now, so we suppress them so as not to overwhelm
217 * the amount of console output we produce at panic time. This can be further
218 * enhanced as FMA technology grows based upon the needs of consumers. All
219 * FMA telemetry is logged using the dump device transport, so the console
220 * output serves only as a fallback in case this procedure is unsuccessful.
221 */
222 static int
223 fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
224 {
225 nvpair_t *nvp;
226
227 for (nvp = nvlist_next_nvpair(nvl, NULL);
228 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
229
230 data_type_t type = nvpair_type(nvp);
231 const char *name = nvpair_name(nvp);
232
233 boolean_t b;
234 uint8_t i8;
235 uint16_t i16;
236 uint32_t i32;
237 uint64_t i64;
238 char *str;
239 nvlist_t *cnv;
240
241 if (strcmp(name, FM_CLASS) == 0)
242 continue; /* already printed by caller */
243
244 c = fm_printf(d, c, cols, " %s=", name);
245
246 switch (type) {
247 case DATA_TYPE_BOOLEAN:
248 c = fm_printf(d + 1, c, cols, " 1");
249 break;
250
251 case DATA_TYPE_BOOLEAN_VALUE:
252 (void) nvpair_value_boolean_value(nvp, &b);
253 c = fm_printf(d + 1, c, cols, b ? "1" : "0");
254 break;
255
256 case DATA_TYPE_BYTE:
257 (void) nvpair_value_byte(nvp, &i8);
258 c = fm_printf(d + 1, c, cols, "%x", i8);
259 break;
260
261 case DATA_TYPE_INT8:
262 (void) nvpair_value_int8(nvp, (void *)&i8);
263 c = fm_printf(d + 1, c, cols, "%x", i8);
264 break;
265
266 case DATA_TYPE_UINT8:
267 (void) nvpair_value_uint8(nvp, &i8);
268 c = fm_printf(d + 1, c, cols, "%x", i8);
269 break;
270
271 case DATA_TYPE_INT16:
272 (void) nvpair_value_int16(nvp, (void *)&i16);
273 c = fm_printf(d + 1, c, cols, "%x", i16);
274 break;
275
276 case DATA_TYPE_UINT16:
277 (void) nvpair_value_uint16(nvp, &i16);
278 c = fm_printf(d + 1, c, cols, "%x", i16);
279 break;
280
281 case DATA_TYPE_INT32:
282 (void) nvpair_value_int32(nvp, (void *)&i32);
283 c = fm_printf(d + 1, c, cols, "%x", i32);
284 break;
285
286 case DATA_TYPE_UINT32:
287 (void) nvpair_value_uint32(nvp, &i32);
288 c = fm_printf(d + 1, c, cols, "%x", i32);
289 break;
290
291 case DATA_TYPE_INT64:
292 (void) nvpair_value_int64(nvp, (void *)&i64);
293 c = fm_printf(d + 1, c, cols, "%llx",
294 (u_longlong_t)i64);
295 break;
296
297 case DATA_TYPE_UINT64:
298 (void) nvpair_value_uint64(nvp, &i64);
299 c = fm_printf(d + 1, c, cols, "%llx",
300 (u_longlong_t)i64);
301 break;
302
303 case DATA_TYPE_HRTIME:
304 (void) nvpair_value_hrtime(nvp, (void *)&i64);
305 c = fm_printf(d + 1, c, cols, "%llx",
306 (u_longlong_t)i64);
307 break;
308
309 case DATA_TYPE_STRING:
310 (void) nvpair_value_string(nvp, &str);
311 c = fm_printf(d + 1, c, cols, "\"%s\"",
312 str ? str : "<NULL>");
313 break;
314
315 case DATA_TYPE_NVLIST:
316 c = fm_printf(d + 1, c, cols, "[");
317 (void) nvpair_value_nvlist(nvp, &cnv);
318 c = fm_nvprintr(cnv, d + 1, c, cols);
319 c = fm_printf(d + 1, c, cols, " ]");
320 break;
321
322 case DATA_TYPE_NVLIST_ARRAY: {
323 nvlist_t **val;
324 uint_t i, nelem;
325
326 c = fm_printf(d + 1, c, cols, "[");
327 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
328 for (i = 0; i < nelem; i++) {
329 c = fm_nvprintr(val[i], d + 1, c, cols);
330 }
331 c = fm_printf(d + 1, c, cols, " ]");
332 }
333 break;
334
335 case DATA_TYPE_BOOLEAN_ARRAY:
336 case DATA_TYPE_BYTE_ARRAY:
337 case DATA_TYPE_INT8_ARRAY:
338 case DATA_TYPE_UINT8_ARRAY:
339 case DATA_TYPE_INT16_ARRAY:
340 case DATA_TYPE_UINT16_ARRAY:
341 case DATA_TYPE_INT32_ARRAY:
342 case DATA_TYPE_UINT32_ARRAY:
343 case DATA_TYPE_INT64_ARRAY:
344 case DATA_TYPE_UINT64_ARRAY:
345 case DATA_TYPE_STRING_ARRAY:
346 c = fm_printf(d + 1, c, cols, "[...]");
347 break;
348 case DATA_TYPE_UNKNOWN:
349 c = fm_printf(d + 1, c, cols, "<unknown>");
350 break;
351 }
352 }
353
354 return (c);
355 }
356
357 void
358 fm_nvprint(nvlist_t *nvl)
359 {
360 char *class;
361 int c = 0;
362
363 console_printf("\r");
364
365 if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
366 c = fm_printf(0, c, ereport_cols, "%s", class);
367
368 if (fm_nvprintr(nvl, 0, c, ereport_cols) != 0)
369 console_printf("\n");
370
371 console_printf("\n");
372 }
373
374 /*
375 * Wrapper for panic() that first produces an FMA-style message for admins.
376 * Normally such messages are generated by fmd(1M)'s syslog-msgs agent: this
377 * is the one exception to that rule and the only error that gets messaged.
378 * This function is intended for use by subsystems that have detected a fatal
379 * error and enqueued appropriate ereports and wish to then force a panic.
380 */
381 /*PRINTFLIKE1*/
382 void
383 fm_panic(const char *format, ...)
384 {
385 va_list ap;
386
387 (void) atomic_cas_ptr((void *)&fm_panicstr, NULL, (void *)format);
388 #if defined(__i386) || defined(__amd64)
389 fastreboot_disable_highpil();
390 #endif /* __i386 || __amd64 */
391 va_start(ap, format);
392 vpanic(format, ap);
393 va_end(ap);
394 }
395
396 /*
397 * Simply tell the caller if fm_panicstr is set, i.e. an FMA event has
398 * caused the panic. If so, something other than the default panic
399 * diagnosis method will diagnose the cause of the panic.
400 */
401 int
402 is_fm_panic()
403 {
404 if (fm_panicstr)
405 return (1);
406 else
407 return (0);
408 }
409
410 /*
411 * Print any appropriate FMA banner message before the panic message. This
412 * function is called by panicsys() and prints the message for fm_panic().
413 * We print the message here so that it comes after the system is quiesced.
414 * A one-line summary is recorded in the log only (cmn_err(9F) with "!" prefix).
415 * The rest of the message is for the console only and not needed in the log,
416 * so it is printed using console_printf(). We break it up into multiple
417 * chunks so as to avoid overflowing any small legacy prom_printf() buffers.
418 */
419 void
420 fm_banner(void)
421 {
422 timespec_t tod;
423 hrtime_t now;
424
425 if (!fm_panicstr)
426 return; /* panic was not initiated by fm_panic(); do nothing */
427
428 if (panicstr) {
429 tod = panic_hrestime;
430 now = panic_hrtime;
431 } else {
432 gethrestime(&tod);
433 now = gethrtime_waitfree();
434 }
435
436 cmn_err(CE_NOTE, "!SUNW-MSG-ID: %s, "
437 "TYPE: Error, VER: 1, SEVERITY: Major\n", fm_msgid);
438
439 console_printf(
440 "\n\rSUNW-MSG-ID: %s, TYPE: Error, VER: 1, SEVERITY: Major\n"
441 "EVENT-TIME: 0x%lx.0x%lx (0x%llx)\n",
442 fm_msgid, tod.tv_sec, tod.tv_nsec, (u_longlong_t)now);
443
444 console_printf(
445 "PLATFORM: %s, CSN: -, HOSTNAME: %s\n"
446 "SOURCE: %s, REV: %s %s\n",
447 platform, utsname.nodename, utsname.sysname,
448 utsname.release, utsname.version);
449
450 console_printf(
451 "DESC: Errors have been detected that require a reboot to ensure system\n"
452 "integrity. See %s/%s for more information.\n",
453 fm_url, fm_msgid);
454
455 console_printf(
456 "AUTO-RESPONSE: Solaris will attempt to save and diagnose the error telemetry\n"
457 "IMPACT: The system will sync files, save a crash dump if needed, and reboot\n"
458 "REC-ACTION: Save the error summary below in case telemetry cannot be saved\n");
459
460 console_printf("\n");
461 }
462
463 /*
464 * Utility function to write all of the pending ereports to the dump device.
465 * This function is called at either normal reboot or panic time, and simply
466 * iterates over the in-transit messages in the ereport sysevent channel.
467 */
468 void
469 fm_ereport_dump(void)
470 {
471 evchanq_t *chq;
472 sysevent_t *sep;
473 erpt_dump_t ed;
474
475 timespec_t tod;
476 hrtime_t now;
477 char *buf;
478 size_t len;
479
480 if (panicstr) {
481 tod = panic_hrestime;
482 now = panic_hrtime;
483 } else {
484 if (ereport_errorq != NULL)
485 errorq_drain(ereport_errorq);
486 gethrestime(&tod);
487 now = gethrtime_waitfree();
488 }
489
490 /*
491 * In the panic case, sysevent_evc_walk_init() will return NULL.
492 */
493 if ((chq = sysevent_evc_walk_init(ereport_chan, NULL)) == NULL &&
494 !panicstr)
495 return; /* event channel isn't initialized yet */
496
497 while ((sep = sysevent_evc_walk_step(chq)) != NULL) {
498 if ((buf = sysevent_evc_event_attr(sep, &len)) == NULL)
499 break;
500
501 ed.ed_magic = ERPT_MAGIC;
502 ed.ed_chksum = checksum32(buf, len);
503 ed.ed_size = (uint32_t)len;
504 ed.ed_pad = 0;
505 ed.ed_hrt_nsec = SE_TIME(sep);
506 ed.ed_hrt_base = now;
507 ed.ed_tod_base.sec = tod.tv_sec;
508 ed.ed_tod_base.nsec = tod.tv_nsec;
509
510 dumpvp_write(&ed, sizeof (ed));
511 dumpvp_write(buf, len);
512 }
513
514 sysevent_evc_walk_fini(chq);
515 }
516 #endif
517
518 /*
519 * Post an error report (ereport) to the sysevent error channel. The error
520 * channel must be established with a prior call to sysevent_evc_create()
521 * before publication may occur.
522 */
523 void
524 fm_ereport_post(nvlist_t *ereport, int evc_flag)
525 {
526 size_t nvl_size = 0;
527 evchan_t *error_chan;
528 sysevent_id_t eid;
529
530 (void) nvlist_size(ereport, &nvl_size, NV_ENCODE_NATIVE);
531 if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
532 atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
533 return;
534 }
535
536 #ifdef illumos
537 if (sysevent_evc_bind(FM_ERROR_CHAN, &error_chan,
538 EVCH_CREAT|EVCH_HOLD_PEND) != 0) {
539 atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
540 return;
541 }
542
543 if (sysevent_evc_publish(error_chan, EC_FM, ESC_FM_ERROR,
544 SUNW_VENDOR, FM_PUB, ereport, evc_flag) != 0) {
545 atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
546 (void) sysevent_evc_unbind(error_chan);
547 return;
548 }
549 (void) sysevent_evc_unbind(error_chan);
550 #else
551 (void) ddi_log_sysevent(NULL, SUNW_VENDOR, EC_DEV_STATUS,
552 ESC_DEV_DLE, ereport, &eid, DDI_SLEEP);
553 #endif
554 }
555
556 /*
557 * Wrappers for FM nvlist allocators
558 */
559 /* ARGSUSED */
560 static void *
561 i_fm_alloc(nv_alloc_t *nva, size_t size)
562 {
563 return (kmem_zalloc(size, KM_SLEEP));
564 }
565
566 /* ARGSUSED */
567 static void
568 i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
569 {
570 kmem_free(buf, size);
571 }
572
573 const nv_alloc_ops_t fm_mem_alloc_ops = {
574 NULL,
575 NULL,
576 i_fm_alloc,
577 i_fm_free,
578 NULL
579 };
580
581 /*
582 * Create and initialize a new nv_alloc_t for a fixed buffer, buf. A pointer
583 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
584 * is returned to indicate that the nv_alloc structure could not be created.
585 */
586 nv_alloc_t *
587 fm_nva_xcreate(char *buf, size_t bufsz)
588 {
589 nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
590
591 if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
592 kmem_free(nvhdl, sizeof (nv_alloc_t));
593 return (NULL);
594 }
595
596 return (nvhdl);
597 }
598
599 /*
600 * Destroy a previously allocated nv_alloc structure. The fixed buffer
601 * associated with nva must be freed by the caller.
602 */
603 void
604 fm_nva_xdestroy(nv_alloc_t *nva)
605 {
606 nv_alloc_fini(nva);
607 kmem_free(nva, sizeof (nv_alloc_t));
608 }
609
610 /*
611 * Create a new nv list. A pointer to a new nv list structure is returned
612 * upon success or NULL is returned to indicate that the structure could
613 * not be created. The newly created nv list is created and managed by the
614 * operations installed in nva. If nva is NULL, the default FMA nva
615 * operations are installed and used.
616 *
617 * When called from the kernel and nva == NULL, this function must be called
618 * from passive kernel context with no locks held that can prevent a
619 * sleeping memory allocation from occurring. Otherwise, this function may
620 * be called from other kernel contexts as long as a valid nva created via
621 * fm_nva_xcreate() is supplied.
622 */
623 nvlist_t *
624 fm_nvlist_create(nv_alloc_t *nva)
625 {
626 int hdl_alloced = 0;
627 nvlist_t *nvl;
628 nv_alloc_t *nvhdl;
629
630 if (nva == NULL) {
631 nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
632
633 if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
634 kmem_free(nvhdl, sizeof (nv_alloc_t));
635 return (NULL);
636 }
637 hdl_alloced = 1;
638 } else {
639 nvhdl = nva;
640 }
641
642 if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
643 if (hdl_alloced) {
644 nv_alloc_fini(nvhdl);
645 kmem_free(nvhdl, sizeof (nv_alloc_t));
646 }
647 return (NULL);
648 }
649
650 return (nvl);
651 }
652
653 /*
654 * Destroy a previously allocated nvlist structure. flag indicates whether
655 * or not the associated nva structure should be freed (FM_NVA_FREE) or
656 * retained (FM_NVA_RETAIN). Retaining the nv alloc structure allows
657 * it to be re-used for future nvlist creation operations.
658 */
659 void
660 fm_nvlist_destroy(nvlist_t *nvl, int flag)
661 {
662 nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);
663
664 nvlist_free(nvl);
665
666 if (nva != NULL) {
667 if (flag == FM_NVA_FREE)
668 fm_nva_xdestroy(nva);
669 }
670 }
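/*
 * Hypothetical sketch of the preallocated-buffer path described above, for
 * callers that cannot perform a sleeping kmem allocation.  The static
 * buffer and its size are arbitrary example choices.
 *
 *	static char fm_buf[ERPT_DATA_SZ];
 *	nv_alloc_t *nva;
 *	nvlist_t *nvl;
 *
 *	if ((nva = fm_nva_xcreate(fm_buf, sizeof (fm_buf))) == NULL)
 *		return;
 *	if ((nvl = fm_nvlist_create(nva)) != NULL) {
 *		(... populate and consume nvl ...)
 *		fm_nvlist_destroy(nvl, FM_NVA_RETAIN);	(retain nva for reuse)
 *	}
 *	fm_nva_xdestroy(nva);	(the fixed buffer itself stays caller-owned)
 */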
671
672 int
673 i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
674 {
675 int nelem, ret = 0;
676 data_type_t type;
677
678 while (ret == 0 && name != NULL) {
679 type = va_arg(ap, data_type_t);
680 switch (type) {
681 case DATA_TYPE_BYTE:
682 ret = nvlist_add_byte(payload, name,
683 va_arg(ap, uint_t));
684 break;
685 case DATA_TYPE_BYTE_ARRAY:
686 nelem = va_arg(ap, int);
687 ret = nvlist_add_byte_array(payload, name,
688 va_arg(ap, uchar_t *), nelem);
689 break;
690 case DATA_TYPE_BOOLEAN_VALUE:
691 ret = nvlist_add_boolean_value(payload, name,
692 va_arg(ap, boolean_t));
693 break;
694 case DATA_TYPE_BOOLEAN_ARRAY:
695 nelem = va_arg(ap, int);
696 ret = nvlist_add_boolean_array(payload, name,
697 va_arg(ap, boolean_t *), nelem);
698 break;
699 case DATA_TYPE_INT8:
700 ret = nvlist_add_int8(payload, name,
701 va_arg(ap, int));
702 break;
703 case DATA_TYPE_INT8_ARRAY:
704 nelem = va_arg(ap, int);
705 ret = nvlist_add_int8_array(payload, name,
706 va_arg(ap, int8_t *), nelem);
707 break;
708 case DATA_TYPE_UINT8:
709 ret = nvlist_add_uint8(payload, name,
710 va_arg(ap, uint_t));
711 break;
712 case DATA_TYPE_UINT8_ARRAY:
713 nelem = va_arg(ap, int);
714 ret = nvlist_add_uint8_array(payload, name,
715 va_arg(ap, uint8_t *), nelem);
716 break;
717 case DATA_TYPE_INT16:
718 ret = nvlist_add_int16(payload, name,
719 va_arg(ap, int));
720 break;
721 case DATA_TYPE_INT16_ARRAY:
722 nelem = va_arg(ap, int);
723 ret = nvlist_add_int16_array(payload, name,
724 va_arg(ap, int16_t *), nelem);
725 break;
726 case DATA_TYPE_UINT16:
727 ret = nvlist_add_uint16(payload, name,
728 va_arg(ap, uint_t));
729 break;
730 case DATA_TYPE_UINT16_ARRAY:
731 nelem = va_arg(ap, int);
732 ret = nvlist_add_uint16_array(payload, name,
733 va_arg(ap, uint16_t *), nelem);
734 break;
735 case DATA_TYPE_INT32:
736 ret = nvlist_add_int32(payload, name,
737 va_arg(ap, int32_t));
738 break;
739 case DATA_TYPE_INT32_ARRAY:
740 nelem = va_arg(ap, int);
741 ret = nvlist_add_int32_array(payload, name,
742 va_arg(ap, int32_t *), nelem);
743 break;
744 case DATA_TYPE_UINT32:
745 ret = nvlist_add_uint32(payload, name,
746 va_arg(ap, uint32_t));
747 break;
748 case DATA_TYPE_UINT32_ARRAY:
749 nelem = va_arg(ap, int);
750 ret = nvlist_add_uint32_array(payload, name,
751 va_arg(ap, uint32_t *), nelem);
752 break;
753 case DATA_TYPE_INT64:
754 ret = nvlist_add_int64(payload, name,
755 va_arg(ap, int64_t));
756 break;
757 case DATA_TYPE_INT64_ARRAY:
758 nelem = va_arg(ap, int);
759 ret = nvlist_add_int64_array(payload, name,
760 va_arg(ap, int64_t *), nelem);
761 break;
762 case DATA_TYPE_UINT64:
763 ret = nvlist_add_uint64(payload, name,
764 va_arg(ap, uint64_t));
765 break;
766 case DATA_TYPE_UINT64_ARRAY:
767 nelem = va_arg(ap, int);
768 ret = nvlist_add_uint64_array(payload, name,
769 va_arg(ap, uint64_t *), nelem);
770 break;
771 case DATA_TYPE_STRING:
772 ret = nvlist_add_string(payload, name,
773 va_arg(ap, char *));
774 break;
775 case DATA_TYPE_STRING_ARRAY:
776 nelem = va_arg(ap, int);
777 ret = nvlist_add_string_array(payload, name,
778 va_arg(ap, char **), nelem);
779 break;
780 case DATA_TYPE_NVLIST:
781 ret = nvlist_add_nvlist(payload, name,
782 va_arg(ap, nvlist_t *));
783 break;
784 case DATA_TYPE_NVLIST_ARRAY:
785 nelem = va_arg(ap, int);
786 ret = nvlist_add_nvlist_array(payload, name,
787 va_arg(ap, nvlist_t **), nelem);
788 break;
789 default:
790 ret = EINVAL;
791 }
792
793 name = va_arg(ap, char *);
794 }
795 return (ret);
796 }
797
798 void
799 fm_payload_set(nvlist_t *payload, ...)
800 {
801 int ret;
802 const char *name;
803 va_list ap;
804
805 va_start(ap, payload);
806 name = va_arg(ap, char *);
807 ret = i_fm_payload_set(payload, name, ap);
808 va_end(ap);
809
810 if (ret)
811 atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
812 }
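/*
 * The variable argument list consumed above is a sequence of
 * (name, DATA_TYPE_* tag, value) tuples terminated by a NULL name; for
 * array types the element count precedes the array pointer.  A purely
 * illustrative call (the member names are hypothetical):
 *
 *	uint8_t syndrome[4];
 *
 *	fm_payload_set(ereport,
 *	    "afar", DATA_TYPE_UINT64, (uint64_t)0x1000,
 *	    "syndrome", DATA_TYPE_UINT8_ARRAY, 4, syndrome,
 *	    "resource", DATA_TYPE_STRING, "example",
 *	    NULL);
 */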
813
814 /*
815 * Set-up and validate the members of an ereport event according to:
816 *
817 * Member name Type Value
818 * ====================================================
819 * class string ereport
820 * version uint8_t 0
821 * ena uint64_t <ena>
822 * detector nvlist_t <detector>
823 * ereport-payload nvlist_t <var args>
824 *
825 * We don't actually add a 'version' member to the payload. Really,
826 * the version quoted to us by our caller is that of the category 1
827 * "ereport" event class (and we require FM_EREPORT_VERS0) but
828 * the payload version of the actual leaf class event under construction
829 * may be something else. Callers should supply a version in the varargs,
830 * or (better) we could take two version arguments - one for the
831 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
832 * for the leaf class.
833 */
834 void
835 fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
836 uint64_t ena, const nvlist_t *detector, ...)
837 {
838 char ereport_class[FM_MAX_CLASS];
839 const char *name;
840 va_list ap;
841 int ret;
842
843 if (version != FM_EREPORT_VERS0) {
844 atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
845 return;
846 }
847
848 (void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
849 FM_EREPORT_CLASS, erpt_class);
850 if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
851 atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
852 return;
853 }
854
855 if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
856 atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
857 }
858
859 if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
860 (nvlist_t *)detector) != 0) {
861 atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
862 }
863
864 va_start(ap, detector);
865 name = va_arg(ap, const char *);
866 ret = i_fm_payload_set(ereport, name, ap);
867 va_end(ap);
868
869 if (ret)
870 atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
871 }
872
873 /*
874 * Set-up and validate the members of an hc fmri according to:
875 *
876 * Member name Type Value
877 * ===================================================
878 * version uint8_t 0
879 * auth nvlist_t <auth>
880 * hc-name string <name>
881 * hc-id string <id>
882 *
883 * Note that auth and hc-id are optional members.
884 */
885
886 #define HC_MAXPAIRS 20
887 #define HC_MAXNAMELEN 50
888
889 static int
890 fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
891 {
892 if (version != FM_HC_SCHEME_VERSION) {
893 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
894 return (0);
895 }
896
897 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
898 nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
899 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
900 return (0);
901 }
902
903 if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
904 (nvlist_t *)auth) != 0) {
905 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
906 return (0);
907 }
908
909 return (1);
910 }
911
912 void
913 fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
914 nvlist_t *snvl, int npairs, ...)
915 {
916 nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
917 nvlist_t *pairs[HC_MAXPAIRS];
918 va_list ap;
919 int i;
920
921 if (!fm_fmri_hc_set_common(fmri, version, auth))
922 return;
923
924 npairs = MIN(npairs, HC_MAXPAIRS);
925
926 va_start(ap, npairs);
927 for (i = 0; i < npairs; i++) {
928 const char *name = va_arg(ap, const char *);
929 uint32_t id = va_arg(ap, uint32_t);
930 char idstr[11];
931
932 (void) snprintf(idstr, sizeof (idstr), "%u", id);
933
934 pairs[i] = fm_nvlist_create(nva);
935 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
936 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
937 atomic_inc_64(
938 &erpt_kstat_data.fmri_set_failed.value.ui64);
939 }
940 }
941 va_end(ap);
942
943 if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
944 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
945
946 for (i = 0; i < npairs; i++)
947 fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
948
949 if (snvl != NULL) {
950 if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
951 atomic_inc_64(
952 &erpt_kstat_data.fmri_set_failed.value.ui64);
953 }
954 }
955 }
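/*
 * A minimal, hypothetical hc-scheme construction: two name/instance pairs
 * naming a motherboard/chip path, no authority and no hc-specific nvlist.
 * The pair names are illustrative only.
 *
 *	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
 *	    "motherboard", 0, "chip", 1);
 */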
956
957 /*
958 * Set-up and validate the members of a dev fmri according to:
959 *
960 * Member name Type Value
961 * ====================================================
962 * version uint8_t 0
963 * auth nvlist_t <auth>
964 * devpath string <devpath>
965 * [devid] string <devid>
966 * [target-port-l0id] string <target-port-lun0-id>
967 *
968 * Note that auth and devid are optional members.
969 */
970 void
971 fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
972 const char *devpath, const char *devid, const char *tpl0)
973 {
974 int err = 0;
975
976 if (version != DEV_SCHEME_VERSION0) {
977 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
978 return;
979 }
980
981 err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
982 err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);
983
984 if (auth != NULL) {
985 err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
986 (nvlist_t *)auth);
987 }
988
989 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);
990
991 if (devid != NULL)
992 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);
993
994 if (tpl0 != NULL)
995 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);
996
997 if (err)
998 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
999
1000 }
1001
1002 /*
1003 * Set-up and validate the members of a cpu fmri according to:
1004 *
1005 * Member name Type Value
1006 * ====================================================
1007 * version uint8_t 0
1008 * auth nvlist_t <auth>
1009 * cpuid uint32_t <cpu_id>
1010 * cpumask uint8_t <cpu_mask>
1011 * serial uint64_t <serial_id>
1012 *
1013 * Note that auth, cpumask, and serial are optional members.
1014 *
1015 */
1016 void
1017 fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
1018 uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
1019 {
1020 uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;
1021
1022 if (version < CPU_SCHEME_VERSION1) {
1023 atomic_inc_64(failedp);
1024 return;
1025 }
1026
1027 if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
1028 atomic_inc_64(failedp);
1029 return;
1030 }
1031
1032 if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
1033 FM_FMRI_SCHEME_CPU) != 0) {
1034 atomic_inc_64(failedp);
1035 return;
1036 }
1037
1038 if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
1039 (nvlist_t *)auth) != 0)
1040 atomic_inc_64(failedp);
1041
1042 if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
1043 atomic_inc_64(failedp);
1044
1045 if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
1046 *cpu_maskp) != 0)
1047 atomic_inc_64(failedp);
1048
1049 if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
1050 FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
1051 atomic_inc_64(failedp);
1052 }
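/*
 * Illustrative cpu-scheme call; the cpu id, mask and serial string below
 * are arbitrary placeholders.  Note that the current implementation counts
 * a NULL serial_idp as an fmri-set failure, so a serial is supplied here.
 *
 *	uint8_t mask = 0;
 *
 *	fm_fmri_cpu_set(fmri_cpu, CPU_SCHEME_VERSION1, NULL, 3, &mask,
 *	    "1234567890");
 */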
1053
1054 /*
1055 * Set-up and validate the members of a mem fmri according to:
1056 *
1057 * Member name Type Value
1058 * ====================================================
1059 * version uint8_t 0
1060 * auth nvlist_t <auth> [optional]
1061 * unum string <unum>
1062 * serial string <serial> [optional*]
1063 * offset uint64_t <offset> [optional]
1064 *
1065 * * serial is required if offset is present
1066 */
1067 void
1068 fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
1069 const char *unum, const char *serial, uint64_t offset)
1070 {
1071 if (version != MEM_SCHEME_VERSION0) {
1072 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1073 return;
1074 }
1075
1076 if (!serial && (offset != (uint64_t)-1)) {
1077 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1078 return;
1079 }
1080
1081 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
1082 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1083 return;
1084 }
1085
1086 if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
1087 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1088 return;
1089 }
1090
1091 if (auth != NULL) {
1092 if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
1093 (nvlist_t *)auth) != 0) {
1094 atomic_inc_64(
1095 &erpt_kstat_data.fmri_set_failed.value.ui64);
1096 }
1097 }
1098
1099 if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
1100 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1101 }
1102
1103 if (serial != NULL) {
1104 if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
1105 (char **)&serial, 1) != 0) {
1106 atomic_inc_64(
1107 &erpt_kstat_data.fmri_set_failed.value.ui64);
1108 }
1109 if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
1110 FM_FMRI_MEM_OFFSET, offset) != 0) {
1111 atomic_inc_64(
1112 &erpt_kstat_data.fmri_set_failed.value.ui64);
1113 }
1114 }
1115 }
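/*
 * Illustrative mem-scheme call; the unum and serial strings are
 * placeholders.  An offset of (uint64_t)-1 means "no offset", and an
 * offset may only be supplied together with a serial.
 *
 *	fm_fmri_mem_set(fmri, MEM_SCHEME_VERSION0, NULL,
 *	    "U0500", "012345", (uint64_t)-1);
 */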
1116
1117 void
1118 fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
1119 uint64_t vdev_guid)
1120 {
1121 if (version != ZFS_SCHEME_VERSION0) {
1122 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1123 return;
1124 }
1125
1126 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
1127 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1128 return;
1129 }
1130
1131 if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
1132 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1133 return;
1134 }
1135
1136 if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
1137 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1138 }
1139
1140 if (vdev_guid != 0) {
1141 if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
1142 atomic_inc_64(
1143 &erpt_kstat_data.fmri_set_failed.value.ui64);
1144 }
1145 }
1146 }
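/*
 * Illustrative zfs-scheme call; the GUIDs are placeholder values a real
 * caller would take from the pool and vdev configuration.
 *
 *	fm_fmri_zfs_set(fmri, ZFS_SCHEME_VERSION0,
 *	    0x1234567890abcdefULL, 0xfedcba9876543210ULL);
 */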
1147
1148 uint64_t
1149 fm_ena_increment(uint64_t ena)
1150 {
1151 uint64_t new_ena;
1152
1153 switch (ENA_FORMAT(ena)) {
1154 case FM_ENA_FMT1:
1155 new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
1156 break;
1157 case FM_ENA_FMT2:
1158 new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
1159 break;
1160 default:
1161 new_ena = 0;
1162 }
1163
1164 return (new_ena);
1165 }
1166
1167 uint64_t
1168 fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
1169 {
1170 uint64_t ena = 0;
1171
1172 switch (format) {
1173 case FM_ENA_FMT1:
1174 if (timestamp) {
1175 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1176 ((cpuid << ENA_FMT1_CPUID_SHFT) &
1177 ENA_FMT1_CPUID_MASK) |
1178 ((timestamp << ENA_FMT1_TIME_SHFT) &
1179 ENA_FMT1_TIME_MASK));
1180 } else {
1181 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1182 ((cpuid << ENA_FMT1_CPUID_SHFT) &
1183 ENA_FMT1_CPUID_MASK) |
1184 ((gethrtime_waitfree() << ENA_FMT1_TIME_SHFT) &
1185 ENA_FMT1_TIME_MASK));
1186 }
1187 break;
1188 case FM_ENA_FMT2:
1189 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1190 ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
1191 break;
1192 default:
1193 break;
1194 }
1195
1196 return (ena);
1197 }
1198
1199 uint64_t
1200 fm_ena_generate(uint64_t timestamp, uchar_t format)
1201 {
1202 #ifdef __NetBSD__
1203 return (fm_ena_generate_cpu(timestamp, cpu_index(curcpu()), format));
1204 #else
1205 return (fm_ena_generate_cpu(timestamp, PCPU_GET(cpuid), format));
1206 #endif
1207 }
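/*
 * Hypothetical ENA round trip: generate a format 1 ENA for the current
 * CPU (a zero timestamp makes fm_ena_generate_cpu() use
 * gethrtime_waitfree()), bump its generation counter and read fields back.
 *
 *	uint64_t ena, t;
 *
 *	ena = fm_ena_generate(0, FM_ENA_FMT1);
 *	ena = fm_ena_increment(ena);
 *	t = fm_ena_time_get(ena);
 *	ASSERT(fm_ena_format_get(ena) == FM_ENA_FMT1);
 */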
1208
1209 uint64_t
1210 fm_ena_generation_get(uint64_t ena)
1211 {
1212 uint64_t gen;
1213
1214 switch (ENA_FORMAT(ena)) {
1215 case FM_ENA_FMT1:
1216 gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
1217 break;
1218 case FM_ENA_FMT2:
1219 gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
1220 break;
1221 default:
1222 gen = 0;
1223 break;
1224 }
1225
1226 return (gen);
1227 }
1228
1229 uchar_t
1230 fm_ena_format_get(uint64_t ena)
1231 {
1232
1233 return (ENA_FORMAT(ena));
1234 }
1235
1236 uint64_t
1237 fm_ena_id_get(uint64_t ena)
1238 {
1239 uint64_t id;
1240
1241 switch (ENA_FORMAT(ena)) {
1242 case FM_ENA_FMT1:
1243 id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
1244 break;
1245 case FM_ENA_FMT2:
1246 id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
1247 break;
1248 default:
1249 id = 0;
1250 }
1251
1252 return (id);
1253 }
1254
1255 uint64_t
1256 fm_ena_time_get(uint64_t ena)
1257 {
1258 uint64_t time;
1259
1260 switch (ENA_FORMAT(ena)) {
1261 case FM_ENA_FMT1:
1262 time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
1263 break;
1264 case FM_ENA_FMT2:
1265 time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
1266 break;
1267 default:
1268 time = 0;
1269 }
1270
1271 return (time);
1272 }
1273
1274 #ifdef illumos
1275 /*
1276 * Convert a getpcstack() trace to symbolic name+offset, and add the resulting
1277 * string array to a Fault Management ereport as FM_EREPORT_PAYLOAD_NAME_STACK.
1278 */
1279 void
1280 fm_payload_stack_add(nvlist_t *payload, const pc_t *stack, int depth)
1281 {
1282 int i;
1283 char *sym;
1284 ulong_t off;
1285 char *stkpp[FM_STK_DEPTH];
1286 char buf[FM_STK_DEPTH * FM_SYM_SZ];
1287 char *stkp = buf;
1288
1289 for (i = 0; i < depth && i != FM_STK_DEPTH; i++, stkp += FM_SYM_SZ) {
1290 if ((sym = kobj_getsymname(stack[i], &off)) != NULL)
1291 (void) snprintf(stkp, FM_SYM_SZ, "%s+%lx", sym, off);
1292 else
1293 (void) snprintf(stkp, FM_SYM_SZ, "%lx", (long)stack[i]);
1294 stkpp[i] = stkp;
1295 }
1296
1297 fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_STACK,
1298 DATA_TYPE_STRING_ARRAY, depth, stkpp, NULL);
1299 }
1300 #endif
1301
1302 #ifdef illumos
1303 void
1304 print_msg_hwerr(ctid_t ct_id, proc_t *p)
1305 {
1306 uprintf("Killed process %d (%s) in contract id %d "
1307 "due to hardware error\n", p->p_pid, p->p_user.u_comm, ct_id);
1308 }
1309 #endif
1310
1311 void
1312 fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
1313 nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
1314 {
1315 nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
1316 nvlist_t *pairs[HC_MAXPAIRS];
1317 nvlist_t **hcl;
1318 uint_t n;
1319 int i, j;
1320 va_list ap;
1321 char *hcname, *hcid;
1322
1323 if (!fm_fmri_hc_set_common(fmri, version, auth))
1324 return;
1325
1326 /*
1327 * copy the bboard nvpairs to the pairs array
1328 */
1329 if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
1330 != 0) {
1331 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1332 return;
1333 }
1334
1335 for (i = 0; i < n; i++) {
1336 if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
1337 &hcname) != 0) {
1338 atomic_inc_64(
1339 &erpt_kstat_data.fmri_set_failed.value.ui64);
1340 return;
1341 }
1342 if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
1343 atomic_inc_64(
1344 &erpt_kstat_data.fmri_set_failed.value.ui64);
1345 return;
1346 }
1347
1348 pairs[i] = fm_nvlist_create(nva);
1349 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
1350 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
1351 for (j = 0; j <= i; j++) {
1352 if (pairs[j] != NULL)
1353 fm_nvlist_destroy(pairs[j],
1354 FM_NVA_RETAIN);
1355 }
1356 atomic_inc_64(
1357 &erpt_kstat_data.fmri_set_failed.value.ui64);
1358 return;
1359 }
1360 }
1361
1362 /*
1363 * create the pairs from passed in pairs
1364 */
1365 npairs = MIN(npairs, HC_MAXPAIRS);
1366
1367 va_start(ap, npairs);
1368 for (i = n; i < npairs + n; i++) {
1369 const char *name = va_arg(ap, const char *);
1370 uint32_t id = va_arg(ap, uint32_t);
1371 char idstr[11];
1372 (void) snprintf(idstr, sizeof (idstr), "%u", id);
1373 pairs[i] = fm_nvlist_create(nva);
1374 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
1375 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
1376 for (j = 0; j <= i; j++) {
1377 if (pairs[j] != NULL)
1378 fm_nvlist_destroy(pairs[j],
1379 FM_NVA_RETAIN);
1380 }
1381 atomic_inc_64(
1382 &erpt_kstat_data.fmri_set_failed.value.ui64);
1383 return;
1384 }
1385 }
1386 va_end(ap);
1387
1388 /*
1389 * Create the fmri hc list
1390 */
1391 if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
1392 npairs + n) != 0) {
1393 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1394 return;
1395 }
1396
1397 for (i = 0; i < npairs + n; i++) {
1398 fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
1399 }
1400
1401 if (snvl != NULL) {
1402 if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
1403 atomic_inc_64(
1404 &erpt_kstat_data.fmri_set_failed.value.ui64);
1405 return;
1406 }
1407 }
1408 }
1409