1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25 /*
26 * Copyright (c) 2010, Intel Corporation.
27 * All rights reserved.
28 */
29
30 #include <sys/types.h>
31 #include <sys/cmn_err.h>
32 #include <sys/conf.h>
33 #include <sys/debug.h>
34 #include <sys/errno.h>
35 #include <sys/note.h>
36 #include <sys/dditypes.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/sunndi.h>
40 #include <sys/ddi_impldefs.h>
41 #include <sys/ndi_impldefs.h>
42 #include <sys/varargs.h>
43 #include <sys/modctl.h>
44 #include <sys/kmem.h>
45 #include <sys/cpuvar.h>
46 #include <sys/cpupart.h>
47 #include <sys/mem_config.h>
48 #include <sys/mem_cage.h>
49 #include <sys/memnode.h>
50 #include <sys/callb.h>
51 #include <sys/ontrap.h>
52 #include <sys/obpdefs.h>
53 #include <sys/promif.h>
54 #include <sys/synch.h>
55 #include <sys/systm.h>
56 #include <sys/sysmacros.h>
57 #include <sys/archsystm.h>
58 #include <sys/machsystm.h>
59 #include <sys/x_call.h>
60 #include <sys/x86_archext.h>
61 #include <sys/fastboot_impl.h>
62 #include <sys/sysevent.h>
63 #include <sys/sysevent/dr.h>
64 #include <sys/sysevent/eventdefs.h>
65 #include <sys/acpi/acpi.h>
66 #include <sys/acpica.h>
67 #include <sys/acpidev.h>
68 #include <sys/acpidev_rsc.h>
69 #include <sys/acpidev_dr.h>
70 #include <sys/dr.h>
71 #include <sys/dr_util.h>
72 #include <sys/drmach.h>
73 #include "drmach_acpi.h"
74
/* utility */
#define	MBYTE		(1048576ull)
/* Convert between page counts and 64-bit byte addresses/sizes. */
#define	_ptob64(p)	((uint64_t)(p) << PAGESHIFT)
#define	_b64top(b)	((pgcnt_t)((b) >> PAGESHIFT))

static int drmach_init(void);
static void drmach_fini(void);
static int drmach_name2type_idx(char *);
static sbd_error_t *drmach_mem_update_lgrp(drmachid_t);

#ifdef DEBUG
int drmach_debug = 1;		/* set to non-zero to enable debug messages */
#endif /* DEBUG */

drmach_domain_info_t	 drmach_domain;

/* Format string used when constructing internal-error messages. */
static char		*drmach_ie_fmt = "drmach_acpi.c %d";
/* Array of drmach_board_t pointers, indexed by board number. */
static drmach_array_t	*drmach_boards;

/* rwlock to protect drmach_boards. */
static krwlock_t	 drmach_boards_rwlock;

/* rwlock to block out CPR thread. */
static krwlock_t	 drmach_cpr_rwlock;

/* CPR callb id. */
static callb_id_t	 drmach_cpr_cid;

/*
 * Table mapping an ACPI device node name to its DR device type string
 * and the constructor used to build the corresponding drmach device.
 * Nodes whose names are absent from this table are ignored by drmach.
 */
static struct {
	const char	*name;
	const char	*type;
	sbd_error_t	*(*new)(drmach_device_t *, drmachid_t *);
} drmach_name2type[] = {
	{ ACPIDEV_NODE_NAME_CPU,	DRMACH_DEVTYPE_CPU, drmach_cpu_new },
	{ ACPIDEV_NODE_NAME_MEMORY,	DRMACH_DEVTYPE_MEM, drmach_mem_new },
	{ ACPIDEV_NODE_NAME_PCI,	DRMACH_DEVTYPE_PCI, drmach_io_new },
};
112
/*
 * drmach autoconfiguration data structures and interfaces
 */
/* This is a misc module with no dev_ops of its own. */
static struct modlmisc modlmisc = {
	&mod_miscops,
	"ACPI based DR v1.0"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};
126
/*
 * Module load entry point.  Initialize drmach state first; if the module
 * cannot subsequently be installed, tear that state back down so a failed
 * load leaves nothing behind.
 */
int
_init(void)
{
	int err;

	if ((err = drmach_init()) != 0) {
		return (err);
	}

	if ((err = mod_install(&modlinkage)) != 0) {
		drmach_fini();
	}

	return (err);
}
142
143 int
_fini(void)144 _fini(void)
145 {
146 int err;
147
148 if ((err = mod_remove(&modlinkage)) == 0) {
149 drmach_fini();
150 }
151
152 return (err);
153 }
154
/* Module information entry point. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
160
161 /*
162 * Internal support functions.
163 */
/*
 * Internal support functions.
 */
/* Return the ACPI handle cached in the node's 'here' field. */
static DRMACH_HANDLE
drmach_node_acpi_get_dnode(drmach_node_t *np)
{
	return ((DRMACH_HANDLE)(uintptr_t)np->here);
}
169
170 static dev_info_t *
drmach_node_acpi_get_dip(drmach_node_t * np)171 drmach_node_acpi_get_dip(drmach_node_t *np)
172 {
173 dev_info_t *dip = NULL;
174
175 if (ACPI_FAILURE(acpica_get_devinfo((DRMACH_HANDLE)(np->here), &dip))) {
176 return (NULL);
177 }
178
179 return (dip);
180 }
181
182 static int
drmach_node_acpi_get_prop(drmach_node_t * np,char * name,void * buf,int len)183 drmach_node_acpi_get_prop(drmach_node_t *np, char *name, void *buf, int len)
184 {
185 int rv = 0;
186 DRMACH_HANDLE hdl;
187
188 hdl = np->get_dnode(np);
189 if (hdl == NULL) {
190 DRMACH_PR("!drmach_node_acpi_get_prop: NULL handle");
191 rv = -1;
192 } else {
193 rv = acpidev_dr_device_getprop(hdl, name, buf, len);
194 if (rv >= 0) {
195 ASSERT(rv <= len);
196 rv = 0;
197 }
198 }
199
200 return (rv);
201 }
202
203 static int
drmach_node_acpi_get_proplen(drmach_node_t * np,char * name,int * len)204 drmach_node_acpi_get_proplen(drmach_node_t *np, char *name, int *len)
205 {
206 int rv = 0;
207 DRMACH_HANDLE hdl;
208
209 hdl = np->get_dnode(np);
210 if (hdl == NULL) {
211 DRMACH_PR("!drmach_node_acpi_get_proplen: NULL handle");
212 rv = -1;
213 } else {
214 rv = acpidev_dr_device_getprop(hdl, name, NULL, 0);
215 if (rv >= 0) {
216 *len = rv;
217 return (0);
218 }
219 }
220
221 return (-1);
222 }
223
/*
 * Per-object callback used by drmach_node_acpi_walk().  Invokes the
 * caller's walker (stashed in argp->func) on every powered ACPI object
 * that has a devinfo node, pruning subtrees that are powered off or
 * that sit below a PCI/PCIex host bridge.  A non-zero return from the
 * caller's walker terminates the walk and is passed back via *retval.
 */
static ACPI_STATUS
drmach_node_acpi_callback(ACPI_HANDLE hdl, uint_t lvl, void *ctx, void **retval)
{
	_NOTE(ARGUNUSED(lvl));

	int rv;
	dev_info_t *dip;
	drmach_node_walk_args_t *argp = ctx;
	int (*cb)(drmach_node_walk_args_t *args);
	acpidev_class_id_t clsid;

	ASSERT(hdl != NULL);
	ASSERT(ctx != NULL);
	ASSERT(retval != NULL);

	/* Skip subtree if the device is not powered. */
	if (!acpidev_dr_device_is_powered(hdl)) {
		return (AE_CTRL_DEPTH);
	}

	/*
	 * Keep scanning subtree if it fails to lookup device node.
	 * There may be some ACPI objects without device nodes created.
	 */
	if (ACPI_FAILURE(acpica_get_devinfo(hdl, &dip))) {
		return (AE_OK);
	}

	/*
	 * Point the shared node at the current object for the duration of
	 * the callback, then clear it so a stale handle cannot leak out.
	 */
	argp->node->here = hdl;
	cb = (int (*)(drmach_node_walk_args_t *args))argp->func;
	rv = (*cb)(argp);
	argp->node->here = NULL;
	if (rv) {
		*(int *)retval = rv;
		return (AE_CTRL_TERMINATE);
	}

	/*
	 * Skip descendants of PCI/PCIex host bridges.
	 * PCI/PCIex devices will be handled by pcihp.
	 */
	clsid = acpidev_dr_device_get_class(hdl);
	if (clsid == ACPIDEV_CLASS_ID_PCI || clsid == ACPIDEV_CLASS_ID_PCIEX) {
		return (AE_CTRL_DEPTH);
	}

	return (AE_OK);
}
272
/*
 * Walk the ACPI namespace below np's object, applying cb to each device
 * of interest via drmach_node_acpi_callback().  np->here is reused as a
 * cursor during the walk and restored afterwards.  Returns 0 on success,
 * the callback's non-zero result, or an EX86_* error code.
 */
static int
drmach_node_acpi_walk(drmach_node_t *np, void *data,
    int (*cb)(drmach_node_walk_args_t *args))
{
	DRMACH_HANDLE hdl;
	int rv = 0;
	drmach_node_walk_args_t args;

	/* initialize the args structure for callback */
	args.node = np;
	args.data = data;
	args.func = (void *)cb;

	/* save the handle, it will be modified when walking the tree. */
	hdl = np->get_dnode(np);
	if (hdl == NULL) {
		DRMACH_PR("!drmach_node_acpi_walk: failed to get device node.");
		return (EX86_INAPPROP);
	}

	if (ACPI_FAILURE(acpidev_dr_device_walk_device(hdl,
	    ACPIDEV_MAX_ENUM_LEVELS, drmach_node_acpi_callback,
	    &args, (void *)&rv))) {
		/*
		 * If acpidev_dr_device_walk_device() itself fails, rv won't
		 * be set to suitable error code. Set it here.
		 */
		if (rv == 0) {
			cmn_err(CE_WARN, "!drmach_node_acpi_walk: failed to "
			    "walk ACPI namespace.");
			rv = EX86_ACPIWALK;
		}
	}

	/* restore the handle to original value after walking the tree. */
	np->here = (void *)hdl;

	return ((int)rv);
}
312
313 static drmach_node_t *
drmach_node_new(void)314 drmach_node_new(void)
315 {
316 drmach_node_t *np;
317
318 np = kmem_zalloc(sizeof (drmach_node_t), KM_SLEEP);
319
320 np->get_dnode = drmach_node_acpi_get_dnode;
321 np->getdip = drmach_node_acpi_get_dip;
322 np->getproplen = drmach_node_acpi_get_proplen;
323 np->getprop = drmach_node_acpi_get_prop;
324 np->walk = drmach_node_acpi_walk;
325
326 return (np);
327 }
328
329 static drmachid_t
drmach_node_dup(drmach_node_t * np)330 drmach_node_dup(drmach_node_t *np)
331 {
332 drmach_node_t *dup;
333
334 dup = drmach_node_new();
335 dup->here = np->here;
336 dup->get_dnode = np->get_dnode;
337 dup->getdip = np->getdip;
338 dup->getproplen = np->getproplen;
339 dup->getprop = np->getprop;
340 dup->walk = np->walk;
341
342 return (dup);
343 }
344
/* Free a node allocated by drmach_node_new()/drmach_node_dup(). */
static void
drmach_node_dispose(drmach_node_t *np)
{
	kmem_free(np, sizeof (*np));
}
350
/* Dispatch to the node's walk method (drmach_node_acpi_walk). */
static int
drmach_node_walk(drmach_node_t *np, void *param,
    int (*cb)(drmach_node_walk_args_t *args))
{
	return (np->walk(np, param, cb));
}
357
/* Dispatch to the node's get_dnode method to obtain its ACPI handle. */
static DRMACH_HANDLE
drmach_node_get_dnode(drmach_node_t *np)
{
	return (np->get_dnode(np));
}
363
364 /*
365 * drmach_array provides convenient array construction, access,
366 * bounds checking and array destruction logic.
367 */
368 static drmach_array_t *
drmach_array_new(uint_t min_index,uint_t max_index)369 drmach_array_new(uint_t min_index, uint_t max_index)
370 {
371 drmach_array_t *arr;
372
373 arr = kmem_zalloc(sizeof (drmach_array_t), KM_SLEEP);
374
375 arr->arr_sz = (max_index - min_index + 1) * sizeof (void *);
376 if (arr->arr_sz > 0) {
377 arr->min_index = min_index;
378 arr->max_index = max_index;
379
380 arr->arr = kmem_zalloc(arr->arr_sz, KM_SLEEP);
381 return (arr);
382 } else {
383 kmem_free(arr, sizeof (*arr));
384 return (0);
385 }
386 }
387
388 static int
drmach_array_set(drmach_array_t * arr,uint_t idx,drmachid_t val)389 drmach_array_set(drmach_array_t *arr, uint_t idx, drmachid_t val)
390 {
391 if (idx < arr->min_index || idx > arr->max_index)
392 return (-1);
393 arr->arr[idx - arr->min_index] = val;
394 return (0);
395 }
396
397 /*
398 * Get the item with index idx.
399 * Return 0 with the value stored in val if succeeds, otherwise return -1.
400 */
401 static int
drmach_array_get(drmach_array_t * arr,uint_t idx,drmachid_t * val)402 drmach_array_get(drmach_array_t *arr, uint_t idx, drmachid_t *val)
403 {
404 if (idx < arr->min_index || idx > arr->max_index)
405 return (-1);
406 *val = arr->arr[idx - arr->min_index];
407 return (0);
408 }
409
410 static int
drmach_array_first(drmach_array_t * arr,uint_t * idx,drmachid_t * val)411 drmach_array_first(drmach_array_t *arr, uint_t *idx, drmachid_t *val)
412 {
413 int rv;
414
415 *idx = arr->min_index;
416 while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
417 *idx += 1;
418
419 return (rv);
420 }
421
422 static int
drmach_array_next(drmach_array_t * arr,uint_t * idx,drmachid_t * val)423 drmach_array_next(drmach_array_t *arr, uint_t *idx, drmachid_t *val)
424 {
425 int rv;
426
427 *idx += 1;
428 while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
429 *idx += 1;
430
431 return (rv);
432 }
433
434 static void
drmach_array_dispose(drmach_array_t * arr,void (* disposer)(drmachid_t))435 drmach_array_dispose(drmach_array_t *arr, void (*disposer)(drmachid_t))
436 {
437 drmachid_t val;
438 uint_t idx;
439 int rv;
440
441 rv = drmach_array_first(arr, &idx, &val);
442 while (rv == 0) {
443 (*disposer)(val);
444 rv = drmach_array_next(arr, &idx, &val);
445 }
446
447 kmem_free(arr->arr, arr->arr_sz);
448 kmem_free(arr, sizeof (*arr));
449 }
450
451 static drmach_board_t *
drmach_get_board_by_bnum(uint_t bnum)452 drmach_get_board_by_bnum(uint_t bnum)
453 {
454 drmachid_t id;
455
456 if (drmach_array_get(drmach_boards, bnum, &id) == 0)
457 return ((drmach_board_t *)id);
458 else
459 return (NULL);
460 }
461
/*
 * Construct a drmach device for the ACPI node 'node' on board 'bp'.
 * Looks up the node's device name and dispatches to the matching
 * constructor in drmach_name2type[].  Nodes of no interest to DR yield
 * *idp == NULL with a NULL (success) return; a missing name property
 * yields an EX86_GETPROP error.
 */
sbd_error_t *
drmach_device_new(drmach_node_t *node,
    drmach_board_t *bp, int portid, drmachid_t *idp)
{
	int		i;
	int		rv;
	drmach_device_t	proto;
	sbd_error_t	*err;
	char		name[OBP_MAXDRVNAME];

	rv = node->getprop(node, ACPIDEV_DR_PROP_DEVNAME, name, OBP_MAXDRVNAME);
	if (rv) {
		/* every node is expected to have a name */
		err = drerr_new(1, EX86_GETPROP, "device node %s: property %s",
		    ddi_node_name(node->getdip(node)),
		    ACPIDEV_DR_PROP_DEVNAME);
		return (err);
	}

	/*
	 * The node currently being examined is not listed in the name2type[]
	 * array. In this case, the node is no interest to drmach. Both
	 * dp and err are initialized here to yield nothing (no device or
	 * error structure) for this case.
	 */
	i = drmach_name2type_idx(name);
	if (i < 0) {
		*idp = (drmachid_t)0;
		return (NULL);
	}

	/* device specific new function will set unum */
	bzero(&proto, sizeof (proto));
	proto.type = drmach_name2type[i].type;
	proto.bp = bp;
	proto.node = node;
	proto.portid = portid;

	return (drmach_name2type[i].new(&proto, idp));
}
502
/*
 * Dispose of a device by dispatching to its type-specific dispose
 * routine (installed by drmach_{cpu,mem,io}_new).
 */
static void
drmach_device_dispose(drmachid_t id)
{
	drmach_device_t *self = id;

	self->cm.dispose(id);
}
510
511 static sbd_error_t *
drmach_device_status(drmachid_t id,drmach_status_t * stat)512 drmach_device_status(drmachid_t id, drmach_status_t *stat)
513 {
514 drmach_common_t *cp;
515
516 if (!DRMACH_IS_ID(id))
517 return (drerr_new(0, EX86_NOTID, NULL));
518 cp = id;
519
520 return (cp->status(id, stat));
521 }
522
/*
 * Allocate and initialize a drmach board object for board number bnum
 * and record it in drmach_boards.  boot_board is non-zero for boards
 * present at boot; a powered boot board starts out assigned.  Returns
 * NULL when the board's ACPI handle or name cannot be obtained.
 */
drmach_board_t *
drmach_board_new(uint_t bnum, int boot_board)
{
	static void drmach_board_dispose(drmachid_t id);
	static sbd_error_t *drmach_board_release(drmachid_t);
	static sbd_error_t *drmach_board_status(drmachid_t, drmach_status_t *);

	sbd_error_t *err;
	drmach_board_t *bp;
	dev_info_t *dip = NULL;

	bp = kmem_zalloc(sizeof (drmach_board_t), KM_SLEEP);
	/* The isa pointer doubles as the runtime type tag for board ids. */
	bp->cm.isa = (void *)drmach_board_new;
	bp->cm.release = drmach_board_release;
	bp->cm.status = drmach_board_status;

	bp->bnum = bnum;
	bp->devices = NULL;
	bp->tree = drmach_node_new();

	/* The handle lookup must be done with all DR locks held. */
	acpidev_dr_lock_all();
	if (ACPI_FAILURE(acpidev_dr_get_board_handle(bnum, &bp->tree->here))) {
		acpidev_dr_unlock_all();
		drmach_board_dispose(bp);
		return (NULL);
	}
	acpidev_dr_unlock_all();
	ASSERT(bp->tree->here != NULL);

	err = drmach_board_name(bnum, bp->cm.name, sizeof (bp->cm.name));
	if (err != NULL) {
		sbd_err_clear(&err);
		drmach_board_dispose(bp);
		return (NULL);
	}

	/* Only a powered board retains its boot_board marking. */
	if (acpidev_dr_device_is_powered(bp->tree->here)) {
		bp->boot_board = boot_board;
		bp->powered = 1;
	} else {
		bp->boot_board = 0;
		bp->powered = 0;
	}
	bp->assigned = boot_board;
	/* A board with a devinfo node is considered connected. */
	if (ACPI_SUCCESS(acpica_get_devinfo(bp->tree->here, &dip))) {
		bp->connected = 1;
	} else {
		bp->connected = 0;
	}

	(void) drmach_array_set(drmach_boards, bnum, bp);

	return (bp);
}
577
578 static void
drmach_board_dispose(drmachid_t id)579 drmach_board_dispose(drmachid_t id)
580 {
581 drmach_board_t *bp;
582
583 ASSERT(DRMACH_IS_BOARD_ID(id));
584 bp = id;
585
586 if (bp->tree)
587 drmach_node_dispose(bp->tree);
588
589 if (bp->devices)
590 drmach_array_dispose(bp->devices, drmach_device_dispose);
591
592 kmem_free(bp, sizeof (drmach_board_t));
593 }
594
595 static sbd_error_t *
drmach_board_release(drmachid_t id)596 drmach_board_release(drmachid_t id)
597 {
598 if (!DRMACH_IS_BOARD_ID(id))
599 return (drerr_new(0, EX86_INAPPROP, NULL));
600
601 return (NULL);
602 }
603
604 static int
drmach_board_check_power(drmach_board_t * bp)605 drmach_board_check_power(drmach_board_t *bp)
606 {
607 DRMACH_HANDLE hdl;
608
609 hdl = drmach_node_get_dnode(bp->tree);
610
611 return (acpidev_dr_device_is_powered(hdl));
612 }
613
/*
 * Walk context used when generating a board's eject-device /
 * eject-dependency name list into a caller-supplied buffer.
 */
struct drmach_board_list_dep_arg {
	int		count;		/* board names appended so far */
	size_t		len;		/* total size of buf */
	ssize_t		off;		/* current write offset into buf */
	char		*buf;		/* caller-supplied output buffer */
	char		temp[MAXPATHLEN]; /* scratch for one board name */
};
621
/*
 * ACPI walk callback: for each object that is a DR-capable board,
 * append " <board-name>" to the context buffer.  Terminates the walk
 * once the buffer is full; name-generation failures are skipped so the
 * walk can continue.
 */
static ACPI_STATUS
drmach_board_generate_name(ACPI_HANDLE hdl, UINT32 lvl, void *ctx,
    void **retval)
{
	_NOTE(ARGUNUSED(retval));

	struct drmach_board_list_dep_arg *argp = ctx;

	ASSERT(hdl != NULL);
	ASSERT(lvl == UINT32_MAX);
	ASSERT(ctx != NULL);

	/* Skip non-board devices. */
	if (!acpidev_dr_device_is_board(hdl)) {
		return (AE_OK);
	}

	if (ACPI_FAILURE(acpidev_dr_get_board_name(hdl, argp->temp,
	    sizeof (argp->temp)))) {
		DRMACH_PR("!drmach_board_generate_name: failed to "
		    "generate board name for handle %p.", hdl);
		/* Keep on walking. */
		return (AE_OK);
	}
	argp->count++;
	/* snprintf returns the would-be length, so off can reach len. */
	argp->off += snprintf(argp->buf + argp->off, argp->len - argp->off,
	    " %s", argp->temp);
	if (argp->off >= argp->len) {
		return (AE_CTRL_TERMINATE);
	}

	return (AE_OK);
}
655
/*
 * Write "<prefix> name name ..." into buf (len bytes), listing the
 * boards on hdl's eject device list (edl == B_TRUE) or its eject
 * dependency (edl == B_FALSE).  Returns the number of characters
 * written, 0 when no dependency exists (buf set to ""), or -1 on
 * failure/truncation.
 */
static ssize_t
drmach_board_list_dependency(ACPI_HANDLE hdl, boolean_t edl, char *prefix,
    char *buf, size_t len)
{
	ACPI_STATUS rc;
	ssize_t off;
	struct drmach_board_list_dep_arg *ap;

	ASSERT(buf != NULL && len != 0);
	if (buf == NULL || len == 0) {
		return (-1);
	}

	/* Heap-allocate the context; it embeds a MAXPATHLEN scratch buffer. */
	ap = kmem_zalloc(sizeof (*ap), KM_SLEEP);
	ap->buf = buf;
	ap->len = len;
	ap->off = snprintf(buf, len, "%s", prefix);
	if (ap->off >= len) {
		*buf = '\0';
		kmem_free(ap, sizeof (*ap));
		return (-1);
	}

	/* Generate the device dependency list. */
	if (edl) {
		rc = acpidev_dr_device_walk_edl(hdl,
		    drmach_board_generate_name, ap, NULL);
	} else {
		rc = acpidev_dr_device_walk_ejd(hdl,
		    drmach_board_generate_name, ap, NULL);
	}
	if (ACPI_FAILURE(rc)) {
		*buf = '\0';
		ap->off = -1;
		/* No device has dependency on this board. */
	} else if (ap->count == 0) {
		*buf = '\0';
		ap->off = 0;
	}

	off = ap->off;
	kmem_free(ap, sizeof (*ap));

	return (off);
}
701
/*
 * Fill in *stat for a board: power/presence/condition come from ACPI,
 * the info field gets the board's eject device and eject dependency
 * lists, the type field is derived from the ACPI board type, and
 * busy/configured are accumulated from the board's child devices.
 */
static sbd_error_t *
drmach_board_status(drmachid_t id, drmach_status_t *stat)
{
	sbd_error_t	*err = NULL;
	drmach_board_t	*bp;
	DRMACH_HANDLE	hdl;
	size_t		off;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	bp = id;

	if (bp->tree == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));
	hdl = drmach_node_get_dnode(bp->tree);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	stat->busy = 0;			/* assume not busy */
	stat->configured = 0;		/* assume not configured */
	stat->assigned = bp->assigned;
	/* Refresh the cached power/condition state as a side effect. */
	stat->powered = bp->powered = acpidev_dr_device_is_powered(hdl);
	stat->empty = !acpidev_dr_device_is_present(hdl);
	if (ACPI_SUCCESS(acpidev_dr_device_check_status(hdl))) {
		stat->cond = bp->cond = SBD_COND_OK;
	} else {
		stat->cond = bp->cond = SBD_COND_FAILED;
	}
	stat->info[0] = '\0';

	/* Generate the eject device list. */
	if (drmach_board_list_dependency(hdl, B_TRUE, "EDL:",
	    stat->info, sizeof (stat->info)) < 0) {
		DRMACH_PR("!drmach_board_status: failed to generate "
		    "eject device list for board %d.", bp->bnum);
		stat->info[0] = '\0';
	}
	/* Append the eject dependency after whatever the EDL produced. */
	off = strlen(stat->info);
	if (off < sizeof (stat->info)) {
		if (drmach_board_list_dependency(hdl, B_FALSE,
		    off ? ", EJD:" : "EJD:",
		    stat->info + off, sizeof (stat->info) - off) < 0) {
			DRMACH_PR("!drmach_board_status: failed to generate "
			    "eject dependent device for board %d.", bp->bnum);
			stat->info[off] = '\0';
		}
	}

	switch (acpidev_dr_get_board_type(bp->tree->get_dnode(bp->tree))) {
	case ACPIDEV_CPU_BOARD:
		(void) strlcpy(stat->type, "CPU Board", sizeof (stat->type));
		break;
	case ACPIDEV_MEMORY_BOARD:
		(void) strlcpy(stat->type, "MemoryBoard", sizeof (stat->type));
		break;
	case ACPIDEV_IO_BOARD:
		(void) strlcpy(stat->type, "IO Board", sizeof (stat->type));
		break;
	case ACPIDEV_SYSTEM_BOARD:
		/*FALLTHROUGH*/
	default:
		(void) strlcpy(stat->type, "SystemBoard", sizeof (stat->type));
		break;
	}

	/* Aggregate busy/configured from the board's child devices. */
	if (bp->devices) {
		int		rv;
		uint_t		d_idx;
		drmachid_t	d_id;

		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
		while (rv == 0) {
			drmach_status_t	d_stat;

			err = drmach_device_status(d_id, &d_stat);
			if (err)
				break;

			stat->busy |= d_stat.busy;
			stat->configured |= d_stat.configured;

			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
		}
	}

	return (err);
}
789
790 /*
791 * When DR is initialized, we walk the device tree and acquire a hold on
792 * all the nodes that are interesting to DR. This is so that the corresponding
793 * branches cannot be deleted.
794 */
/*
 * When DR is initialized, we walk the device tree and acquire a hold on
 * all the nodes that are interesting to DR. This is so that the corresponding
 * branches cannot be deleted.
 */
/*
 * ddi_walk_devs() callback: hold (*arg != 0) or release (*arg == 0) the
 * devinfo branch rooted at rdip if the underlying ACPI device is ready
 * for DR.  Subtrees not created by acpidev are pruned.
 */
static int
drmach_hold_rele_devtree(dev_info_t *rdip, void *arg)
{
	int *holdp = (int *)arg;
	ACPI_HANDLE hdl = NULL;
	acpidev_data_handle_t dhdl;

	/* Skip nodes and subtrees which are not created by acpidev. */
	if (ACPI_FAILURE(acpica_get_handle(rdip, &hdl))) {
		return (DDI_WALK_PRUNECHILD);
	}
	ASSERT(hdl != NULL);
	dhdl = acpidev_data_get_handle(hdl);
	if (dhdl == NULL) {
		return (DDI_WALK_PRUNECHILD);
	}

	/* Hold/release devices which are interesting to DR operations. */
	if (acpidev_data_dr_ready(dhdl)) {
		if (*holdp) {
			ASSERT(!e_ddi_branch_held(rdip));
			e_ddi_branch_hold(rdip);
		} else {
			ASSERT(e_ddi_branch_held(rdip));
			e_ddi_branch_rele(rdip);
		}
	}

	return (DDI_WALK_CONTINUE);
}
825
826 static void
drmach_hold_devtree(void)827 drmach_hold_devtree(void)
828 {
829 dev_info_t *dip;
830 int circ;
831 int hold = 1;
832
833 dip = ddi_root_node();
834 ndi_devi_enter(dip, &circ);
835 ddi_walk_devs(ddi_get_child(dip), drmach_hold_rele_devtree, &hold);
836 ndi_devi_exit(dip, circ);
837 }
838
839 static void
drmach_release_devtree(void)840 drmach_release_devtree(void)
841 {
842 dev_info_t *dip;
843 int circ;
844 int hold = 0;
845
846 dip = ddi_root_node();
847 ndi_devi_enter(dip, &circ);
848 ddi_walk_devs(ddi_get_child(dip), drmach_hold_rele_devtree, &hold);
849 ndi_devi_exit(dip, circ);
850 }
851
/*
 * CPR callback.  The lock acquired on the CB_CODE_CPR_CHKPT invocation
 * is intentionally released on the later resume invocation, so this
 * rwlock is held across the entire suspend/resume window to keep DR
 * operations and CPR mutually exclusive.
 */
static boolean_t
drmach_cpr_callb(void *arg, int code)
{
	_NOTE(ARGUNUSED(arg));

	if (code == CB_CODE_CPR_CHKPT) {
		/*
		 * Temporarily block CPR operations if there are DR operations
		 * ongoing.
		 */
		rw_enter(&drmach_cpr_rwlock, RW_WRITER);
	} else {
		rw_exit(&drmach_cpr_rwlock);
	}

	return (B_TRUE);
}
869
870 static int
drmach_init(void)871 drmach_init(void)
872 {
873 DRMACH_HANDLE hdl;
874 drmachid_t id;
875 uint_t bnum;
876
877 if (MAX_BOARDS > SHRT_MAX) {
878 cmn_err(CE_WARN, "!drmach_init: system has too many (%d) "
879 "hotplug capable boards.", MAX_BOARDS);
880 return (ENXIO);
881 } else if (MAX_CMP_UNITS_PER_BOARD > 1) {
882 cmn_err(CE_WARN, "!drmach_init: DR doesn't support multiple "
883 "(%d) physical processors on one board.",
884 MAX_CMP_UNITS_PER_BOARD);
885 return (ENXIO);
886 } else if (MAX_CORES_PER_CMP & (MAX_CORES_PER_CMP - 1)) {
887 cmn_err(CE_WARN, "!drmach_init: number of logical CPUs (%d) in "
888 "physical processor is not power of 2.",
889 MAX_CORES_PER_CMP);
890 return (ENXIO);
891 } else if (MAX_CPU_UNITS_PER_BOARD > DEVSET_CPU_NUMBER ||
892 MAX_MEM_UNITS_PER_BOARD > DEVSET_MEM_NUMBER ||
893 MAX_IO_UNITS_PER_BOARD > DEVSET_IO_NUMBER) {
894 cmn_err(CE_WARN, "!drmach_init: system has more CPU/memory/IO "
895 "units than the DR driver can handle.");
896 return (ENXIO);
897 }
898
899 rw_init(&drmach_cpr_rwlock, NULL, RW_DEFAULT, NULL);
900 drmach_cpr_cid = callb_add(drmach_cpr_callb, NULL,
901 CB_CL_CPR_PM, "drmach");
902
903 rw_init(&drmach_boards_rwlock, NULL, RW_DEFAULT, NULL);
904 drmach_boards = drmach_array_new(0, MAX_BOARDS - 1);
905 drmach_domain.allow_dr = acpidev_dr_capable();
906
907 for (bnum = 0; bnum < MAX_BOARDS; bnum++) {
908 hdl = NULL;
909 if (ACPI_FAILURE(acpidev_dr_get_board_handle(bnum, &hdl)) ||
910 hdl == NULL) {
911 cmn_err(CE_WARN, "!drmach_init: failed to lookup ACPI "
912 "handle for board %d.", bnum);
913 continue;
914 }
915 if (drmach_array_get(drmach_boards, bnum, &id) == -1) {
916 DRMACH_PR("!drmach_init: failed to get handle "
917 "for board %d.", bnum);
918 ASSERT(0);
919 goto error;
920 } else if (id == NULL) {
921 (void) drmach_board_new(bnum, 1);
922 }
923 }
924
925 /*
926 * Walk descendants of the devinfo root node and hold
927 * all devinfo branches of interest.
928 */
929 drmach_hold_devtree();
930
931 return (0);
932
933 error:
934 drmach_array_dispose(drmach_boards, drmach_board_dispose);
935 rw_destroy(&drmach_boards_rwlock);
936 rw_destroy(&drmach_cpr_rwlock);
937 return (ENXIO);
938 }
939
/*
 * Tear down all drmach state created by drmach_init(): dispose of the
 * board array, release devinfo branch holds, unregister the CPR
 * callback, and destroy the global locks.
 */
static void
drmach_fini(void)
{
	rw_enter(&drmach_boards_rwlock, RW_WRITER);
	if (drmach_boards != NULL) {
		drmach_array_dispose(drmach_boards, drmach_board_dispose);
		drmach_boards = NULL;
	}
	rw_exit(&drmach_boards_rwlock);

	/*
	 * Walk descendants of the root devinfo node
	 * release holds acquired on branches in drmach_init()
	 */
	drmach_release_devtree();

	/* Unregister the CPR callback before destroying its rwlock. */
	(void) callb_delete(drmach_cpr_cid);
	rw_destroy(&drmach_cpr_rwlock);
	rw_destroy(&drmach_boards_rwlock);
}
960
/*
 * Constructor for IO devices (invoked via drmach_name2type[]).  Copies
 * the prototype, duplicates its node, installs the IO method table, and
 * derives the unit number from the port id.  Returns NULL and stores
 * the new device in *idp.
 */
sbd_error_t *
drmach_io_new(drmach_device_t *proto, drmachid_t *idp)
{
	static void drmach_io_dispose(drmachid_t);
	static sbd_error_t *drmach_io_release(drmachid_t);
	static sbd_error_t *drmach_io_status(drmachid_t, drmach_status_t *);

	drmach_io_t *ip;
	int portid;

	portid = proto->portid;
	ASSERT(portid != -1);
	proto->unum = portid;

	ip = kmem_zalloc(sizeof (drmach_io_t), KM_SLEEP);
	bcopy(proto, &ip->dev, sizeof (ip->dev));
	ip->dev.node = drmach_node_dup(proto->node);
	/* The isa pointer doubles as the runtime type tag for IO ids. */
	ip->dev.cm.isa = (void *)drmach_io_new;
	ip->dev.cm.dispose = drmach_io_dispose;
	ip->dev.cm.release = drmach_io_release;
	ip->dev.cm.status = drmach_io_status;
	(void) snprintf(ip->dev.cm.name, sizeof (ip->dev.cm.name), "%s%d",
	    ip->dev.type, ip->dev.unum);

	*idp = (drmachid_t)ip;

	return (NULL);
}
989
990 static void
drmach_io_dispose(drmachid_t id)991 drmach_io_dispose(drmachid_t id)
992 {
993 drmach_io_t *self;
994
995 ASSERT(DRMACH_IS_IO_ID(id));
996
997 self = id;
998 if (self->dev.node)
999 drmach_node_dispose(self->dev.node);
1000
1001 kmem_free(self, sizeof (*self));
1002 }
1003
1004 static sbd_error_t *
drmach_io_release(drmachid_t id)1005 drmach_io_release(drmachid_t id)
1006 {
1007 if (!DRMACH_IS_IO_ID(id))
1008 return (drerr_new(0, EX86_INAPPROP, NULL));
1009
1010 return (NULL);
1011 }
1012
/*
 * Fill in *stat for an IO device.  assigned/powered mirror the parent
 * board; configured reflects whether the device is attached.
 */
static sbd_error_t *
drmach_io_status(drmachid_t id, drmach_status_t *stat)
{
	drmach_device_t *dp;
	sbd_error_t	*err;
	int		 configured;

	ASSERT(DRMACH_IS_IO_ID(id));
	dp = id;

	err = drmach_io_is_attached(id, &configured);
	if (err)
		return (err);

	stat->assigned = dp->bp->assigned;
	stat->powered = dp->bp->powered;
	stat->configured = (configured != 0);
	stat->busy = dp->busy;
	(void) strlcpy(stat->type, dp->type, sizeof (stat->type));
	stat->info[0] = '\0';

	return (NULL);
}
1036
/*
 * Constructor for CPU devices (invoked via drmach_name2type[]).  The
 * prototype's portid is the CPU's APIC ID; the unit number is derived
 * from it.  The processor id is resolved from the ACPI object when
 * possible, otherwise left as -1.  CPU0 is marked busy since many
 * components depend on it.  Returns NULL and stores the device in *idp.
 */
sbd_error_t *
drmach_cpu_new(drmach_device_t *proto, drmachid_t *idp)
{
	static void drmach_cpu_dispose(drmachid_t);
	static sbd_error_t *drmach_cpu_release(drmachid_t);
	static sbd_error_t *drmach_cpu_status(drmachid_t, drmach_status_t *);

	int		 portid;
	processorid_t	 cpuid;
	drmach_cpu_t	*cp = NULL;

	/* the portid is APIC ID of the node */
	portid = proto->portid;
	ASSERT(portid != -1);

	/*
	 * Assume all CPUs are homogeneous and have the same number of
	 * cores/threads.
	 */
	proto->unum = portid % MAX_CPU_UNITS_PER_BOARD;

	cp = kmem_zalloc(sizeof (drmach_cpu_t), KM_SLEEP);
	bcopy(proto, &cp->dev, sizeof (cp->dev));
	cp->dev.node = drmach_node_dup(proto->node);
	/* The isa pointer doubles as the runtime type tag for CPU ids. */
	cp->dev.cm.isa = (void *)drmach_cpu_new;
	cp->dev.cm.dispose = drmach_cpu_dispose;
	cp->dev.cm.release = drmach_cpu_release;
	cp->dev.cm.status = drmach_cpu_status;
	(void) snprintf(cp->dev.cm.name, sizeof (cp->dev.cm.name), "%s%d",
	    cp->dev.type, cp->dev.unum);

	cp->apicid = portid;
	if (ACPI_SUCCESS(acpica_get_cpu_id_by_object(
	    drmach_node_get_dnode(proto->node), &cpuid))) {
		cp->cpuid = cpuid;
	} else {
		cp->cpuid = -1;
	}

	/* Mark CPU0 as busy, many other components have dependency on it. */
	if (cp->cpuid == 0) {
		cp->dev.busy = 1;
	}

	*idp = (drmachid_t)cp;

	return (NULL);
}
1085
1086 static void
drmach_cpu_dispose(drmachid_t id)1087 drmach_cpu_dispose(drmachid_t id)
1088 {
1089 drmach_cpu_t *self;
1090
1091 ASSERT(DRMACH_IS_CPU_ID(id));
1092
1093 self = id;
1094 if (self->dev.node)
1095 drmach_node_dispose(self->dev.node);
1096
1097 kmem_free(self, sizeof (*self));
1098 }
1099
1100 static sbd_error_t *
drmach_cpu_release(drmachid_t id)1101 drmach_cpu_release(drmachid_t id)
1102 {
1103 if (!DRMACH_IS_CPU_ID(id))
1104 return (drerr_new(0, EX86_INAPPROP, NULL));
1105
1106 return (NULL);
1107 }
1108
/*
 * Fill in *stat for a CPU device.  assigned/powered mirror the parent
 * board; configured reflects whether the processor id is present in
 * the system's CPU list (checked under cpu_lock).
 */
static sbd_error_t *
drmach_cpu_status(drmachid_t id, drmach_status_t *stat)
{
	drmach_cpu_t	*cp;
	drmach_device_t	*dp;

	ASSERT(DRMACH_IS_CPU_ID(id));
	cp = (drmach_cpu_t *)id;
	dp = &cp->dev;

	stat->assigned = dp->bp->assigned;
	stat->powered = dp->bp->powered;
	mutex_enter(&cpu_lock);
	stat->configured = (cpu_get(cp->cpuid) != NULL);
	mutex_exit(&cpu_lock);
	stat->busy = dp->busy;
	(void) strlcpy(stat->type, dp->type, sizeof (stat->type));
	stat->info[0] = '\0';

	return (NULL);
}
1130
1131 static int
drmach_setup_mc_info(DRMACH_HANDLE hdl,drmach_mem_t * mp)1132 drmach_setup_mc_info(DRMACH_HANDLE hdl, drmach_mem_t *mp)
1133 {
1134 uint_t i, j, count;
1135 struct memlist *ml = NULL, *ml2 = NULL;
1136 acpidev_regspec_t *regp;
1137 uint64_t align, addr_min, addr_max, total_size, skipped_size;
1138
1139 if (hdl == NULL) {
1140 return (-1);
1141 } else if (ACPI_FAILURE(acpidev_dr_get_mem_alignment(hdl, &align))) {
1142 return (-1);
1143 } else {
1144 ASSERT((align & (align - 1)) == 0);
1145 mp->mem_alignment = align;
1146 }
1147
1148 addr_min = UINT64_MAX;
1149 addr_max = 0;
1150 total_size = 0;
1151 skipped_size = 0;
1152 /*
1153 * There's a memory hole just below 4G on x86, which needs special
1154 * handling. All other addresses assigned to a specific memory device
1155 * should be contiguous.
1156 */
1157 if (ACPI_FAILURE(acpidev_dr_device_get_regspec(hdl, TRUE, ®p,
1158 &count))) {
1159 return (-1);
1160 }
1161 for (i = 0, j = 0; i < count; i++) {
1162 uint64_t addr, size;
1163
1164 addr = (uint64_t)regp[i].phys_mid << 32;
1165 addr |= (uint64_t)regp[i].phys_low;
1166 size = (uint64_t)regp[i].size_hi << 32;
1167 size |= (uint64_t)regp[i].size_low;
1168 if (size == 0)
1169 continue;
1170 else
1171 j++;
1172
1173 total_size += size;
1174 if (addr < addr_min)
1175 addr_min = addr;
1176 if (addr + size > addr_max)
1177 addr_max = addr + size;
1178 if (mp->dev.bp->boot_board ||
1179 j <= acpidev_dr_max_segments_per_mem_device()) {
1180 ml = memlist_add_span(ml, addr, size);
1181 } else {
1182 skipped_size += size;
1183 }
1184 }
1185 acpidev_dr_device_free_regspec(regp, count);
1186
1187 if (skipped_size != 0) {
1188 cmn_err(CE_WARN, "!drmach: too many (%d) segments on memory "
1189 "device, max (%d) segments supported, 0x%" PRIx64 " bytes "
1190 "of memory skipped.",
1191 j, acpidev_dr_max_segments_per_mem_device(), skipped_size);
1192 }
1193
1194 mp->slice_base = addr_min;
1195 mp->slice_top = addr_max;
1196 mp->slice_size = total_size;
1197
1198 if (mp->dev.bp->boot_board) {
1199 uint64_t endpa = _ptob64(physmax + 1);
1200
1201 /*
1202 * we intersect phys_install to get base_pa.
1203 * This only works at boot-up time.
1204 */
1205 memlist_read_lock();
1206 ml2 = memlist_dup(phys_install);
1207 memlist_read_unlock();
1208
1209 ml2 = memlist_del_span(ml2, 0ull, mp->slice_base);
1210 if (ml2 && endpa > addr_max) {
1211 ml2 = memlist_del_span(ml2, addr_max, endpa - addr_max);
1212 }
1213 }
1214
1215 /*
1216 * Create a memlist for the memory board.
1217 * The created memlist only contains configured memory if there's
1218 * configured memory on the board, otherwise it contains all memory
1219 * on the board.
1220 */
1221 if (ml2) {
1222 uint64_t nbytes = 0;
1223 struct memlist *p;
1224
1225 for (p = ml2; p; p = p->ml_next) {
1226 nbytes += p->ml_size;
1227 }
1228 if (nbytes == 0) {
1229 memlist_delete(ml2);
1230 ml2 = NULL;
1231 } else {
1232 /* Node has configured memory at boot time. */
1233 mp->base_pa = ml2->ml_address;
1234 mp->nbytes = nbytes;
1235 mp->memlist = ml2;
1236 if (ml)
1237 memlist_delete(ml);
1238 }
1239 }
1240 if (ml2 == NULL) {
1241 /* Not configured at boot time. */
1242 mp->base_pa = UINT64_MAX;
1243 mp->nbytes = 0;
1244 mp->memlist = ml;
1245 }
1246
1247 return (0);
1248 }
1249
1250 sbd_error_t *
drmach_mem_new(drmach_device_t * proto,drmachid_t * idp)1251 drmach_mem_new(drmach_device_t *proto, drmachid_t *idp)
1252 {
1253 static void drmach_mem_dispose(drmachid_t);
1254 static sbd_error_t *drmach_mem_release(drmachid_t);
1255 static sbd_error_t *drmach_mem_status(drmachid_t, drmach_status_t *);
1256
1257 DRMACH_HANDLE hdl;
1258 drmach_mem_t *mp;
1259 int portid;
1260
1261 mp = kmem_zalloc(sizeof (drmach_mem_t), KM_SLEEP);
1262 portid = proto->portid;
1263 ASSERT(portid != -1);
1264 proto->unum = portid;
1265
1266 bcopy(proto, &mp->dev, sizeof (mp->dev));
1267 mp->dev.node = drmach_node_dup(proto->node);
1268 mp->dev.cm.isa = (void *)drmach_mem_new;
1269 mp->dev.cm.dispose = drmach_mem_dispose;
1270 mp->dev.cm.release = drmach_mem_release;
1271 mp->dev.cm.status = drmach_mem_status;
1272
1273 (void) snprintf(mp->dev.cm.name, sizeof (mp->dev.cm.name), "%s%d",
1274 mp->dev.type, proto->unum);
1275 hdl = mp->dev.node->get_dnode(mp->dev.node);
1276 ASSERT(hdl != NULL);
1277 if (drmach_setup_mc_info(hdl, mp) != 0) {
1278 kmem_free(mp, sizeof (drmach_mem_t));
1279 *idp = (drmachid_t)NULL;
1280 return (drerr_new(1, EX86_MC_SETUP, NULL));
1281 }
1282
1283 /* make sure we do not create memoryless nodes */
1284 if (mp->nbytes == 0 && mp->slice_size == 0) {
1285 kmem_free(mp, sizeof (drmach_mem_t));
1286 *idp = (drmachid_t)NULL;
1287 } else
1288 *idp = (drmachid_t)mp;
1289
1290 return (NULL);
1291 }
1292
1293 static void
drmach_mem_dispose(drmachid_t id)1294 drmach_mem_dispose(drmachid_t id)
1295 {
1296 drmach_mem_t *mp;
1297
1298 ASSERT(DRMACH_IS_MEM_ID(id));
1299
1300 mp = id;
1301
1302 if (mp->dev.node)
1303 drmach_node_dispose(mp->dev.node);
1304
1305 if (mp->memlist) {
1306 memlist_delete(mp->memlist);
1307 mp->memlist = NULL;
1308 }
1309
1310 kmem_free(mp, sizeof (*mp));
1311 }
1312
1313 static sbd_error_t *
drmach_mem_release(drmachid_t id)1314 drmach_mem_release(drmachid_t id)
1315 {
1316 if (!DRMACH_IS_MEM_ID(id))
1317 return (drerr_new(0, EX86_INAPPROP, NULL));
1318
1319 return (NULL);
1320 }
1321
/*
 * Report status for the memory device 'id'.  The device is considered
 * configured when some span of phys_install lies inside
 * [base_pa rounded down to mem_alignment, slice_top).
 */
static sbd_error_t *
drmach_mem_status(drmachid_t id, drmach_status_t *stat)
{
	uint64_t pa;
	drmach_mem_t *dp;
	struct memlist *ml = NULL;

	ASSERT(DRMACH_IS_MEM_ID(id));
	dp = id;

	/* get starting physical address of target memory */
	pa = dp->base_pa;
	/* round down to slice boundary (mem_alignment is a power of two) */
	pa &= ~(dp->mem_alignment - 1);

	/* stop at first span that is in slice */
	memlist_read_lock();
	for (ml = phys_install; ml; ml = ml->ml_next)
		if (ml->ml_address >= pa && ml->ml_address < dp->slice_top)
			break;
	memlist_read_unlock();

	stat->assigned = dp->dev.bp->assigned;
	stat->powered = dp->dev.bp->powered;
	stat->configured = (ml != NULL);
	stat->busy = dp->dev.busy;
	(void) strlcpy(stat->type, dp->dev.type, sizeof (stat->type));
	stat->info[0] = '\0';

	return (NULL);
}
1353
1354 /*
1355 * Public interfaces exported to support platform independent dr driver.
1356 */
1357 uint_t
drmach_max_boards(void)1358 drmach_max_boards(void)
1359 {
1360 return (acpidev_dr_max_boards());
1361 }
1362
1363 uint_t
drmach_max_io_units_per_board(void)1364 drmach_max_io_units_per_board(void)
1365 {
1366 return (acpidev_dr_max_io_units_per_board());
1367 }
1368
1369 uint_t
drmach_max_cmp_units_per_board(void)1370 drmach_max_cmp_units_per_board(void)
1371 {
1372 return (acpidev_dr_max_cmp_units_per_board());
1373 }
1374
1375 uint_t
drmach_max_mem_units_per_board(void)1376 drmach_max_mem_units_per_board(void)
1377 {
1378 return (acpidev_dr_max_mem_units_per_board());
1379 }
1380
1381 uint_t
drmach_max_core_per_cmp(void)1382 drmach_max_core_per_cmp(void)
1383 {
1384 return (acpidev_dr_max_cpu_units_per_cmp());
1385 }
1386
/*
 * Machine-specific pre-processing hook called by the common dr driver
 * before executing command 'cmd'.  For every command except STATUS and
 * GETNCM it takes drmach_cpr_rwlock as reader (released again in
 * drmach_post_op()), validates the passthru option string, checks that
 * the requested board state transition is legal, and for
 * (UN)CONFIGURE masks unsupported device types out of the devset
 * passed through *argp.
 */
sbd_error_t *
drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts, void *argp)
{
	drmach_board_t *bp = (drmach_board_t *)id;
	sbd_error_t *err = NULL;

	/* allow status and ncm operations to always succeed */
	if ((cmd == SBD_CMD_STATUS) || (cmd == SBD_CMD_GETNCM)) {
		return (NULL);
	}

	switch (cmd) {
	case SBD_CMD_POWERON:
	case SBD_CMD_POWEROFF:
		/*
		 * Disable fast reboot if CPU/MEM/IOH hotplug event happens.
		 * Note: this is a temporary solution and will be revised when
		 * fast reboot can support CPU/MEM/IOH DR operations in future.
		 *
		 * ACPI BIOS generates some static ACPI tables, such as MADT,
		 * SRAT and SLIT, to describe system hardware configuration on
		 * power-on. When CPU/MEM/IOH hotplug event happens, those
		 * static tables won't be updated and will become stale.
		 *
		 * If we reset system by fast reboot, BIOS will have no chance
		 * to regenerate those staled static tables. Fast reboot can't
		 * tolerate such inconsistency between staled ACPI tables and
		 * real hardware configuration yet.
		 *
		 * A temporary solution is introduced to disable fast reboot if
		 * CPU/MEM/IOH hotplug event happens. This solution should be
		 * revised when fast reboot is enhanced to support CPU/MEM/IOH
		 * DR operations.
		 */
		fastreboot_disable(FBNS_HOTPLUG);
		/*FALLTHROUGH*/

	default:
		/* Block out the CPR thread. */
		rw_enter(&drmach_cpr_rwlock, RW_READER);
		break;
	}

	/* check all other commands for the required option string */
	if ((opts->size > 0) && (opts->copts != NULL)) {
		if (strstr(opts->copts, ACPIDEV_CMD_OST_PREFIX) == NULL) {
			err = drerr_new(1, EX86_SUPPORT, NULL);
		}
	} else {
		err = drerr_new(1, EX86_SUPPORT, NULL);
	}

	/* Validate the board's current state against the request. */
	if (!err && id && DRMACH_IS_BOARD_ID(id)) {
		switch (cmd) {
		case SBD_CMD_TEST:
			break;
		case SBD_CMD_CONNECT:
			if (bp->connected)
				err = drerr_new(0, ESBD_STATE, NULL);
			else if (!drmach_domain.allow_dr)
				err = drerr_new(1, EX86_SUPPORT, NULL);
			break;
		case SBD_CMD_DISCONNECT:
			if (!bp->connected)
				err = drerr_new(0, ESBD_STATE, NULL);
			else if (!drmach_domain.allow_dr)
				err = drerr_new(1, EX86_SUPPORT, NULL);
			break;
		default:
			if (!drmach_domain.allow_dr)
				err = drerr_new(1, EX86_SUPPORT, NULL);
			break;

		}
	}

	/*
	 * CPU/memory/IO DR operations will be supported in stages on x86.
	 * With early versions, some operations should be blocked here.
	 * This temporary hook will be removed when all CPU/memory/IO DR
	 * operations are supported on x86 systems.
	 *
	 * We only need to filter unsupported device types for
	 * SBD_CMD_CONFIGURE/SBD_CMD_UNCONFIGURE commands, all other
	 * commands are supported by all device types.
	 */
	if (!err && (cmd == SBD_CMD_CONFIGURE || cmd == SBD_CMD_UNCONFIGURE)) {
		int i;
		dr_devset_t *devsetp = (dr_devset_t *)argp;
		dr_devset_t devset = *devsetp;

		switch (cmd) {
		case SBD_CMD_CONFIGURE:
			if (!plat_dr_support_cpu()) {
				DEVSET_DEL(devset, SBD_COMP_CPU,
				    DEVSET_ANYUNIT);
			} else {
				/* Trim CPU units beyond the board limit. */
				for (i = MAX_CPU_UNITS_PER_BOARD;
				    i < DEVSET_CPU_NUMBER; i++) {
					DEVSET_DEL(devset, SBD_COMP_CPU, i);
				}
			}

			if (!plat_dr_support_memory()) {
				DEVSET_DEL(devset, SBD_COMP_MEM,
				    DEVSET_ANYUNIT);
			} else {
				/* Trim memory units beyond the board limit. */
				for (i = MAX_MEM_UNITS_PER_BOARD;
				    i < DEVSET_MEM_NUMBER; i++) {
					DEVSET_DEL(devset, SBD_COMP_MEM, i);
				}
			}

			/* No support of configuring IOH devices yet. */
			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			break;

		case SBD_CMD_UNCONFIGURE:
			if (!plat_dr_support_cpu()) {
				DEVSET_DEL(devset, SBD_COMP_CPU,
				    DEVSET_ANYUNIT);
			} else {
				/* Trim CPU units beyond the board limit. */
				for (i = MAX_CPU_UNITS_PER_BOARD;
				    i < DEVSET_CPU_NUMBER; i++) {
					DEVSET_DEL(devset, SBD_COMP_CPU, i);
				}
			}

			/* No support of unconfiguring MEM/IOH devices yet. */
			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			break;
		}

		*devsetp = devset;
		/* Nothing left to operate on means the request is invalid. */
		if (DEVSET_IS_NULL(devset)) {
			err = drerr_new(1, EX86_SUPPORT, NULL);
		}
	}

	return (err);
}
1529
1530 sbd_error_t *
drmach_post_op(int cmd,drmachid_t id,drmach_opts_t * opts,int rv)1531 drmach_post_op(int cmd, drmachid_t id, drmach_opts_t *opts, int rv)
1532 {
1533 _NOTE(ARGUNUSED(id, opts, rv));
1534
1535 switch (cmd) {
1536 case SBD_CMD_STATUS:
1537 case SBD_CMD_GETNCM:
1538 break;
1539
1540 default:
1541 rw_exit(&drmach_cpr_rwlock);
1542 break;
1543 }
1544
1545 return (NULL);
1546 }
1547
/*
 * Attach the device sub-tree rooted at 'id'.  CPU devices only get a
 * cpuid allocated from the ACPI DR layer; memory devices first update
 * lgroup information; all non-CPU devices are then configured through
 * e_ddi_branch_configure().  Returns NULL on success.
 */
sbd_error_t *
drmach_configure(drmachid_t id, int flags)
{
	_NOTE(ARGUNUSED(flags));

	drmach_device_t *dp;
	sbd_error_t *err = NULL;
	dev_info_t *rdip;
	dev_info_t *fdip = NULL;

	if (!DRMACH_IS_DEVICE_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	dp = id;

	rdip = dp->node->getdip(dp->node);
	ASSERT(rdip);
	ASSERT(e_ddi_branch_held(rdip));

	/* allocate cpu id for the CPU device. */
	if (DRMACH_IS_CPU_ID(id)) {
		DRMACH_HANDLE hdl = drmach_node_get_dnode(dp->node);
		ASSERT(hdl != NULL);
		if (ACPI_FAILURE(acpidev_dr_allocate_cpuid(hdl, NULL))) {
			err = drerr_new(1, EX86_ALLOC_CPUID, NULL);
		}
		/* CPUs are not run through e_ddi_branch_configure(). */
		return (err);
	}

	if (DRMACH_IS_MEM_ID(id)) {
		err = drmach_mem_update_lgrp(id);
		if (err)
			return (err);
	}

	if (e_ddi_branch_configure(rdip, &fdip, 0) != 0) {
		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		/* fdip, when set, identifies the node that failed. */
		dev_info_t *dip = (fdip != NULL) ? fdip : rdip;

		(void) ddi_pathname(dip, path);
		err = drerr_new(1, EX86_DRVFAIL, path);
		kmem_free(path, MAXPATHLEN);

		/* If non-NULL, fdip is returned held and must be released */
		if (fdip != NULL)
			ddi_release_devi(fdip);
	}

	return (err);
}
1597
/*
 * Detach the device sub-tree rooted at 'id'.  CPU devices only have
 * their ACPI cpuid binding released; other devices go through
 * e_ddi_branch_unconfigure().  Returns NULL on success.
 */
sbd_error_t *
drmach_unconfigure(drmachid_t id, int flags)
{
	_NOTE(ARGUNUSED(flags));

	drmach_device_t *dp;
	sbd_error_t *err = NULL;
	dev_info_t *rdip, *fdip = NULL;

	if (!DRMACH_IS_DEVICE_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	dp = id;

	rdip = dp->node->getdip(dp->node);
	ASSERT(rdip);
	ASSERT(e_ddi_branch_held(rdip));

	/* CPUs: free the cpuid binding and return without a DDI detach. */
	if (DRMACH_IS_CPU_ID(id)) {
		DRMACH_HANDLE hdl = drmach_node_get_dnode(dp->node);
		ASSERT(hdl != NULL);
		if (ACPI_FAILURE(acpidev_dr_free_cpuid(hdl))) {
			err = drerr_new(1, EX86_FREE_CPUID, NULL);
		}
		return (err);
	}

	/*
	 * Note: FORCE flag is no longer necessary under devfs
	 */
	if (e_ddi_branch_unconfigure(rdip, &fdip, 0)) {
		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

		/*
		 * If non-NULL, fdip is returned held and must be released.
		 */
		if (fdip != NULL) {
			(void) ddi_pathname(fdip, path);
			ndi_rele_devi(fdip);
		} else {
			(void) ddi_pathname(rdip, path);
		}

		err = drerr_new(1, EX86_DRVFAIL, path);

		kmem_free(path, MAXPATHLEN);
	}

	return (err);
}
1647
1648 sbd_error_t *
drmach_get_dip(drmachid_t id,dev_info_t ** dip)1649 drmach_get_dip(drmachid_t id, dev_info_t **dip)
1650 {
1651 drmach_device_t *dp;
1652
1653 if (!DRMACH_IS_DEVICE_ID(id))
1654 return (drerr_new(0, EX86_INAPPROP, NULL));
1655 dp = id;
1656
1657 *dip = dp->node->getdip(dp->node);
1658
1659 return (NULL);
1660 }
1661
1662 sbd_error_t *
drmach_release(drmachid_t id)1663 drmach_release(drmachid_t id)
1664 {
1665 drmach_common_t *cp;
1666
1667 if (!DRMACH_IS_DEVICE_ID(id))
1668 return (drerr_new(0, EX86_INAPPROP, NULL));
1669 cp = id;
1670
1671 return (cp->release(id));
1672 }
1673
/*
 * Common status entry point: validate 'id' and dispatch to its
 * type-specific status routine, all under drmach_boards_rwlock so the
 * object cannot be disposed mid-call.
 */
sbd_error_t *
drmach_status(drmachid_t id, drmach_status_t *stat)
{
	drmach_common_t *cp;
	sbd_error_t *err;

	rw_enter(&drmach_boards_rwlock, RW_READER);
	if (!DRMACH_IS_ID(id)) {
		rw_exit(&drmach_boards_rwlock);
		return (drerr_new(0, EX86_NOTID, NULL));
	}
	cp = (drmach_common_t *)id;
	err = cp->status(id, stat);
	rw_exit(&drmach_boards_rwlock);

	return (err);
}
1691
/*
 * Passthru handler: parse an _OST status command of the form
 * "<prefix><status> ... acpi-event-type=<event>" from opts->copts and
 * report the resulting status code for that event to ACPI firmware
 * via acpidev_eval_ost() on the board 'id'.
 */
static sbd_error_t *
drmach_update_acpi_status(drmachid_t id, drmach_opts_t *opts)
{
	char *copts;
	drmach_board_t *bp;
	DRMACH_HANDLE hdl;
	int event, code;
	boolean_t inprogress = B_FALSE;

	if (DRMACH_NULL_ID(id) || !DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	bp = (drmach_board_t *)id;
	hdl = drmach_node_get_dnode(bp->tree);
	ASSERT(hdl != NULL);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	/* Get the status code. */
	copts = opts->copts;
	if (strncmp(copts, ACPIDEV_CMD_OST_INPROGRESS,
	    strlen(ACPIDEV_CMD_OST_INPROGRESS)) == 0) {
		/* May be refined to EJECT_IN_PROGRESS below. */
		inprogress = B_TRUE;
		code = ACPI_OST_STA_INSERT_IN_PROGRESS;
		copts += strlen(ACPIDEV_CMD_OST_INPROGRESS);
	} else if (strncmp(copts, ACPIDEV_CMD_OST_SUCCESS,
	    strlen(ACPIDEV_CMD_OST_SUCCESS)) == 0) {
		code = ACPI_OST_STA_SUCCESS;
		copts += strlen(ACPIDEV_CMD_OST_SUCCESS);
	} else if (strncmp(copts, ACPIDEV_CMD_OST_FAILURE,
	    strlen(ACPIDEV_CMD_OST_FAILURE)) == 0) {
		code = ACPI_OST_STA_FAILURE;
		copts += strlen(ACPIDEV_CMD_OST_FAILURE);
	} else if (strncmp(copts, ACPIDEV_CMD_OST_NOOP,
	    strlen(ACPIDEV_CMD_OST_NOOP)) == 0) {
		/* Nothing to report. */
		return (NULL);
	} else {
		return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
	}

	/* Get the event type, encoded as "acpi-event-type=<value>". */
	copts = strstr(copts, ACPIDEV_EVENT_TYPE_ATTR_NAME);
	if (copts == NULL) {
		return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
	}
	copts += strlen(ACPIDEV_EVENT_TYPE_ATTR_NAME);
	if (copts[0] != '=') {
		return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
	}
	copts += strlen("=");
	if (strncmp(copts, ACPIDEV_EVENT_TYPE_BUS_CHECK,
	    strlen(ACPIDEV_EVENT_TYPE_BUS_CHECK)) == 0) {
		event = ACPI_NOTIFY_BUS_CHECK;
	} else if (strncmp(copts, ACPIDEV_EVENT_TYPE_DEVICE_CHECK,
	    strlen(ACPIDEV_EVENT_TYPE_DEVICE_CHECK)) == 0) {
		event = ACPI_NOTIFY_DEVICE_CHECK;
	} else if (strncmp(copts, ACPIDEV_EVENT_TYPE_DEVICE_CHECK_LIGHT,
	    strlen(ACPIDEV_EVENT_TYPE_DEVICE_CHECK_LIGHT)) == 0) {
		event = ACPI_NOTIFY_DEVICE_CHECK_LIGHT;
	} else if (strncmp(copts, ACPIDEV_EVENT_TYPE_EJECT_REQUEST,
	    strlen(ACPIDEV_EVENT_TYPE_EJECT_REQUEST)) == 0) {
		event = ACPI_NOTIFY_EJECT_REQUEST;
		/* "In progress" during an eject means eject-in-progress. */
		if (inprogress) {
			code = ACPI_OST_STA_EJECT_IN_PROGRESS;
		}
	} else {
		return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
	}

	(void) acpidev_eval_ost(hdl, event, code, NULL, 0);

	return (NULL);
}
1764
/*
 * Dispatch table mapping passthru command-name prefixes to handler
 * functions; searched in order by drmach_passthru().
 */
static struct {
	const char *name;
	sbd_error_t *(*handler)(drmachid_t id, drmach_opts_t *opts);
} drmach_pt_arr[] = {
	{ ACPIDEV_CMD_OST_PREFIX, &drmach_update_acpi_status },
	/* the following line must always be last */
	{ NULL, NULL }
};
1773
1774 sbd_error_t *
drmach_passthru(drmachid_t id,drmach_opts_t * opts)1775 drmach_passthru(drmachid_t id, drmach_opts_t *opts)
1776 {
1777 int i;
1778 sbd_error_t *err;
1779
1780 i = 0;
1781 while (drmach_pt_arr[i].name != NULL) {
1782 int len = strlen(drmach_pt_arr[i].name);
1783
1784 if (strncmp(drmach_pt_arr[i].name, opts->copts, len) == 0)
1785 break;
1786
1787 i += 1;
1788 }
1789
1790 if (drmach_pt_arr[i].name == NULL)
1791 err = drerr_new(0, EX86_UNKPTCMD, opts->copts);
1792 else
1793 err = (*drmach_pt_arr[i].handler)(id, opts);
1794
1795 return (err);
1796 }
1797
1798 /*
1799 * Board specific interfaces to support dr driver
1800 */
1801 static int
drmach_get_portid(drmach_node_t * np)1802 drmach_get_portid(drmach_node_t *np)
1803 {
1804 uint32_t portid;
1805
1806 if (np->getprop(np, ACPIDEV_DR_PROP_PORTID,
1807 &portid, sizeof (portid)) == 0) {
1808 /*
1809 * acpidev returns portid as uint32_t, validates it.
1810 */
1811 if (portid > INT_MAX) {
1812 return (-1);
1813 } else {
1814 return (portid);
1815 }
1816 }
1817
1818 return (-1);
1819 }
1820
1821 /*
1822 * This is a helper function to determine if a given
1823 * node should be considered for a dr operation according
1824 * to predefined dr type nodes and the node's name.
1825 * Formal Parameter : The name of a device node.
1826 * Return Value: -1, name does not map to a valid dr type.
1827 * A value greater or equal to 0, name is a valid dr type.
1828 */
1829 static int
drmach_name2type_idx(char * name)1830 drmach_name2type_idx(char *name)
1831 {
1832 int index, ntypes;
1833
1834 if (name == NULL)
1835 return (-1);
1836
1837 /*
1838 * Determine how many possible types are currently supported
1839 * for dr.
1840 */
1841 ntypes = sizeof (drmach_name2type) / sizeof (drmach_name2type[0]);
1842
1843 /* Determine if the node's name correspond to a predefined type. */
1844 for (index = 0; index < ntypes; index++) {
1845 if (strcmp(drmach_name2type[index].name, name) == 0)
1846 /* The node is an allowed type for dr. */
1847 return (index);
1848 }
1849
1850 /*
1851 * If the name of the node does not map to any of the
1852 * types in the array drmach_name2type then the node is not of
1853 * interest to dr.
1854 */
1855 return (-1);
1856 }
1857
/*
 * Node-walk callback used by drmach_board_find_devices(): if the node
 * belongs to the board being scanned and names a dr-capable device
 * type, create a device object for it, record it in the board's
 * device array, and invoke the caller-supplied 'found' callback.
 * Returns 0 to continue the walk, -1 to abort it (data->err is set).
 */
static int
drmach_board_find_devices_cb(drmach_node_walk_args_t *args)
{
	drmach_node_t *node = args->node;
	drmach_board_cb_data_t *data = args->data;
	drmach_board_t *obj = data->obj;

	int rv, portid;
	uint32_t bnum;
	drmachid_t id;
	drmach_device_t *device;
	char name[OBP_MAXDRVNAME];

	portid = drmach_get_portid(node);
	/* Nodes without a device name are skipped, not errors. */
	rv = node->getprop(node, ACPIDEV_DR_PROP_DEVNAME,
	    name, OBP_MAXDRVNAME);
	if (rv)
		return (0);

	rv = node->getprop(node, ACPIDEV_DR_PROP_BOARDNUM,
	    &bnum, sizeof (bnum));
	if (rv) {
		return (0);
	}
	/* Board number must fit in an int. */
	if (bnum > INT_MAX) {
		return (0);
	}

	/* Only consider nodes on the board being scanned. */
	if (bnum != obj->bnum)
		return (0);

	if (drmach_name2type_idx(name) < 0) {
		return (0);
	}

	/*
	 * Create a device data structure from this node data.
	 * The call may yield nothing if the node is not of interest
	 * to drmach.
	 */
	data->err = drmach_device_new(node, obj, portid, &id);
	if (data->err)
		return (-1);
	else if (!id) {
		/*
		 * drmach_device_new examined the node we passed in
		 * and determined that it was one not of interest to
		 * drmach. So, it is skipped.
		 */
		return (0);
	}

	rv = drmach_array_set(obj->devices, data->ndevs++, id);
	if (rv) {
		data->err = DRMACH_INTERNAL_ERROR();
		return (-1);
	}
	device = id;

	/* Hand the new device to the caller's callback. */
	data->err = (*data->found)(data->a, device->type, device->unum, id);

	return (data->err == NULL ? 0 : -1);
}
1921
/*
 * Enumerate all dr-capable devices on board 'id', invoking 'found' for
 * each one discovered.  Device objects are cached in bp->devices; on
 * failure the partially-built array is destroyed again.
 */
sbd_error_t *
drmach_board_find_devices(drmachid_t id, void *a,
    sbd_error_t *(*found)(void *a, const char *, int, drmachid_t))
{
	drmach_board_t *bp = (drmach_board_t *)id;
	sbd_error_t *err;
	int max_devices;
	int rv;
	drmach_board_cb_data_t data;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));

	/* Upper bound on devices a board may carry. */
	max_devices = MAX_CPU_UNITS_PER_BOARD;
	max_devices += MAX_MEM_UNITS_PER_BOARD;
	max_devices += MAX_IO_UNITS_PER_BOARD;

	if (bp->devices == NULL)
		bp->devices = drmach_array_new(0, max_devices);
	ASSERT(bp->tree != NULL);

	data.obj = bp;
	data.ndevs = 0;
	data.found = found;
	data.a = a;
	data.err = NULL;

	/* Hold the ACPI DR locks for the duration of the tree walk. */
	acpidev_dr_lock_all();
	rv = drmach_node_walk(bp->tree, &data, drmach_board_find_devices_cb);
	acpidev_dr_unlock_all();
	if (rv == 0) {
		err = NULL;
	} else {
		/* Walk failed: discard any devices created so far. */
		drmach_array_dispose(bp->devices, drmach_device_dispose);
		bp->devices = NULL;

		if (data.err)
			err = data.err;
		else
			err = DRMACH_INTERNAL_ERROR();
	}

	return (err);
}
1966
1967 int
drmach_board_lookup(int bnum,drmachid_t * id)1968 drmach_board_lookup(int bnum, drmachid_t *id)
1969 {
1970 int rv = 0;
1971
1972 if (bnum < 0) {
1973 *id = 0;
1974 return (-1);
1975 }
1976
1977 rw_enter(&drmach_boards_rwlock, RW_READER);
1978 if (drmach_array_get(drmach_boards, (uint_t)bnum, id)) {
1979 *id = 0;
1980 rv = -1;
1981 }
1982 rw_exit(&drmach_boards_rwlock);
1983
1984 return (rv);
1985 }
1986
/*
 * Generate the human-readable name of board 'bnum' into buf[buflen],
 * using the board's ACPI handle.  Returns NULL on success.
 */
sbd_error_t *
drmach_board_name(int bnum, char *buf, int buflen)
{
	ACPI_HANDLE hdl;
	sbd_error_t *err = NULL;

	if (bnum < 0) {
		return (drerr_new(1, EX86_BNUM, "%d", bnum));
	}

	acpidev_dr_lock_all();
	if (ACPI_FAILURE(acpidev_dr_get_board_handle(bnum, &hdl))) {
		DRMACH_PR("!drmach_board_name: failed to lookup ACPI handle "
		    "for board %d.", bnum);
		err = drerr_new(1, EX86_BNUM, "%d", bnum);
	} else if (ACPI_FAILURE(acpidev_dr_get_board_name(hdl, buf, buflen))) {
		DRMACH_PR("!drmach_board_name: failed to generate board name "
		    "for board %d.", bnum);
		err = drerr_new(0, EX86_INVALID_ARG,
		    ": buffer is too small for board name.");
	}
	acpidev_dr_unlock_all();

	return (err);
}
2012
2013 int
drmach_board_is_floating(drmachid_t id)2014 drmach_board_is_floating(drmachid_t id)
2015 {
2016 drmach_board_t *bp;
2017
2018 if (!DRMACH_IS_BOARD_ID(id))
2019 return (0);
2020
2021 bp = (drmach_board_t *)id;
2022
2023 return ((drmach_domain.floating & (1ULL << bp->bnum)) ? 1 : 0);
2024 }
2025
/*
 * Walk callback used by drmach_board_check_dependent() to verify that
 * each board in the dependency chain is in a state compatible with
 * applying command 'ctx' to the board being operated on.  On a state
 * violation the offending ACPI handle is stored through *retval and
 * AE_ERROR is returned to stop the walk.
 */
static ACPI_STATUS
drmach_board_check_dependent_cb(ACPI_HANDLE hdl, UINT32 lvl, void *ctx,
    void **retval)
{
	uint32_t bdnum;
	drmach_board_t *bp;
	ACPI_STATUS rc = AE_OK;
	int cmd = (int)(intptr_t)ctx;

	ASSERT(hdl != NULL);
	ASSERT(lvl == UINT32_MAX);
	ASSERT(retval != NULL);

	/* Skip non-board devices. */
	if (!acpidev_dr_device_is_board(hdl)) {
		return (AE_OK);
	} else if (ACPI_FAILURE(acpidev_dr_get_board_number(hdl, &bdnum))) {
		DRMACH_PR("!drmach_board_check_dependent_cb: failed to get "
		    "board number for object %p.\n", hdl);
		return (AE_ERROR);
	} else if (bdnum > MAX_BOARDS) {
		/*
		 * NOTE(review): board numbers elsewhere in this file look
		 * 0-based, so this bound may be off by one (it admits
		 * bdnum == MAX_BOARDS) -- confirm against the bounds of
		 * the drmach_boards array.
		 */
		DRMACH_PR("!drmach_board_check_dependent_cb: board number %u "
		    "is too big, max %u.", bdnum, MAX_BOARDS);
		return (AE_ERROR);
	}

	/* bp may be NULL if the board object has not been created yet. */
	bp = drmach_get_board_by_bnum(bdnum);
	switch (cmd) {
	case SBD_CMD_CONNECT:
		/*
		 * Its parent board should be present, assigned, powered and
		 * connected when connecting the child board.
		 */
		if (bp == NULL) {
			*retval = hdl;
			rc = AE_ERROR;
		} else {
			bp->powered = acpidev_dr_device_is_powered(hdl);
			if (!bp->connected || !bp->powered || !bp->assigned) {
				*retval = hdl;
				rc = AE_ERROR;
			}
		}
		break;

	case SBD_CMD_POWERON:
		/*
		 * Its parent board should be present, assigned and powered when
		 * powering on the child board.
		 */
		if (bp == NULL) {
			*retval = hdl;
			rc = AE_ERROR;
		} else {
			bp->powered = acpidev_dr_device_is_powered(hdl);
			if (!bp->powered || !bp->assigned) {
				*retval = hdl;
				rc = AE_ERROR;
			}
		}
		break;

	case SBD_CMD_ASSIGN:
		/*
		 * Its parent board should be present and assigned when
		 * assigning the child board.
		 */
		if (bp == NULL) {
			*retval = hdl;
			rc = AE_ERROR;
		} else if (!bp->assigned) {
			*retval = hdl;
			rc = AE_ERROR;
		}
		break;

	case SBD_CMD_DISCONNECT:
		/*
		 * The child board should be disconnected if present when
		 * disconnecting its parent board.
		 */
		if (bp != NULL && bp->connected) {
			*retval = hdl;
			rc = AE_ERROR;
		}
		break;

	case SBD_CMD_POWEROFF:
		/*
		 * The child board should be disconnected and powered off if
		 * present when powering off its parent board.
		 */
		if (bp != NULL) {
			bp->powered = acpidev_dr_device_is_powered(hdl);
			if (bp->connected || bp->powered) {
				*retval = hdl;
				rc = AE_ERROR;
			}
		}
		break;

	case SBD_CMD_UNASSIGN:
		/*
		 * The child board should be disconnected, powered off and
		 * unassigned if present when unassigning its parent board.
		 */
		if (bp != NULL) {
			bp->powered = acpidev_dr_device_is_powered(hdl);
			if (bp->connected || bp->powered || bp->assigned) {
				*retval = hdl;
				rc = AE_ERROR;
			}
		}
		break;

	default:
		/* Return success for all other commands. */
		break;
	}

	return (rc);
}
2148
/*
 * Verify that command 'cmd' may be applied to board 'bp' with respect
 * to its board dependencies.  ASSIGN/POWERON/CONNECT walk the boards
 * this board depends on (_EJD chain); UNASSIGN/POWEROFF/DISCONNECT
 * walk the boards that depend on it (ejection list).  Returns NULL
 * when the operation may proceed, otherwise an error naming the
 * dependency that blocks it.
 */
sbd_error_t *
drmach_board_check_dependent(int cmd, drmach_board_t *bp)
{
	int reverse;
	char *name;
	sbd_error_t *err = NULL;
	DRMACH_HANDLE hdl;
	DRMACH_HANDLE dp = NULL;

	ASSERT(bp != NULL);
	ASSERT(DRMACH_IS_BOARD_ID(bp));
	ASSERT(RW_LOCK_HELD(&drmach_boards_rwlock));

	hdl = drmach_node_get_dnode(bp->tree);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	switch (cmd) {
	case SBD_CMD_ASSIGN:
	case SBD_CMD_POWERON:
	case SBD_CMD_CONNECT:
		/* Walk the boards this board depends on. */
		if (ACPI_SUCCESS(acpidev_dr_device_walk_ejd(hdl,
		    &drmach_board_check_dependent_cb,
		    (void *)(intptr_t)cmd, &dp))) {
			return (NULL);
		}
		reverse = 0;
		break;

	case SBD_CMD_UNASSIGN:
	case SBD_CMD_POWEROFF:
	case SBD_CMD_DISCONNECT:
		/* Walk the boards that depend on this board. */
		if (ACPI_SUCCESS(acpidev_dr_device_walk_edl(hdl,
		    &drmach_board_check_dependent_cb,
		    (void *)(intptr_t)cmd, &dp))) {
			return (NULL);
		}
		reverse = 1;
		break;

	default:
		return (drerr_new(0, EX86_INAPPROP, NULL));
	}

	/* dp == NULL means the walk itself failed, not a state check. */
	if (dp == NULL) {
		return (drerr_new(1, EX86_WALK_DEPENDENCY, "%s", bp->cm.name));
	}
	name = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	if (ACPI_FAILURE(acpidev_dr_get_board_name(dp, name, MAXPATHLEN))) {
		err = drerr_new(1, EX86_WALK_DEPENDENCY, "%s", bp->cm.name);
	} else if (reverse == 0) {
		err = drerr_new(1, EX86_WALK_DEPENDENCY,
		    "%s, depends on board %s", bp->cm.name, name);
	} else {
		err = drerr_new(1, EX86_WALK_DEPENDENCY,
		    "board %s depends on %s", name, bp->cm.name);
	}
	kmem_free(name, MAXPATHLEN);

	return (err);
}
2210
/*
 * Assign board 'bnum' to the domain, creating its drmach_board_t
 * object on first use.  The board id is returned through *id.
 */
sbd_error_t *
drmach_board_assign(int bnum, drmachid_t *id)
{
	sbd_error_t *err = NULL;

	if (bnum < 0) {
		return (drerr_new(1, EX86_BNUM, "%d", bnum));
	}

	/* Writer lock: we may create a new board object below. */
	rw_enter(&drmach_boards_rwlock, RW_WRITER);

	if (drmach_array_get(drmach_boards, bnum, id) == -1) {
		err = drerr_new(1, EX86_BNUM, "%d", bnum);
	} else {
		drmach_board_t *bp;

		/*
		 * Board has already been created, downgrade to reader.
		 */
		if (*id)
			rw_downgrade(&drmach_boards_rwlock);

		bp = *id;
		if (!(*id))
			bp = *id =
			    (drmachid_t)drmach_board_new(bnum, 0);

		if (bp == NULL) {
			DRMACH_PR("!drmach_board_assign: failed to create "
			    "object for board %d.", bnum);
			err = drerr_new(1, EX86_BNUM, "%d", bnum);
		} else {
			err = drmach_board_check_dependent(SBD_CMD_ASSIGN, bp);
			if (err == NULL)
				bp->assigned = 1;
		}
	}

	rw_exit(&drmach_boards_rwlock);

	return (err);
}
2253
/*
 * Unassign a board from the domain and dispose of its object.  Fails
 * if the board is configured, busy, connected, powered, or blocked by
 * a dependent board.  A NULL id is a successful no-op.
 */
sbd_error_t *
drmach_board_unassign(drmachid_t id)
{
	drmach_board_t *bp;
	sbd_error_t *err;
	drmach_status_t stat;

	if (DRMACH_NULL_ID(id))
		return (NULL);

	if (!DRMACH_IS_BOARD_ID(id)) {
		return (drerr_new(0, EX86_INAPPROP, NULL));
	}
	bp = id;

	/* Writer lock: the board object may be destroyed below. */
	rw_enter(&drmach_boards_rwlock, RW_WRITER);

	err = drmach_board_status(id, &stat);
	if (err) {
		rw_exit(&drmach_boards_rwlock);
		return (err);
	}

	if (stat.configured || stat.busy) {
		err = drerr_new(0, EX86_CONFIGBUSY, bp->cm.name);
	} else if (bp->connected) {
		err = drerr_new(0, EX86_CONNECTBUSY, bp->cm.name);
	} else if (stat.powered) {
		err = drerr_new(0, EX86_POWERBUSY, bp->cm.name);
	} else {
		err = drmach_board_check_dependent(SBD_CMD_UNASSIGN, bp);
		if (err == NULL) {
			/* Remove from the board array, then destroy. */
			if (drmach_array_set(drmach_boards, bp->bnum, 0) != 0)
				err = DRMACH_INTERNAL_ERROR();
			else
				drmach_board_dispose(bp);
		}
	}

	rw_exit(&drmach_boards_rwlock);

	return (err);
}
2297
/*
 * Power on board 'id' via ACPI.  An already-powered board is a
 * successful no-op; dependency constraints are checked first, and the
 * power state is re-read afterwards to confirm success.
 */
sbd_error_t *
drmach_board_poweron(drmachid_t id)
{
	drmach_board_t *bp;
	sbd_error_t *err = NULL;
	DRMACH_HANDLE hdl;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	bp = id;

	hdl = drmach_node_get_dnode(bp->tree);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	/* Already powered: nothing to do. */
	bp->powered = drmach_board_check_power(bp);
	if (bp->powered) {
		return (NULL);
	}

	rw_enter(&drmach_boards_rwlock, RW_WRITER);
	err = drmach_board_check_dependent(SBD_CMD_POWERON, bp);
	if (err == NULL) {
		acpidev_dr_lock_all();
		if (ACPI_FAILURE(acpidev_dr_device_poweron(hdl)))
			err = drerr_new(0, EX86_POWERON, NULL);
		acpidev_dr_unlock_all();

		/* Check whether the board is powered on. */
		bp->powered = drmach_board_check_power(bp);
		if (err == NULL && bp->powered == 0)
			err = drerr_new(0, EX86_POWERON, NULL);
	}
	rw_exit(&drmach_boards_rwlock);

	return (err);
}
2335
/*
 * Power off the board's FRU through ACPI.  The board must already be
 * unconfigured, idle and disconnected; an already-off board succeeds
 * immediately.  The power-off runs under the global ACPI DR lock and
 * the resulting power state is re-verified.
 */
sbd_error_t *
drmach_board_poweroff(drmachid_t id)
{
	sbd_error_t	*err = NULL;
	drmach_board_t	*bp;
	drmach_status_t	stat;
	DRMACH_HANDLE	hdl;

	/* A NULL id is treated as a no-op, not an error. */
	if (DRMACH_NULL_ID(id))
		return (NULL);

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	bp = id;

	hdl = drmach_node_get_dnode(bp->tree);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	/* Check whether the board is busy, configured or connected. */
	err = drmach_board_status(id, &stat);
	if (err != NULL)
		return (err);
	if (stat.configured || stat.busy) {
		return (drerr_new(0, EX86_CONFIGBUSY, bp->cm.name));
	} else if (bp->connected) {
		return (drerr_new(0, EX86_CONNECTBUSY, bp->cm.name));
	}

	/* Refresh the cached power state; nothing to do if already off. */
	bp->powered = drmach_board_check_power(bp);
	if (bp->powered == 0) {
		return (NULL);
	}

	rw_enter(&drmach_boards_rwlock, RW_WRITER);
	err = drmach_board_check_dependent(SBD_CMD_POWEROFF, bp);
	if (err == NULL) {
		/* ACPI DR device operations require the global DR lock. */
		acpidev_dr_lock_all();
		if (ACPI_FAILURE(acpidev_dr_device_poweroff(hdl)))
			err = drerr_new(0, EX86_POWEROFF, NULL);
		acpidev_dr_unlock_all();

		/* Re-check; it is an error if the board is still powered. */
		bp->powered = drmach_board_check_power(bp);
		if (err == NULL && bp->powered != 0)
			err = drerr_new(0, EX86_POWEROFF, NULL);
	}
	rw_exit(&drmach_boards_rwlock);

	return (err);
}
2386
2387 sbd_error_t *
drmach_board_test(drmachid_t id,drmach_opts_t * opts,int force)2388 drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
2389 {
2390 _NOTE(ARGUNUSED(opts, force));
2391
2392 drmach_board_t *bp;
2393 DRMACH_HANDLE hdl;
2394
2395 if (DRMACH_NULL_ID(id))
2396 return (NULL);
2397
2398 if (!DRMACH_IS_BOARD_ID(id))
2399 return (drerr_new(0, EX86_INAPPROP, NULL));
2400 bp = id;
2401
2402 hdl = drmach_node_get_dnode(bp->tree);
2403 if (hdl == NULL)
2404 return (drerr_new(0, EX86_INAPPROP, NULL));
2405
2406 if (ACPI_FAILURE(acpidev_dr_device_check_status(hdl)))
2407 return (drerr_new(0, EX86_IN_FAILURE, NULL));
2408
2409 return (NULL);
2410 }
2411
2412 sbd_error_t *
drmach_board_connect(drmachid_t id,drmach_opts_t * opts)2413 drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
2414 {
2415 _NOTE(ARGUNUSED(opts));
2416
2417 sbd_error_t *err = NULL;
2418 drmach_board_t *bp = (drmach_board_t *)id;
2419 DRMACH_HANDLE hdl;
2420
2421 if (!DRMACH_IS_BOARD_ID(id))
2422 return (drerr_new(0, EX86_INAPPROP, NULL));
2423 bp = (drmach_board_t *)id;
2424
2425 hdl = drmach_node_get_dnode(bp->tree);
2426 if (hdl == NULL)
2427 return (drerr_new(0, EX86_INAPPROP, NULL));
2428
2429 rw_enter(&drmach_boards_rwlock, RW_WRITER);
2430 err = drmach_board_check_dependent(SBD_CMD_CONNECT, bp);
2431 if (err == NULL) {
2432 acpidev_dr_lock_all();
2433 if (ACPI_FAILURE(acpidev_dr_device_insert(hdl))) {
2434 (void) acpidev_dr_device_remove(hdl);
2435 err = drerr_new(1, EX86_PROBE, NULL);
2436 } else {
2437 bp->connected = 1;
2438 }
2439 acpidev_dr_unlock_all();
2440 }
2441 rw_exit(&drmach_boards_rwlock);
2442
2443 return (err);
2444 }
2445
/*
 * Disconnect (deprobe) a board: ask ACPI to remove the board's device
 * subtree.  The board must be unconfigured and idle first; the removal
 * runs under the global ACPI DR lock.
 */
sbd_error_t *
drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
{
	_NOTE(ARGUNUSED(opts));

	DRMACH_HANDLE	hdl;
	drmach_board_t	*bp;
	drmach_status_t	stat;
	sbd_error_t	*err = NULL;

	/* A NULL id is treated as a no-op, not an error. */
	if (DRMACH_NULL_ID(id))
		return (NULL);
	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	bp = (drmach_board_t *)id;

	hdl = drmach_node_get_dnode(bp->tree);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	/* Check whether the board is busy or configured. */
	err = drmach_board_status(id, &stat);
	if (err != NULL)
		return (err);
	if (stat.configured || stat.busy)
		return (drerr_new(0, EX86_CONFIGBUSY, bp->cm.name));

	rw_enter(&drmach_boards_rwlock, RW_WRITER);
	err = drmach_board_check_dependent(SBD_CMD_DISCONNECT, bp);
	if (err == NULL) {
		/* ACPI DR device operations require the global DR lock. */
		acpidev_dr_lock_all();
		if (ACPI_SUCCESS(acpidev_dr_device_remove(hdl))) {
			bp->connected = 0;
		} else {
			err = drerr_new(1, EX86_DEPROBE, bp->cm.name);
		}
		acpidev_dr_unlock_all();
	}
	rw_exit(&drmach_boards_rwlock);

	return (err);
}
2488
2489 sbd_error_t *
drmach_board_deprobe(drmachid_t id)2490 drmach_board_deprobe(drmachid_t id)
2491 {
2492 drmach_board_t *bp;
2493
2494 if (!DRMACH_IS_BOARD_ID(id))
2495 return (drerr_new(0, EX86_INAPPROP, NULL));
2496 bp = id;
2497
2498 cmn_err(CE_CONT, "DR: detach board %d\n", bp->bnum);
2499
2500 if (bp->devices) {
2501 drmach_array_dispose(bp->devices, drmach_device_dispose);
2502 bp->devices = NULL;
2503 }
2504
2505 bp->boot_board = 0;
2506
2507 return (NULL);
2508 }
2509
2510 /*
2511 * CPU specific interfaces to support dr driver
2512 */
2513 sbd_error_t *
drmach_cpu_disconnect(drmachid_t id)2514 drmach_cpu_disconnect(drmachid_t id)
2515 {
2516 if (!DRMACH_IS_CPU_ID(id))
2517 return (drerr_new(0, EX86_INAPPROP, NULL));
2518
2519 return (NULL);
2520 }
2521
2522 sbd_error_t *
drmach_cpu_get_id(drmachid_t id,processorid_t * cpuid)2523 drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid)
2524 {
2525 drmach_cpu_t *cpu;
2526
2527 if (!DRMACH_IS_CPU_ID(id))
2528 return (drerr_new(0, EX86_INAPPROP, NULL));
2529 cpu = (drmach_cpu_t *)id;
2530
2531 if (cpu->cpuid == -1) {
2532 if (ACPI_SUCCESS(acpica_get_cpu_id_by_object(
2533 drmach_node_get_dnode(cpu->dev.node), cpuid))) {
2534 cpu->cpuid = *cpuid;
2535 } else {
2536 *cpuid = -1;
2537 }
2538 } else {
2539 *cpuid = cpu->cpuid;
2540 }
2541
2542 return (NULL);
2543 }
2544
2545 sbd_error_t *
drmach_cpu_get_impl(drmachid_t id,int * ip)2546 drmach_cpu_get_impl(drmachid_t id, int *ip)
2547 {
2548 if (!DRMACH_IS_CPU_ID(id))
2549 return (drerr_new(0, EX86_INAPPROP, NULL));
2550
2551 /* Assume all CPUs in system are homogeneous. */
2552 *ip = X86_CPU_IMPL_UNKNOWN;
2553
2554 kpreempt_disable();
2555 if (cpuid_getvendor(CPU) == X86_VENDOR_Intel) {
2556 /* NHM-EX CPU */
2557 if (cpuid_getfamily(CPU) == 0x6 &&
2558 cpuid_getmodel(CPU) == 0x2e) {
2559 *ip = X86_CPU_IMPL_NEHALEM_EX;
2560 }
2561 }
2562 kpreempt_enable();
2563
2564 return (NULL);
2565 }
2566
2567 /*
2568 * Memory specific interfaces to support dr driver
2569 */
2570
2571 /*
2572 * When drmach_mem_new() is called, the mp->base_pa field is set to the base
2573 * address of configured memory if there's configured memory on the board,
2574 * otherwise set to UINT64_MAX. For hot-added memory board, there's no
2575 * configured memory when drmach_mem_new() is called, so mp->base_pa is set
2576 * to UINT64_MAX and we need to set a correct value for it after memory
2577 * hot-add operations.
2578 * A hot-added memory board may contain multiple memory segments,
2579 * drmach_mem_add_span() will be called once for each segment, so we can't
2580 * rely on the basepa argument. And it's possible that only part of a memory
2581 * segment is added into OS, so need to intersect with phys_installed list
2582 * to get the real base address of configured memory on the board.
2583 */
sbd_error_t *
drmach_mem_add_span(drmachid_t id, uint64_t basepa, uint64_t size)
{
	_NOTE(ARGUNUSED(basepa));

	uint64_t	nbytes = 0;
	uint64_t	endpa;
	drmach_mem_t	*mp;
	struct memlist	*ml2;
	struct memlist	*p;

	ASSERT(size != 0);

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	mp = (drmach_mem_t *)id;

	/* Compute basepa and size of installed memory. */
	endpa = _ptob64(physmax + 1);
	memlist_read_lock();
	ml2 = memlist_dup(phys_install);
	memlist_read_unlock();
	/*
	 * Clip the copy of phys_install to this board's slice by deleting
	 * everything below slice_base and above slice_top.
	 */
	ml2 = memlist_del_span(ml2, 0ull, mp->slice_base);
	if (ml2 && endpa > mp->slice_top) {
		ml2 = memlist_del_span(ml2, mp->slice_top,
		    endpa - mp->slice_top);
	}

	ASSERT(ml2);
	if (ml2) {
		/*
		 * Sum the installed bytes within the slice, and track the
		 * lowest address seen as the board's configured base (see
		 * the block comment above about hot-added boards).
		 */
		for (p = ml2; p; p = p->ml_next) {
			nbytes += p->ml_size;
			if (mp->base_pa > p->ml_address)
				mp->base_pa = p->ml_address;
		}
		ASSERT(nbytes > 0);
		mp->nbytes += nbytes;
		memlist_delete(ml2);
	}

	return (NULL);
}
2626
/*
 * Add a hot-added memory board's address range to the lgroup (NUMA)
 * topology.  A no-op when the board already has memory installed or
 * when lgroup support is disabled (max_mem_nodes == 1).
 */
static sbd_error_t *
drmach_mem_update_lgrp(drmachid_t id)
{
	ACPI_STATUS		rc;
	DRMACH_HANDLE		hdl;
	void			*hdlp;
	drmach_mem_t		*mp;
	update_membounds_t	umb;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	mp = (drmach_mem_t *)id;
	/* No need to update lgrp if memory is already installed. */
	if (mp->nbytes != 0)
		return (NULL);
	/* No need to update lgrp if lgrp is disabled. */
	if (max_mem_nodes == 1)
		return (NULL);

	/* Add memory to lgroup */
	hdl = mp->dev.node->get_dnode(mp->dev.node);
	rc = acpidev_dr_device_get_memory_index(hdl, &umb.u_device_id);
	ASSERT(ACPI_SUCCESS(rc));
	if (ACPI_FAILURE(rc)) {
		cmn_err(CE_WARN, "drmach: failed to get device id of memory, "
		    "can't update lgrp information.");
		return (drerr_new(0, EX86_INTERNAL, NULL));
	}
	/*
	 * Query the proximity domain and latency information for the
	 * board's memory; hdlp receives an opaque handle that must be
	 * released with acpidev_dr_free_mem_numa_info() below.
	 */
	rc = acpidev_dr_get_mem_numa_info(hdl, mp->memlist, &hdlp,
	    &umb.u_domain, &umb.u_sli_cnt, &umb.u_sli_ptr);
	ASSERT(ACPI_SUCCESS(rc));
	if (ACPI_FAILURE(rc)) {
		cmn_err(CE_WARN, "drmach: failed to get lgrp info of memory, "
		    "can't update lgrp information.");
		return (drerr_new(0, EX86_INTERNAL, NULL));
	}
	umb.u_base = (uint64_t)mp->slice_base;
	umb.u_length = (uint64_t)(mp->slice_top - mp->slice_base);
	lgrp_plat_config(LGRP_CONFIG_MEM_ADD, (uintptr_t)&umb);
	acpidev_dr_free_mem_numa_info(hdlp);

	return (NULL);
}
2670
2671 sbd_error_t *
drmach_mem_enable(drmachid_t id)2672 drmach_mem_enable(drmachid_t id)
2673 {
2674 if (!DRMACH_IS_MEM_ID(id))
2675 return (drerr_new(0, EX86_INAPPROP, NULL));
2676 else
2677 return (NULL);
2678 }
2679
2680 sbd_error_t *
drmach_mem_get_info(drmachid_t id,drmach_mem_info_t * mem)2681 drmach_mem_get_info(drmachid_t id, drmach_mem_info_t *mem)
2682 {
2683 drmach_mem_t *mp;
2684
2685 if (!DRMACH_IS_MEM_ID(id))
2686 return (drerr_new(0, EX86_INAPPROP, NULL));
2687 mp = (drmach_mem_t *)id;
2688
2689 /*
2690 * This is only used by dr to round up/down the memory
2691 * for copying.
2692 */
2693 mem->mi_alignment_mask = mp->mem_alignment - 1;
2694 mem->mi_basepa = mp->base_pa;
2695 mem->mi_size = mp->nbytes;
2696 mem->mi_slice_base = mp->slice_base;
2697 mem->mi_slice_top = mp->slice_top;
2698 mem->mi_slice_size = mp->slice_size;
2699
2700 return (NULL);
2701 }
2702
2703 sbd_error_t *
drmach_mem_get_slice_info(drmachid_t id,uint64_t * bp,uint64_t * ep,uint64_t * sp)2704 drmach_mem_get_slice_info(drmachid_t id,
2705 uint64_t *bp, uint64_t *ep, uint64_t *sp)
2706 {
2707 drmach_mem_t *mp;
2708
2709 if (!DRMACH_IS_MEM_ID(id))
2710 return (drerr_new(0, EX86_INAPPROP, NULL));
2711 mp = (drmach_mem_t *)id;
2712
2713 if (bp)
2714 *bp = mp->slice_base;
2715 if (ep)
2716 *ep = mp->slice_top;
2717 if (sp)
2718 *sp = mp->slice_size;
2719
2720 return (NULL);
2721 }
2722
/*
 * Return a caller-owned copy of the board's memlist in *ml; the caller
 * must free it with memlist_delete().
 */
sbd_error_t *
drmach_mem_get_memlist(drmachid_t id, struct memlist **ml)
{
#ifdef DEBUG
	int		rv;
#endif
	drmach_mem_t	*mem;
	struct memlist	*mlist;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	mem = (drmach_mem_t *)id;

	mlist = memlist_dup(mem->memlist);
	*ml = mlist;

#ifdef DEBUG
	/*
	 * Make sure the incoming memlist doesn't already
	 * intersect with what's present in the system (phys_install).
	 */
	memlist_read_lock();
	rv = memlist_intersect(phys_install, mlist);
	memlist_read_unlock();
	if (rv) {
		DRMACH_PR("Derived memlist intersects with phys_install\n");
		memlist_dump(mlist);

		DRMACH_PR("phys_install memlist:\n");
		memlist_dump(phys_install);

		/*
		 * NOTE(review): *ml is left pointing at the freed list on
		 * this error path; callers must honor the returned error
		 * and not touch *ml — confirm against callers.
		 */
		memlist_delete(mlist);
		return (DRMACH_INTERNAL_ERROR());
	}

	DRMACH_PR("Derived memlist:");
	memlist_dump(mlist);
#endif

	return (NULL);
}
2764
/*
 * Preferred CPU for the memory copy-rename operation; this platform
 * has no board-local affinity requirement, so the current CPU is fine.
 */
processorid_t
drmach_mem_cpu_affinity(drmachid_t id)
{
	_NOTE(ARGUNUSED(id));

	return (CPU_CURRENT);
}
2772
/*
 * Whether the OS must be quiesced for copy-rename; never required on
 * this platform.
 */
int
drmach_copy_rename_need_suspend(drmachid_t id)
{
	_NOTE(ARGUNUSED(id));

	return (0);
}
2780
2781 /*
2782 * IO specific interfaces to support dr driver
2783 */
2784 sbd_error_t *
drmach_io_pre_release(drmachid_t id)2785 drmach_io_pre_release(drmachid_t id)
2786 {
2787 if (!DRMACH_IS_IO_ID(id))
2788 return (drerr_new(0, EX86_INAPPROP, NULL));
2789
2790 return (NULL);
2791 }
2792
2793 sbd_error_t *
drmach_io_unrelease(drmachid_t id)2794 drmach_io_unrelease(drmachid_t id)
2795 {
2796 if (!DRMACH_IS_IO_ID(id))
2797 return (drerr_new(0, EX86_INAPPROP, NULL));
2798
2799 return (NULL);
2800 }
2801
/*
 * Hook run after an IO device release; nothing to do on this platform.
 */
sbd_error_t *
drmach_io_post_release(drmachid_t id)
{
	_NOTE(ARGUNUSED(id));

	return (NULL);
}
2809
2810 sbd_error_t *
drmach_io_post_attach(drmachid_t id)2811 drmach_io_post_attach(drmachid_t id)
2812 {
2813 if (!DRMACH_IS_IO_ID(id))
2814 return (drerr_new(0, EX86_INAPPROP, NULL));
2815
2816 return (NULL);
2817 }
2818
2819 sbd_error_t *
drmach_io_is_attached(drmachid_t id,int * yes)2820 drmach_io_is_attached(drmachid_t id, int *yes)
2821 {
2822 drmach_device_t *dp;
2823 dev_info_t *dip;
2824 int state;
2825
2826 if (!DRMACH_IS_IO_ID(id))
2827 return (drerr_new(0, EX86_INAPPROP, NULL));
2828 dp = id;
2829
2830 dip = dp->node->getdip(dp->node);
2831 if (dip == NULL) {
2832 *yes = 0;
2833 return (NULL);
2834 }
2835
2836 state = ddi_get_devstate(dip);
2837 *yes = ((i_ddi_node_state(dip) >= DS_ATTACHED) ||
2838 (state == DDI_DEVSTATE_UP));
2839
2840 return (NULL);
2841 }
2842
2843 /*
2844 * Miscellaneous interfaces to support dr driver
2845 */
/*
 * Validate a device for suspend/resume; always succeeds on this
 * platform.
 */
int
drmach_verify_sr(dev_info_t *dip, int sflag)
{
	_NOTE(ARGUNUSED(dip, sflag));

	return (0);
}
2853
/* Hook run after the last device is suspended; no-op on this platform. */
void
drmach_suspend_last(void)
{
}
2858
/* Hook run before the first device is resumed; no-op on this platform. */
void
drmach_resume_first(void)
{
}
2863
2864 /*
2865 * Log a DR sysevent.
2866 * Return value: 0 success, non-zero failure.
2867 */
int
drmach_log_sysevent(int board, char *hint, int flag, int verbose)
{
	sysevent_t		*ev = NULL;
	sysevent_id_t		eid;
	int			rv, km_flag;
	sysevent_value_t	evnt_val;
	sysevent_attr_list_t	*evnt_attr_list = NULL;
	sbd_error_t		*err;
	char			attach_pnt[MAXNAMELEN];

	/* Only sleep for allocations when the caller allows it. */
	km_flag = (flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
	attach_pnt[0] = '\0';
	/* Resolve the board number to its attachment-point name. */
	err = drmach_board_name(board, attach_pnt, MAXNAMELEN);
	if (err != NULL) {
		sbd_err_clear(&err);
		rv = -1;
		goto logexit;
	}
	if (verbose) {
		DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n",
		    attach_pnt, hint, flag, verbose);
	}

	if ((ev = sysevent_alloc(EC_DR, ESC_DR_AP_STATE_CHANGE,
	    SUNW_KERN_PUB"dr", km_flag)) == NULL) {
		rv = -2;
		goto logexit;
	}
	/* First attribute: the attachment-point id. */
	evnt_val.value_type = SE_DATA_TYPE_STRING;
	evnt_val.value.sv_string = attach_pnt;
	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_AP_ID, &evnt_val,
	    km_flag)) != 0)
		goto logexit;

	/* Second attribute: the caller-supplied hint string. */
	evnt_val.value_type = SE_DATA_TYPE_STRING;
	evnt_val.value.sv_string = hint;
	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_HINT, &evnt_val,
	    km_flag)) != 0) {
		/* Not attached to ev yet, so free the list explicitly. */
		sysevent_free_attr(evnt_attr_list);
		goto logexit;
	}

	/* ev now owns the attribute list. */
	(void) sysevent_attach_attributes(ev, evnt_attr_list);

	/*
	 * Log the event but do not sleep waiting for its
	 * delivery. This provides insulation from syseventd.
	 */
	rv = log_sysevent(ev, SE_NOSLEEP, &eid);

logexit:
	if (ev)
		sysevent_free(ev);
	if ((rv != 0) && verbose)
		cmn_err(CE_WARN, "!drmach_log_sysevent failed (rv %d) for %s "
		    " %s\n", rv, attach_pnt, hint);

	return (rv);
}
2928