1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26 /*
27 * Copyright (c) 2010, Intel Corporation.
28 * All rights reserved.
29 */
30
31 /*
32  * PIM-DR layer of the DR driver. Provides the interface between
33  * user-level applications and the PSM-DR layer.
34 */
35
36 #include <sys/note.h>
37 #include <sys/debug.h>
38 #include <sys/types.h>
39 #include <sys/errno.h>
40 #include <sys/cred.h>
41 #include <sys/dditypes.h>
42 #include <sys/devops.h>
43 #include <sys/modctl.h>
44 #include <sys/poll.h>
45 #include <sys/conf.h>
46 #include <sys/ddi.h>
47 #include <sys/sunddi.h>
48 #include <sys/sunndi.h>
49 #include <sys/stat.h>
50 #include <sys/kmem.h>
51 #include <sys/processor.h>
52 #include <sys/cpuvar.h>
53 #include <sys/mem_config.h>
54
55 #include <sys/autoconf.h>
56 #include <sys/cmn_err.h>
57
58 #include <sys/ddi_impldefs.h>
59 #include <sys/promif.h>
60 #include <sys/machsystm.h>
61
62 #include <sys/dr.h>
63 #include <sys/drmach.h>
64 #include <sys/dr_util.h>
65
66 extern int nulldev();
67 extern int nodev();
68 extern struct memlist *phys_install;
69
70 #ifdef DEBUG
71 uint_t dr_debug = 0; /* dr.h for bit values */
72 #endif /* DEBUG */
73
74 /*
75 * NOTE: state_str, nt_str and SBD_CMD_STR are only used in a debug
76 * kernel. They are, however, referenced during both debug and non-debug
77 * compiles.
78 */
79
80 static char *state_str[] = {
81 "EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
82 "PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
83 "FATAL"
84 };
85
86 #define SBD_CMD_STR(c) \
87 (((c) == SBD_CMD_ASSIGN) ? "ASSIGN" : \
88 ((c) == SBD_CMD_UNASSIGN) ? "UNASSIGN" : \
89 ((c) == SBD_CMD_POWERON) ? "POWERON" : \
90 ((c) == SBD_CMD_POWEROFF) ? "POWEROFF" : \
91 ((c) == SBD_CMD_TEST) ? "TEST" : \
92 ((c) == SBD_CMD_CONNECT) ? "CONNECT" : \
93 ((c) == SBD_CMD_DISCONNECT) ? "DISCONNECT" : \
94 ((c) == SBD_CMD_CONFIGURE) ? "CONFIGURE" : \
95 ((c) == SBD_CMD_UNCONFIGURE) ? "UNCONFIGURE" : \
96 ((c) == SBD_CMD_GETNCM) ? "GETNCM" : \
97 ((c) == SBD_CMD_PASSTHRU) ? "PASSTHRU" : \
98 ((c) == SBD_CMD_STATUS) ? "STATUS" : "unknown")
99
100 #define DR_GET_BOARD_DEVUNIT(sb, ut, un) (&((sb)->b_dev[DEVSET_NIX(ut)][un]))
101
102 #define DR_MAKE_MINOR(i, b) (((i) << 16) | (b))
103 #define DR_MINOR2INST(m) (((m) >> 16) & 0xffff)
104 #define DR_MINOR2BNUM(m) ((m) & 0xffff)
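/*
 * For example: the driver instance occupies the upper 16 bits of the
 * minor number and the board number the lower 16 bits, so
 *
 *	DR_MAKE_MINOR(1, 3)	== 0x10003
 *	DR_MINOR2INST(0x10003)	== 1
 *	DR_MINOR2BNUM(0x10003)	== 3
 */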
105
106 /* for the DR*INTERNAL_ERROR macros. see sys/dr.h. */
107 static char *dr_ie_fmt = "dr.c %d";
108
109 /* struct for drmach device name to sbd_comp_type_t mapping */
110 typedef struct {
111 char *s_devtype;
112 sbd_comp_type_t s_nodetype;
113 } dr_devname_t;
114
115 /* struct to map starfire device attributes - name:sbd_comp_type_t */
116 static dr_devname_t dr_devattr[] = {
117 { DRMACH_DEVTYPE_MEM, SBD_COMP_MEM },
118 { DRMACH_DEVTYPE_CPU, SBD_COMP_CPU },
119 { DRMACH_DEVTYPE_PCI, SBD_COMP_IO },
120 #if defined(DRMACH_DEVTYPE_SBUS)
121 { DRMACH_DEVTYPE_SBUS, SBD_COMP_IO },
122 #endif
123 #if defined(DRMACH_DEVTYPE_WCI)
124 { DRMACH_DEVTYPE_WCI, SBD_COMP_IO },
125 #endif
126 /* last s_devtype must be NULL, s_nodetype must be SBD_COMP_UNKNOWN */
127 { NULL, SBD_COMP_UNKNOWN }
128 };
129
130 /*
131 * Per instance soft-state structure.
132 */
133 typedef struct dr_softstate {
134 dev_info_t *dip;
135 dr_board_t *boards;
136 kmutex_t i_lock;
137 int dr_initialized;
138 } dr_softstate_t;
139
140 /*
141 * dr Global data elements
142 */
143 struct dr_global {
144 	dr_softstate_t	*softsp;	/* pointer to initialized soft state */
145 kmutex_t lock;
146 } dr_g;
147
148 dr_unsafe_devs_t dr_unsafe_devs;
149
150 /*
151 * Table of known passthru commands.
152 */
153 struct {
154 char *pt_name;
155 int (*pt_func)(dr_handle_t *);
156 } pt_arr[] = {
157 "quiesce", dr_pt_test_suspend,
158 };
159
160 int dr_modunload_okay = 0; /* set to non-zero to allow unload */
161
162 /*
163  * State transition table.  Lists the valid "board" state transitions
164  * for each command; see the example following the table.  A non-zero
165  * return value terminates the operation; the h_err value is what really indicates an error, if any.
166 */
167 static int
168 _cmd2index(int c)
169 {
170 /*
171 * Translate DR CMD to index into dr_state_transition.
172 */
173 switch (c) {
174 case SBD_CMD_CONNECT: return (0);
175 case SBD_CMD_DISCONNECT: return (1);
176 case SBD_CMD_CONFIGURE: return (2);
177 case SBD_CMD_UNCONFIGURE: return (3);
178 case SBD_CMD_ASSIGN: return (4);
179 case SBD_CMD_UNASSIGN: return (5);
180 case SBD_CMD_POWERON: return (6);
181 case SBD_CMD_POWEROFF: return (7);
182 case SBD_CMD_TEST: return (8);
183 default: return (-1);
184 }
185 }
186
187 #define CMD2INDEX(c) _cmd2index(c)
188
189 static struct dr_state_trans {
190 int x_cmd;
191 struct {
192 int x_rv; /* return value of pre_op */
193 int x_err; /* error, if any */
194 } x_op[DR_STATE_MAX];
195 } dr_state_transition[] = {
196 { SBD_CMD_CONNECT,
197 {
198 { 0, 0 }, /* empty */
199 { 0, 0 }, /* occupied */
200 { -1, ESBD_STATE }, /* connected */
201 { -1, ESBD_STATE }, /* unconfigured */
202 { -1, ESBD_STATE }, /* partial */
203 { -1, ESBD_STATE }, /* configured */
204 { -1, ESBD_STATE }, /* release */
205 { -1, ESBD_STATE }, /* unreferenced */
206 { -1, ESBD_FATAL_STATE }, /* fatal */
207 }
208 },
209 { SBD_CMD_DISCONNECT,
210 {
211 { -1, ESBD_STATE }, /* empty */
212 { 0, 0 }, /* occupied */
213 { 0, 0 }, /* connected */
214 { 0, 0 }, /* unconfigured */
215 { -1, ESBD_STATE }, /* partial */
216 { -1, ESBD_STATE }, /* configured */
217 { -1, ESBD_STATE }, /* release */
218 { -1, ESBD_STATE }, /* unreferenced */
219 { -1, ESBD_FATAL_STATE }, /* fatal */
220 }
221 },
222 { SBD_CMD_CONFIGURE,
223 {
224 { -1, ESBD_STATE }, /* empty */
225 { -1, ESBD_STATE }, /* occupied */
226 { 0, 0 }, /* connected */
227 { 0, 0 }, /* unconfigured */
228 { 0, 0 }, /* partial */
229 { 0, 0 }, /* configured */
230 { -1, ESBD_STATE }, /* release */
231 { -1, ESBD_STATE }, /* unreferenced */
232 { -1, ESBD_FATAL_STATE }, /* fatal */
233 }
234 },
235 { SBD_CMD_UNCONFIGURE,
236 {
237 { -1, ESBD_STATE }, /* empty */
238 { -1, ESBD_STATE }, /* occupied */
239 { -1, ESBD_STATE }, /* connected */
240 { -1, ESBD_STATE }, /* unconfigured */
241 { 0, 0 }, /* partial */
242 { 0, 0 }, /* configured */
243 { 0, 0 }, /* release */
244 { 0, 0 }, /* unreferenced */
245 { -1, ESBD_FATAL_STATE }, /* fatal */
246 }
247 },
248 { SBD_CMD_ASSIGN,
249 {
250 { 0, 0 }, /* empty */
251 { 0, 0 }, /* occupied */
252 { -1, ESBD_STATE }, /* connected */
253 { -1, ESBD_STATE }, /* unconfigured */
254 { -1, ESBD_STATE }, /* partial */
255 { -1, ESBD_STATE }, /* configured */
256 { -1, ESBD_STATE }, /* release */
257 { -1, ESBD_STATE }, /* unreferenced */
258 { -1, ESBD_FATAL_STATE }, /* fatal */
259 }
260 },
261 { SBD_CMD_UNASSIGN,
262 {
263 { 0, 0 }, /* empty */
264 { 0, 0 }, /* occupied */
265 { -1, ESBD_STATE }, /* connected */
266 { -1, ESBD_STATE }, /* unconfigured */
267 { -1, ESBD_STATE }, /* partial */
268 { -1, ESBD_STATE }, /* configured */
269 { -1, ESBD_STATE }, /* release */
270 { -1, ESBD_STATE }, /* unreferenced */
271 { -1, ESBD_FATAL_STATE }, /* fatal */
272 }
273 },
274 { SBD_CMD_POWERON,
275 {
276 { 0, 0 }, /* empty */
277 { 0, 0 }, /* occupied */
278 { -1, ESBD_STATE }, /* connected */
279 { -1, ESBD_STATE }, /* unconfigured */
280 { -1, ESBD_STATE }, /* partial */
281 { -1, ESBD_STATE }, /* configured */
282 { -1, ESBD_STATE }, /* release */
283 { -1, ESBD_STATE }, /* unreferenced */
284 { -1, ESBD_FATAL_STATE }, /* fatal */
285 }
286 },
287 { SBD_CMD_POWEROFF,
288 {
289 { 0, 0 }, /* empty */
290 { 0, 0 }, /* occupied */
291 { -1, ESBD_STATE }, /* connected */
292 { -1, ESBD_STATE }, /* unconfigured */
293 { -1, ESBD_STATE }, /* partial */
294 { -1, ESBD_STATE }, /* configured */
295 { -1, ESBD_STATE }, /* release */
296 { -1, ESBD_STATE }, /* unreferenced */
297 { -1, ESBD_FATAL_STATE }, /* fatal */
298 }
299 },
300 { SBD_CMD_TEST,
301 {
302 { 0, 0 }, /* empty */
303 { 0, 0 }, /* occupied */
304 { -1, ESBD_STATE }, /* connected */
305 { -1, ESBD_STATE }, /* unconfigured */
306 { -1, ESBD_STATE }, /* partial */
307 { -1, ESBD_STATE }, /* configured */
308 { -1, ESBD_STATE }, /* release */
309 { -1, ESBD_STATE }, /* unreferenced */
310 { -1, ESBD_FATAL_STATE }, /* fatal */
311 }
312 },
313 };
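/*
 * Illustrative example (assuming the DR_STATE_* values line up with the
 * per-entry comments above): dr_pre_op() selects a row of this table by
 * command and then indexes it by a DR state value.  For SBD_CMD_CONNECT
 * against a board that is already in the CONNECTED state, the entry
 *
 *	dr_state_transition[CMD2INDEX(SBD_CMD_CONNECT)].x_op[DR_STATE_CONNECTED]
 *
 * is { -1, ESBD_STATE }: the operation terminates (x_rv == -1) and
 * ESBD_STATE is reported through the handle's error (x_err).
 */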
314
315 /*
316 * Global R/W lock to synchronize access across
317 * multiple boards. Users wanting multi-board access
318 * must grab WRITE lock, others must grab READ lock.
319 */
320 krwlock_t dr_grwlock;
321
322 /*
323 * Head of the boardlist used as a reference point for
324 * locating board structs.
325 * TODO: eliminate dr_boardlist
326 */
327 dr_board_t *dr_boardlist;
328
329 /*
330 * DR support functions.
331 */
332 static dr_devset_t dr_dev2devset(sbd_comp_id_t *cid);
333 static int dr_check_transition(dr_board_t *bp,
334 dr_devset_t *devsetp,
335 struct dr_state_trans *transp,
336 int cmd);
337 static int dr_check_unit_attached(dr_common_unit_t *dp);
338 static sbd_error_t *dr_init_devlists(dr_board_t *bp);
339 static void dr_board_discovery(dr_board_t *bp);
340 static int dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd);
341 static void dr_board_destroy(dr_board_t *bp);
342 static void dr_board_transition(dr_board_t *bp, dr_state_t st);
343
344 /*
345 * DR driver (DDI) entry points.
346 */
347 static int dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
348 void *arg, void **result);
349 static int dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
350 static int dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
351 static int dr_probe(dev_info_t *dip);
352 static int dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
353 cred_t *cred_p, int *rval_p);
354 static int dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p);
355 static int dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p);
356
357 /*
358 * DR command processing operations.
359 */
360 static int dr_copyin_iocmd(dr_handle_t *hp);
361 static int dr_copyout_iocmd(dr_handle_t *hp);
362 static int dr_copyout_errs(dr_handle_t *hp);
363 static int dr_pre_op(dr_handle_t *hp);
364 static int dr_post_op(dr_handle_t *hp, int rv);
365 static int dr_exec_op(dr_handle_t *hp);
366 static void dr_assign_board(dr_handle_t *hp);
367 static void dr_unassign_board(dr_handle_t *hp);
368 static void dr_connect(dr_handle_t *hp);
369 static int dr_disconnect(dr_handle_t *hp);
370 static void dr_dev_configure(dr_handle_t *hp);
371 static void dr_dev_release(dr_handle_t *hp);
372 static int dr_dev_unconfigure(dr_handle_t *hp);
373 static void dr_dev_cancel(dr_handle_t *hp);
374 static int dr_dev_status(dr_handle_t *hp);
375 static int dr_get_ncm(dr_handle_t *hp);
376 static int dr_pt_ioctl(dr_handle_t *hp);
377 static void dr_poweron_board(dr_handle_t *hp);
378 static void dr_poweroff_board(dr_handle_t *hp);
379 static void dr_test_board(dr_handle_t *hp);
380
381 /*
382 * Autoconfiguration data structures
383 */
384 struct cb_ops dr_cb_ops = {
385 dr_open, /* open */
386 dr_close, /* close */
387 nodev, /* strategy */
388 nodev, /* print */
389 nodev, /* dump */
390 nodev, /* read */
391 nodev, /* write */
392 dr_ioctl, /* ioctl */
393 nodev, /* devmap */
394 nodev, /* mmap */
395 nodev, /* segmap */
396 nochpoll, /* chpoll */
397 ddi_prop_op, /* cb_prop_op */
398 NULL, /* struct streamtab */
399 D_NEW | D_MP | D_MTSAFE, /* compatibility flags */
400 CB_REV, /* Rev */
401 nodev, /* cb_aread */
402 nodev /* cb_awrite */
403 };
404
405 struct dev_ops dr_dev_ops = {
406 DEVO_REV, /* build version */
407 0, /* dev ref count */
408 dr_getinfo, /* getinfo */
409 nulldev, /* identify */
410 dr_probe, /* probe */
411 dr_attach, /* attach */
412 dr_detach, /* detach */
413 nodev, /* reset */
414 &dr_cb_ops, /* cb_ops */
415 (struct bus_ops *)NULL, /* bus ops */
416 NULL, /* power */
417 ddi_quiesce_not_needed, /* quiesce */
418 };
419
420 extern struct mod_ops mod_driverops;
421
422 static struct modldrv modldrv = {
423 &mod_driverops,
424 "Dynamic Reconfiguration",
425 &dr_dev_ops
426 };
427
428 static struct modlinkage modlinkage = {
429 MODREV_1,
430 (void *)&modldrv,
431 NULL
432 };
433
434 /*
435 * Driver entry points.
436 */
437 int
438 _init(void)
439 {
440 int err;
441
442 /*
443 * If you need to support multiple nodes (instances), then
444 	 * whatever the maximum number of supported nodes is would
445 	 * need to be passed as the third parameter to ddi_soft_state_init().
446 	 * An alternative would be to dynamically fini and re-init the
447 * soft state structure each time a node is attached.
448 */
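	/*
	 * Hypothetical sketch only: supporting up to N instances up front
	 * (N is not defined in this file) would simply change the third
	 * argument of the call below:
	 *
	 *	err = ddi_soft_state_init((void **)&dr_g.softsp,
	 *	    sizeof (dr_softstate_t), N);
	 */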
449 err = ddi_soft_state_init((void **)&dr_g.softsp,
450 sizeof (dr_softstate_t), 1);
451 if (err)
452 return (err);
453
454 mutex_init(&dr_g.lock, NULL, MUTEX_DRIVER, NULL);
455 rw_init(&dr_grwlock, NULL, RW_DEFAULT, NULL);
456
457 return (mod_install(&modlinkage));
458 }
459
460 int
461 _fini(void)
462 {
463 int err;
464
465 if ((err = mod_remove(&modlinkage)) != 0)
466 return (err);
467
468 mutex_destroy(&dr_g.lock);
469 rw_destroy(&dr_grwlock);
470
471 ddi_soft_state_fini((void **)&dr_g.softsp);
472
473 return (0);
474 }
475
476 int
477 _info(struct modinfo *modinfop)
478 {
479 return (mod_info(&modlinkage, modinfop));
480 }
481
482 /*ARGSUSED1*/
483 static int
484 dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p)
485 {
486 int instance;
487 dr_softstate_t *softsp;
488 dr_board_t *bp;
489
490 /*
491 * Don't open unless we've attached.
492 */
493 instance = DR_MINOR2INST(getminor(*dev));
494 softsp = ddi_get_soft_state(dr_g.softsp, instance);
495 if (softsp == NULL)
496 return (ENXIO);
497
498 mutex_enter(&softsp->i_lock);
499 if (!softsp->dr_initialized) {
500 int bd;
501 int rv = 0;
502
503 bp = softsp->boards;
504
505 /* initialize each array element */
506 for (bd = 0; bd < MAX_BOARDS; bd++, bp++) {
507 rv = dr_board_init(bp, softsp->dip, bd);
508 if (rv)
509 break;
510 }
511
512 if (rv == 0) {
513 softsp->dr_initialized = 1;
514 } else {
515 /* destroy elements initialized thus far */
516 while (--bp >= softsp->boards)
517 dr_board_destroy(bp);
518
519 /* TODO: should this be another errno val ? */
520 mutex_exit(&softsp->i_lock);
521 return (ENXIO);
522 }
523 }
524 mutex_exit(&softsp->i_lock);
525
526 bp = &softsp->boards[DR_MINOR2BNUM(getminor(*dev))];
527
528 /*
529 * prevent opening of a dyn-ap for a board
530 * that does not exist
531 */
532 if (!bp->b_assigned) {
533 if (drmach_board_lookup(bp->b_num, &bp->b_id) != 0)
534 return (ENODEV);
535 }
536
537 return (0);
538 }
539
540 /*ARGSUSED*/
541 static int
542 dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
543 {
544 return (0);
545 }
546
547 /*
548 * Enable/disable DR features.
549 */
550 int dr_enable = 1;
551
552 /*ARGSUSED3*/
553 static int
554 dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
555 cred_t *cred_p, int *rval_p)
556 {
557 static int dr_dev_type_to_nt(char *);
558
559 int rv = 0;
560 int instance;
561 int bd;
562 dr_handle_t *hp;
563 dr_softstate_t *softsp;
564 static fn_t f = "dr_ioctl";
565
566 PR_ALL("%s...\n", f);
567
568 instance = DR_MINOR2INST(getminor(dev));
569 softsp = ddi_get_soft_state(dr_g.softsp, instance);
570 if (softsp == NULL) {
571 cmn_err(CE_WARN, "dr%d: module not yet attached", instance);
572 return (ENXIO);
573 }
574
575 if (!dr_enable) {
576 switch (cmd) {
577 case SBD_CMD_STATUS:
578 case SBD_CMD_GETNCM:
579 case SBD_CMD_PASSTHRU:
580 break;
581 default:
582 return (ENOTSUP);
583 }
584 }
585
586 bd = DR_MINOR2BNUM(getminor(dev));
587 if (bd >= MAX_BOARDS)
588 return (ENXIO);
589
590 /* get and initialize storage for new handle */
591 hp = GETSTRUCT(dr_handle_t, 1);
592 hp->h_bd = &softsp->boards[bd];
593 hp->h_err = NULL;
594 hp->h_dev = getminor(dev);
595 hp->h_cmd = cmd;
596 hp->h_mode = mode;
597 hp->h_iap = (sbd_ioctl_arg_t *)arg;
598
599 /* copy sbd command into handle */
600 rv = dr_copyin_iocmd(hp);
601 if (rv) {
602 FREESTRUCT(hp, dr_handle_t, 1);
603 return (EINVAL);
604 }
605
606 /* translate canonical name to component type */
607 if (hp->h_sbdcmd.cmd_cm.c_id.c_name[0] != '\0') {
608 hp->h_sbdcmd.cmd_cm.c_id.c_type =
609 dr_dev_type_to_nt(hp->h_sbdcmd.cmd_cm.c_id.c_name);
610
611 PR_ALL("%s: c_name = %s, c_type = %d\n",
612 f,
613 hp->h_sbdcmd.cmd_cm.c_id.c_name,
614 hp->h_sbdcmd.cmd_cm.c_id.c_type);
615 } else {
616 /*EMPTY*/
617 PR_ALL("%s: c_name is NULL\n", f);
618 }
619
620 /* determine scope of operation */
621 hp->h_devset = dr_dev2devset(&hp->h_sbdcmd.cmd_cm.c_id);
622
623 switch (hp->h_cmd) {
624 case SBD_CMD_STATUS:
625 case SBD_CMD_GETNCM:
626 /* no locks needed for these commands */
627 break;
628
629 default:
630 rw_enter(&dr_grwlock, RW_WRITER);
631 mutex_enter(&hp->h_bd->b_lock);
632
633 /*
634 * If we're dealing with memory at all, then we have
635 * to keep the "exclusive" global lock held. This is
636 * necessary since we will probably need to look at
637 * multiple board structs. Otherwise, we only have
638 * to deal with the board in question and so can drop
639 * the global lock to "shared".
640 */
641 rv = DEVSET_IN_SET(hp->h_devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
642 if (rv == 0)
643 rw_downgrade(&dr_grwlock);
644 break;
645 }
646 rv = 0;
647
648 if (rv == 0)
649 rv = dr_pre_op(hp);
650 if (rv == 0) {
651 rv = dr_exec_op(hp);
652 rv = dr_post_op(hp, rv);
653 }
654
655 if (rv == -1)
656 rv = EIO;
657
658 if (hp->h_err != NULL)
659 if (!(rv = dr_copyout_errs(hp)))
660 rv = EIO;
661
662 /* undo locking, if any, done before dr_pre_op */
663 switch (hp->h_cmd) {
664 case SBD_CMD_STATUS:
665 case SBD_CMD_GETNCM:
666 break;
667
668 case SBD_CMD_ASSIGN:
669 case SBD_CMD_UNASSIGN:
670 case SBD_CMD_POWERON:
671 case SBD_CMD_POWEROFF:
672 case SBD_CMD_CONNECT:
673 case SBD_CMD_CONFIGURE:
674 case SBD_CMD_UNCONFIGURE:
675 case SBD_CMD_DISCONNECT:
676 /* Board changed state. Log a sysevent. */
677 if (rv == 0)
678 (void) drmach_log_sysevent(hp->h_bd->b_num, "",
679 SE_SLEEP, 0);
680 /* Fall through */
681
682 default:
683 mutex_exit(&hp->h_bd->b_lock);
684 rw_exit(&dr_grwlock);
685 }
686
687 if (hp->h_opts.size != 0)
688 FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);
689
690 FREESTRUCT(hp, dr_handle_t, 1);
691
692 return (rv);
693 }
694
695 /*ARGSUSED*/
696 static int
697 dr_probe(dev_info_t *dip)
698 {
699 return (DDI_PROBE_SUCCESS);
700 }
701
702 static int
703 dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
704 {
705 int rv, rv2;
706 int bd;
707 int instance;
708 sbd_error_t *err;
709 dr_softstate_t *softsp;
710
711 instance = ddi_get_instance(dip);
712
713 switch (cmd) {
714 case DDI_ATTACH:
715 rw_enter(&dr_grwlock, RW_WRITER);
716
717 rv = ddi_soft_state_zalloc(dr_g.softsp, instance);
718 if (rv != DDI_SUCCESS) {
719 cmn_err(CE_WARN, "dr%d: failed to alloc soft-state",
720 instance);
721 return (DDI_FAILURE);
722 }
723
724 /* initialize softstate structure */
725 softsp = ddi_get_soft_state(dr_g.softsp, instance);
726 softsp->dip = dip;
727
728 mutex_init(&softsp->i_lock, NULL, MUTEX_DRIVER, NULL);
729
730 /* allocate board array (aka boardlist) */
731 softsp->boards = GETSTRUCT(dr_board_t, MAX_BOARDS);
732
733 /* TODO: eliminate dr_boardlist */
734 dr_boardlist = softsp->boards;
735
736 /* initialize each array element */
737 rv = DDI_SUCCESS;
738 for (bd = 0; bd < MAX_BOARDS; bd++) {
739 dr_board_t *bp = &softsp->boards[bd];
740 char *p, *name;
741 int l, minor_num;
742
743 /*
744 			 * Initialize the board attachment point path
745 			 * (relative to pseudo) in a form immediately
746 			 * reusable as a cfgadm command argument.
747 * TODO: clean this up
748 */
749 p = bp->b_path;
750 l = sizeof (bp->b_path);
751 (void) snprintf(p, l, "dr@%d:", instance);
752 while (*p != '\0') {
753 l--;
754 p++;
755 }
756
757 name = p;
758 err = drmach_board_name(bd, p, l);
759 if (err) {
760 sbd_err_clear(&err);
761 rv = DDI_FAILURE;
762 break;
763 }
764
765 minor_num = DR_MAKE_MINOR(instance, bd);
766 rv = ddi_create_minor_node(dip, name, S_IFCHR,
767 minor_num, DDI_NT_SBD_ATTACHMENT_POINT, NULL);
768 if (rv != DDI_SUCCESS)
769 rv = DDI_FAILURE;
770 }
771
772 if (rv == DDI_SUCCESS) {
773 /*
774 * Announce the node's presence.
775 */
776 ddi_report_dev(dip);
777 } else {
778 ddi_remove_minor_node(dip, NULL);
779 }
780 /*
781 * Init registered unsafe devs.
782 */
783 dr_unsafe_devs.devnames = NULL;
784 rv2 = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
785 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
786 "unsupported-io-drivers", &dr_unsafe_devs.devnames,
787 &dr_unsafe_devs.ndevs);
788
789 if (rv2 != DDI_PROP_SUCCESS)
790 dr_unsafe_devs.ndevs = 0;
791
792 rw_exit(&dr_grwlock);
793 return (rv);
794
795 default:
796 return (DDI_FAILURE);
797 }
798
799 /*NOTREACHED*/
800 }
801
802 static int
803 dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
804 {
805 int instance;
806 dr_softstate_t *softsp;
807
808 switch (cmd) {
809 case DDI_DETACH:
810 if (!dr_modunload_okay)
811 return (DDI_FAILURE);
812
813 rw_enter(&dr_grwlock, RW_WRITER);
814
815 instance = ddi_get_instance(dip);
816 softsp = ddi_get_soft_state(dr_g.softsp, instance);
817
818 /* TODO: eliminate dr_boardlist */
819 ASSERT(softsp->boards == dr_boardlist);
820
821 /* remove all minor nodes */
822 ddi_remove_minor_node(dip, NULL);
823
824 if (softsp->dr_initialized) {
825 int bd;
826
827 for (bd = 0; bd < MAX_BOARDS; bd++)
828 dr_board_destroy(&softsp->boards[bd]);
829 }
830
831 FREESTRUCT(softsp->boards, dr_board_t, MAX_BOARDS);
832 mutex_destroy(&softsp->i_lock);
833 ddi_soft_state_free(dr_g.softsp, instance);
834
835 rw_exit(&dr_grwlock);
836 return (DDI_SUCCESS);
837
838 default:
839 return (DDI_FAILURE);
840 }
841 /*NOTREACHED*/
842 }
843
844 static int
845 dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
846 {
847 _NOTE(ARGUNUSED(dip))
848
849 dev_t dev = (dev_t)arg;
850 int instance, error;
851 dr_softstate_t *softsp;
852
853 *result = NULL;
854 error = DDI_SUCCESS;
855 instance = DR_MINOR2INST(getminor(dev));
856
857 switch (cmd) {
858 case DDI_INFO_DEVT2DEVINFO:
859 softsp = ddi_get_soft_state(dr_g.softsp, instance);
860 if (softsp == NULL)
861 return (DDI_FAILURE);
862 *result = (void *)softsp->dip;
863 break;
864
865 case DDI_INFO_DEVT2INSTANCE:
866 *result = (void *)(uintptr_t)instance;
867 break;
868
869 default:
870 error = DDI_FAILURE;
871 break;
872 }
873
874 return (error);
875 }
876
877 /*
878 * DR operations.
879 */
880
881 static int
882 dr_copyin_iocmd(dr_handle_t *hp)
883 {
884 static fn_t f = "dr_copyin_iocmd";
885 sbd_cmd_t *scp = &hp->h_sbdcmd;
886
887 if (hp->h_iap == NULL)
888 return (EINVAL);
889
890 bzero((caddr_t)scp, sizeof (sbd_cmd_t));
891
892 #ifdef _MULTI_DATAMODEL
893 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
894 sbd_cmd32_t scmd32;
895
896 bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));
897
898 if (ddi_copyin((void *)hp->h_iap, (void *)&scmd32,
899 sizeof (sbd_cmd32_t), hp->h_mode)) {
900 cmn_err(CE_WARN,
901 "%s: (32bit) failed to copyin "
902 "sbdcmd-struct", f);
903 return (EFAULT);
904 }
905 scp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
906 scp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
907 bcopy(&scmd32.cmd_cm.c_id.c_name[0],
908 &scp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
909 scp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
910 scp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
911 scp->cmd_cm.c_opts = (caddr_t)(uintptr_t)scmd32.cmd_cm.c_opts;
912
913 switch (hp->h_cmd) {
914 case SBD_CMD_STATUS:
915 scp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
916 scp->cmd_stat.s_statp =
917 (caddr_t)(uintptr_t)scmd32.cmd_stat.s_statp;
918 break;
919 default:
920 break;
921
922 }
923 } else
924 #endif /* _MULTI_DATAMODEL */
925 if (ddi_copyin((void *)hp->h_iap, (void *)scp,
926 sizeof (sbd_cmd_t), hp->h_mode) != 0) {
927 cmn_err(CE_WARN,
928 "%s: failed to copyin sbdcmd-struct", f);
929 return (EFAULT);
930 }
931
932 if ((hp->h_opts.size = scp->cmd_cm.c_len) != 0) {
933 hp->h_opts.copts = GETSTRUCT(char, scp->cmd_cm.c_len + 1);
934 ++hp->h_opts.size;
935 if (ddi_copyin((void *)scp->cmd_cm.c_opts,
936 (void *)hp->h_opts.copts,
937 scp->cmd_cm.c_len, hp->h_mode) != 0) {
938 cmn_err(CE_WARN, "%s: failed to copyin options", f);
939 return (EFAULT);
940 }
941 }
942
943 return (0);
944 }
945
946 static int
947 dr_copyout_iocmd(dr_handle_t *hp)
948 {
949 static fn_t f = "dr_copyout_iocmd";
950 sbd_cmd_t *scp = &hp->h_sbdcmd;
951
952 if (hp->h_iap == NULL)
953 return (EINVAL);
954
955 #ifdef _MULTI_DATAMODEL
956 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
957 sbd_cmd32_t scmd32;
958
959 scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
960 scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
961 bcopy(&scp->cmd_cm.c_id.c_name[0],
962 &scmd32.cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
963
964 scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;
965 scmd32.cmd_cm.c_len = scp->cmd_cm.c_len;
966 scmd32.cmd_cm.c_opts = (caddr32_t)(uintptr_t)scp->cmd_cm.c_opts;
967
968 switch (hp->h_cmd) {
969 case SBD_CMD_GETNCM:
970 scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
971 break;
972 default:
973 break;
974 }
975
976 if (ddi_copyout((void *)&scmd32, (void *)hp->h_iap,
977 sizeof (sbd_cmd32_t), hp->h_mode)) {
978 cmn_err(CE_WARN,
979 "%s: (32bit) failed to copyout "
980 "sbdcmd-struct", f);
981 return (EFAULT);
982 }
983 } else
984 #endif /* _MULTI_DATAMODEL */
985 if (ddi_copyout((void *)scp, (void *)hp->h_iap,
986 sizeof (sbd_cmd_t), hp->h_mode) != 0) {
987 cmn_err(CE_WARN,
988 "%s: failed to copyout sbdcmd-struct", f);
989 return (EFAULT);
990 }
991
992 return (0);
993 }
994
995 static int
996 dr_copyout_errs(dr_handle_t *hp)
997 {
998 static fn_t f = "dr_copyout_errs";
999
1000 if (hp->h_err == NULL)
1001 return (0);
1002
1003 if (hp->h_err->e_code) {
1004 PR_ALL("%s: error %d %s",
1005 f, hp->h_err->e_code, hp->h_err->e_rsc);
1006 }
1007
1008 #ifdef _MULTI_DATAMODEL
1009 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
1010 sbd_error32_t *serr32p;
1011
1012 serr32p = GETSTRUCT(sbd_error32_t, 1);
1013
1014 serr32p->e_code = hp->h_err->e_code;
1015 bcopy(&hp->h_err->e_rsc[0], &serr32p->e_rsc[0],
1016 MAXPATHLEN);
1017 if (ddi_copyout((void *)serr32p,
1018 (void *)&((sbd_ioctl_arg32_t *)hp->h_iap)->i_err,
1019 sizeof (sbd_error32_t), hp->h_mode)) {
1020 cmn_err(CE_WARN,
1021 "%s: (32bit) failed to copyout", f);
1022 return (EFAULT);
1023 }
1024 FREESTRUCT(serr32p, sbd_error32_t, 1);
1025 } else
1026 #endif /* _MULTI_DATAMODEL */
1027 if (ddi_copyout((void *)hp->h_err,
1028 (void *)&hp->h_iap->i_err,
1029 sizeof (sbd_error_t), hp->h_mode)) {
1030 cmn_err(CE_WARN,
1031 "%s: failed to copyout", f);
1032 return (EFAULT);
1033 }
1034
1035 sbd_err_clear(&hp->h_err);
1036
1037 return (0);
1038
1039 }
1040
1041 /*
1042  * The pre-op entry point must call sbd_err_set_c(), if needed.
1043  * A non-zero return value indicates failure.
1044 */
1045 static int
1046 dr_pre_op(dr_handle_t *hp)
1047 {
1048 int rv = 0, t;
1049 int cmd, serr = 0;
1050 dr_devset_t devset;
1051 dr_board_t *bp = hp->h_bd;
1052 dr_handle_t *shp = hp;
1053 static fn_t f = "dr_pre_op";
1054
1055 cmd = hp->h_cmd;
1056 devset = shp->h_devset;
1057
1058 PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1059
1060 devset = DEVSET_AND(devset, DR_DEVS_PRESENT(bp));
1061 hp->h_err = drmach_pre_op(cmd, bp->b_id, &hp->h_opts, &devset);
1062 if (hp->h_err != NULL) {
1063 PR_ALL("drmach_pre_op failed for cmd %s(%d)\n",
1064 SBD_CMD_STR(cmd), cmd);
1065 return (-1);
1066 }
1067
1068 /*
1069 * Check for valid state transitions.
1070 */
1071 if ((t = CMD2INDEX(cmd)) != -1) {
1072 struct dr_state_trans *transp;
1073 int state_err;
1074
1075 transp = &dr_state_transition[t];
1076 ASSERT(transp->x_cmd == cmd);
1077
1078 state_err = dr_check_transition(bp, &devset, transp, cmd);
1079
1080 if (state_err < 0) {
1081 /*
1082 * Invalidate device.
1083 */
1084 dr_op_err(CE_IGNORE, hp, ESBD_INVAL, NULL);
1085 serr = -1;
1086 PR_ALL("%s: invalid devset (0x%x)\n",
1087 f, (uint_t)devset);
1088 } else if (state_err != 0) {
1089 /*
1090 * State transition is not a valid one.
1091 */
1092 dr_op_err(CE_IGNORE, hp,
1093 transp->x_op[state_err].x_err, NULL);
1094
1095 serr = transp->x_op[state_err].x_rv;
1096
1097 PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
1098 f, state_str[state_err], state_err,
1099 SBD_CMD_STR(cmd), cmd);
1100 } else {
1101 shp->h_devset = devset;
1102 }
1103 }
1104
1105 if (serr) {
1106 rv = -1;
1107 }
1108
1109 return (rv);
1110 }
1111
1112 static int
1113 dr_post_op(dr_handle_t *hp, int rv)
1114 {
1115 int cmd;
1116 sbd_error_t *err;
1117 dr_board_t *bp = hp->h_bd;
1118 static fn_t f = "dr_post_op";
1119
1120 cmd = hp->h_cmd;
1121
1122 PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1123
1124 err = drmach_post_op(cmd, bp->b_id, &hp->h_opts, rv);
1125 if (err != NULL) {
1126 PR_ALL("drmach_post_op failed for cmd %s(%d)\n",
1127 SBD_CMD_STR(cmd), cmd);
1128 if (rv == 0) {
1129 ASSERT(hp->h_err == NULL);
1130 hp->h_err = err;
1131 rv = -1;
1132 } else if (hp->h_err == NULL) {
1133 hp->h_err = err;
1134 } else {
1135 sbd_err_clear(&err);
1136 }
1137 }
1138
1139 return (rv);
1140 }
1141
1142 static int
1143 dr_exec_op(dr_handle_t *hp)
1144 {
1145 int rv = 0;
1146 static fn_t f = "dr_exec_op";
1147
1148 /* errors should have been caught by now */
1149 ASSERT(hp->h_err == NULL);
1150
1151 switch (hp->h_cmd) {
1152 case SBD_CMD_ASSIGN:
1153 dr_assign_board(hp);
1154 break;
1155
1156 case SBD_CMD_UNASSIGN:
1157 dr_unassign_board(hp);
1158 break;
1159
1160 case SBD_CMD_POWEROFF:
1161 dr_poweroff_board(hp);
1162 break;
1163
1164 case SBD_CMD_POWERON:
1165 dr_poweron_board(hp);
1166 break;
1167
1168 case SBD_CMD_TEST:
1169 dr_test_board(hp);
1170 break;
1171
1172 case SBD_CMD_CONNECT:
1173 dr_connect(hp);
1174 break;
1175
1176 case SBD_CMD_CONFIGURE:
1177 dr_dev_configure(hp);
1178 break;
1179
1180 case SBD_CMD_UNCONFIGURE:
1181 dr_dev_release(hp);
1182 if (hp->h_err == NULL)
1183 rv = dr_dev_unconfigure(hp);
1184 else
1185 dr_dev_cancel(hp);
1186 break;
1187
1188 case SBD_CMD_DISCONNECT:
1189 rv = dr_disconnect(hp);
1190 break;
1191
1192 case SBD_CMD_STATUS:
1193 rv = dr_dev_status(hp);
1194 break;
1195
1196 case SBD_CMD_GETNCM:
1197 hp->h_sbdcmd.cmd_getncm.g_ncm = dr_get_ncm(hp);
1198 rv = dr_copyout_iocmd(hp);
1199 break;
1200
1201 case SBD_CMD_PASSTHRU:
1202 rv = dr_pt_ioctl(hp);
1203 break;
1204
1205 default:
1206 cmn_err(CE_WARN,
1207 "%s: unknown command (%d)",
1208 f, hp->h_cmd);
1209 break;
1210 }
1211
1212 if (hp->h_err != NULL) {
1213 rv = -1;
1214 }
1215
1216 return (rv);
1217 }
1218
1219 static void
1220 dr_assign_board(dr_handle_t *hp)
1221 {
1222 dr_board_t *bp = hp->h_bd;
1223
1224 hp->h_err = drmach_board_assign(bp->b_num, &bp->b_id);
1225 if (hp->h_err == NULL) {
1226 bp->b_assigned = 1;
1227 }
1228 }
1229
1230 static void
1231 dr_unassign_board(dr_handle_t *hp)
1232 {
1233 dr_board_t *bp = hp->h_bd;
1234
1235 /*
1236 * Block out status during unassign.
1237 * Not doing cv_wait_sig here as starfire SSP software
1238 * ignores unassign failure and removes board from
1239 * domain mask causing system panic.
1240 * TODO: Change cv_wait to cv_wait_sig when SSP software
1241 * handles unassign failure.
1242 */
1243 dr_lock_status(bp);
1244
1245 hp->h_err = drmach_board_unassign(bp->b_id);
1246 if (hp->h_err == NULL) {
1247 /*
1248 * clear drmachid_t handle; not valid after board unassign
1249 */
1250 bp->b_id = 0;
1251 bp->b_assigned = 0;
1252 }
1253
1254 dr_unlock_status(bp);
1255 }
1256
1257 static void
1258 dr_poweron_board(dr_handle_t *hp)
1259 {
1260 dr_board_t *bp = hp->h_bd;
1261
1262 hp->h_err = drmach_board_poweron(bp->b_id);
1263 }
1264
1265 static void
1266 dr_poweroff_board(dr_handle_t *hp)
1267 {
1268 dr_board_t *bp = hp->h_bd;
1269
1270 hp->h_err = drmach_board_poweroff(bp->b_id);
1271 }
1272
1273 static void
1274 dr_test_board(dr_handle_t *hp)
1275 {
1276 dr_board_t *bp = hp->h_bd;
1277 hp->h_err = drmach_board_test(bp->b_id, &hp->h_opts,
1278 dr_cmd_flags(hp) & SBD_FLAG_FORCE);
1279 }
1280
1281 /*
1282 * Create and populate the component nodes for a board. Assumes that the
1283 * devlists for the board have been initialized.
1284 */
1285 static void
1286 dr_make_comp_nodes(dr_board_t *bp)
1287 {
1288 int i;
1289
1290 /*
1291 * Make nodes for the individual components on the board.
1292 	 * First we need to initialize the memory unit data structures of
1293 	 * the board structure.
1294 */
1295 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1296 dr_mem_unit_t *mp;
1297
1298 mp = dr_get_mem_unit(bp, i);
1299 dr_init_mem_unit(mp);
1300 }
1301
1302 /*
1303 * Initialize cpu unit data structures.
1304 */
1305 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1306 dr_cpu_unit_t *cp;
1307
1308 cp = dr_get_cpu_unit(bp, i);
1309 dr_init_cpu_unit(cp);
1310 }
1311
1312 /*
1313 * Initialize io unit data structures.
1314 */
1315 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1316 dr_io_unit_t *ip;
1317
1318 ip = dr_get_io_unit(bp, i);
1319 dr_init_io_unit(ip);
1320 }
1321
1322 dr_board_transition(bp, DR_STATE_CONNECTED);
1323
1324 bp->b_rstate = SBD_STAT_CONNECTED;
1325 bp->b_ostate = SBD_STAT_UNCONFIGURED;
1326 bp->b_cond = SBD_COND_OK;
1327 (void) drv_getparm(TIME, (void *)&bp->b_time);
1328
1329 }
1330
1331 /*
1332 * Only do work if called to operate on an entire board
1333 * which doesn't already have components present.
1334 */
1335 static void
1336 dr_connect(dr_handle_t *hp)
1337 {
1338 dr_board_t *bp = hp->h_bd;
1339 static fn_t f = "dr_connect";
1340
1341 PR_ALL("%s...\n", f);
1342
1343 if (DR_DEVS_PRESENT(bp)) {
1344 /*
1345 * Board already has devices present.
1346 */
1347 PR_ALL("%s: devices already present (" DEVSET_FMT_STR ")\n",
1348 f, DEVSET_FMT_ARG(DR_DEVS_PRESENT(bp)));
1349 return;
1350 }
1351
1352 hp->h_err = drmach_board_connect(bp->b_id, &hp->h_opts);
1353 if (hp->h_err)
1354 return;
1355
1356 hp->h_err = dr_init_devlists(bp);
1357 if (hp->h_err)
1358 return;
1359 else if (bp->b_ndev == 0) {
1360 dr_op_err(CE_WARN, hp, ESBD_EMPTY_BD, bp->b_path);
1361 return;
1362 } else {
1363 dr_make_comp_nodes(bp);
1364 return;
1365 }
1366 /*NOTREACHED*/
1367 }
1368
1369 static int
1370 dr_disconnect(dr_handle_t *hp)
1371 {
1372 int i;
1373 dr_devset_t devset;
1374 dr_board_t *bp = hp->h_bd;
1375 static fn_t f = "dr_disconnect";
1376
1377 PR_ALL("%s...\n", f);
1378
1379 /*
1380 * Only devices which are present, but
1381 * unattached can be disconnected.
1382 */
1383 devset = hp->h_devset & DR_DEVS_PRESENT(bp) &
1384 DR_DEVS_UNATTACHED(bp);
1385
1386 if ((devset == 0) && DR_DEVS_PRESENT(bp)) {
1387 dr_op_err(CE_IGNORE, hp, ESBD_EMPTY_BD, bp->b_path);
1388 return (0);
1389 }
1390
1391 /*
1392 * Block out status during disconnect.
1393 */
1394 mutex_enter(&bp->b_slock);
1395 while (bp->b_sflags & DR_BSLOCK) {
1396 if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
1397 mutex_exit(&bp->b_slock);
1398 return (EINTR);
1399 }
1400 }
1401 bp->b_sflags |= DR_BSLOCK;
1402 mutex_exit(&bp->b_slock);
1403
1404 hp->h_err = drmach_board_disconnect(bp->b_id, &hp->h_opts);
1405 if (hp->h_err && hp->h_err->e_code == EX86_WALK_DEPENDENCY) {
1406 /*
1407 		 * Other boards have a dependency on this board. No device nodes
1408 		 * have been destroyed, so keep the current board status.
1409 */
1410 goto disconnect_done;
1411 }
1412
1413 DR_DEVS_DISCONNECT(bp, devset);
1414
1415 ASSERT((DR_DEVS_ATTACHED(bp) & devset) == 0);
1416
1417 /*
1418 * Update per-device state transitions.
1419 */
1420 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1421 dr_cpu_unit_t *cp;
1422
1423 if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
1424 continue;
1425
1426 cp = dr_get_cpu_unit(bp, i);
1427 if (dr_disconnect_cpu(cp) == 0)
1428 dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
1429 else if (cp->sbc_cm.sbdev_error != NULL)
1430 DRERR_SET_C(&hp->h_err, &cp->sbc_cm.sbdev_error);
1431
1432 ASSERT(cp->sbc_cm.sbdev_error == NULL);
1433 }
1434
1435 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1436 dr_mem_unit_t *mp;
1437
1438 if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
1439 continue;
1440
1441 mp = dr_get_mem_unit(bp, i);
1442 if (dr_disconnect_mem(mp) == 0)
1443 dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
1444 else if (mp->sbm_cm.sbdev_error != NULL)
1445 DRERR_SET_C(&hp->h_err, &mp->sbm_cm.sbdev_error);
1446
1447 ASSERT(mp->sbm_cm.sbdev_error == NULL);
1448 }
1449
1450 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1451 dr_io_unit_t *ip;
1452
1453 if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
1454 continue;
1455
1456 ip = dr_get_io_unit(bp, i);
1457 if (dr_disconnect_io(ip) == 0)
1458 dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
1459 else if (ip->sbi_cm.sbdev_error != NULL)
1460 DRERR_SET_C(&hp->h_err, &ip->sbi_cm.sbdev_error);
1461
1462 ASSERT(ip->sbi_cm.sbdev_error == NULL);
1463 }
1464
1465 if (hp->h_err) {
1466 /*
1467 * For certain errors, drmach_board_disconnect will mark
1468 * the board as unusable; in these cases the devtree must
1469 * be purged so that status calls will succeed.
1470 * XXX
1471 * This implementation checks for discrete error codes -
1472 * someday, the i/f to drmach_board_disconnect should be
1473 * changed to avoid the e_code testing.
1474 */
1475 if (hp->h_err->e_code == EX86_DEPROBE) {
1476 bp->b_ostate = SBD_STAT_UNCONFIGURED;
1477 bp->b_busy = 0;
1478 (void) drv_getparm(TIME, (void *)&bp->b_time);
1479
1480 if (drmach_board_deprobe(bp->b_id))
1481 goto disconnect_done;
1482 else
1483 bp->b_ndev = 0;
1484 }
1485 }
1486
1487 /*
1488 	 * Once all the components on a board have been disconnected,
1489 	 * the board's state can transition to disconnected and
1490 * we can allow the deprobe to take place.
1491 */
1492 if (hp->h_err == NULL && DR_DEVS_PRESENT(bp) == 0) {
1493 dr_board_transition(bp, DR_STATE_OCCUPIED);
1494 bp->b_rstate = SBD_STAT_DISCONNECTED;
1495 bp->b_ostate = SBD_STAT_UNCONFIGURED;
1496 bp->b_busy = 0;
1497 (void) drv_getparm(TIME, (void *)&bp->b_time);
1498
1499 hp->h_err = drmach_board_deprobe(bp->b_id);
1500
1501 if (hp->h_err == NULL) {
1502 bp->b_ndev = 0;
1503 dr_board_transition(bp, DR_STATE_EMPTY);
1504 bp->b_rstate = SBD_STAT_EMPTY;
1505 (void) drv_getparm(TIME, (void *)&bp->b_time);
1506 }
1507 }
1508
1509 disconnect_done:
1510 dr_unlock_status(bp);
1511
1512 return (0);
1513 }
1514
1515 /*
1516 * Check if a particular device is a valid target of the current
1517 * operation. Return 1 if it is a valid target, and 0 otherwise.
1518 */
1519 static int
1520 dr_dev_is_target(dr_dev_unit_t *dp, int present_only, uint_t uset)
1521 {
1522 dr_common_unit_t *cp;
1523 int is_present;
1524 int is_attached;
1525
1526 cp = &dp->du_common;
1527
1528 /* check if the user requested this device */
1529 if ((uset & (1 << cp->sbdev_unum)) == 0) {
1530 return (0);
1531 }
1532
1533 is_present = DR_DEV_IS_PRESENT(cp) ? 1 : 0;
1534 is_attached = DR_DEV_IS_ATTACHED(cp) ? 1 : 0;
1535
1536 /*
1537 * If the present_only flag is set, a valid target
1538 * must be present but not attached. Otherwise, it
1539 * must be both present and attached.
1540 */
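	/*
	 * Illustrative truth table for the test below (values are 0/1):
	 *
	 *	present_only	is_attached	present_only ^ is_attached
	 *	     1		     0			1  (valid target)
	 *	     1		     1			0
	 *	     0		     1			1  (valid target)
	 *	     0		     0			0
	 */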
1541 if (is_present && (present_only ^ is_attached)) {
1542 /* sanity check */
1543 ASSERT(cp->sbdev_id != (drmachid_t)0);
1544
1545 return (1);
1546 }
1547
1548 return (0);
1549 }
1550
1551 static void
1552 dr_dev_make_list(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1553 dr_common_unit_t ***devlist, int *devnum)
1554 {
1555 dr_board_t *bp = hp->h_bd;
1556 int unum;
1557 int nunits;
1558 uint_t uset;
1559 int len;
1560 dr_common_unit_t **list, **wp;
1561
1562 switch (type) {
1563 case SBD_COMP_CPU:
1564 nunits = MAX_CPU_UNITS_PER_BOARD;
1565 break;
1566 case SBD_COMP_MEM:
1567 nunits = MAX_MEM_UNITS_PER_BOARD;
1568 break;
1569 case SBD_COMP_IO:
1570 nunits = MAX_IO_UNITS_PER_BOARD;
1571 break;
1572 default:
1573 /* catch this in debug kernels */
1574 ASSERT(0);
1575 break;
1576 }
1577
1578 /* allocate list storage. */
1579 len = sizeof (dr_common_unit_t *) * (nunits + 1);
1580 list = kmem_zalloc(len, KM_SLEEP);
1581
1582 /* record length of storage in first element */
1583 *list++ = (dr_common_unit_t *)(uintptr_t)len;
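	/*
	 * The devlist returned below points one element past this length
	 * slot; dr_dev_clean_up() steps the pointer back by one to recover
	 * the allocation size before calling kmem_free().
	 */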
1584
1585 /* get bit array signifying which units are to be involved */
1586 uset = DEVSET_GET_UNITSET(hp->h_devset, type);
1587
1588 /*
1589 * Adjust the loop count for CPU devices since all cores
1590 * in a CMP will be examined in a single iteration.
1591 */
1592 if (type == SBD_COMP_CPU) {
1593 nunits = MAX_CMP_UNITS_PER_BOARD;
1594 }
1595
1596 /* populate list */
1597 for (wp = list, unum = 0; unum < nunits; unum++) {
1598 dr_dev_unit_t *dp;
1599 int core;
1600 int cunum;
1601
1602 dp = DR_GET_BOARD_DEVUNIT(bp, type, unum);
1603 if (dr_dev_is_target(dp, present_only, uset)) {
1604 *wp++ = &dp->du_common;
1605 }
1606
1607 /* further processing is only required for CPUs */
1608 if (type != SBD_COMP_CPU) {
1609 continue;
1610 }
1611
1612 /*
1613 * Add any additional cores from the current CPU
1614 * device. This is to ensure that all the cores
1615 * are grouped together in the device list, and
1616 * consequently sequenced together during the actual
1617 * operation.
1618 */
1619 for (core = 1; core < MAX_CORES_PER_CMP; core++) {
1620 cunum = DR_CMP_CORE_UNUM(unum, core);
1621 dp = DR_GET_BOARD_DEVUNIT(bp, type, cunum);
1622
1623 if (dr_dev_is_target(dp, present_only, uset)) {
1624 *wp++ = &dp->du_common;
1625 }
1626 }
1627 }
1628
1629 /* calculate number of units in list, return result and list pointer */
1630 *devnum = wp - list;
1631 *devlist = list;
1632 }
1633
1634 static void
1635 dr_dev_clean_up(dr_handle_t *hp, dr_common_unit_t **list, int devnum)
1636 {
1637 int len;
1638 int n = 0;
1639 dr_common_unit_t *cp, **rp = list;
1640
1641 /*
1642 * move first encountered unit error to handle if handle
1643 * does not yet have a recorded error.
1644 */
1645 if (hp->h_err == NULL) {
1646 while (n++ < devnum) {
1647 cp = *rp++;
1648 if (cp->sbdev_error != NULL) {
1649 hp->h_err = cp->sbdev_error;
1650 cp->sbdev_error = NULL;
1651 break;
1652 }
1653 }
1654 }
1655
1656 /* free remaining unit errors */
1657 while (n++ < devnum) {
1658 cp = *rp++;
1659 if (cp->sbdev_error != NULL) {
1660 sbd_err_clear(&cp->sbdev_error);
1661 cp->sbdev_error = NULL;
1662 }
1663 }
1664
1665 /* free list */
1666 list -= 1;
1667 len = (int)(uintptr_t)list[0];
1668 kmem_free(list, len);
1669 }
1670
1671 static int
1672 dr_dev_walk(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1673 int (*pre_op)(dr_handle_t *, dr_common_unit_t **, int),
1674 void (*op)(dr_handle_t *, dr_common_unit_t *),
1675 int (*post_op)(dr_handle_t *, dr_common_unit_t **, int),
1676 void (*board_op)(dr_handle_t *, dr_common_unit_t **, int))
1677 {
1678 int devnum, rv;
1679 dr_common_unit_t **devlist;
1680
1681 dr_dev_make_list(hp, type, present_only, &devlist, &devnum);
1682
1683 rv = 0;
1684 if (devnum > 0) {
1685 rv = (*pre_op)(hp, devlist, devnum);
1686 if (rv == 0) {
1687 int n;
1688
1689 for (n = 0; n < devnum; n++)
1690 (*op)(hp, devlist[n]);
1691
1692 rv = (*post_op)(hp, devlist, devnum);
1693
1694 (*board_op)(hp, devlist, devnum);
1695 }
1696 }
1697
1698 dr_dev_clean_up(hp, devlist, devnum);
1699 return (rv);
1700 }
1701
1702 /*ARGSUSED*/
1703 static int
1704 dr_dev_noop(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
1705 {
1706 return (0);
1707 }
1708
1709 static void
1710 dr_attach_update_state(dr_handle_t *hp,
1711 dr_common_unit_t **devlist, int devnum)
1712 {
1713 dr_board_t *bp = hp->h_bd;
1714 int i;
1715 dr_devset_t devs_unattached, devs_present;
1716 static fn_t f = "dr_attach_update_state";
1717
1718 for (i = 0; i < devnum; i++) {
1719 dr_common_unit_t *cp = devlist[i];
1720
1721 if (dr_check_unit_attached(cp) == -1) {
1722 PR_ALL("%s: ERROR %s not attached\n",
1723 f, cp->sbdev_path);
1724 continue;
1725 }
1726
1727 DR_DEV_SET_ATTACHED(cp);
1728
1729 dr_device_transition(cp, DR_STATE_CONFIGURED);
1730 cp->sbdev_cond = SBD_COND_OK;
1731 }
1732
1733 devs_present = DR_DEVS_PRESENT(bp);
1734 devs_unattached = DR_DEVS_UNATTACHED(bp);
1735
1736 switch (bp->b_state) {
1737 case DR_STATE_CONNECTED:
1738 case DR_STATE_UNCONFIGURED:
1739 ASSERT(devs_present);
1740
1741 if (devs_unattached == 0) {
1742 /*
1743 * All devices finally attached.
1744 */
1745 dr_board_transition(bp, DR_STATE_CONFIGURED);
1746 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1747 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1748 hp->h_bd->b_cond = SBD_COND_OK;
1749 hp->h_bd->b_busy = 0;
1750 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1751 } else if (devs_present != devs_unattached) {
1752 /*
1753 * Only some devices are fully attached.
1754 */
1755 dr_board_transition(bp, DR_STATE_PARTIAL);
1756 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1757 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1758 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1759 }
1760 break;
1761
1762 case DR_STATE_PARTIAL:
1763 ASSERT(devs_present);
1764 /*
1765 * All devices finally attached.
1766 */
1767 if (devs_unattached == 0) {
1768 dr_board_transition(bp, DR_STATE_CONFIGURED);
1769 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1770 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1771 hp->h_bd->b_cond = SBD_COND_OK;
1772 hp->h_bd->b_busy = 0;
1773 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1774 }
1775 break;
1776
1777 default:
1778 break;
1779 }
1780 }
1781
1782 static void
1783 dr_dev_configure(dr_handle_t *hp)
1784 {
1785 int rv;
1786
1787 rv = dr_dev_walk(hp, SBD_COMP_CPU, 1,
1788 dr_pre_attach_cpu,
1789 dr_attach_cpu,
1790 dr_post_attach_cpu,
1791 dr_attach_update_state);
1792
1793 if (rv >= 0) {
1794 rv = dr_dev_walk(hp, SBD_COMP_MEM, 1,
1795 dr_pre_attach_mem,
1796 dr_attach_mem,
1797 dr_post_attach_mem,
1798 dr_attach_update_state);
1799 }
1800
1801 if (rv >= 0) {
1802 (void) dr_dev_walk(hp, SBD_COMP_IO, 1,
1803 dr_pre_attach_io,
1804 dr_attach_io,
1805 dr_post_attach_io,
1806 dr_attach_update_state);
1807 }
1808 }
1809
1810 static void
1811 dr_release_update_state(dr_handle_t *hp,
1812 dr_common_unit_t **devlist, int devnum)
1813 {
1814 _NOTE(ARGUNUSED(devlist))
1815 _NOTE(ARGUNUSED(devnum))
1816
1817 dr_board_t *bp = hp->h_bd;
1818
1819 /*
1820 	 * If all attached components on the board have been released,
1821 	 * then transfer the board to the RELEASE state and mark it busy.
1822 */
1823 if ((bp->b_state != DR_STATE_RELEASE) &&
1824 (DR_DEVS_RELEASED(bp) == DR_DEVS_ATTACHED(bp))) {
1825 dr_board_transition(bp, DR_STATE_RELEASE);
1826 hp->h_bd->b_busy = 1;
1827 }
1828 }
1829
1830 /* called by dr_release_done [below] and dr_release_mem_done [dr_mem.c] */
1831 int
1832 dr_release_dev_done(dr_common_unit_t *cp)
1833 {
1834 if (cp->sbdev_state == DR_STATE_RELEASE) {
1835 ASSERT(DR_DEV_IS_RELEASED(cp));
1836
1837 DR_DEV_SET_UNREFERENCED(cp);
1838
1839 dr_device_transition(cp, DR_STATE_UNREFERENCED);
1840
1841 return (0);
1842 } else {
1843 return (-1);
1844 }
1845 }
1846
1847 static void
1848 dr_release_done(dr_handle_t *hp, dr_common_unit_t *cp)
1849 {
1850 _NOTE(ARGUNUSED(hp))
1851
1852 dr_board_t *bp;
1853 static fn_t f = "dr_release_done";
1854
1855 PR_ALL("%s...\n", f);
1856
1857 /* get board pointer & sanity check */
1858 bp = cp->sbdev_bp;
1859 ASSERT(bp == hp->h_bd);
1860
1861 /*
1862 * Transfer the device which just completed its release
1863 * to the UNREFERENCED state.
1864 */
1865 switch (cp->sbdev_type) {
1866 case SBD_COMP_MEM:
1867 dr_release_mem_done(cp);
1868 break;
1869
1870 default:
1871 DR_DEV_SET_RELEASED(cp);
1872
1873 dr_device_transition(cp, DR_STATE_RELEASE);
1874
1875 (void) dr_release_dev_done(cp);
1876 break;
1877 }
1878
1879 /*
1880 	 * If the board is in the RELEASE state and all released
1881 	 * devices are now unreferenced, then transfer the board
1882 	 * to the UNREFERENCED state.
1883 */
1884 if ((bp->b_state == DR_STATE_RELEASE) &&
1885 (DR_DEVS_RELEASED(bp) == DR_DEVS_UNREFERENCED(bp))) {
1886 dr_board_transition(bp, DR_STATE_UNREFERENCED);
1887 bp->b_busy = 1;
1888 (void) drv_getparm(TIME, (void *)&bp->b_time);
1889 }
1890 }
1891
1892 static void
1893 dr_dev_release_mem(dr_handle_t *hp, dr_common_unit_t *dv)
1894 {
1895 dr_release_mem(dv);
1896 dr_release_done(hp, dv);
1897 }
1898
1899 static void
1900 dr_dev_release(dr_handle_t *hp)
1901 {
1902 int rv;
1903
1904 hp->h_bd->b_busy = 1;
1905
1906 rv = dr_dev_walk(hp, SBD_COMP_CPU, 0,
1907 dr_pre_release_cpu,
1908 dr_release_done,
1909 dr_dev_noop,
1910 dr_release_update_state);
1911
1912 if (rv >= 0) {
1913 rv = dr_dev_walk(hp, SBD_COMP_MEM, 0,
1914 dr_pre_release_mem,
1915 dr_dev_release_mem,
1916 dr_dev_noop,
1917 dr_release_update_state);
1918 }
1919
1920 if (rv >= 0) {
1921 rv = dr_dev_walk(hp, SBD_COMP_IO, 0,
1922 dr_pre_release_io,
1923 dr_release_done,
1924 dr_dev_noop,
1925 dr_release_update_state);
1926
1927 }
1928
1929 if (rv < 0)
1930 hp->h_bd->b_busy = 0;
1931 /* else, b_busy will be cleared in dr_detach_update_state() */
1932 }
1933
1934 static void
1935 dr_detach_update_state(dr_handle_t *hp,
1936 dr_common_unit_t **devlist, int devnum)
1937 {
1938 dr_board_t *bp = hp->h_bd;
1939 int i;
1940 dr_state_t bstate;
1941 static fn_t f = "dr_detach_update_state";
1942
1943 for (i = 0; i < devnum; i++) {
1944 dr_common_unit_t *cp = devlist[i];
1945
1946 if (dr_check_unit_attached(cp) >= 0) {
1947 /*
1948 			 * Device is still attached, probably due
1949 * to an error. Need to keep track of it.
1950 */
1951 PR_ALL("%s: ERROR %s not detached\n",
1952 f, cp->sbdev_path);
1953
1954 continue;
1955 }
1956
1957 DR_DEV_CLR_ATTACHED(cp);
1958 DR_DEV_CLR_RELEASED(cp);
1959 DR_DEV_CLR_UNREFERENCED(cp);
1960 dr_device_transition(cp, DR_STATE_UNCONFIGURED);
1961 }
1962
1963 bstate = bp->b_state;
1964 if (bstate != DR_STATE_UNCONFIGURED) {
1965 if (DR_DEVS_PRESENT(bp) == DR_DEVS_UNATTACHED(bp)) {
1966 /*
1967 * All devices are finally detached.
1968 */
1969 dr_board_transition(bp, DR_STATE_UNCONFIGURED);
1970 hp->h_bd->b_ostate = SBD_STAT_UNCONFIGURED;
1971 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1972 } else if ((bp->b_state != DR_STATE_PARTIAL) &&
1973 (DR_DEVS_ATTACHED(bp) !=
1974 DR_DEVS_PRESENT(bp))) {
1975 /*
1976 * Some devices remain attached.
1977 */
1978 dr_board_transition(bp, DR_STATE_PARTIAL);
1979 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1980 }
1981
1982 if ((hp->h_devset & DR_DEVS_UNATTACHED(bp)) == hp->h_devset)
1983 hp->h_bd->b_busy = 0;
1984 }
1985 }
1986
1987 static int
1988 dr_dev_unconfigure(dr_handle_t *hp)
1989 {
1990 dr_board_t *bp = hp->h_bd;
1991
1992 /*
1993 * Block out status during IO unconfig.
1994 */
1995 mutex_enter(&bp->b_slock);
1996 while (bp->b_sflags & DR_BSLOCK) {
1997 if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
1998 mutex_exit(&bp->b_slock);
1999 return (EINTR);
2000 }
2001 }
2002 bp->b_sflags |= DR_BSLOCK;
2003 mutex_exit(&bp->b_slock);
2004
2005 (void) dr_dev_walk(hp, SBD_COMP_IO, 0,
2006 dr_pre_detach_io,
2007 dr_detach_io,
2008 dr_post_detach_io,
2009 dr_detach_update_state);
2010
2011 dr_unlock_status(bp);
2012
2013 (void) dr_dev_walk(hp, SBD_COMP_CPU, 0,
2014 dr_pre_detach_cpu,
2015 dr_detach_cpu,
2016 dr_post_detach_cpu,
2017 dr_detach_update_state);
2018
2019 (void) dr_dev_walk(hp, SBD_COMP_MEM, 0,
2020 dr_pre_detach_mem,
2021 dr_detach_mem,
2022 dr_post_detach_mem,
2023 dr_detach_update_state);
2024
2025 return (0);
2026 }
2027
2028 static void
2029 dr_dev_cancel(dr_handle_t *hp)
2030 {
2031 int i;
2032 dr_devset_t devset;
2033 dr_board_t *bp = hp->h_bd;
2034 static fn_t f = "dr_dev_cancel";
2035
2036 PR_ALL("%s...\n", f);
2037
2038 /*
2039 * Only devices which have been "released" are
2040 * subject to cancellation.
2041 */
2042 devset = hp->h_devset & DR_DEVS_RELEASED(bp);
2043
2044 /*
2045 * Nothing to do for CPUs or IO other than change back
2046 * their state.
2047 */
2048 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2049 dr_cpu_unit_t *cp;
2050 dr_state_t nstate;
2051
2052 if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
2053 continue;
2054
2055 cp = dr_get_cpu_unit(bp, i);
2056 if (dr_cancel_cpu(cp) == 0)
2057 nstate = DR_STATE_CONFIGURED;
2058 else
2059 nstate = DR_STATE_FATAL;
2060
2061 dr_device_transition(&cp->sbc_cm, nstate);
2062 }
2063
2064 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2065 dr_io_unit_t *ip;
2066
2067 if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
2068 continue;
2069 ip = dr_get_io_unit(bp, i);
2070 dr_device_transition(&ip->sbi_cm, DR_STATE_CONFIGURED);
2071 }
2072 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2073 dr_mem_unit_t *mp;
2074 dr_state_t nstate;
2075
2076 if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
2077 continue;
2078
2079 mp = dr_get_mem_unit(bp, i);
2080 if (dr_cancel_mem(mp) == 0)
2081 nstate = DR_STATE_CONFIGURED;
2082 else
2083 nstate = DR_STATE_FATAL;
2084
2085 dr_device_transition(&mp->sbm_cm, nstate);
2086 }
2087
2088 PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);
2089
2090 DR_DEVS_CANCEL(bp, devset);
2091
2092 if (DR_DEVS_RELEASED(bp) == 0) {
2093 dr_state_t new_state;
2094 /*
2095 * If the board no longer has any released devices
2096 		 * then transfer it back to the CONFIGURED/PARTIAL state.
2097 */
2098 if (DR_DEVS_ATTACHED(bp) == DR_DEVS_PRESENT(bp))
2099 new_state = DR_STATE_CONFIGURED;
2100 else
2101 new_state = DR_STATE_PARTIAL;
2102 if (bp->b_state != new_state) {
2103 dr_board_transition(bp, new_state);
2104 }
2105 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
2106 hp->h_bd->b_busy = 0;
2107 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2108 }
2109 }
2110
2111 static int
2112 dr_dev_status(dr_handle_t *hp)
2113 {
2114 int nstat, mode, ncm, sz, pbsz, pnstat;
2115 dr_handle_t *shp;
2116 dr_devset_t devset = 0;
2117 sbd_stat_t *dstatp = NULL;
2118 sbd_dev_stat_t *devstatp;
2119 dr_board_t *bp;
2120 drmach_status_t pstat;
2121 int rv = 0;
2122
2123 #ifdef _MULTI_DATAMODEL
2124 int sz32 = 0;
2125 #endif /* _MULTI_DATAMODEL */
2126
2127 static fn_t f = "dr_dev_status";
2128
2129 PR_ALL("%s...\n", f);
2130
2131 mode = hp->h_mode;
2132 shp = hp;
2133 devset = shp->h_devset;
2134 bp = hp->h_bd;
2135
2136 /*
2137 * Block out disconnect, unassign, IO unconfigure and
2138 * devinfo branch creation during status.
2139 */
2140 mutex_enter(&bp->b_slock);
2141 while (bp->b_sflags & DR_BSLOCK) {
2142 if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
2143 mutex_exit(&bp->b_slock);
2144 return (EINTR);
2145 }
2146 }
2147 bp->b_sflags |= DR_BSLOCK;
2148 mutex_exit(&bp->b_slock);
2149
2150 ncm = 1;
2151 if (hp->h_sbdcmd.cmd_cm.c_id.c_type == SBD_COMP_NONE) {
2152 if (dr_cmd_flags(hp) & SBD_FLAG_ALLCMP) {
2153 /*
2154 * Calculate the maximum number of components possible
2155 * for a board. This number will be used to size the
2156 * status scratch buffer used by board and component
2157 * status functions.
2158 * This buffer may differ in size from what is provided
2159 * by the plugin, since the known component set on the
2160 		 * board may change between the plugin's GETNCM call and
2161 * the status call. Sizing will be adjusted to the plugin's
2162 * receptacle buffer at copyout time.
2163 */
2164 ncm = MAX_CPU_UNITS_PER_BOARD +
2165 MAX_MEM_UNITS_PER_BOARD +
2166 MAX_IO_UNITS_PER_BOARD;
2167
2168 } else {
2169 /*
2170 * In the case of c_type == SBD_COMP_NONE, and
2171 * SBD_FLAG_ALLCMP not specified, only the board
2172 * info is to be returned, no components.
2173 */
2174 ncm = 0;
2175 devset = 0;
2176 }
2177 }
2178
2179 sz = sizeof (sbd_stat_t);
2180 if (ncm > 1)
2181 sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
2182
2183
2184 pbsz = (int)hp->h_sbdcmd.cmd_stat.s_nbytes;
2185 pnstat = (pbsz - sizeof (sbd_stat_t)) / sizeof (sbd_dev_stat_t);
2186
2187 /*
2188 * s_nbytes describes the size of the preallocated user
2189 	 * buffer into which the application is expecting to
2190 * receive the sbd_stat_t and sbd_dev_stat_t structures.
2191 */
2192
2193 #ifdef _MULTI_DATAMODEL
2194
2195 /*
2196 * More buffer space is required for the 64bit to 32bit
2197 * conversion of data structures.
2198 */
2199 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2200 sz32 = sizeof (sbd_stat32_t);
2201 if (ncm > 1)
2202 sz32 += sizeof (sbd_dev_stat32_t) * (ncm - 1);
2203 pnstat = (pbsz - sizeof (sbd_stat32_t))/
2204 sizeof (sbd_dev_stat32_t);
2205 }
2206
2207 sz += sz32;
2208 #endif
2209 /*
2210 * Since one sbd_dev_stat_t is included in the sbd_stat_t,
2211 * increment the plugin's nstat count.
2212 */
2213 ++pnstat;
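
	/*
	 * Worked example (editorial; the unit maxima are hypothetical):
	 * with MAX_CPU_UNITS_PER_BOARD = 4, MAX_MEM_UNITS_PER_BOARD = 1
	 * and MAX_IO_UNITS_PER_BOARD = 2, ncm is 7 and
	 *
	 *	sz = sizeof (sbd_stat_t) + 6 * sizeof (sbd_dev_stat_t)
	 *
	 * because sbd_stat_t already embeds one sbd_dev_stat_t as
	 * s_stat[0].  The same embedding is why pnstat, derived from the
	 * plugin's buffer size pbsz, is incremented by one just above.
	 */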
2214
2215 if (bp->b_id == 0) {
2216 bzero(&pstat, sizeof (pstat));
2217 } else {
2218 sbd_error_t *err;
2219
2220 err = drmach_status(bp->b_id, &pstat);
2221 if (err) {
2222 DRERR_SET_C(&hp->h_err, &err);
2223 rv = EIO;
2224 goto status_done;
2225 }
2226 }
2227
2228 dstatp = (sbd_stat_t *)(void *)GETSTRUCT(char, sz);
2229
2230 devstatp = &dstatp->s_stat[0];
2231
2232 dstatp->s_board = bp->b_num;
2233
2234 /*
2235 * Detect transitions between empty and disconnected.
2236 */
2237 if (!pstat.empty && (bp->b_rstate == SBD_STAT_EMPTY))
2238 bp->b_rstate = SBD_STAT_DISCONNECTED;
2239 else if (pstat.empty && (bp->b_rstate == SBD_STAT_DISCONNECTED))
2240 bp->b_rstate = SBD_STAT_EMPTY;
2241
2242 dstatp->s_rstate = bp->b_rstate;
2243 dstatp->s_ostate = bp->b_ostate;
2244 dstatp->s_cond = bp->b_cond = pstat.cond;
2245 dstatp->s_busy = bp->b_busy | pstat.busy;
2246 dstatp->s_time = bp->b_time;
2247 dstatp->s_power = pstat.powered;
2248 dstatp->s_assigned = bp->b_assigned = pstat.assigned;
2249 dstatp->s_nstat = nstat = 0;
2250 bcopy(&pstat.type[0], &dstatp->s_type[0], SBD_TYPE_LEN);
2251 bcopy(&pstat.info[0], &dstatp->s_info[0], SBD_MAX_INFO);
2252
2253 devset &= DR_DEVS_PRESENT(bp);
2254 if (devset == 0) {
2255 /*
2256 * No device chosen.
2257 */
2258 PR_ALL("%s: no device present\n", f);
2259 }
2260
2261 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
2262 if ((nstat = dr_cpu_status(hp, devset, devstatp)) > 0) {
2263 dstatp->s_nstat += nstat;
2264 devstatp += nstat;
2265 }
2266
2267 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
2268 if ((nstat = dr_mem_status(hp, devset, devstatp)) > 0) {
2269 dstatp->s_nstat += nstat;
2270 devstatp += nstat;
2271 }
2272
2273 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
2274 if ((nstat = dr_io_status(hp, devset, devstatp)) > 0) {
2275 dstatp->s_nstat += nstat;
2276 devstatp += nstat;
2277 }
2278
2279 /*
2280 * Due to a possible change in number of components between
2281 * the time of plugin's GETNCM call and now, there may be
2282 * more or less components than the plugin's buffer can
2283 * hold. Adjust s_nstat accordingly.
2284 */
2285
2286 dstatp->s_nstat = dstatp->s_nstat > pnstat ? pnstat : dstatp->s_nstat;
2287
2288 #ifdef _MULTI_DATAMODEL
2289 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2290 int i, j;
2291 sbd_stat32_t *dstat32p;
2292
2293 dstat32p = (sbd_stat32_t *)devstatp;
2294
2295 /* Alignment Paranoia */
2296 if ((ulong_t)dstat32p & 0x1) {
2297 PR_ALL("%s: alignment: sz=0x%lx dstat32p=0x%p\n",
2298 f, sizeof (sbd_stat32_t), (void *)dstat32p);
2299 DR_OP_INTERNAL_ERROR(hp);
2300 rv = EINVAL;
2301 goto status_done;
2302 }
2303
2304 /* paranoia: detect buffer overrun */
2305 if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
2306 ((caddr_t)dstatp) + sz) {
2307 DR_OP_INTERNAL_ERROR(hp);
2308 rv = EINVAL;
2309 goto status_done;
2310 }
2311
2312 /* copy sbd_stat_t structure members */
2313 #define _SBD_STAT(t, m) dstat32p->m = (t)dstatp->m
2314 _SBD_STAT(int32_t, s_board);
2315 _SBD_STAT(int32_t, s_rstate);
2316 _SBD_STAT(int32_t, s_ostate);
2317 _SBD_STAT(int32_t, s_cond);
2318 _SBD_STAT(int32_t, s_busy);
2319 _SBD_STAT(time32_t, s_time);
2320 _SBD_STAT(uint32_t, s_power);
2321 _SBD_STAT(uint32_t, s_assigned);
2322 _SBD_STAT(int32_t, s_nstat);
2323 bcopy(&dstatp->s_type[0], &dstat32p->s_type[0],
2324 SBD_TYPE_LEN);
2325 bcopy(&dstatp->s_info[0], &dstat32p->s_info[0],
2326 SBD_MAX_INFO);
2327 #undef _SBD_STAT
2328
2329 for (i = 0; i < dstatp->s_nstat; i++) {
2330 sbd_dev_stat_t *dsp = &dstatp->s_stat[i];
2331 sbd_dev_stat32_t *ds32p = &dstat32p->s_stat[i];
2332 #define _SBD_DEV_STAT(t, m) ds32p->m = (t)dsp->m
2333
2334 /* copy sbd_cm_stat_t structure members */
2335 _SBD_DEV_STAT(int32_t, ds_type);
2336 _SBD_DEV_STAT(int32_t, ds_unit);
2337 _SBD_DEV_STAT(int32_t, ds_ostate);
2338 _SBD_DEV_STAT(int32_t, ds_cond);
2339 _SBD_DEV_STAT(int32_t, ds_busy);
2340 _SBD_DEV_STAT(int32_t, ds_suspend);
2341 _SBD_DEV_STAT(time32_t, ds_time);
2342 bcopy(&dsp->ds_name[0], &ds32p->ds_name[0],
2343 OBP_MAXPROPNAME);
2344
2345 switch (dsp->ds_type) {
2346 case SBD_COMP_CPU:
2347 /* copy sbd_cpu_stat_t structure members */
2348 _SBD_DEV_STAT(int32_t, d_cpu.cs_isbootproc);
2349 _SBD_DEV_STAT(int32_t, d_cpu.cs_cpuid);
2350 _SBD_DEV_STAT(int32_t, d_cpu.cs_speed);
2351 _SBD_DEV_STAT(int32_t, d_cpu.cs_ecache);
2352 break;
2353
2354 case SBD_COMP_MEM:
2355 /* copy sbd_mem_stat_t structure members */
2356 _SBD_DEV_STAT(int32_t, d_mem.ms_interleave);
2357 _SBD_DEV_STAT(uint32_t, d_mem.ms_basepfn);
2358 _SBD_DEV_STAT(uint32_t, d_mem.ms_totpages);
2359 _SBD_DEV_STAT(uint32_t, d_mem.ms_detpages);
2360 _SBD_DEV_STAT(int32_t, d_mem.ms_pageslost);
2361 _SBD_DEV_STAT(uint32_t, d_mem.ms_managed_pages);
2362 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_pages);
2363 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_first);
2364 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_last);
2365 _SBD_DEV_STAT(int32_t, d_mem.ms_cage_enabled);
2366 _SBD_DEV_STAT(int32_t, d_mem.ms_peer_is_target);
2367 bcopy(&dsp->d_mem.ms_peer_ap_id[0],
2368 &ds32p->d_mem.ms_peer_ap_id[0],
2369 sizeof (ds32p->d_mem.ms_peer_ap_id));
2370 break;
2371
2372 case SBD_COMP_IO:
2373 /* copy sbd_io_stat_t structure members */
2374 _SBD_DEV_STAT(int32_t, d_io.is_referenced);
2375 _SBD_DEV_STAT(int32_t, d_io.is_unsafe_count);
2376
2377 for (j = 0; j < SBD_MAX_UNSAFE; j++)
2378 _SBD_DEV_STAT(int32_t,
2379 d_io.is_unsafe_list[j]);
2380
2381 bcopy(&dsp->d_io.is_pathname[0],
2382 &ds32p->d_io.is_pathname[0], MAXPATHLEN);
2383 break;
2384
2385 case SBD_COMP_CMP:
2386 /* copy sbd_cmp_stat_t structure members */
2387 bcopy(&dsp->d_cmp.ps_cpuid[0],
2388 &ds32p->d_cmp.ps_cpuid[0],
2389 sizeof (ds32p->d_cmp.ps_cpuid));
2390 _SBD_DEV_STAT(int32_t, d_cmp.ps_ncores);
2391 _SBD_DEV_STAT(int32_t, d_cmp.ps_speed);
2392 _SBD_DEV_STAT(int32_t, d_cmp.ps_ecache);
2393 break;
2394
2395 default:
2396 cmn_err(CE_WARN, "%s: unknown dev type (%d)",
2397 f, (int)dsp->ds_type);
2398 rv = EFAULT;
2399 goto status_done;
2400 }
2401 #undef _SBD_DEV_STAT
2402 }
2403
2404
2405 if (ddi_copyout((void *)dstat32p,
2406 hp->h_sbdcmd.cmd_stat.s_statp, pbsz, mode) != 0) {
2407 cmn_err(CE_WARN,
2408 "%s: failed to copyout status "
2409 "for board %d", f, bp->b_num);
2410 rv = EFAULT;
2411 goto status_done;
2412 }
2413 } else
2414 #endif /* _MULTI_DATAMODEL */
2415
2416 if (ddi_copyout((void *)dstatp, hp->h_sbdcmd.cmd_stat.s_statp,
2417 pbsz, mode) != 0) {
2418 cmn_err(CE_WARN,
2419 "%s: failed to copyout status for board %d",
2420 f, bp->b_num);
2421 rv = EFAULT;
2422 goto status_done;
2423 }
2424
2425 status_done:
2426 if (dstatp != NULL)
2427 FREESTRUCT(dstatp, char, sz);
2428
2429 dr_unlock_status(bp);
2430
2431 return (rv);
2432 }
2433
2434 static int
2435 dr_get_ncm(dr_handle_t *hp)
2436 {
2437 int i;
2438 int ncm = 0;
2439 dr_devset_t devset;
2440
2441 devset = DR_DEVS_PRESENT(hp->h_bd);
2442 if (hp->h_sbdcmd.cmd_cm.c_id.c_type != SBD_COMP_NONE)
2443 devset &= DEVSET(hp->h_sbdcmd.cmd_cm.c_id.c_type,
2444 DEVSET_ANYUNIT);
2445
2446 /*
2447 * Handle CPUs first to deal with possible CMP
2448 * devices. If the CPU is a CMP, we need to only
2449 * increment ncm once even if there are multiple
2450 * cores for that CMP present in the devset.
2451 */
2452 for (i = 0; i < MAX_CMP_UNITS_PER_BOARD; i++) {
2453 if (devset & DEVSET(SBD_COMP_CMP, i)) {
2454 ncm++;
2455 }
2456 }
2457
2458 /* eliminate the CPU information from the devset */
2459 devset &= ~(DEVSET(SBD_COMP_CMP, DEVSET_ANYUNIT));
2460
2461 for (i = 0; i < (sizeof (dr_devset_t) * 8); i++) {
2462 ncm += devset & 0x1;
2463 devset >>= 1;
2464 }
2465
2466 return (ncm);
2467 }
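
/*
 * Editorial example (hypothetical board population): if the present
 * devset holds two CMP units (however many cores each contributes),
 * one memory unit and two IO units, dr_get_ncm() returns 5: the CMP
 * loop adds 2, the CMP/core bits are then masked out, and the final
 * bit-count loop adds 1 + 2 for the remaining memory and IO bits.
 */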
2468
2469 /* used by dr_mem.c */
2470 /* TODO: eliminate dr_boardlist */
2471 dr_board_t *
2472 dr_lookup_board(int board_num)
2473 {
2474 dr_board_t *bp;
2475
2476 ASSERT(board_num >= 0 && board_num < MAX_BOARDS);
2477
2478 bp = &dr_boardlist[board_num];
2479 ASSERT(bp->b_num == board_num);
2480
2481 return (bp);
2482 }
2483
2484 static dr_dev_unit_t *
2485 dr_get_dev_unit(dr_board_t *bp, sbd_comp_type_t nt, int unit_num)
2486 {
2487 dr_dev_unit_t *dp;
2488
2489 dp = DR_GET_BOARD_DEVUNIT(bp, nt, unit_num);
2490 ASSERT(dp->du_common.sbdev_bp == bp);
2491 ASSERT(dp->du_common.sbdev_unum == unit_num);
2492 ASSERT(dp->du_common.sbdev_type == nt);
2493
2494 return (dp);
2495 }
2496
2497 dr_cpu_unit_t *
2498 dr_get_cpu_unit(dr_board_t *bp, int unit_num)
2499 {
2500 dr_dev_unit_t *dp;
2501
2502 ASSERT(unit_num >= 0 && unit_num < MAX_CPU_UNITS_PER_BOARD);
2503
2504 dp = dr_get_dev_unit(bp, SBD_COMP_CPU, unit_num);
2505 return (&dp->du_cpu);
2506 }
2507
2508 dr_mem_unit_t *
2509 dr_get_mem_unit(dr_board_t *bp, int unit_num)
2510 {
2511 dr_dev_unit_t *dp;
2512
2513 ASSERT(unit_num >= 0 && unit_num < MAX_MEM_UNITS_PER_BOARD);
2514
2515 dp = dr_get_dev_unit(bp, SBD_COMP_MEM, unit_num);
2516 return (&dp->du_mem);
2517 }
2518
2519 dr_io_unit_t *
2520 dr_get_io_unit(dr_board_t *bp, int unit_num)
2521 {
2522 dr_dev_unit_t *dp;
2523
2524 ASSERT(unit_num >= 0 && unit_num < MAX_IO_UNITS_PER_BOARD);
2525
2526 dp = dr_get_dev_unit(bp, SBD_COMP_IO, unit_num);
2527 return (&dp->du_io);
2528 }
2529
2530 dr_common_unit_t *
2531 dr_get_common_unit(dr_board_t *bp, sbd_comp_type_t nt, int unum)
2532 {
2533 dr_dev_unit_t *dp;
2534
2535 dp = dr_get_dev_unit(bp, nt, unum);
2536 return (&dp->du_common);
2537 }
2538
2539 static dr_devset_t
2540 dr_dev2devset(sbd_comp_id_t *cid)
2541 {
2542 static fn_t f = "dr_dev2devset";
2543
2544 dr_devset_t devset;
2545 int unit = cid->c_unit;
2546
2547 switch (cid->c_type) {
2548 case SBD_COMP_NONE:
2549 devset = DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
2550 devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
2551 devset |= DEVSET(SBD_COMP_IO, DEVSET_ANYUNIT);
2552 PR_ALL("%s: COMP_NONE devset = " DEVSET_FMT_STR "\n",
2553 f, DEVSET_FMT_ARG(devset));
2554 break;
2555
2556 case SBD_COMP_CPU:
2557 if ((unit > MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
2558 cmn_err(CE_WARN,
2559 "%s: invalid cpu unit# = %d",
2560 f, unit);
2561 devset = 0;
2562 } else {
2563 /*
2564 * Generate a devset that includes all the
2565 * cores of a CMP device. If this is not a
2566 * CMP, the extra cores will be eliminated
2567 * later since they are not present. This is
2568 * also true for CMP devices that do not have
2569 * all cores active.
2570 */
2571 devset = DEVSET(SBD_COMP_CMP, unit);
2572 }
2573
2574 PR_ALL("%s: CPU devset = " DEVSET_FMT_STR "\n",
2575 f, DEVSET_FMT_ARG(devset));
2576 break;
2577
2578 case SBD_COMP_MEM:
2579 if (unit == SBD_NULL_UNIT) {
2580 unit = 0;
2581 cid->c_unit = 0;
2582 }
2583
2584 if ((unit > MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
2585 cmn_err(CE_WARN,
2586 "%s: invalid mem unit# = %d",
2587 f, unit);
2588 devset = 0;
2589 } else
2590 devset = DEVSET(cid->c_type, unit);
2591
2592 PR_ALL("%s: MEM devset = " DEVSET_FMT_STR "\n",
2593 f, DEVSET_FMT_ARG(devset));
2594 break;
2595
2596 case SBD_COMP_IO:
2597 if ((unit > MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
2598 cmn_err(CE_WARN,
2599 "%s: invalid io unit# = %d",
2600 f, unit);
2601 devset = 0;
2602 } else
2603 devset = DEVSET(cid->c_type, unit);
2604
2605 PR_ALL("%s: IO devset = " DEVSET_FMT_STR "\n",
2606 f, DEVSET_FMT_ARG(devset));
2607 break;
2608
2609 default:
2610 case SBD_COMP_UNKNOWN:
2611 devset = 0;
2612 break;
2613 }
2614
2615 return (devset);
2616 }
2617
2618 /*
2619 * Converts a dynamic attachment point name to a SBD_COMP_* type.
2620  * Returns SBD_COMP_UNKNOWN if name is not recognized.
2621 */
2622 static int
2623 dr_dev_type_to_nt(char *type)
2624 {
2625 int i;
2626
2627 for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2628 if (strcmp(dr_devattr[i].s_devtype, type) == 0)
2629 break;
2630
2631 return (dr_devattr[i].s_nodetype);
2632 }
2633
2634 /*
2635 * Converts a SBD_COMP_* type to a dynamic attachment point name.
2636  * Returns NULL if the SBD_COMP_* type is not recognized.
2637 */
2638 char *
2639 dr_nt_to_dev_type(int nt)
2640 {
2641 int i;
2642
2643 for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2644 if (dr_devattr[i].s_nodetype == nt)
2645 break;
2646
2647 return (dr_devattr[i].s_devtype);
2648 }
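
/*
 * Editorial sketch (the dr_devattr[] entries themselves are defined
 * elsewhere in this file): the two converters above are inverse lookups
 * over that table, so, assuming an entry mapping the string "cpu" to
 * SBD_COMP_CPU, dr_dev_type_to_nt("cpu") would yield SBD_COMP_CPU and
 * dr_nt_to_dev_type(SBD_COMP_CPU) would yield "cpu".  Unrecognized
 * names fall through to SBD_COMP_UNKNOWN; unrecognized types return
 * the terminating entry's s_devtype (NULL, per the comment above).
 */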
2649
2650 /*
2651 * State transition policy is that if there is some component for which
2652 * the state transition is valid, then let it through. The exception is
2653 * SBD_CMD_DISCONNECT. On disconnect, the state transition must be valid
2654 * for ALL components.
2655 * Returns the state that is in error, if any.
2656 */
2657 static int
2658 dr_check_transition(dr_board_t *bp, dr_devset_t *devsetp,
2659 struct dr_state_trans *transp, int cmd)
2660 {
2661 int s, ut;
2662 int state_err = 0;
2663 dr_devset_t devset;
2664 dr_common_unit_t *cp;
2665 static fn_t f = "dr_check_transition";
2666
2667 devset = *devsetp;
2668
2669 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
2670 for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
2671 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
2672 continue;
2673
2674 cp = dr_get_common_unit(bp, SBD_COMP_CPU, ut);
2675 s = (int)cp->sbdev_state;
2676 if (!DR_DEV_IS_PRESENT(cp)) {
2677 DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2678 } else {
2679 if (transp->x_op[s].x_rv) {
2680 if (!state_err)
2681 state_err = s;
2682 DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2683 }
2684 }
2685 }
2686 }
2687 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
2688 for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
2689 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
2690 continue;
2691
2692 cp = dr_get_common_unit(bp, SBD_COMP_MEM, ut);
2693 s = (int)cp->sbdev_state;
2694 if (!DR_DEV_IS_PRESENT(cp)) {
2695 DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2696 } else {
2697 if (transp->x_op[s].x_rv) {
2698 if (!state_err)
2699 state_err = s;
2700 DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2701 }
2702 }
2703 }
2704 }
2705 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
2706 for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
2707 if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
2708 continue;
2709
2710 cp = dr_get_common_unit(bp, SBD_COMP_IO, ut);
2711 s = (int)cp->sbdev_state;
2712 if (!DR_DEV_IS_PRESENT(cp)) {
2713 DEVSET_DEL(devset, SBD_COMP_IO, ut);
2714 } else {
2715 if (transp->x_op[s].x_rv) {
2716 if (!state_err)
2717 state_err = s;
2718 DEVSET_DEL(devset, SBD_COMP_IO, ut);
2719 }
2720 }
2721 }
2722 }
2723
2724 PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
2725 f, (uint_t)*devsetp, (uint_t)devset);
2726
2727 *devsetp = devset;
2728 /*
2729 * If there are some remaining components for which
2730 * this state transition is valid, then allow them
2731 * through, otherwise if none are left then return
2732 * the state error. The exception is SBD_CMD_DISCONNECT.
2733 * On disconnect, the state transition must be valid for ALL
2734 * components.
2735 */
2736 if (cmd == SBD_CMD_DISCONNECT)
2737 return (state_err);
2738 return (devset ? 0 : state_err);
2739 }
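
/*
 * Editorial example (hypothetical states): if a request targets two
 * CPUs and one of them is in a state for which transp marks the
 * transition invalid, that CPU is dropped from *devsetp and 0 is
 * returned so the remaining CPU proceeds.  Under SBD_CMD_DISCONNECT
 * the same situation instead returns the offending state, because the
 * transition must be valid for every targeted component.
 */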
2740
2741 void
2742 dr_device_transition(dr_common_unit_t *cp, dr_state_t st)
2743 {
2744 PR_STATE("%s STATE %s(%d) -> %s(%d)\n",
2745 cp->sbdev_path,
2746 state_str[cp->sbdev_state], cp->sbdev_state,
2747 state_str[st], st);
2748
2749 cp->sbdev_state = st;
2750 if (st == DR_STATE_CONFIGURED) {
2751 cp->sbdev_ostate = SBD_STAT_CONFIGURED;
2752 if (cp->sbdev_bp->b_ostate != SBD_STAT_CONFIGURED) {
2753 cp->sbdev_bp->b_ostate = SBD_STAT_CONFIGURED;
2754 (void) drv_getparm(TIME,
2755 (void *) &cp->sbdev_bp->b_time);
2756 }
2757 } else
2758 cp->sbdev_ostate = SBD_STAT_UNCONFIGURED;
2759
2760 (void) drv_getparm(TIME, (void *) &cp->sbdev_time);
2761 }
2762
2763 static void
2764 dr_board_transition(dr_board_t *bp, dr_state_t st)
2765 {
2766 PR_STATE("BOARD %d STATE: %s(%d) -> %s(%d)\n",
2767 bp->b_num,
2768 state_str[bp->b_state], bp->b_state,
2769 state_str[st], st);
2770
2771 bp->b_state = st;
2772 }
2773
2774 void
2775 dr_op_err(int ce, dr_handle_t *hp, int code, char *fmt, ...)
2776 {
2777 sbd_error_t *err;
2778 va_list args;
2779
2780 va_start(args, fmt);
2781 err = drerr_new_v(code, fmt, args);
2782 va_end(args);
2783
2784 if (ce != CE_IGNORE)
2785 sbd_err_log(err, ce);
2786
2787 DRERR_SET_C(&hp->h_err, &err);
2788 }
2789
2790 void
2791 dr_dev_err(int ce, dr_common_unit_t *cp, int code)
2792 {
2793 sbd_error_t *err;
2794
2795 err = drerr_new(0, code, cp->sbdev_path, NULL);
2796
2797 if (ce != CE_IGNORE)
2798 sbd_err_log(err, ce);
2799
2800 DRERR_SET_C(&cp->sbdev_error, &err);
2801 }
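
/*
 * Editorial usage note: dr_op_err() chains the constructed sbd_error_t
 * onto the operation handle (hp->h_err), while dr_dev_err() chains it
 * onto the component (cp->sbdev_error).  A representative call from
 * later in this file is
 *
 *	dr_op_err(CE_IGNORE, hp, ESBD_INVAL, comp_id->c_name);
 *
 * which skips the log (CE_IGNORE) but still records the error on the
 * handle.
 */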
2802
2803 /*
2804 * A callback routine. Called from the drmach layer as a result of
2805 * call to drmach_board_find_devices from dr_init_devlists.
2806 */
2807 static sbd_error_t *
2808 dr_dev_found(void *data, const char *name, int unum, drmachid_t id)
2809 {
2810 dr_board_t *bp = data;
2811 dr_dev_unit_t *dp;
2812 int nt;
2813 static fn_t f = "dr_dev_found";
2814
2815 PR_ALL("%s (board = %d, name = %s, unum = %d, id = %p)...\n",
2816 f, bp->b_num, name, unum, id);
2817
2818 nt = dr_dev_type_to_nt((char *)name);
2819 if (nt == SBD_COMP_UNKNOWN) {
2820 /*
2821 * this should not happen. When it does, it indicates
2822 		 * a mismatch in devices supported by the drmach layer
2823 * vs devices supported by this layer.
2824 */
2825 return (DR_INTERNAL_ERROR());
2826 }
2827
2828 dp = DR_GET_BOARD_DEVUNIT(bp, nt, unum);
2829
2830 /* sanity check */
2831 ASSERT(dp->du_common.sbdev_bp == bp);
2832 ASSERT(dp->du_common.sbdev_unum == unum);
2833 ASSERT(dp->du_common.sbdev_type == nt);
2834
2835 /* render dynamic attachment point path of this unit */
2836 (void) snprintf(dp->du_common.sbdev_path,
2837 sizeof (dp->du_common.sbdev_path), "%s::%s%d",
2838 bp->b_path, name, DR_UNUM2SBD_UNUM(unum, nt));
2839
2840 dp->du_common.sbdev_id = id;
2841 DR_DEV_SET_PRESENT(&dp->du_common);
2842
2843 bp->b_ndev++;
2844
2845 return (NULL);
2846 }
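
/*
 * Editorial example (path values are hypothetical): given a board path
 * of ".../N0.SB2", a drmach name of "cpu" and an SBD unit number of 1,
 * the snprintf() above renders the dynamic attachment point path
 * ".../N0.SB2::cpu1".
 */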
2847
2848 static sbd_error_t *
2849 dr_init_devlists(dr_board_t *bp)
2850 {
2851 int i;
2852 sbd_error_t *err;
2853 dr_dev_unit_t *dp;
2854 static fn_t f = "dr_init_devlists";
2855
2856 PR_ALL("%s (%s)...\n", f, bp->b_path);
2857
2858 /* sanity check */
2859 ASSERT(bp->b_ndev == 0);
2860
2861 DR_DEVS_DISCONNECT(bp, (uint_t)-1);
2862
2863 /*
2864 * This routine builds the board's devlist and initializes
2865 * the common portion of the unit data structures.
2866 * Note: because the common portion is considered
2867 * uninitialized, the dr_get_*_unit() routines can not
2868 * be used.
2869 */
2870
2871 /*
2872 * Clear out old entries, if any.
2873 */
2874 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2875 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_CPU, i);
2876
2877 bzero(dp, sizeof (*dp));
2878 dp->du_common.sbdev_bp = bp;
2879 dp->du_common.sbdev_unum = i;
2880 dp->du_common.sbdev_type = SBD_COMP_CPU;
2881 }
2882
2883 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2884 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_MEM, i);
2885
2886 bzero(dp, sizeof (*dp));
2887 dp->du_common.sbdev_bp = bp;
2888 dp->du_common.sbdev_unum = i;
2889 dp->du_common.sbdev_type = SBD_COMP_MEM;
2890 }
2891
2892 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2893 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_IO, i);
2894
2895 bzero(dp, sizeof (*dp));
2896 dp->du_common.sbdev_bp = bp;
2897 dp->du_common.sbdev_unum = i;
2898 dp->du_common.sbdev_type = SBD_COMP_IO;
2899 }
2900
2901 err = NULL;
2902 if (bp->b_id) {
2903 /* find devices on this board */
2904 err = drmach_board_find_devices(
2905 bp->b_id, bp, dr_dev_found);
2906 }
2907
2908 return (err);
2909 }
2910
2911 /*
2912 * Return the unit number of the respective drmachid if
2913 * it's found to be attached.
2914 */
2915 static int
2916 dr_check_unit_attached(dr_common_unit_t *cp)
2917 {
2918 int rv = 0;
2919 processorid_t cpuid;
2920 uint64_t basepa, endpa;
2921 struct memlist *ml;
2922 extern struct memlist *phys_install;
2923 sbd_error_t *err;
2924 int yes;
2925 static fn_t f = "dr_check_unit_attached";
2926
2927 switch (cp->sbdev_type) {
2928 case SBD_COMP_CPU:
2929 err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
2930 if (err) {
2931 DRERR_SET_C(&cp->sbdev_error, &err);
2932 rv = -1;
2933 break;
2934 }
2935 mutex_enter(&cpu_lock);
2936 if (cpu_get(cpuid) == NULL)
2937 rv = -1;
2938 mutex_exit(&cpu_lock);
2939 break;
2940
2941 case SBD_COMP_MEM:
2942 err = drmach_mem_get_slice_info(cp->sbdev_id,
2943 &basepa, &endpa, NULL);
2944 if (err) {
2945 DRERR_SET_C(&cp->sbdev_error, &err);
2946 rv = -1;
2947 break;
2948 }
2949
2950 /*
2951 * Check if base address is in phys_install.
2952 */
2953 memlist_read_lock();
2954 for (ml = phys_install; ml; ml = ml->ml_next)
2955 if ((endpa <= ml->ml_address) ||
2956 (basepa >= (ml->ml_address + ml->ml_size)))
2957 continue;
2958 else
2959 break;
2960 memlist_read_unlock();
2961 if (ml == NULL)
2962 rv = -1;
2963 break;
2964
2965 case SBD_COMP_IO:
2966 err = drmach_io_is_attached(cp->sbdev_id, &yes);
2967 if (err) {
2968 DRERR_SET_C(&cp->sbdev_error, &err);
2969 rv = -1;
2970 break;
2971 } else if (!yes)
2972 rv = -1;
2973 break;
2974
2975 default:
2976 PR_ALL("%s: unexpected nodetype(%d) for id 0x%p\n",
2977 f, cp->sbdev_type, cp->sbdev_id);
2978 rv = -1;
2979 break;
2980 }
2981
2982 return (rv);
2983 }
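
/*
 * Editorial note on the memory case above: a phys_install entry
 * overlaps the slice [basepa, endpa) unless it lies entirely below or
 * entirely above it, so the loop keeps scanning while
 *
 *	endpa <= ml->ml_address || basepa >= ml->ml_address + ml->ml_size
 *
 * holds and stops at the first overlapping entry; ml == NULL after the
 * loop means no installed memory intersects the slice.
 */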
2984
2985 /*
2986 * See if drmach recognizes the passthru command. DRMACH expects the
2987 * id to identify the thing to which the command is being applied. Using
2988 * nonsense SBD terms, that information has been perversely encoded in the
2989 * c_id member of the sbd_cmd_t structure. This logic reads those tea
2990 * leaves, finds the associated drmach id, then calls drmach to process
2991 * the passthru command.
2992 */
2993 static int
2994 dr_pt_try_drmach(dr_handle_t *hp)
2995 {
2996 dr_board_t *bp = hp->h_bd;
2997 sbd_comp_id_t *comp_id = &hp->h_sbdcmd.cmd_cm.c_id;
2998 drmachid_t id;
2999
3000 if (comp_id->c_type == SBD_COMP_NONE) {
3001 id = bp->b_id;
3002 } else {
3003 sbd_comp_type_t nt;
3004
3005 nt = dr_dev_type_to_nt(comp_id->c_name);
3006 if (nt == SBD_COMP_UNKNOWN) {
3007 dr_op_err(CE_IGNORE, hp, ESBD_INVAL, comp_id->c_name);
3008 id = 0;
3009 } else {
3010 /* pt command applied to dynamic attachment point */
3011 dr_common_unit_t *cp;
3012 cp = dr_get_common_unit(bp, nt, comp_id->c_unit);
3013 id = cp->sbdev_id;
3014 }
3015 }
3016
3017 if (hp->h_err == NULL)
3018 hp->h_err = drmach_passthru(id, &hp->h_opts);
3019
3020 return (hp->h_err == NULL ? 0 : -1);
3021 }
3022
3023 static int
3024 dr_pt_ioctl(dr_handle_t *hp)
3025 {
3026 int cmd, rv, len;
3027 int32_t sz;
3028 int found;
3029 char *copts;
3030 static fn_t f = "dr_pt_ioctl";
3031
3032 PR_ALL("%s...\n", f);
3033
3034 sz = hp->h_opts.size;
3035 copts = hp->h_opts.copts;
3036
3037 if (sz == 0 || copts == (char *)NULL) {
3038 cmn_err(CE_WARN, "%s: invalid passthru args", f);
3039 return (EINVAL);
3040 }
3041
3042 found = 0;
3043 for (cmd = 0; cmd < (sizeof (pt_arr) / sizeof (pt_arr[0])); cmd++) {
3044 len = strlen(pt_arr[cmd].pt_name);
3045 found = (strncmp(pt_arr[cmd].pt_name, copts, len) == 0);
3046 if (found)
3047 break;
3048 }
3049
3050 if (found)
3051 rv = (*pt_arr[cmd].pt_func)(hp);
3052 else
3053 rv = dr_pt_try_drmach(hp);
3054
3055 return (rv);
3056 }
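
/*
 * Editorial sketch (the pt_arr[] contents are defined elsewhere in this
 * file): dr_pt_ioctl() matches the caller-supplied option string by
 * prefix against each pt_arr[].pt_name and calls the matching handler;
 * any string that matches nothing falls through to dr_pt_try_drmach(),
 * which resolves a drmach id from the command's c_id and hands the
 * request to drmach_passthru().
 */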
3057
3058 /*
3059 * Called at driver load time to determine the state and condition
3060 * of an existing board in the system.
3061 */
3062 static void
3063 dr_board_discovery(dr_board_t *bp)
3064 {
3065 int i;
3066 dr_devset_t devs_lost, devs_attached = 0;
3067 dr_cpu_unit_t *cp;
3068 dr_mem_unit_t *mp;
3069 dr_io_unit_t *ip;
3070 static fn_t f = "dr_board_discovery";
3071
3072 if (DR_DEVS_PRESENT(bp) == 0) {
3073 PR_ALL("%s: board %d has no devices present\n",
3074 f, bp->b_num);
3075 return;
3076 }
3077
3078 /*
3079 * Check for existence of cpus.
3080 */
3081 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
3082 cp = dr_get_cpu_unit(bp, i);
3083
3084 if (!DR_DEV_IS_PRESENT(&cp->sbc_cm))
3085 continue;
3086
3087 if (dr_check_unit_attached(&cp->sbc_cm) >= 0) {
3088 DR_DEV_SET_ATTACHED(&cp->sbc_cm);
3089 DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
3090 PR_ALL("%s: board %d, cpu-unit %d - attached\n",
3091 f, bp->b_num, i);
3092 }
3093 dr_init_cpu_unit(cp);
3094 }
3095
3096 /*
3097 * Check for existence of memory.
3098 */
3099 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
3100 mp = dr_get_mem_unit(bp, i);
3101
3102 if (!DR_DEV_IS_PRESENT(&mp->sbm_cm))
3103 continue;
3104
3105 if (dr_check_unit_attached(&mp->sbm_cm) >= 0) {
3106 DR_DEV_SET_ATTACHED(&mp->sbm_cm);
3107 DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
3108 PR_ALL("%s: board %d, mem-unit %d - attached\n",
3109 f, bp->b_num, i);
3110 }
3111 dr_init_mem_unit(mp);
3112 }
3113
3114 /*
3115 * Check for i/o state.
3116 */
3117 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
3118 ip = dr_get_io_unit(bp, i);
3119
3120 if (!DR_DEV_IS_PRESENT(&ip->sbi_cm))
3121 continue;
3122
3123 if (dr_check_unit_attached(&ip->sbi_cm) >= 0) {
3124 /*
3125 * Found it!
3126 */
3127 DR_DEV_SET_ATTACHED(&ip->sbi_cm);
3128 DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
3129 PR_ALL("%s: board %d, io-unit %d - attached\n",
3130 f, bp->b_num, i);
3131 }
3132 dr_init_io_unit(ip);
3133 }
3134
3135 DR_DEVS_CONFIGURE(bp, devs_attached);
3136 if (devs_attached && ((devs_lost = DR_DEVS_UNATTACHED(bp)) != 0)) {
3137 int ut;
3138
3139 /*
3140 * It is not legal on board discovery to have a
3141 * board that is only partially attached. A board
3142 * is either all attached or all connected. If a
3143 * board has at least one attached device, then
3144 		 * the remaining devices, if any, must have
3145 * been lost or disconnected. These devices can
3146 * only be recovered by a full attach from scratch.
3147 * Note that devices previously in the unreferenced
3148 * state are subsequently lost until the next full
3149 * attach. This is necessary since the driver unload
3150 * that must have occurred would have wiped out the
3151 * information necessary to re-configure the device
3152 * back online, e.g. memlist.
3153 */
3154 PR_ALL("%s: some devices LOST (" DEVSET_FMT_STR ")...\n",
3155 f, DEVSET_FMT_ARG(devs_lost));
3156
3157 for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
3158 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut))
3159 continue;
3160
3161 cp = dr_get_cpu_unit(bp, ut);
3162 dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
3163 }
3164
3165 for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
3166 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut))
3167 continue;
3168
3169 mp = dr_get_mem_unit(bp, ut);
3170 dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
3171 }
3172
3173 for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
3174 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut))
3175 continue;
3176
3177 ip = dr_get_io_unit(bp, ut);
3178 dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
3179 }
3180
3181 DR_DEVS_DISCONNECT(bp, devs_lost);
3182 }
3183 }
3184
3185 static int
3186 dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd)
3187 {
3188 sbd_error_t *err;
3189
3190 mutex_init(&bp->b_lock, NULL, MUTEX_DRIVER, NULL);
3191 mutex_init(&bp->b_slock, NULL, MUTEX_DRIVER, NULL);
3192 cv_init(&bp->b_scv, NULL, CV_DRIVER, NULL);
3193 bp->b_rstate = SBD_STAT_EMPTY;
3194 bp->b_ostate = SBD_STAT_UNCONFIGURED;
3195 bp->b_cond = SBD_COND_UNKNOWN;
3196 (void) drv_getparm(TIME, (void *)&bp->b_time);
3197
3198 (void) drmach_board_lookup(bd, &bp->b_id);
3199 bp->b_num = bd;
3200 bp->b_dip = dip;
3201
3202 bp->b_dev[DEVSET_NIX(SBD_COMP_CPU)] = GETSTRUCT(dr_dev_unit_t,
3203 MAX_CPU_UNITS_PER_BOARD);
3204
3205 bp->b_dev[DEVSET_NIX(SBD_COMP_MEM)] = GETSTRUCT(dr_dev_unit_t,
3206 MAX_MEM_UNITS_PER_BOARD);
3207
3208 bp->b_dev[DEVSET_NIX(SBD_COMP_IO)] = GETSTRUCT(dr_dev_unit_t,
3209 MAX_IO_UNITS_PER_BOARD);
3210
3211 /*
3212 * Initialize the devlists
3213 */
3214 err = dr_init_devlists(bp);
3215 if (err) {
3216 sbd_err_clear(&err);
3217 dr_board_destroy(bp);
3218 return (-1);
3219 } else if (bp->b_ndev == 0) {
3220 dr_board_transition(bp, DR_STATE_EMPTY);
3221 } else {
3222 /*
3223 * Couldn't have made it down here without
3224 * having found at least one device.
3225 */
3226 ASSERT(DR_DEVS_PRESENT(bp) != 0);
3227 /*
3228 * Check the state of any possible devices on the
3229 * board.
3230 */
3231 dr_board_discovery(bp);
3232
3233 bp->b_assigned = 1;
3234
3235 if (DR_DEVS_UNATTACHED(bp) == 0) {
3236 /*
3237 * The board has no unattached devices, therefore
3238 * by reason of insanity it must be configured!
3239 */
3240 dr_board_transition(bp, DR_STATE_CONFIGURED);
3241 bp->b_ostate = SBD_STAT_CONFIGURED;
3242 bp->b_rstate = SBD_STAT_CONNECTED;
3243 bp->b_cond = SBD_COND_OK;
3244 (void) drv_getparm(TIME, (void *)&bp->b_time);
3245 } else if (DR_DEVS_ATTACHED(bp)) {
3246 dr_board_transition(bp, DR_STATE_PARTIAL);
3247 bp->b_ostate = SBD_STAT_CONFIGURED;
3248 bp->b_rstate = SBD_STAT_CONNECTED;
3249 bp->b_cond = SBD_COND_OK;
3250 (void) drv_getparm(TIME, (void *)&bp->b_time);
3251 } else {
3252 dr_board_transition(bp, DR_STATE_CONNECTED);
3253 bp->b_rstate = SBD_STAT_CONNECTED;
3254 (void) drv_getparm(TIME, (void *)&bp->b_time);
3255 }
3256 }
3257
3258 return (0);
3259 }
3260
3261 static void
3262 dr_board_destroy(dr_board_t *bp)
3263 {
3264 PR_ALL("dr_board_destroy: num %d, path %s\n",
3265 bp->b_num, bp->b_path);
3266
3267 dr_board_transition(bp, DR_STATE_EMPTY);
3268 bp->b_rstate = SBD_STAT_EMPTY;
3269 (void) drv_getparm(TIME, (void *)&bp->b_time);
3270
3271 /*
3272 * Free up MEM unit structs.
3273 */
3274 FREESTRUCT(bp->b_dev[DEVSET_NIX(SBD_COMP_MEM)],
3275 dr_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
3276 bp->b_dev[DEVSET_NIX(SBD_COMP_MEM)] = NULL;
3277 /*
3278 * Free up CPU unit structs.
3279 */
3280 FREESTRUCT(bp->b_dev[DEVSET_NIX(SBD_COMP_CPU)],
3281 dr_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
3282 bp->b_dev[DEVSET_NIX(SBD_COMP_CPU)] = NULL;
3283 /*
3284 * Free up IO unit structs.
3285 */
3286 FREESTRUCT(bp->b_dev[DEVSET_NIX(SBD_COMP_IO)],
3287 dr_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
3288 bp->b_dev[DEVSET_NIX(SBD_COMP_IO)] = NULL;
3289
3290 mutex_destroy(&bp->b_lock);
3291 mutex_destroy(&bp->b_slock);
3292 cv_destroy(&bp->b_scv);
3293
3294 /*
3295 * Reset the board structure to its initial state, otherwise it will
3296 * cause trouble on the next call to dr_board_init() for the same board.
3297 * dr_board_init() may be called multiple times for the same board
3298 * if DR driver fails to initialize some boards.
3299 */
3300 bzero(bp, sizeof (*bp));
3301 }
3302
3303 void
3304 dr_lock_status(dr_board_t *bp)
3305 {
3306 mutex_enter(&bp->b_slock);
3307 while (bp->b_sflags & DR_BSLOCK)
3308 cv_wait(&bp->b_scv, &bp->b_slock);
3309 bp->b_sflags |= DR_BSLOCK;
3310 mutex_exit(&bp->b_slock);
3311 }
3312
3313 void
3314 dr_unlock_status(dr_board_t *bp)
3315 {
3316 mutex_enter(&bp->b_slock);
3317 bp->b_sflags &= ~DR_BSLOCK;
3318 cv_signal(&bp->b_scv);
3319 mutex_exit(&bp->b_slock);
3320 }
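
/*
 * Editorial usage note: callers bracket status-sensitive work with
 * these helpers.  dr_dev_status() is the interruptible variant of the
 * same protocol: it takes DR_BSLOCK by hand with cv_wait_sig() so a
 * signal can abort the wait, and releases it with dr_unlock_status()
 * once the copyout is done.
 */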
3321
3322 /*
3323 * Extract flags passed via ioctl.
3324 */
3325 int
3326 dr_cmd_flags(dr_handle_t *hp)
3327 {
3328 return (hp->h_sbdcmd.cmd_cm.c_flags);
3329 }
3330