1 /*	$NetBSD: rf_netbsdkintf.c,v 1.235 2007/11/08 04:10:09 oster Exp $	*/
2 /*-
3  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Greg Oster; Jason R. Thorpe.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *        This product includes software developed by the NetBSD
20  *        Foundation, Inc. and its contributors.
21  * 4. Neither the name of The NetBSD Foundation nor the names of its
22  *    contributors may be used to endorse or promote products derived
23  *    from this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1990, 1993
40  *      The Regents of the University of California.  All rights reserved.
41  *
42  * This code is derived from software contributed to Berkeley by
43  * the Systems Programming Group of the University of Utah Computer
44  * Science Department.
45  *
46  * Redistribution and use in source and binary forms, with or without
47  * modification, are permitted provided that the following conditions
48  * are met:
49  * 1. Redistributions of source code must retain the above copyright
50  *    notice, this list of conditions and the following disclaimer.
51  * 2. Redistributions in binary form must reproduce the above copyright
52  *    notice, this list of conditions and the following disclaimer in the
53  *    documentation and/or other materials provided with the distribution.
54  * 3. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  * from: Utah $Hdr: cd.c 1.6 90/11/28$
71  *
72  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
73  */
74 
75 /*
76  * Copyright (c) 1988 University of Utah.
77  *
78  * This code is derived from software contributed to Berkeley by
79  * the Systems Programming Group of the University of Utah Computer
80  * Science Department.
81  *
82  * Redistribution and use in source and binary forms, with or without
83  * modification, are permitted provided that the following conditions
84  * are met:
85  * 1. Redistributions of source code must retain the above copyright
86  *    notice, this list of conditions and the following disclaimer.
87  * 2. Redistributions in binary form must reproduce the above copyright
88  *    notice, this list of conditions and the following disclaimer in the
89  *    documentation and/or other materials provided with the distribution.
90  * 3. All advertising materials mentioning features or use of this software
91  *    must display the following acknowledgement:
92  *      This product includes software developed by the University of
93  *      California, Berkeley and its contributors.
94  * 4. Neither the name of the University nor the names of its contributors
95  *    may be used to endorse or promote products derived from this software
96  *    without specific prior written permission.
97  *
98  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
99  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
100  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
101  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
102  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
103  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
104  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
105  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
106  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
107  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
108  * SUCH DAMAGE.
109  *
110  * from: Utah $Hdr: cd.c 1.6 90/11/28$
111  *
112  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
113  */
114 
115 /*
116  * Copyright (c) 1995 Carnegie-Mellon University.
117  * All rights reserved.
118  *
119  * Authors: Mark Holland, Jim Zelenka
120  *
121  * Permission to use, copy, modify and distribute this software and
122  * its documentation is hereby granted, provided that both the copyright
123  * notice and this permission notice appear in all copies of the
124  * software, derivative works or modified versions, and any portions
125  * thereof, and that both notices appear in supporting documentation.
126  *
127  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
128  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
129  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
130  *
131  * Carnegie Mellon requests users of this software to return to
132  *
133  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
134  *  School of Computer Science
135  *  Carnegie Mellon University
136  *  Pittsburgh PA 15213-3890
137  *
138  * any improvements or extensions that they make and grant Carnegie the
139  * rights to redistribute these changes.
140  */
141 
142 /***********************************************************
143  *
144  * rf_kintf.c -- the kernel interface routines for RAIDframe
145  *
146  ***********************************************************/
147 
148 #include <sys/cdefs.h>
149 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.235 2007/11/08 04:10:09 oster Exp $");
150 
151 #include <sys/param.h>
152 #include <sys/errno.h>
153 #include <sys/pool.h>
154 #include <sys/proc.h>
155 #include <sys/queue.h>
156 #include <sys/disk.h>
157 #include <sys/device.h>
158 #include <sys/stat.h>
159 #include <sys/ioctl.h>
160 #include <sys/fcntl.h>
161 #include <sys/systm.h>
162 #include <sys/namei.h>
163 #include <sys/vnode.h>
164 #include <sys/disklabel.h>
165 #include <sys/conf.h>
166 #include <sys/lock.h>
167 #include <sys/buf.h>
168 #include <sys/bufq.h>
169 #include <sys/user.h>
170 #include <sys/reboot.h>
171 #include <sys/kauth.h>
172 
173 #include <prop/proplib.h>
174 
175 #include <dev/raidframe/raidframevar.h>
176 #include <dev/raidframe/raidframeio.h>
177 #include "raid.h"
178 #include "opt_raid_autoconfig.h"
179 #include "rf_raid.h"
180 #include "rf_copyback.h"
181 #include "rf_dag.h"
182 #include "rf_dagflags.h"
183 #include "rf_desc.h"
184 #include "rf_diskqueue.h"
185 #include "rf_etimer.h"
186 #include "rf_general.h"
187 #include "rf_kintf.h"
188 #include "rf_options.h"
189 #include "rf_driver.h"
190 #include "rf_parityscan.h"
191 #include "rf_threadstuff.h"
192 
193 #ifdef DEBUG
194 int     rf_kdebug_level = 0;
195 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
196 #else				/* DEBUG */
197 #define db1_printf(a) { }
198 #endif				/* DEBUG */
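
/*
 * Note the double parentheses at db1_printf() call sites, e.g.
 * db1_printf(("b_bcount is zero..\n")); -- the inner parentheses make
 * the whole argument list a single macro argument, so the macro can
 * expand to a plain printf call (or to nothing) without needing
 * variadic macro support.
 */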
199 
200 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
201 
202 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
203 
204 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
205 						 * spare table */
206 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
207 						 * installation process */
208 
209 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
210 
211 /* prototypes */
212 static void KernelWakeupFunc(struct buf *);
213 static void InitBP(struct buf *, struct vnode *, unsigned,
214     dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
215     void *, int, struct proc *);
216 static void raidinit(RF_Raid_t *);
217 
218 void raidattach(int);
219 static int raid_match(struct device *, struct cfdata *, void *);
220 static void raid_attach(struct device *, struct device *, void *);
221 static int raid_detach(struct device *, int);
222 
223 dev_type_open(raidopen);
224 dev_type_close(raidclose);
225 dev_type_read(raidread);
226 dev_type_write(raidwrite);
227 dev_type_ioctl(raidioctl);
228 dev_type_strategy(raidstrategy);
229 dev_type_dump(raiddump);
230 dev_type_size(raidsize);
231 
232 const struct bdevsw raid_bdevsw = {
233 	raidopen, raidclose, raidstrategy, raidioctl,
234 	raiddump, raidsize, D_DISK
235 };
236 
237 const struct cdevsw raid_cdevsw = {
238 	raidopen, raidclose, raidread, raidwrite, raidioctl,
239 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
240 };
241 
242 static struct dkdriver rf_dkdriver = { raidstrategy, minphys };
243 
244 /* XXX Not sure if the following should be replacing the raidPtrs above,
245    or if it should be used in conjunction with that...
246 */
247 
248 struct raid_softc {
249 	struct device *sc_dev;
250 	int     sc_flags;	/* flags */
251 	int     sc_cflags;	/* configuration flags */
252 	uint64_t sc_size;	/* size of the raid device */
253 	char    sc_xname[20];	/* XXX external name */
254 	struct disk sc_dkdev;	/* generic disk device info */
255 	struct bufq_state *buf_queue;	/* used for the device queue */
256 };
257 /* sc_flags */
258 #define RAIDF_INITED	0x01	/* unit has been initialized */
259 #define RAIDF_WLABEL	0x02	/* label area is writable */
260 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
261 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
262 #define RAIDF_LOCKED	0x80	/* unit is locked */
263 
264 #define	raidunit(x)	DISKUNIT(x)
265 int numraid = 0;
266 
267 extern struct cfdriver raid_cd;
268 CFATTACH_DECL(raid, sizeof(struct raid_softc),
269     raid_match, raid_attach, raid_detach, NULL);
270 
271 /*
272  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
273  * Be aware that large numbers can allow the driver to consume a lot of
274  * kernel memory, especially on writes, and in degraded mode reads.
275  *
276  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
277  * a single 64K write will typically require 64K for the old data,
278  * 64K for the old parity, and 64K for the new parity, for a total
279  * of 192K (if the parity buffer is not re-used immediately).
280  * Even if it is used immediately, that's still 128K, which when multiplied
281  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
282  *
283  * Now in degraded mode, for example, a 64K read on the above setup may
284  * require data reconstruction, which will require *all* of the 4 remaining
285  * disks to participate -- 4 * 32K/disk == 128K again.
286  */
287 
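/*
 * Rough rule of thumb from the example above (illustrative numbers only):
 * in-flight small writes can consume on the order of
 * RAIDOUTSTANDING * (2-3 stripe widths) of kernel buffer memory, in
 * addition to the incoming data itself.
 */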
288 #ifndef RAIDOUTSTANDING
289 #define RAIDOUTSTANDING   6
290 #endif
291 
292 #define RAIDLABELDEV(dev)	\
293 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
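
/*
 * RAIDLABELDEV() names the raw partition of the given raid unit; it is
 * used below when the on-disk label is written (see the writedisklabel()
 * call in raidioctl()).
 */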
294 
295 /* declared here, and made public, for the benefit of KVM stuff.. */
296 struct raid_softc *raid_softc;
297 
298 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
299 				     struct disklabel *);
300 static void raidgetdisklabel(dev_t);
301 static void raidmakedisklabel(struct raid_softc *);
302 
303 static int raidlock(struct raid_softc *);
304 static void raidunlock(struct raid_softc *);
305 
306 static void rf_markalldirty(RF_Raid_t *);
307 static void rf_set_properties(struct raid_softc *, RF_Raid_t *);
308 
309 void rf_ReconThread(struct rf_recon_req *);
310 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
311 void rf_CopybackThread(RF_Raid_t *raidPtr);
312 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
313 int rf_autoconfig(struct device *self);
314 void rf_buildroothack(RF_ConfigSet_t *);
315 
316 RF_AutoConfig_t *rf_find_raid_components(void);
317 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
318 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
319 static int rf_reasonable_label(RF_ComponentLabel_t *);
320 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
321 int rf_set_autoconfig(RF_Raid_t *, int);
322 int rf_set_rootpartition(RF_Raid_t *, int);
323 void rf_release_all_vps(RF_ConfigSet_t *);
324 void rf_cleanup_config_set(RF_ConfigSet_t *);
325 int rf_have_enough_components(RF_ConfigSet_t *);
326 int rf_auto_config_set(RF_ConfigSet_t *, int *);
327 
328 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
329 				  allow autoconfig to take place.
330 				  Note that this is overridden by having
331 				  RAID_AUTOCONFIG as an option in the
332 				  kernel config file.  */
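
/*
 * In a kernel configuration file the option is spelled
 * "options RAID_AUTOCONFIG"; it is delivered via opt_raid_autoconfig.h
 * and acted on in the #ifdef RAID_AUTOCONFIG block in raidattach() below.
 */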
333 
334 struct RF_Pools_s rf_pools;
335 
336 void
337 raidattach(int num)
338 {
339 	int raidID;
340 	int i, rc;
341 
342 #ifdef DEBUG
343 	printf("raidattach: Asked for %d units\n", num);
344 #endif
345 
346 	if (num <= 0) {
347 #ifdef DIAGNOSTIC
348 		panic("raidattach: count <= 0");
349 #endif
350 		return;
351 	}
352 	/* This is where all the initialization stuff gets done. */
353 
354 	numraid = num;
355 
356 	/* Make some space for requested number of units... */
357 
358 	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
359 	if (raidPtrs == NULL) {
360 		panic("raidPtrs is NULL!!");
361 	}
362 
363 	rf_mutex_init(&rf_sparet_wait_mutex);
364 
365 	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
366 
367 	for (i = 0; i < num; i++)
368 		raidPtrs[i] = NULL;
369 	rc = rf_BootRaidframe();
370 	if (rc == 0)
371 		printf("Kernelized RAIDframe activated\n");
372 	else
373 		panic("Serious error booting RAID!!");
374 
375 	/* put together some data structures like the CCD device does.  This
376 	 * lets us lock the device and what-not when it gets opened. */
377 
378 	raid_softc = (struct raid_softc *)
379 		malloc(num * sizeof(struct raid_softc),
380 		       M_RAIDFRAME, M_NOWAIT);
381 	if (raid_softc == NULL) {
382 		printf("WARNING: no memory for RAIDframe driver\n");
383 		return;
384 	}
385 
386 	memset(raid_softc, 0, num * sizeof(struct raid_softc));
387 
388 	for (raidID = 0; raidID < num; raidID++) {
389 		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);
390 
391 		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
392 			  (RF_Raid_t *));
393 		if (raidPtrs[raidID] == NULL) {
394 			printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
395 			numraid = raidID;
396 			return;
397 		}
398 	}
399 
400 	if (config_cfattach_attach(raid_cd.cd_name, &raid_ca)) {
401 		printf("config_cfattach_attach failed?\n");
402 	}
403 
404 #ifdef RAID_AUTOCONFIG
405 	raidautoconfig = 1;
406 #endif
407 
408 	/*
409 	 * Register a finalizer which will be used to auto-config RAID
410 	 * sets once all real hardware devices have been found.
411 	 */
412 	if (config_finalize_register(NULL, rf_autoconfig) != 0)
413 		printf("WARNING: unable to register RAIDframe finalizer\n");
414 }
415 
416 int
417 rf_autoconfig(struct device *self)
418 {
419 	RF_AutoConfig_t *ac_list;
420 	RF_ConfigSet_t *config_sets;
421 
422 	if (raidautoconfig == 0)
423 		return (0);
424 
425 	/* XXX This code can only be run once. */
426 	raidautoconfig = 0;
427 
428 	/* 1. locate all RAID components on the system */
429 #ifdef DEBUG
430 	printf("Searching for RAID components...\n");
431 #endif
432 	ac_list = rf_find_raid_components();
433 
434 	/* 2. Sort them into their respective sets. */
435 	config_sets = rf_create_auto_sets(ac_list);
436 
437 	/*
438 	 * 3. Evaluate each set andconfigure the valid ones.
439 	 * 3. Evaluate each set and configure the valid ones.
440 	 */
441 	rf_buildroothack(config_sets);
442 
443 	return 1;
444 }
445 
446 void
447 rf_buildroothack(RF_ConfigSet_t *config_sets)
448 {
449 	RF_ConfigSet_t *cset;
450 	RF_ConfigSet_t *next_cset;
451 	int retcode;
452 	int raidID;
453 	int rootID;
454 	int col;
455 	int num_root;
456 	char *devname;
457 
458 	rootID = 0;
459 	num_root = 0;
460 	cset = config_sets;
461 	while(cset != NULL ) {
462 		next_cset = cset->next;
463 		if (rf_have_enough_components(cset) &&
464 		    cset->ac->clabel->autoconfigure==1) {
465 			retcode = rf_auto_config_set(cset,&raidID);
466 			if (!retcode) {
467 #ifdef DEBUG
468 				printf("raid%d: configured ok\n", raidID);
469 #endif
470 				if (cset->rootable) {
471 					rootID = raidID;
472 					num_root++;
473 				}
474 			} else {
475 				/* The autoconfig didn't work :( */
476 #ifdef DEBUG
477 				printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
478 #endif
479 				rf_release_all_vps(cset);
480 			}
481 		} else {
482 #ifdef DEBUG
483 			printf("raid%d: not enough components\n", raidID);
484 #endif
485 			/* we're not autoconfiguring this set...
486 			   release the associated resources */
487 			rf_release_all_vps(cset);
488 		}
489 		/* cleanup */
490 		rf_cleanup_config_set(cset);
491 		cset = next_cset;
492 	}
493 
494 	/* if the user has specified what the root device should be
495 	   then we don't touch booted_device or boothowto... */
496 
497 	if (rootspec != NULL)
498 		return;
499 
500 	/* we found something bootable... */
501 
502 	if (num_root == 1) {
503 		booted_device = raid_softc[rootID].sc_dev;
504 	} else if (num_root > 1) {
505 
506 		/*
507 		 * Maybe the MD code can help. If it cannot, then
508 		 * setroot() will discover that we have no
509 		 * booted_device and will ask the user if nothing was
510 		 * hardwired in the kernel config file
511 		 */
512 
513 		if (booted_device == NULL)
514 			cpu_rootconf();
515 		if (booted_device == NULL)
516 			return;
517 
518 		num_root = 0;
519 		for (raidID = 0; raidID < numraid; raidID++) {
520 			if (raidPtrs[raidID]->valid == 0)
521 				continue;
522 
523 			if (raidPtrs[raidID]->root_partition == 0)
524 				continue;
525 
526 			for (col = 0; col < raidPtrs[raidID]->numCol; col++) {
527 				devname = raidPtrs[raidID]->Disks[col].devname;
528 				devname += sizeof("/dev/") - 1;
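				/* skip the "/dev/" prefix so devname can be
				   compared against dv_xname below */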
529 				if (strncmp(devname, booted_device->dv_xname,
530 					    strlen(booted_device->dv_xname)) != 0)
531 					continue;
532 #ifdef DEBUG
533 				printf("raid%d includes boot device %s\n",
534 				       raidID, devname);
535 #endif
536 				num_root++;
537 				rootID = raidID;
538 			}
539 		}
540 
541 		if (num_root == 1) {
542 			booted_device = raid_softc[rootID].sc_dev;
543 		} else {
544 			/* we can't guess.. require the user to answer... */
545 			boothowto |= RB_ASKNAME;
546 		}
547 	}
548 }
549 
550 
551 int
552 raidsize(dev_t dev)
553 {
554 	struct raid_softc *rs;
555 	struct disklabel *lp;
556 	int     part, unit, omask, size;
557 
558 	unit = raidunit(dev);
559 	if (unit >= numraid)
560 		return (-1);
561 	rs = &raid_softc[unit];
562 
563 	if ((rs->sc_flags & RAIDF_INITED) == 0)
564 		return (-1);
565 
566 	part = DISKPART(dev);
567 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
568 	lp = rs->sc_dkdev.dk_label;
569 
570 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
571 		return (-1);
572 
573 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
574 		size = -1;
575 	else
576 		size = lp->d_partitions[part].p_size *
577 		    (lp->d_secsize / DEV_BSIZE);
578 
579 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
580 		return (-1);
581 
582 	return (size);
583 
584 }
585 
586 int
587 raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
588 {
589 	int     unit = raidunit(dev);
590 	struct raid_softc *rs;
591 	const struct bdevsw *bdev;
592 	struct disklabel *lp;
593 	RF_Raid_t *raidPtr;
594 	daddr_t offset;
595 	int     part, c, sparecol, j, scol, dumpto;
596 	int     error = 0;
597 
598 	if (unit >= numraid)
599 		return (ENXIO);
600 
601 	rs = &raid_softc[unit];
602 	raidPtr = raidPtrs[unit];
603 
604 	if ((rs->sc_flags & RAIDF_INITED) == 0)
605 		return ENXIO;
606 
607 	/* we only support dumping to RAID 1 sets */
608 	if (raidPtr->Layout.numDataCol != 1 ||
609 	    raidPtr->Layout.numParityCol != 1)
610 		return EINVAL;
611 
612 
613 	if ((error = raidlock(rs)) != 0)
614 		return error;
615 
616 	if (size % DEV_BSIZE != 0) {
617 		error = EINVAL;
618 		goto out;
619 	}
620 
621 	if (blkno + size / DEV_BSIZE > rs->sc_size) {
622 		printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
623 		    "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
624 		    size / DEV_BSIZE, rs->sc_size);
625 		error = EINVAL;
626 		goto out;
627 	}
628 
629 	part = DISKPART(dev);
630 	lp = rs->sc_dkdev.dk_label;
631 	offset = lp->d_partitions[part].p_offset + RF_PROTECTED_SECTORS;
632 
633 	/* figure out what device is alive.. */
634 
635 	/*
636 	   Look for a component to dump to.  The preference for the
637 	   component to dump to is as follows:
638 	   1) the master
639 	   2) a used_spare of the master
640 	   3) the slave
641 	   4) a used_spare of the slave
642 	*/
643 
644 	dumpto = -1;
645 	for (c = 0; c < raidPtr->numCol; c++) {
646 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
647 			/* this might be the one */
648 			dumpto = c;
649 			break;
650 		}
651 	}
652 
653 	/*
654 	   At this point we have possibly selected a live master or a
655 	   live slave.  We now check to see if there is a spared
656 	   master (or a spared slave), if we didn't find a live master
657 	   or a live slave.
658 	*/
659 
660 	for (c = 0; c < raidPtr->numSpare; c++) {
661 		sparecol = raidPtr->numCol + c;
662 		if (raidPtr->Disks[sparecol].status ==  rf_ds_used_spare) {
663 			/* How about this one? */
664 			scol = -1;
665 			for(j=0;j<raidPtr->numCol;j++) {
666 				if (raidPtr->Disks[j].spareCol == sparecol) {
667 					scol = j;
668 					break;
669 				}
670 			}
671 			if (scol == 0) {
672 				/*
673 				   We must have found a spared master!
674 				   We'll take that over anything else
675 				   found so far.  (We couldn't have
676 				   found a real master before, since
677 				   this is a used spare, and it's
678 				   saying that it's replacing the
679 				   master.)  On reboot (with
680 				   autoconfiguration turned on)
681 				   sparecol will become the 1st
682 				   component (component0) of this set.
683 				*/
684 				dumpto = sparecol;
685 				break;
686 			} else if (scol != -1) {
687 				/*
688 				   Must be a spared slave.  We'll dump
689 				   to that if we havn't found anything
690 				   to that if we haven't found anything
691 				*/
692 				if (dumpto == -1)
693 					dumpto = sparecol;
694 			}
695 		}
696 	}
697 
698 	if (dumpto == -1) {
699 		/* we couldn't find any live components to dump to!?!?
700 		 */
701 		error = EINVAL;
702 		goto out;
703 	}
704 
705 	bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);
706 
707 	/*
708 	   Note that blkno is relative to this particular partition.
709 	   By adding the offset of this partition in the RAID
710 	   set, and also adding RF_PROTECTED_SECTORS, we get a
711 	   value that is relative to the partition used for the
712 	   underlying component.
713 	*/
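	/*
	   Illustrative numbers only: dumping to block 100 of a partition
	   that starts at offset 1000 within the RAID set results in an
	   I/O at block 100 + 1000 + RF_PROTECTED_SECTORS on the
	   underlying component.
	*/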
714 
715 	error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
716 				blkno + offset, va, size);
717 
718 out:
719 	raidunlock(rs);
720 
721 	return error;
722 }
723 /* ARGSUSED */
724 int
725 raidopen(dev_t dev, int flags, int fmt,
726     struct lwp *l)
727 {
728 	int     unit = raidunit(dev);
729 	struct raid_softc *rs;
730 	struct disklabel *lp;
731 	int     part, pmask;
732 	int     error = 0;
733 
734 	if (unit >= numraid)
735 		return (ENXIO);
736 	rs = &raid_softc[unit];
737 
738 	if ((error = raidlock(rs)) != 0)
739 		return (error);
740 	lp = rs->sc_dkdev.dk_label;
741 
742 	part = DISKPART(dev);
743 
744 	/*
745 	 * If there are wedges, and this is not RAW_PART, then we
746 	 * need to fail.
747 	 */
748 	if (rs->sc_dkdev.dk_nwedges != 0 && part != RAW_PART) {
749 		error = EBUSY;
750 		goto bad;
751 	}
752 	pmask = (1 << part);
753 
754 	if ((rs->sc_flags & RAIDF_INITED) &&
755 	    (rs->sc_dkdev.dk_openmask == 0))
756 		raidgetdisklabel(dev);
757 
758 	/* make sure that this partition exists */
759 
760 	if (part != RAW_PART) {
761 		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
762 		    ((part >= lp->d_npartitions) ||
763 			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
764 			error = ENXIO;
765 			goto bad;
766 		}
767 	}
768 	/* Prevent this unit from being unconfigured while open. */
769 	switch (fmt) {
770 	case S_IFCHR:
771 		rs->sc_dkdev.dk_copenmask |= pmask;
772 		break;
773 
774 	case S_IFBLK:
775 		rs->sc_dkdev.dk_bopenmask |= pmask;
776 		break;
777 	}
778 
779 	if ((rs->sc_dkdev.dk_openmask == 0) &&
780 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
781 		/* First one... mark things as dirty... Note that we *MUST*
782 		 have done a configure before this.  I DO NOT WANT TO BE
783 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
784 		 THAT THEY BELONG TOGETHER!!!!! */
785 		/* XXX should check to see if we're only open for reading
786 		   here... If so, we needn't do this, but then need some
787 		   other way of keeping track of what's happened.. */
788 
789 		rf_markalldirty( raidPtrs[unit] );
790 	}
791 
792 
793 	rs->sc_dkdev.dk_openmask =
794 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
795 
796 bad:
797 	raidunlock(rs);
798 
799 	return (error);
800 
801 
802 }
803 /* ARGSUSED */
804 int
805 raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
806 {
807 	int     unit = raidunit(dev);
808 	struct cfdata *cf;
809 	struct raid_softc *rs;
810 	int     error = 0;
811 	int     part;
812 
813 	if (unit >= numraid)
814 		return (ENXIO);
815 	rs = &raid_softc[unit];
816 
817 	if ((error = raidlock(rs)) != 0)
818 		return (error);
819 
820 	part = DISKPART(dev);
821 
822 	/* ...that much closer to allowing unconfiguration... */
823 	switch (fmt) {
824 	case S_IFCHR:
825 		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
826 		break;
827 
828 	case S_IFBLK:
829 		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
830 		break;
831 	}
832 	rs->sc_dkdev.dk_openmask =
833 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
834 
835 	if ((rs->sc_dkdev.dk_openmask == 0) &&
836 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
837 		/* Last one... device is not unconfigured yet.
838 		   If RAIDF_INITED is not set, device shutdown has
839 		   already taken care of setting the clean bits;
840 		   otherwise, mark things as clean here... */
841 
842 		rf_update_component_labels(raidPtrs[unit],
843 						 RF_FINAL_COMPONENT_UPDATE);
844 		if (doing_shutdown) {
845 			/* last one, and we're going down, so
846 			   lights out for this RAID set too. */
847 			error = rf_Shutdown(raidPtrs[unit]);
848 
849 			/* It's no longer initialized... */
850 			rs->sc_flags &= ~RAIDF_INITED;
851 
852 			/* detach the device */
853 
854 			cf = device_cfdata(rs->sc_dev);
855 			error = config_detach(rs->sc_dev, DETACH_QUIET);
856 			free(cf, M_RAIDFRAME);
857 
858 			/* Detach the disk. */
859 			disk_detach(&rs->sc_dkdev);
860 			disk_destroy(&rs->sc_dkdev);
861 		}
862 	}
863 
864 	raidunlock(rs);
865 	return (0);
866 
867 }
868 
869 void
870 raidstrategy(struct buf *bp)
871 {
872 	int s;
873 
874 	unsigned int raidID = raidunit(bp->b_dev);
875 	RF_Raid_t *raidPtr;
876 	struct raid_softc *rs = &raid_softc[raidID];
877 	int     wlabel;
878 
879 	if ((rs->sc_flags & RAIDF_INITED) ==0) {
880 		bp->b_error = ENXIO;
881 		goto done;
882 	}
883 	if (raidID >= numraid || !raidPtrs[raidID]) {
884 		bp->b_error = ENODEV;
885 		goto done;
886 	}
887 	raidPtr = raidPtrs[raidID];
888 	if (!raidPtr->valid) {
889 		bp->b_error = ENODEV;
890 		goto done;
891 	}
892 	if (bp->b_bcount == 0) {
893 		db1_printf(("b_bcount is zero..\n"));
894 		goto done;
895 	}
896 
897 	/*
898 	 * Do bounds checking and adjust transfer.  If there's an
899 	 * error, the bounds check will flag that for us.
900 	 */
901 
902 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
903 	if (DISKPART(bp->b_dev) == RAW_PART) {
904 		uint64_t size; /* device size in DEV_BSIZE unit */
905 
906 		if (raidPtr->logBytesPerSector > DEV_BSHIFT) {
907 			size = raidPtr->totalSectors <<
908 			    (raidPtr->logBytesPerSector - DEV_BSHIFT);
909 		} else {
910 			size = raidPtr->totalSectors >>
911 			    (DEV_BSHIFT - raidPtr->logBytesPerSector);
912 		}
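		/*
		 * size now holds the capacity of the whole set expressed
		 * in DEV_BSIZE units, whichever way the shift went.
		 */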
913 		if (bounds_check_with_mediasize(bp, DEV_BSIZE, size) <= 0) {
914 			goto done;
915 		}
916 	} else {
917 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
918 			db1_printf(("Bounds check failed!!:%d %d\n",
919 				(int) bp->b_blkno, (int) wlabel));
920 			goto done;
921 		}
922 	}
923 	s = splbio();
924 
925 	bp->b_resid = 0;
926 
927 	/* stuff it onto our queue */
928 	BUFQ_PUT(rs->buf_queue, bp);
929 
930 	/* schedule the IO to happen at the next convenient time */
931 	wakeup(&(raidPtrs[raidID]->iodone));
932 
933 	splx(s);
934 	return;
935 
936 done:
937 	bp->b_resid = bp->b_bcount;
938 	biodone(bp);
939 }
940 /* ARGSUSED */
941 int
942 raidread(dev_t dev, struct uio *uio, int flags)
943 {
944 	int     unit = raidunit(dev);
945 	struct raid_softc *rs;
946 
947 	if (unit >= numraid)
948 		return (ENXIO);
949 	rs = &raid_softc[unit];
950 
951 	if ((rs->sc_flags & RAIDF_INITED) == 0)
952 		return (ENXIO);
953 
954 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
955 
956 }
957 /* ARGSUSED */
958 int
959 raidwrite(dev_t dev, struct uio *uio, int flags)
960 {
961 	int     unit = raidunit(dev);
962 	struct raid_softc *rs;
963 
964 	if (unit >= numraid)
965 		return (ENXIO);
966 	rs = &raid_softc[unit];
967 
968 	if ((rs->sc_flags & RAIDF_INITED) == 0)
969 		return (ENXIO);
970 
971 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
972 
973 }
974 
975 int
976 raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
977 {
978 	int     unit = raidunit(dev);
979 	int     error = 0;
980 	int     part, pmask;
981 	struct cfdata *cf;
982 	struct raid_softc *rs;
983 	RF_Config_t *k_cfg, *u_cfg;
984 	RF_Raid_t *raidPtr;
985 	RF_RaidDisk_t *diskPtr;
986 	RF_AccTotals_t *totals;
987 	RF_DeviceConfig_t *d_cfg, **ucfgp;
988 	u_char *specific_buf;
989 	int retcode = 0;
990 	int column;
991 	int raidid;
992 	struct rf_recon_req *rrcopy, *rr;
993 	RF_ComponentLabel_t *clabel;
994 	RF_ComponentLabel_t *ci_label;
995 	RF_ComponentLabel_t **clabel_ptr;
996 	RF_SingleComponent_t *sparePtr,*componentPtr;
997 	RF_SingleComponent_t component;
998 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
999 	int i, j, d;
1000 #ifdef __HAVE_OLD_DISKLABEL
1001 	struct disklabel newlabel;
1002 #endif
1003 	struct dkwedge_info *dkw;
1004 
1005 	if (unit >= numraid)
1006 		return (ENXIO);
1007 	rs = &raid_softc[unit];
1008 	raidPtr = raidPtrs[unit];
1009 
1010 	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
1011 		(int) DISKPART(dev), (int) unit, (int) cmd));
1012 
1013 	/* Must be open for writes for these commands... */
1014 	switch (cmd) {
1015 #ifdef DIOCGSECTORSIZE
1016 	case DIOCGSECTORSIZE:
1017 		*(u_int *)data = raidPtr->bytesPerSector;
1018 		return 0;
1019 	case DIOCGMEDIASIZE:
1020 		*(off_t *)data =
1021 		    (off_t)raidPtr->totalSectors * raidPtr->bytesPerSector;
1022 		return 0;
1023 #endif
1024 	case DIOCSDINFO:
1025 	case DIOCWDINFO:
1026 #ifdef __HAVE_OLD_DISKLABEL
1027 	case ODIOCWDINFO:
1028 	case ODIOCSDINFO:
1029 #endif
1030 	case DIOCWLABEL:
1031 	case DIOCAWEDGE:
1032 	case DIOCDWEDGE:
1033 		if ((flag & FWRITE) == 0)
1034 			return (EBADF);
1035 	}
1036 
1037 	/* Must be initialized for these... */
1038 	switch (cmd) {
1039 	case DIOCGDINFO:
1040 	case DIOCSDINFO:
1041 	case DIOCWDINFO:
1042 #ifdef __HAVE_OLD_DISKLABEL
1043 	case ODIOCGDINFO:
1044 	case ODIOCWDINFO:
1045 	case ODIOCSDINFO:
1046 	case ODIOCGDEFLABEL:
1047 #endif
1048 	case DIOCGPART:
1049 	case DIOCWLABEL:
1050 	case DIOCGDEFLABEL:
1051 	case DIOCAWEDGE:
1052 	case DIOCDWEDGE:
1053 	case DIOCLWEDGES:
1054 	case RAIDFRAME_SHUTDOWN:
1055 	case RAIDFRAME_REWRITEPARITY:
1056 	case RAIDFRAME_GET_INFO:
1057 	case RAIDFRAME_RESET_ACCTOTALS:
1058 	case RAIDFRAME_GET_ACCTOTALS:
1059 	case RAIDFRAME_KEEP_ACCTOTALS:
1060 	case RAIDFRAME_GET_SIZE:
1061 	case RAIDFRAME_FAIL_DISK:
1062 	case RAIDFRAME_COPYBACK:
1063 	case RAIDFRAME_CHECK_RECON_STATUS:
1064 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1065 	case RAIDFRAME_GET_COMPONENT_LABEL:
1066 	case RAIDFRAME_SET_COMPONENT_LABEL:
1067 	case RAIDFRAME_ADD_HOT_SPARE:
1068 	case RAIDFRAME_REMOVE_HOT_SPARE:
1069 	case RAIDFRAME_INIT_LABELS:
1070 	case RAIDFRAME_REBUILD_IN_PLACE:
1071 	case RAIDFRAME_CHECK_PARITY:
1072 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1073 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1074 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
1075 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1076 	case RAIDFRAME_SET_AUTOCONFIG:
1077 	case RAIDFRAME_SET_ROOT:
1078 	case RAIDFRAME_DELETE_COMPONENT:
1079 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
1080 		if ((rs->sc_flags & RAIDF_INITED) == 0)
1081 			return (ENXIO);
1082 	}
1083 
1084 	switch (cmd) {
1085 
1086 		/* configure the system */
1087 	case RAIDFRAME_CONFIGURE:
1088 
1089 		if (raidPtr->valid) {
1090 			/* There is a valid RAID set running on this unit! */
1091 			printf("raid%d: Device already configured!\n",unit);
1092 			return(EINVAL);
1093 		}
1094 
1095 		/* copy-in the configuration information */
1096 		/* data points to a pointer to the configuration structure */
1097 
1098 		u_cfg = *((RF_Config_t **) data);
1099 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
1100 		if (k_cfg == NULL) {
1101 			return (ENOMEM);
1102 		}
1103 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
1104 		if (retcode) {
1105 			RF_Free(k_cfg, sizeof(RF_Config_t));
1106 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
1107 				retcode));
1108 			return (retcode);
1109 		}
1110 		/* allocate a buffer for the layout-specific data, and copy it
1111 		 * in */
1112 		if (k_cfg->layoutSpecificSize) {
1113 			if (k_cfg->layoutSpecificSize > 10000) {
1114 				/* sanity check */
1115 				RF_Free(k_cfg, sizeof(RF_Config_t));
1116 				return (EINVAL);
1117 			}
1118 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
1119 			    (u_char *));
1120 			if (specific_buf == NULL) {
1121 				RF_Free(k_cfg, sizeof(RF_Config_t));
1122 				return (ENOMEM);
1123 			}
1124 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
1125 			    k_cfg->layoutSpecificSize);
1126 			if (retcode) {
1127 				RF_Free(k_cfg, sizeof(RF_Config_t));
1128 				RF_Free(specific_buf,
1129 					k_cfg->layoutSpecificSize);
1130 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
1131 					retcode));
1132 				return (retcode);
1133 			}
1134 		} else
1135 			specific_buf = NULL;
1136 		k_cfg->layoutSpecific = specific_buf;
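		/* from here on, k_cfg->layoutSpecific points at the kernel
		   copy, so rf_Configure() never touches the user address */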
1137 
1138 		/* should do some kind of sanity check on the configuration.
1139 		 * Store the sum of all the bytes in the last byte? */
1140 
1141 		/* configure the system */
1142 
1143 		/*
1144 		 * Clear the entire RAID descriptor, just to make sure
1145 		 *  there is no stale data left in the case of a
1146 		 *  reconfiguration
1147 		 */
1148 		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
1149 		raidPtr->raidid = unit;
1150 
1151 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
1152 
1153 		if (retcode == 0) {
1154 
1155 			/* allow this many simultaneous IO's to
1156 			   this RAID device */
1157 			raidPtr->openings = RAIDOUTSTANDING;
1158 
1159 			raidinit(raidPtr);
1160 			rf_markalldirty(raidPtr);
1161 		}
1162 		/* free the buffers.  No return code here. */
1163 		if (k_cfg->layoutSpecificSize) {
1164 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
1165 		}
1166 		RF_Free(k_cfg, sizeof(RF_Config_t));
1167 
1168 		return (retcode);
1169 
1170 		/* shutdown the system */
1171 	case RAIDFRAME_SHUTDOWN:
1172 
1173 		if ((error = raidlock(rs)) != 0)
1174 			return (error);
1175 
1176 		/*
1177 		 * If somebody has a partition mounted, we shouldn't
1178 		 * shutdown.
1179 		 */
1180 
1181 		part = DISKPART(dev);
1182 		pmask = (1 << part);
1183 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
1184 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
1185 			(rs->sc_dkdev.dk_copenmask & pmask))) {
1186 			raidunlock(rs);
1187 			return (EBUSY);
1188 		}
1189 
1190 		retcode = rf_Shutdown(raidPtr);
1191 
1192 		/* It's no longer initialized... */
1193 		rs->sc_flags &= ~RAIDF_INITED;
1194 
1195 		/* free the pseudo device attach bits */
1196 
1197 		cf = device_cfdata(rs->sc_dev);
1198 		/* XXX this causes us to not return any errors
1199 		   from the above call to rf_Shutdown() */
1200 		retcode = config_detach(rs->sc_dev, DETACH_QUIET);
1201 		free(cf, M_RAIDFRAME);
1202 
1203 		/* Detach the disk. */
1204 		disk_detach(&rs->sc_dkdev);
1205 		disk_destroy(&rs->sc_dkdev);
1206 
1207 		raidunlock(rs);
1208 
1209 		return (retcode);
1210 	case RAIDFRAME_GET_COMPONENT_LABEL:
1211 		clabel_ptr = (RF_ComponentLabel_t **) data;
1212 		/* need to read the component label for the disk indicated
1213 		   by row,column in clabel */
1214 
1215 		/* For practice, let's get it directly from disk, rather
1216 		   than from the in-core copy */
1217 		RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
1218 			   (RF_ComponentLabel_t *));
1219 		if (clabel == NULL)
1220 			return (ENOMEM);
1221 
1222 		retcode = copyin( *clabel_ptr, clabel,
1223 				  sizeof(RF_ComponentLabel_t));
1224 
1225 		if (retcode) {
1226 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1227 			return(retcode);
1228 		}
1229 
1230 		clabel->row = 0; /* Don't allow looking at anything else.*/
1231 
1232 		column = clabel->column;
1233 
1234 		if ((column < 0) || (column >= raidPtr->numCol +
1235 				     raidPtr->numSpare)) {
1236 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1237 			return(EINVAL);
1238 		}
1239 
1240 		retcode = raidread_component_label(raidPtr->Disks[column].dev,
1241 				raidPtr->raid_cinfo[column].ci_vp,
1242 				clabel );
1243 
1244 		if (retcode == 0) {
1245 			retcode = copyout(clabel, *clabel_ptr,
1246 					  sizeof(RF_ComponentLabel_t));
1247 		}
1248 		RF_Free(clabel, sizeof(RF_ComponentLabel_t));
1249 		return (retcode);
1250 
1251 	case RAIDFRAME_SET_COMPONENT_LABEL:
1252 		clabel = (RF_ComponentLabel_t *) data;
1253 
1254 		/* XXX check the label for valid stuff... */
1255 		/* Note that some things *should not* get modified --
1256 		   the user should be re-initing the labels instead of
1257 		   trying to patch things.
1258 		   */
1259 
1260 		raidid = raidPtr->raidid;
1261 #ifdef DEBUG
1262 		printf("raid%d: Got component label:\n", raidid);
1263 		printf("raid%d: Version: %d\n", raidid, clabel->version);
1264 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1265 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1266 		printf("raid%d: Column: %d\n", raidid, clabel->column);
1267 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1268 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1269 		printf("raid%d: Status: %d\n", raidid, clabel->status);
1270 #endif
1271 		clabel->row = 0;
1272 		column = clabel->column;
1273 
1274 		if ((column < 0) || (column >= raidPtr->numCol)) {
1275 			return(EINVAL);
1276 		}
1277 
1278 		/* XXX this isn't allowed to do anything for now :-) */
1279 
1280 		/* XXX and before it is, we need to fill in the rest
1281 		   of the fields!?!?!?! */
1282 #if 0
1283 		raidwrite_component_label(
1284 		     raidPtr->Disks[column].dev,
1285 			    raidPtr->raid_cinfo[column].ci_vp,
1286 			    clabel );
1287 #endif
1288 		return (0);
1289 
1290 	case RAIDFRAME_INIT_LABELS:
1291 		clabel = (RF_ComponentLabel_t *) data;
1292 		/*
1293 		   we only want the serial number from
1294 		   the above.  We get all the rest of the information
1295 		   from the config that was used to create this RAID
1296 		   set.
1297 		   */
1298 
1299 		raidPtr->serial_number = clabel->serial_number;
1300 
1301 		RF_Malloc(ci_label, sizeof(RF_ComponentLabel_t),
1302 			  (RF_ComponentLabel_t *));
1303 		if (ci_label == NULL)
1304 			return (ENOMEM);
1305 
1306 		raid_init_component_label(raidPtr, ci_label);
1307 		ci_label->serial_number = clabel->serial_number;
1308 		ci_label->row = 0; /* we don't pretend to support more */
1309 
1310 		for(column=0;column<raidPtr->numCol;column++) {
1311 			diskPtr = &raidPtr->Disks[column];
1312 			if (!RF_DEAD_DISK(diskPtr->status)) {
1313 				ci_label->partitionSize = diskPtr->partitionSize;
1314 				ci_label->column = column;
1315 				raidwrite_component_label(
1316 							  raidPtr->Disks[column].dev,
1317 							  raidPtr->raid_cinfo[column].ci_vp,
1318 							  ci_label );
1319 			}
1320 		}
1321 		RF_Free(ci_label, sizeof(RF_ComponentLabel_t));
1322 
1323 		return (retcode);
1324 	case RAIDFRAME_SET_AUTOCONFIG:
1325 		d = rf_set_autoconfig(raidPtr, *(int *) data);
1326 		printf("raid%d: New autoconfig value is: %d\n",
1327 		       raidPtr->raidid, d);
1328 		*(int *) data = d;
1329 		return (retcode);
1330 
1331 	case RAIDFRAME_SET_ROOT:
1332 		d = rf_set_rootpartition(raidPtr, *(int *) data);
1333 		printf("raid%d: New rootpartition value is: %d\n",
1334 		       raidPtr->raidid, d);
1335 		*(int *) data = d;
1336 		return (retcode);
1337 
1338 		/* initialize all parity */
1339 	case RAIDFRAME_REWRITEPARITY:
1340 
1341 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1342 			/* Parity for RAID 0 is trivially correct */
1343 			raidPtr->parity_good = RF_RAID_CLEAN;
1344 			return(0);
1345 		}
1346 
1347 		if (raidPtr->parity_rewrite_in_progress == 1) {
1348 			/* Re-write is already in progress! */
1349 			return(EINVAL);
1350 		}
1351 
1352 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1353 					   rf_RewriteParityThread,
1354 					   raidPtr,"raid_parity");
1355 		return (retcode);
1356 
1357 
1358 	case RAIDFRAME_ADD_HOT_SPARE:
1359 		sparePtr = (RF_SingleComponent_t *) data;
1360 		memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
1361 		retcode = rf_add_hot_spare(raidPtr, &component);
1362 		return(retcode);
1363 
1364 	case RAIDFRAME_REMOVE_HOT_SPARE:
1365 		return(retcode);
1366 
1367 	case RAIDFRAME_DELETE_COMPONENT:
1368 		componentPtr = (RF_SingleComponent_t *)data;
1369 		memcpy( &component, componentPtr,
1370 			sizeof(RF_SingleComponent_t));
1371 		retcode = rf_delete_component(raidPtr, &component);
1372 		return(retcode);
1373 
1374 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
1375 		componentPtr = (RF_SingleComponent_t *)data;
1376 		memcpy( &component, componentPtr,
1377 			sizeof(RF_SingleComponent_t));
1378 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
1379 		return(retcode);
1380 
1381 	case RAIDFRAME_REBUILD_IN_PLACE:
1382 
1383 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1384 			/* Can't do this on a RAID 0!! */
1385 			return(EINVAL);
1386 		}
1387 
1388 		if (raidPtr->recon_in_progress == 1) {
1389 			/* a reconstruct is already in progress! */
1390 			return(EINVAL);
1391 		}
1392 
1393 		componentPtr = (RF_SingleComponent_t *) data;
1394 		memcpy( &component, componentPtr,
1395 			sizeof(RF_SingleComponent_t));
1396 		component.row = 0; /* we don't support any more */
1397 		column = component.column;
1398 
1399 		if ((column < 0) || (column >= raidPtr->numCol)) {
1400 			return(EINVAL);
1401 		}
1402 
1403 		RF_LOCK_MUTEX(raidPtr->mutex);
1404 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1405 		    (raidPtr->numFailures > 0)) {
1406 			/* XXX 0 above shouldn't be constant!!! */
1407 			/* some component other than this has failed.
1408 			   Let's not make things worse than they already
1409 			   are... */
1410 			printf("raid%d: Unable to reconstruct to disk at:\n",
1411 			       raidPtr->raidid);
1412 			printf("raid%d:     Col: %d   Too many failures.\n",
1413 			       raidPtr->raidid, column);
1414 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1415 			return (EINVAL);
1416 		}
1417 		if (raidPtr->Disks[column].status ==
1418 		    rf_ds_reconstructing) {
1419 			printf("raid%d: Unable to reconstruct to disk at:\n",
1420 			       raidPtr->raidid);
1421 			printf("raid%d:    Col: %d   Reconstruction already occurring!\n", raidPtr->raidid, column);
1422 
1423 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1424 			return (EINVAL);
1425 		}
1426 		if (raidPtr->Disks[column].status == rf_ds_spared) {
1427 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1428 			return (EINVAL);
1429 		}
1430 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1431 
1432 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1433 		if (rrcopy == NULL)
1434 			return(ENOMEM);
1435 
1436 		rrcopy->raidPtr = (void *) raidPtr;
1437 		rrcopy->col = column;
1438 
1439 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1440 					   rf_ReconstructInPlaceThread,
1441 					   rrcopy,"raid_reconip");
1442 		return(retcode);
1443 
1444 	case RAIDFRAME_GET_INFO:
1445 		if (!raidPtr->valid)
1446 			return (ENODEV);
1447 		ucfgp = (RF_DeviceConfig_t **) data;
1448 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1449 			  (RF_DeviceConfig_t *));
1450 		if (d_cfg == NULL)
1451 			return (ENOMEM);
1452 		d_cfg->rows = 1; /* there is only 1 row now */
1453 		d_cfg->cols = raidPtr->numCol;
1454 		d_cfg->ndevs = raidPtr->numCol;
1455 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
1456 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1457 			return (ENOMEM);
1458 		}
1459 		d_cfg->nspares = raidPtr->numSpare;
1460 		if (d_cfg->nspares >= RF_MAX_DISKS) {
1461 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1462 			return (ENOMEM);
1463 		}
1464 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1465 		d = 0;
1466 		for (j = 0; j < d_cfg->cols; j++) {
1467 			d_cfg->devs[d] = raidPtr->Disks[j];
1468 			d++;
1469 		}
1470 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1471 			d_cfg->spares[i] = raidPtr->Disks[j];
1472 		}
1473 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
1474 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1475 
1476 		return (retcode);
1477 
1478 	case RAIDFRAME_CHECK_PARITY:
1479 		*(int *) data = raidPtr->parity_good;
1480 		return (0);
1481 
1482 	case RAIDFRAME_RESET_ACCTOTALS:
1483 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1484 		return (0);
1485 
1486 	case RAIDFRAME_GET_ACCTOTALS:
1487 		totals = (RF_AccTotals_t *) data;
1488 		*totals = raidPtr->acc_totals;
1489 		return (0);
1490 
1491 	case RAIDFRAME_KEEP_ACCTOTALS:
1492 		raidPtr->keep_acc_totals = *(int *)data;
1493 		return (0);
1494 
1495 	case RAIDFRAME_GET_SIZE:
1496 		*(int *) data = raidPtr->totalSectors;
1497 		return (0);
1498 
1499 		/* fail a disk & optionally start reconstruction */
1500 	case RAIDFRAME_FAIL_DISK:
1501 
1502 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1503 			/* Can't do this on a RAID 0!! */
1504 			return(EINVAL);
1505 		}
1506 
1507 		rr = (struct rf_recon_req *) data;
1508 		rr->row = 0;
1509 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
1510 			return (EINVAL);
1511 
1512 
1513 		RF_LOCK_MUTEX(raidPtr->mutex);
1514 		if (raidPtr->status == rf_rs_reconstructing) {
1515 			/* you can't fail a disk while we're reconstructing! */
1516 			/* XXX wrong for RAID6 */
1517 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1518 			return (EINVAL);
1519 		}
1520 		if ((raidPtr->Disks[rr->col].status ==
1521 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
1522 			/* some other component has failed.  Let's not make
1523 			   things worse. XXX wrong for RAID6 */
1524 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1525 			return (EINVAL);
1526 		}
1527 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1528 			/* Can't fail a spared disk! */
1529 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1530 			return (EINVAL);
1531 		}
1532 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1533 
1534 		/* make a copy of the recon request so that we don't rely on
1535 		 * the user's buffer */
1536 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1537 		if (rrcopy == NULL)
1538 			return(ENOMEM);
1539 		memcpy(rrcopy, rr, sizeof(*rr));
1540 		rrcopy->raidPtr = (void *) raidPtr;
1541 
1542 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1543 					   rf_ReconThread,
1544 					   rrcopy,"raid_recon");
1545 		return (0);
1546 
1547 		/* invoke a copyback operation after recon on whatever disk
1548 		 * needs it, if any */
1549 	case RAIDFRAME_COPYBACK:
1550 
1551 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1552 			/* This makes no sense on a RAID 0!! */
1553 			return(EINVAL);
1554 		}
1555 
1556 		if (raidPtr->copyback_in_progress == 1) {
1557 			/* Copyback is already in progress! */
1558 			return(EINVAL);
1559 		}
1560 
1561 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1562 					   rf_CopybackThread,
1563 					   raidPtr,"raid_copyback");
1564 		return (retcode);
1565 
1566 		/* return the percentage completion of reconstruction */
1567 	case RAIDFRAME_CHECK_RECON_STATUS:
1568 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1569 			/* This makes no sense on a RAID 0, so tell the
1570 			   user it's done. */
1571 			*(int *) data = 100;
1572 			return(0);
1573 		}
1574 		if (raidPtr->status != rf_rs_reconstructing)
1575 			*(int *) data = 100;
1576 		else {
1577 			if (raidPtr->reconControl->numRUsTotal > 0) {
1578 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
1579 			} else {
1580 				*(int *) data = 0;
1581 			}
1582 		}
1583 		return (0);
1584 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1585 		progressInfoPtr = (RF_ProgressInfo_t **) data;
1586 		if (raidPtr->status != rf_rs_reconstructing) {
1587 			progressInfo.remaining = 0;
1588 			progressInfo.completed = 100;
1589 			progressInfo.total = 100;
1590 		} else {
1591 			progressInfo.total =
1592 				raidPtr->reconControl->numRUsTotal;
1593 			progressInfo.completed =
1594 				raidPtr->reconControl->numRUsComplete;
1595 			progressInfo.remaining = progressInfo.total -
1596 				progressInfo.completed;
1597 		}
1598 		retcode = copyout(&progressInfo, *progressInfoPtr,
1599 				  sizeof(RF_ProgressInfo_t));
1600 		return (retcode);
1601 
1602 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1603 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1604 			/* This makes no sense on a RAID 0, so tell the
1605 			   user it's done. */
1606 			*(int *) data = 100;
1607 			return(0);
1608 		}
1609 		if (raidPtr->parity_rewrite_in_progress == 1) {
1610 			*(int *) data = 100 *
1611 				raidPtr->parity_rewrite_stripes_done /
1612 				raidPtr->Layout.numStripe;
1613 		} else {
1614 			*(int *) data = 100;
1615 		}
1616 		return (0);
1617 
1618 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1619 		progressInfoPtr = (RF_ProgressInfo_t **) data;
1620 		if (raidPtr->parity_rewrite_in_progress == 1) {
1621 			progressInfo.total = raidPtr->Layout.numStripe;
1622 			progressInfo.completed =
1623 				raidPtr->parity_rewrite_stripes_done;
1624 			progressInfo.remaining = progressInfo.total -
1625 				progressInfo.completed;
1626 		} else {
1627 			progressInfo.remaining = 0;
1628 			progressInfo.completed = 100;
1629 			progressInfo.total = 100;
1630 		}
1631 		retcode = copyout(&progressInfo, *progressInfoPtr,
1632 				  sizeof(RF_ProgressInfo_t));
1633 		return (retcode);
1634 
1635 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
1636 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1637 			/* This makes no sense on a RAID 0 */
1638 			*(int *) data = 100;
1639 			return(0);
1640 		}
1641 		if (raidPtr->copyback_in_progress == 1) {
1642 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
1643 				raidPtr->Layout.numStripe;
1644 		} else {
1645 			*(int *) data = 100;
1646 		}
1647 		return (0);
1648 
1649 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1650 		progressInfoPtr = (RF_ProgressInfo_t **) data;
1651 		if (raidPtr->copyback_in_progress == 1) {
1652 			progressInfo.total = raidPtr->Layout.numStripe;
1653 			progressInfo.completed =
1654 				raidPtr->copyback_stripes_done;
1655 			progressInfo.remaining = progressInfo.total -
1656 				progressInfo.completed;
1657 		} else {
1658 			progressInfo.remaining = 0;
1659 			progressInfo.completed = 100;
1660 			progressInfo.total = 100;
1661 		}
1662 		retcode = copyout(&progressInfo, *progressInfoPtr,
1663 				  sizeof(RF_ProgressInfo_t));
1664 		return (retcode);
1665 
1666 		/* the sparetable daemon calls this to wait for the kernel to
1667 		 * need a spare table. this ioctl does not return until a
1668 		 * spare table is needed. XXX -- calling mpsleep here in the
1669 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1670 		 * -- I should either compute the spare table in the kernel,
1671 		 * or have a different -- XXX XXX -- interface (a different
1672 		 * character device) for delivering the table     -- XXX */
1673 #if 0
1674 	case RAIDFRAME_SPARET_WAIT:
1675 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1676 		while (!rf_sparet_wait_queue)
1677 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1678 		waitreq = rf_sparet_wait_queue;
1679 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1680 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1681 
1682 		/* structure assignment */
1683 		*((RF_SparetWait_t *) data) = *waitreq;
1684 
1685 		RF_Free(waitreq, sizeof(*waitreq));
1686 		return (0);
1687 
1688 		/* wakes up a process waiting on SPARET_WAIT and puts an error
1689 		 * code in it that will cause the daemon to exit */
1690 	case RAIDFRAME_ABORT_SPARET_WAIT:
1691 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1692 		waitreq->fcol = -1;
1693 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1694 		waitreq->next = rf_sparet_wait_queue;
1695 		rf_sparet_wait_queue = waitreq;
1696 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1697 		wakeup(&rf_sparet_wait_queue);
1698 		return (0);
1699 
1700 		/* used by the spare table daemon to deliver a spare table
1701 		 * into the kernel */
1702 	case RAIDFRAME_SEND_SPARET:
1703 
1704 		/* install the spare table */
1705 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1706 
1707 		/* respond to the requestor.  the return status of the spare
1708 		 * table installation is passed in the "fcol" field */
1709 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1710 		waitreq->fcol = retcode;
1711 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1712 		waitreq->next = rf_sparet_resp_queue;
1713 		rf_sparet_resp_queue = waitreq;
1714 		wakeup(&rf_sparet_resp_queue);
1715 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1716 
1717 		return (retcode);
1718 #endif
1719 
1720 	default:
1721 		break; /* fall through to the os-specific code below */
1722 
1723 	}
1724 
1725 	if (!raidPtr->valid)
1726 		return (EINVAL);
1727 
1728 	/*
1729 	 * Add support for "regular" device ioctls here.
1730 	 */
1731 
1732 	switch (cmd) {
1733 	case DIOCGDINFO:
1734 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1735 		break;
1736 #ifdef __HAVE_OLD_DISKLABEL
1737 	case ODIOCGDINFO:
1738 		newlabel = *(rs->sc_dkdev.dk_label);
1739 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1740 			return ENOTTY;
1741 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
1742 		break;
1743 #endif
1744 
1745 	case DIOCGPART:
1746 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1747 		((struct partinfo *) data)->part =
1748 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1749 		break;
1750 
1751 	case DIOCWDINFO:
1752 	case DIOCSDINFO:
1753 #ifdef __HAVE_OLD_DISKLABEL
1754 	case ODIOCWDINFO:
1755 	case ODIOCSDINFO:
1756 #endif
1757 	{
1758 		struct disklabel *lp;
1759 #ifdef __HAVE_OLD_DISKLABEL
1760 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1761 			memset(&newlabel, 0, sizeof newlabel);
1762 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
1763 			lp = &newlabel;
1764 		} else
1765 #endif
1766 		lp = (struct disklabel *)data;
1767 
1768 		if ((error = raidlock(rs)) != 0)
1769 			return (error);
1770 
1771 		rs->sc_flags |= RAIDF_LABELLING;
1772 
1773 		error = setdisklabel(rs->sc_dkdev.dk_label,
1774 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
1775 		if (error == 0) {
1776 			if (cmd == DIOCWDINFO
1777 #ifdef __HAVE_OLD_DISKLABEL
1778 			    || cmd == ODIOCWDINFO
1779 #endif
1780 			   )
1781 				error = writedisklabel(RAIDLABELDEV(dev),
1782 				    raidstrategy, rs->sc_dkdev.dk_label,
1783 				    rs->sc_dkdev.dk_cpulabel);
1784 		}
1785 		rs->sc_flags &= ~RAIDF_LABELLING;
1786 
1787 		raidunlock(rs);
1788 
1789 		if (error)
1790 			return (error);
1791 		break;
1792 	}
1793 
1794 	case DIOCWLABEL:
1795 		if (*(int *) data != 0)
1796 			rs->sc_flags |= RAIDF_WLABEL;
1797 		else
1798 			rs->sc_flags &= ~RAIDF_WLABEL;
1799 		break;
1800 
1801 	case DIOCGDEFLABEL:
1802 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1803 		break;
1804 
1805 #ifdef __HAVE_OLD_DISKLABEL
1806 	case ODIOCGDEFLABEL:
1807 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
1808 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1809 			return ENOTTY;
1810 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
1811 		break;
1812 #endif
1813 
1814 	case DIOCAWEDGE:
1815 	case DIOCDWEDGE:
1816 	    	dkw = (void *)data;
1817 
1818 		/* If the ioctl happens here, the parent is us. */
1819 		(void)strcpy(dkw->dkw_parent, rs->sc_xname);
1820 		return cmd == DIOCAWEDGE ? dkwedge_add(dkw) : dkwedge_del(dkw);
1821 
1822 	case DIOCLWEDGES:
1823 		return dkwedge_list(&rs->sc_dkdev,
1824 		    (struct dkwedge_list *)data, l);
1825 
1826 	default:
1827 		retcode = ENOTTY;
1828 	}
1829 	return (retcode);
1830 
1831 }
1832 
1833 
1834 /* raidinit -- complete the rest of the initialization for the
1835    RAIDframe device.  */
1836 
1837 
1838 static void
1839 raidinit(RF_Raid_t *raidPtr)
1840 {
1841 	struct cfdata *cf;
1842 	struct raid_softc *rs;
1843 	int     unit;
1844 
1845 	unit = raidPtr->raidid;
1846 
1847 	rs = &raid_softc[unit];
1848 
1849 	/* XXX should check return code first... */
1850 	rs->sc_flags |= RAIDF_INITED;
1851 
1852 	/* XXX doesn't check bounds. */
1853 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
1854 
1855 	/* attach the pseudo device */
1856 	cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
1857 	cf->cf_name = raid_cd.cd_name;
1858 	cf->cf_atname = raid_cd.cd_name;
1859 	cf->cf_unit = unit;
1860 	cf->cf_fstate = FSTATE_STAR;
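	/*
	 * (cf_fstate is FSTATE_STAR -- i.e. a "raid*"-style, cloneable
	 * cfdata entry -- with the unit picked explicitly via cf_unit,
	 * which is what config_attach_pseudo() below works from.)
	 */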
1861 
1862 	rs->sc_dev = config_attach_pseudo(cf);
1863 
1864 	if (rs->sc_dev==NULL) {
1865 		printf("raid%d: config_attach_pseudo failed\n",
1866 		       raidPtr->raidid);
1867 	}
1868 
1869 	/* disk_attach actually creates space for the CPU disklabel, among
1870 	 * other things, so it's critical to call this *BEFORE* we try putzing
1871 	 * with disklabels. */
1872 
1873 	disk_init(&rs->sc_dkdev, rs->sc_xname, &rf_dkdriver);
1874 	disk_attach(&rs->sc_dkdev);
1875 
1876 	/* XXX There may be a weird interaction here between this, and
1877 	 * protectedSectors, as used in RAIDframe.  */
1878 
1879 	rs->sc_size = raidPtr->totalSectors;
1880 
1881 	dkwedge_discover(&rs->sc_dkdev);
1882 
1883 	rf_set_properties(rs, raidPtr);
1884 
1885 }
1886 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1887 /* wake up the daemon & tell it to get us a spare table
1888  * XXX
1889  * the entries in the queues should be tagged with the raidPtr
1890  * so that in the extremely rare case that two recons happen at once,
1891  * we know for which device we're requesting a spare table
1892  * XXX
1893  *
1894  * XXX This code is not currently used. GO
1895  */
1896 int
1897 rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
1898 {
1899 	int     retcode;
1900 
1901 	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1902 	req->next = rf_sparet_wait_queue;
1903 	rf_sparet_wait_queue = req;
1904 	wakeup(&rf_sparet_wait_queue);
1905 
1906 	/* mpsleep unlocks the mutex */
1907 	while (!rf_sparet_resp_queue) {
1908 		tsleep(&rf_sparet_resp_queue, PRIBIO,
1909 		    "raidframe getsparetable", 0);
1910 	}
1911 	req = rf_sparet_resp_queue;
1912 	rf_sparet_resp_queue = req->next;
1913 	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1914 
1915 	retcode = req->fcol;
1916 	RF_Free(req, sizeof(*req));	/* this is not the same req as we
1917 					 * alloc'd */
1918 	return (retcode);
1919 }
1920 #endif
1921 
1922 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1923  * bp & passes it down.
1924  * any calls originating in the kernel must use non-blocking I/O
1925  * do some extra sanity checking to return "appropriate" error values for
1926  * certain conditions (to make some standard utilities work)
1927  *
1928  * Formerly known as: rf_DoAccessKernel
1929  */
1930 void
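/*
 * (raidstart() is normally reached from raidstrategy(), which queues the
 * buf on rs->buf_queue and then calls here; each pass of the loop below
 * consumes queued bufs for as long as raidPtr->openings permits.)
 */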
1931 raidstart(RF_Raid_t *raidPtr)
1932 {
1933 	RF_SectorCount_t num_blocks, pb, sum;
1934 	RF_RaidAddr_t raid_addr;
1935 	struct partition *pp;
1936 	daddr_t blocknum;
1937 	int     unit;
1938 	struct raid_softc *rs;
1939 	int     do_async;
1940 	struct buf *bp;
1941 	int rc;
1942 
1943 	unit = raidPtr->raidid;
1944 	rs = &raid_softc[unit];
1945 
1946 	/* quick check to see if anything has died recently */
1947 	RF_LOCK_MUTEX(raidPtr->mutex);
1948 	if (raidPtr->numNewFailures > 0) {
1949 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1950 		rf_update_component_labels(raidPtr,
1951 					   RF_NORMAL_COMPONENT_UPDATE);
1952 		RF_LOCK_MUTEX(raidPtr->mutex);
1953 		raidPtr->numNewFailures--;
1954 	}
1955 
1956 	/* Check to see if we're at the limit... */
1957 	while (raidPtr->openings > 0) {
1958 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1959 
1960 		/* get the next item, if any, from the queue */
1961 		if ((bp = BUFQ_GET(rs->buf_queue)) == NULL) {
1962 			/* nothing more to do */
1963 			return;
1964 		}
1965 
1966 		/* Ok, for the bp we have here, bp->b_blkno is relative to the
1967 		 * partition.. Need to make it absolute to the underlying
1968 		 * device.. */
1969 
1970 		blocknum = bp->b_blkno;
1971 		if (DISKPART(bp->b_dev) != RAW_PART) {
1972 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
1973 			blocknum += pp->p_offset;
1974 		}
1975 
1976 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
1977 			    (int) blocknum));
1978 
1979 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1980 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1981 
1982 		/* *THIS* is where we adjust what block we're going to...
1983 		 * but DO NOT TOUCH bp->b_blkno!!! */
1984 		raid_addr = blocknum;
1985 
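		/*
		 * Worked example (illustrative numbers only): with 512-byte
		 * sectors, logBytesPerSector == 9 and sectorMask == 0x1ff,
		 * so a 4608-byte request gives num_blocks == 9 and pb == 0,
		 * while a 4700-byte request would set pb to 1 (a partial
		 * trailing sector) and later be rejected with EINVAL below.
		 * "sum" is the first sector *past* the request; the extra
		 * (sum < raid_addr) etc. comparisons catch arithmetic
		 * wrap-around as well as requests that run off the end of
		 * the array.
		 */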
1986 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1987 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1988 		sum = raid_addr + num_blocks + pb;
1989 		if (1 || rf_debugKernelAccess) {
1990 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1991 				    (int) raid_addr, (int) sum, (int) num_blocks,
1992 				    (int) pb, (int) bp->b_resid));
1993 		}
1994 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1995 		    || (sum < num_blocks) || (sum < pb)) {
1996 			bp->b_error = ENOSPC;
1997 			bp->b_resid = bp->b_bcount;
1998 			biodone(bp);
1999 			RF_LOCK_MUTEX(raidPtr->mutex);
2000 			continue;
2001 		}
2002 		/*
2003 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
2004 		 */
2005 
2006 		if (bp->b_bcount & raidPtr->sectorMask) {
2007 			bp->b_error = EINVAL;
2008 			bp->b_resid = bp->b_bcount;
2009 			biodone(bp);
2010 			RF_LOCK_MUTEX(raidPtr->mutex);
2011 			continue;
2012 
2013 		}
2014 		db1_printf(("Calling DoAccess..\n"));
2015 
2016 
2017 		RF_LOCK_MUTEX(raidPtr->mutex);
2018 		raidPtr->openings--;
2019 		RF_UNLOCK_MUTEX(raidPtr->mutex);
2020 
2021 		/*
2022 		 * Everything is async.
2023 		 */
2024 		do_async = 1;
2025 
2026 		disk_busy(&rs->sc_dkdev);
2027 
2028 		/* XXX we're still at splbio() here... do we *really*
2029 		   need to be? */
2030 
2031 		/* don't ever condition on bp->b_flags & B_WRITE.
2032 		 * always condition on B_READ instead */
2033 
2034 		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
2035 				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
2036 				 do_async, raid_addr, num_blocks,
2037 				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
2038 
2039 		if (rc) {
2040 			bp->b_error = rc;
2041 			bp->b_resid = bp->b_bcount;
2042 			biodone(bp);
2043 			/* continue loop */
2044 		}
2045 
2046 		RF_LOCK_MUTEX(raidPtr->mutex);
2047 	}
2048 	RF_UNLOCK_MUTEX(raidPtr->mutex);
2049 }
2050 
2051 
2052 
2053 
2054 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
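/*
 * (Roughly: for READ/WRITE requests this builds a struct buf via InitBP()
 * and hands it to the component's block device with VOP_STRATEGY(); the
 * completion path comes back through KernelWakeupFunc() below, which is
 * installed as b_iodone.  The NOP case just bumps numOutstanding and
 * "completes" immediately.)
 */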
2055 
2056 int
2057 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
2058 {
2059 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
2060 	struct buf *bp;
2061 
2062 	req->queue = queue;
2063 
2064 #if DIAGNOSTIC
2065 	if (queue->raidPtr->raidid >= numraid) {
2066 		printf("Invalid unit number: %d %d\n", queue->raidPtr->raidid,
2067 		    numraid);
2068 		panic("Invalid Unit number in rf_DispatchKernelIO");
2069 	}
2070 #endif
2071 
2072 	bp = req->bp;
2073 
2074 	switch (req->type) {
2075 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
2076 		/* XXX need to do something extra here.. */
2077 		/* I'm leaving this in, as I've never actually seen it used,
2078 		 * and I'd like folks to report it... GO */
2079 		printf("WAKEUP CALLED\n");
2080 		queue->numOutstanding++;
2081 
2082 		bp->b_flags = 0;
2083 		bp->b_private = req;
2084 
2085 		KernelWakeupFunc(bp);
2086 		break;
2087 
2088 	case RF_IO_TYPE_READ:
2089 	case RF_IO_TYPE_WRITE:
2090 #if RF_ACC_TRACE > 0
2091 		if (req->tracerec) {
2092 			RF_ETIMER_START(req->tracerec->timer);
2093 		}
2094 #endif
2095 		InitBP(bp, queue->rf_cinfo->ci_vp,
2096 		    op, queue->rf_cinfo->ci_dev,
2097 		    req->sectorOffset, req->numSector,
2098 		    req->buf, KernelWakeupFunc, (void *) req,
2099 		    queue->raidPtr->logBytesPerSector, req->b_proc);
2100 
2101 		if (rf_debugKernelAccess) {
2102 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
2103 				(long) bp->b_blkno));
2104 		}
2105 		queue->numOutstanding++;
2106 		queue->last_deq_sector = req->sectorOffset;
2107 		/* acc wouldn't have been let in if there were any pending
2108 		 * reqs at any other priority */
2109 		queue->curPriority = req->priority;
2110 
2111 		db1_printf(("Going for %c to unit %d col %d\n",
2112 			    req->type, queue->raidPtr->raidid,
2113 			    queue->col));
2114 		db1_printf(("sector %d count %d (%d bytes) %d\n",
2115 			(int) req->sectorOffset, (int) req->numSector,
2116 			(int) (req->numSector <<
2117 			    queue->raidPtr->logBytesPerSector),
2118 			(int) queue->raidPtr->logBytesPerSector));
2119 		VOP_STRATEGY(bp->b_vp, bp);
2120 
2121 		break;
2122 
2123 	default:
2124 		panic("bad req->type in rf_DispatchKernelIO");
2125 	}
2126 	db1_printf(("Exiting from DispatchKernelIO\n"));
2127 
2128 	return (0);
2129 }
2130 /* this is the callback function associated with an I/O invoked from
2131    kernel code.
2132  */
2133 static void
2134 KernelWakeupFunc(struct buf *bp)
2135 {
2136 	RF_DiskQueueData_t *req = NULL;
2137 	RF_DiskQueue_t *queue;
2138 	int s;
2139 
2140 	s = splbio();
2141 	db1_printf(("recovering the request queue:\n"));
2142 	req = bp->b_private;
2143 
2144 	queue = (RF_DiskQueue_t *) req->queue;
2145 
2146 #if RF_ACC_TRACE > 0
2147 	if (req->tracerec) {
2148 		RF_ETIMER_STOP(req->tracerec->timer);
2149 		RF_ETIMER_EVAL(req->tracerec->timer);
2150 		RF_LOCK_MUTEX(rf_tracing_mutex);
2151 		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
2152 		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
2153 		req->tracerec->num_phys_ios++;
2154 		RF_UNLOCK_MUTEX(rf_tracing_mutex);
2155 	}
2156 #endif
2157 
2158 	/* XXX Ok, let's get aggressive... If b_error is set, let's go
2159 	 * ballistic, and mark the component as hosed... */
2160 
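	/*
	 * Example (RAID 5, faultsTolerated == 1): the first component that
	 * returns an I/O error is marked rf_ds_failed and the set drops to
	 * rf_rs_degraded; a later error on a second component is still
	 * recorded in req->error below, but that component is not marked
	 * failed, since doing so would exceed the set's fault tolerance.
	 */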
2161 	if (bp->b_error != 0) {
2162 		/* Mark the disk as dead */
2163 		/* but only mark it once... */
2164 		/* and only if it wouldn't leave this RAID set
2165 		   completely broken */
2166 		if (((queue->raidPtr->Disks[queue->col].status ==
2167 		      rf_ds_optimal) ||
2168 		     (queue->raidPtr->Disks[queue->col].status ==
2169 		      rf_ds_used_spare)) &&
2170 		     (queue->raidPtr->numFailures <
2171 		      queue->raidPtr->Layout.map->faultsTolerated)) {
2172 			printf("raid%d: IO Error.  Marking %s as failed.\n",
2173 			       queue->raidPtr->raidid,
2174 			       queue->raidPtr->Disks[queue->col].devname);
2175 			queue->raidPtr->Disks[queue->col].status =
2176 			    rf_ds_failed;
2177 			queue->raidPtr->status = rf_rs_degraded;
2178 			queue->raidPtr->numFailures++;
2179 			queue->raidPtr->numNewFailures++;
2180 		} else {	/* Disk is already dead... */
2181 			/* printf("Disk already marked as dead!\n"); */
2182 		}
2183 
2184 	}
2185 
2186 	/* Fill in the error value */
2187 
2188 	req->error = bp->b_error;
2189 
2190 	simple_lock(&queue->raidPtr->iodone_lock);
2191 
2192 	/* Drop this one on the "finished" queue... */
2193 	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
2194 
2195 	/* Let the raidio thread know there is work to be done. */
2196 	wakeup(&(queue->raidPtr->iodone));
2197 
2198 	simple_unlock(&queue->raidPtr->iodone_lock);
2199 
2200 	splx(s);
2201 }
2202 
2203 
2204 
2205 /*
2206  * initialize a buf structure for doing an I/O in the kernel.
2207  */
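/*
 * (b_resid is primed to the full transfer size since nothing has been
 * transferred yet, and B_CALL marks the buf so that biodone() invokes
 * b_iodone -- here KernelWakeupFunc() -- when the component I/O
 * completes.  cbArg is carried in b_private and recovered there.)
 */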
2208 static void
2209 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
2210        RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
2211        void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
2212        struct proc *b_proc)
2213 {
2214 	/* bp->b_flags       = B_PHYS | rw_flag; */
2215 	bp->b_flags = B_CALL | rw_flag;	/* XXX need B_PHYS here too??? */
2216 	bp->b_bcount = numSect << logBytesPerSector;
2217 	bp->b_bufsize = bp->b_bcount;
2218 	bp->b_error = 0;
2219 	bp->b_dev = dev;
2220 	bp->b_data = bf;
2221 	bp->b_blkno = startSect;
2222 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
2223 	if (bp->b_bcount == 0) {
2224 		panic("bp->b_bcount is zero in InitBP!!");
2225 	}
2226 	bp->b_proc = b_proc;
2227 	bp->b_iodone = cbFunc;
2228 	bp->b_private = cbArg;
2229 	bp->b_vp = b_vp;
2230 	if ((bp->b_flags & B_READ) == 0) {
2231 		bp->b_vp->v_numoutput++;
2232 	}
2233 
2234 }
2235 
2236 static void
2237 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
2238 		    struct disklabel *lp)
2239 {
2240 	memset(lp, 0, sizeof(*lp));
2241 
2242 	/* fabricate a label... */
2243 	lp->d_secperunit = raidPtr->totalSectors;
2244 	lp->d_secsize = raidPtr->bytesPerSector;
2245 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
2246 	lp->d_ntracks = 4 * raidPtr->numCol;
2247 	lp->d_ncylinders = raidPtr->totalSectors /
2248 		(lp->d_nsectors * lp->d_ntracks);
2249 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
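	/*
	 * The geometry above is entirely synthetic.  For example (made-up
	 * numbers): a set exporting 1000000 sectors with 128 data sectors
	 * per stripe and 8 columns gets d_nsectors = 128, d_ntracks = 32,
	 * d_secpercyl = 4096 and d_ncylinders = 244 (integer division).
	 * The track/cylinder figures are mostly there to keep
	 * disklabel-reading code happy.
	 */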
2250 
2251 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2252 	lp->d_type = DTYPE_RAID;
2253 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2254 	lp->d_rpm = 3600;
2255 	lp->d_interleave = 1;
2256 	lp->d_flags = 0;
2257 
2258 	lp->d_partitions[RAW_PART].p_offset = 0;
2259 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2260 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2261 	lp->d_npartitions = RAW_PART + 1;
2262 
2263 	lp->d_magic = DISKMAGIC;
2264 	lp->d_magic2 = DISKMAGIC;
2265 	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
2266 
2267 }
2268 /*
2269  * Read the disklabel from the raid device.  If one is not present, fake one
2270  * up.
2271  */
2272 static void
2273 raidgetdisklabel(dev_t dev)
2274 {
2275 	int     unit = raidunit(dev);
2276 	struct raid_softc *rs = &raid_softc[unit];
2277 	const char   *errstring;
2278 	struct disklabel *lp = rs->sc_dkdev.dk_label;
2279 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
2280 	RF_Raid_t *raidPtr;
2281 
2282 	db1_printf(("Getting the disklabel...\n"));
2283 
2284 	memset(clp, 0, sizeof(*clp));
2285 
2286 	raidPtr = raidPtrs[unit];
2287 
2288 	raidgetdefaultlabel(raidPtr, rs, lp);
2289 
2290 	/*
2291 	 * Call the generic disklabel extraction routine.
2292 	 */
2293 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
2294 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
2295 	if (errstring)
2296 		raidmakedisklabel(rs);
2297 	else {
2298 		int     i;
2299 		struct partition *pp;
2300 
2301 		/*
2302 		 * Sanity check whether the found disklabel is valid.
2303 		 *
2304 		 * This is necessary since the total size of the raid device
2305 		 * may vary when the interleave is changed even though exactly
2306 		 * the same components are used, and an old disklabel may be
2307 		 * used if one is found.
2308 		 */
2309 		if (lp->d_secperunit != rs->sc_size)
2310 			printf("raid%d: WARNING: %s: "
2311 			    "total sector size in disklabel (%d) != "
2312 			    "the size of raid (%ld)\n", unit, rs->sc_xname,
2313 			    lp->d_secperunit, (long) rs->sc_size);
2314 		for (i = 0; i < lp->d_npartitions; i++) {
2315 			pp = &lp->d_partitions[i];
2316 			if (pp->p_offset + pp->p_size > rs->sc_size)
2317 				printf("raid%d: WARNING: %s: end of partition `%c' "
2318 				       "exceeds the size of raid (%ld)\n",
2319 				       unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
2320 		}
2321 	}
2322 
2323 }
2324 /*
2325  * Take care of things one might want to take care of in the event
2326  * that a disklabel isn't present.
2327  */
2328 static void
2329 raidmakedisklabel(struct raid_softc *rs)
2330 {
2331 	struct disklabel *lp = rs->sc_dkdev.dk_label;
2332 	db1_printf(("Making a label..\n"));
2333 
2334 	/*
2335 	 * For historical reasons, if there's no disklabel present
2336 	 * the raw partition must be marked FS_BSDFFS.
2337 	 */
2338 
2339 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2340 
2341 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2342 
2343 	lp->d_checksum = dkcksum(lp);
2344 }
2345 /*
2346  * Wait interruptibly for an exclusive lock.
2347  *
2348  * XXX
2349  * Several drivers do this; it should be abstracted and made MP-safe.
2350  * (Hmm... where have we seen this warning before :->  GO )
2351  */
2352 static int
2353 raidlock(struct raid_softc *rs)
2354 {
2355 	int     error;
2356 
2357 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2358 		rs->sc_flags |= RAIDF_WANTED;
2359 		if ((error =
2360 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2361 			return (error);
2362 	}
2363 	rs->sc_flags |= RAIDF_LOCKED;
2364 	return (0);
2365 }
2366 /*
2367  * Unlock and wake up any waiters.
2368  */
2369 static void
2370 raidunlock(struct raid_softc *rs)
2371 {
2372 
2373 	rs->sc_flags &= ~RAIDF_LOCKED;
2374 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2375 		rs->sc_flags &= ~RAIDF_WANTED;
2376 		wakeup(rs);
2377 	}
2378 }
2379 
2380 
2381 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
2382 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
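/*
 * (So with DEV_BSIZE == 512 the component label is read/written at block
 * 32 of each component, i.e. inside the reserved area at the start of the
 * component (see rf_protectedSectors) that RAIDframe keeps clear of
 * user data.)
 */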
2383 
2384 int
2385 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
2386 {
2387 	RF_ComponentLabel_t clabel;
2388 	raidread_component_label(dev, b_vp, &clabel);
2389 	clabel.mod_counter = mod_counter;
2390 	clabel.clean = RF_RAID_CLEAN;
2391 	raidwrite_component_label(dev, b_vp, &clabel);
2392 	return(0);
2393 }
2394 
2395 
2396 int
2397 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
2398 {
2399 	RF_ComponentLabel_t clabel;
2400 	raidread_component_label(dev, b_vp, &clabel);
2401 	clabel.mod_counter = mod_counter;
2402 	clabel.clean = RF_RAID_DIRTY;
2403 	raidwrite_component_label(dev, b_vp, &clabel);
2404 	return(0);
2405 }
2406 
2407 /* ARGSUSED */
2408 int
2409 raidread_component_label(dev_t dev, struct vnode *b_vp,
2410 			 RF_ComponentLabel_t *clabel)
2411 {
2412 	struct buf *bp;
2413 	const struct bdevsw *bdev;
2414 	int error;
2415 
2416 	/* XXX should probably ensure that we don't try to do this if
2417 	   someone has changed rf_protected_sectors. */
2418 
2419 	if (b_vp == NULL) {
2420 		/* For whatever reason, this component is not valid.
2421 		   Don't try to read a component label from it. */
2422 		return(EINVAL);
2423 	}
2424 
2425 	/* get a block of the appropriate size... */
2426 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2427 	bp->b_dev = dev;
2428 
2429 	/* get our ducks in a row for the read */
2430 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2431 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2432 	bp->b_flags |= B_READ;
2433 	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2434 
2435 	bdev = bdevsw_lookup(bp->b_dev);
2436 	if (bdev == NULL)
2437 		return (ENXIO);
2438 	(*bdev->d_strategy)(bp);
2439 
2440 	error = biowait(bp);
2441 
2442 	if (!error) {
2443 		memcpy(clabel, bp->b_data,
2444 		       sizeof(RF_ComponentLabel_t));
2445 	}
2446 
2447 	brelse(bp, 0);
2448 	return(error);
2449 }
2450 /* ARGSUSED */
2451 int
2452 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
2453 			  RF_ComponentLabel_t *clabel)
2454 {
2455 	struct buf *bp;
2456 	const struct bdevsw *bdev;
2457 	int error;
2458 
2459 	/* get a block of the appropriate size... */
2460 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2461 	bp->b_dev = dev;
2462 
2463 	/* get our ducks in a row for the write */
2464 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2465 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2466 	bp->b_flags |= B_WRITE;
2467 	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2468 
2469 	memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
2470 
2471 	memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
2472 
2473 	bdev = bdevsw_lookup(bp->b_dev);
2474 	if (bdev == NULL)
2475 		return (ENXIO);
2476 	(*bdev->d_strategy)(bp);
2477 	error = biowait(bp);
2478 	brelse(bp, 0);
2479 	if (error) {
2480 #if 1
2481 		printf("Failed to write RAID component info!\n");
2482 #endif
2483 	}
2484 
2485 	return(error);
2486 }
2487 
2488 void
2489 rf_markalldirty(RF_Raid_t *raidPtr)
2490 {
2491 	RF_ComponentLabel_t clabel;
2492 	int sparecol;
2493 	int c;
2494 	int j;
2495 	int scol = -1;
2496 
2497 	raidPtr->mod_counter++;
2498 	for (c = 0; c < raidPtr->numCol; c++) {
2499 		/* we don't want to touch (at all) a disk that has
2500 		   failed */
2501 		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
2502 			raidread_component_label(
2503 						 raidPtr->Disks[c].dev,
2504 						 raidPtr->raid_cinfo[c].ci_vp,
2505 						 &clabel);
2506 			if (clabel.status == rf_ds_spared) {
2507 				/* XXX do something special...
2508 				   but whatever you do, don't
2509 				   try to access it!! */
2510 			} else {
2511 				raidmarkdirty(
2512 					      raidPtr->Disks[c].dev,
2513 					      raidPtr->raid_cinfo[c].ci_vp,
2514 					      raidPtr->mod_counter);
2515 			}
2516 		}
2517 	}
2518 
2519 	for( c = 0; c < raidPtr->numSpare ; c++) {
2520 		sparecol = raidPtr->numCol + c;
2521 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2522 			/*
2523 
2524 			   we claim this disk is "optimal" if it's
2525 			   rf_ds_used_spare, as that means it should be
2526 			   directly substitutable for the disk it replaced.
2527 			   We note that too...
2528 
2529 			 */
2530 
2531 			for(j=0;j<raidPtr->numCol;j++) {
2532 				if (raidPtr->Disks[j].spareCol == sparecol) {
2533 					scol = j;
2534 					break;
2535 				}
2536 			}
2537 
2538 			raidread_component_label(
2539 				 raidPtr->Disks[sparecol].dev,
2540 				 raidPtr->raid_cinfo[sparecol].ci_vp,
2541 				 &clabel);
2542 			/* make sure status is noted */
2543 
2544 			raid_init_component_label(raidPtr, &clabel);
2545 
2546 			clabel.row = 0;
2547 			clabel.column = scol;
2548 			/* Note: we *don't* change status from rf_ds_used_spare
2549 			   to rf_ds_optimal */
2550 			/* clabel.status = rf_ds_optimal; */
2551 
2552 			raidmarkdirty(raidPtr->Disks[sparecol].dev,
2553 				      raidPtr->raid_cinfo[sparecol].ci_vp,
2554 				      raidPtr->mod_counter);
2555 		}
2556 	}
2557 }
2558 
2559 
2560 void
2561 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
2562 {
2563 	RF_ComponentLabel_t clabel;
2564 	int sparecol;
2565 	int c;
2566 	int j;
2567 	int scol;
2568 
2569 	scol = -1;
2570 
2571 	/* XXX should do extra checks to make sure things really are clean,
2572 	   rather than blindly setting the clean bit... */
2573 
2574 	raidPtr->mod_counter++;
2575 
2576 	for (c = 0; c < raidPtr->numCol; c++) {
2577 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
2578 			raidread_component_label(
2579 						 raidPtr->Disks[c].dev,
2580 						 raidPtr->raid_cinfo[c].ci_vp,
2581 						 &clabel);
2582 			/* make sure status is noted */
2583 			clabel.status = rf_ds_optimal;
2584 
2585 			/* bump the counter */
2586 			clabel.mod_counter = raidPtr->mod_counter;
2587 
2588 			/* note what unit we are configured as */
2589 			clabel.last_unit = raidPtr->raidid;
2590 
2591 			raidwrite_component_label(
2592 						  raidPtr->Disks[c].dev,
2593 						  raidPtr->raid_cinfo[c].ci_vp,
2594 						  &clabel);
2595 			if (final == RF_FINAL_COMPONENT_UPDATE) {
2596 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
2597 					raidmarkclean(
2598 						      raidPtr->Disks[c].dev,
2599 						      raidPtr->raid_cinfo[c].ci_vp,
2600 						      raidPtr->mod_counter);
2601 				}
2602 			}
2603 		}
2604 		/* else we don't touch it.. */
2605 	}
2606 
2607 	for( c = 0; c < raidPtr->numSpare ; c++) {
2608 		sparecol = raidPtr->numCol + c;
2609 		/* Need to ensure that the reconstruct actually completed! */
2610 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2611 			/*
2612 
2613 			   we claim this disk is "optimal" if it's
2614 			   rf_ds_used_spare, as that means it should be
2615 			   directly substitutable for the disk it replaced.
2616 			   We note that too...
2617 
2618 			 */
2619 
2620 			for(j=0;j<raidPtr->numCol;j++) {
2621 				if (raidPtr->Disks[j].spareCol == sparecol) {
2622 					scol = j;
2623 					break;
2624 				}
2625 			}
2626 
2627 			/* XXX shouldn't *really* need this... */
2628 			raidread_component_label(
2629 				      raidPtr->Disks[sparecol].dev,
2630 				      raidPtr->raid_cinfo[sparecol].ci_vp,
2631 				      &clabel);
2632 			/* make sure status is noted */
2633 
2634 			raid_init_component_label(raidPtr, &clabel);
2635 
2636 			clabel.mod_counter = raidPtr->mod_counter;
2637 			clabel.column = scol;
2638 			clabel.status = rf_ds_optimal;
2639 			clabel.last_unit = raidPtr->raidid;
2640 
2641 			raidwrite_component_label(
2642 				      raidPtr->Disks[sparecol].dev,
2643 				      raidPtr->raid_cinfo[sparecol].ci_vp,
2644 				      &clabel);
2645 			if (final == RF_FINAL_COMPONENT_UPDATE) {
2646 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
2647 					raidmarkclean( raidPtr->Disks[sparecol].dev,
2648 						       raidPtr->raid_cinfo[sparecol].ci_vp,
2649 						       raidPtr->mod_counter);
2650 				}
2651 			}
2652 		}
2653 	}
2654 }
2655 
2656 void
2657 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2658 {
2659 
2660 	if (vp != NULL) {
2661 		if (auto_configured == 1) {
2662 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2663 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2664 			vput(vp);
2665 
2666 		} else {
2667 			(void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred, curlwp);
2668 		}
2669 	}
2670 }
2671 
2672 
2673 void
2674 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2675 {
2676 	int r,c;
2677 	struct vnode *vp;
2678 	int acd;
2679 
2680 
2681 	/* We take this opportunity to close the vnodes like we should.. */
2682 
2683 	for (c = 0; c < raidPtr->numCol; c++) {
2684 		vp = raidPtr->raid_cinfo[c].ci_vp;
2685 		acd = raidPtr->Disks[c].auto_configured;
2686 		rf_close_component(raidPtr, vp, acd);
2687 		raidPtr->raid_cinfo[c].ci_vp = NULL;
2688 		raidPtr->Disks[c].auto_configured = 0;
2689 	}
2690 
2691 	for (r = 0; r < raidPtr->numSpare; r++) {
2692 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2693 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2694 		rf_close_component(raidPtr, vp, acd);
2695 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2696 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2697 	}
2698 }
2699 
2700 
2701 void
2702 rf_ReconThread(struct rf_recon_req *req)
2703 {
2704 	int     s;
2705 	RF_Raid_t *raidPtr;
2706 
2707 	s = splbio();
2708 	raidPtr = (RF_Raid_t *) req->raidPtr;
2709 	raidPtr->recon_in_progress = 1;
2710 
2711 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
2712 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2713 
2714 	RF_Free(req, sizeof(*req));
2715 
2716 	raidPtr->recon_in_progress = 0;
2717 	splx(s);
2718 
2719 	/* That's all... */
2720 	kthread_exit(0);	/* does not return */
2721 }
2722 
2723 void
2724 rf_RewriteParityThread(RF_Raid_t *raidPtr)
2725 {
2726 	int retcode;
2727 	int s;
2728 
2729 	raidPtr->parity_rewrite_stripes_done = 0;
2730 	raidPtr->parity_rewrite_in_progress = 1;
2731 	s = splbio();
2732 	retcode = rf_RewriteParity(raidPtr);
2733 	splx(s);
2734 	if (retcode) {
2735 		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2736 	} else {
2737 		/* set the clean bit!  If we shutdown correctly,
2738 		   the clean bit on each component label will get
2739 		   set */
2740 		raidPtr->parity_good = RF_RAID_CLEAN;
2741 	}
2742 	raidPtr->parity_rewrite_in_progress = 0;
2743 
2744 	/* Anyone waiting for us to stop?  If so, inform them... */
2745 	if (raidPtr->waitShutdown) {
2746 		wakeup(&raidPtr->parity_rewrite_in_progress);
2747 	}
2748 
2749 	/* That's all... */
2750 	kthread_exit(0);	/* does not return */
2751 }
2752 
2753 
2754 void
2755 rf_CopybackThread(RF_Raid_t *raidPtr)
2756 {
2757 	int s;
2758 
2759 	raidPtr->copyback_in_progress = 1;
2760 	s = splbio();
2761 	rf_CopybackReconstructedData(raidPtr);
2762 	splx(s);
2763 	raidPtr->copyback_in_progress = 0;
2764 
2765 	/* That's all... */
2766 	kthread_exit(0);	/* does not return */
2767 }
2768 
2769 
2770 void
2771 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
2772 {
2773 	int s;
2774 	RF_Raid_t *raidPtr;
2775 
2776 	s = splbio();
2777 	raidPtr = req->raidPtr;
2778 	raidPtr->recon_in_progress = 1;
2779 	rf_ReconstructInPlace(raidPtr, req->col);
2780 	RF_Free(req, sizeof(*req));
2781 	raidPtr->recon_in_progress = 0;
2782 	splx(s);
2783 
2784 	/* That's all... */
2785 	kthread_exit(0);	/* does not return */
2786 }
2787 
2788 static RF_AutoConfig_t *
2789 rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
2790     const char *cname, RF_SectorCount_t size)
2791 {
2792 	int good_one = 0;
2793 	RF_ComponentLabel_t *clabel;
2794 	RF_AutoConfig_t *ac;
2795 
2796 	clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
2797 	if (clabel == NULL) {
2798 oomem:
2799 		    while(ac_list) {
2800 			    ac = ac_list;
2801 			    if (ac->clabel)
2802 				    free(ac->clabel, M_RAIDFRAME);
2803 			    ac_list = ac_list->next;
2804 			    free(ac, M_RAIDFRAME);
2805 		    }
2806 		    printf("RAID auto config: out of memory!\n");
2807 		    return NULL; /* XXX probably should panic? */
2808 	}
2809 
2810 	if (!raidread_component_label(dev, vp, clabel)) {
2811 		    /* Got the label.  Does it look reasonable? */
2812 		    if (rf_reasonable_label(clabel) &&
2813 			(clabel->partitionSize <= size)) {
2814 #ifdef DEBUG
2815 			    printf("Component on: %s: %llu\n",
2816 				cname, (unsigned long long)size);
2817 			    rf_print_component_label(clabel);
2818 #endif
2819 			    /* if it's reasonable, add it, else ignore it. */
2820 			    ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
2821 				M_NOWAIT);
2822 			    if (ac == NULL) {
2823 				    free(clabel, M_RAIDFRAME);
2824 				    goto oomem;
2825 			    }
2826 			    strlcpy(ac->devname, cname, sizeof(ac->devname));
2827 			    ac->dev = dev;
2828 			    ac->vp = vp;
2829 			    ac->clabel = clabel;
2830 			    ac->next = ac_list;
2831 			    ac_list = ac;
2832 			    good_one = 1;
2833 		    }
2834 	}
2835 	if (!good_one) {
2836 		/* cleanup */
2837 		free(clabel, M_RAIDFRAME);
2838 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2839 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2840 		vput(vp);
2841 	}
2842 	return ac_list;
2843 }
2844 
2845 RF_AutoConfig_t *
2846 rf_find_raid_components(void)
2847 {
2848 	struct vnode *vp;
2849 	struct disklabel label;
2850 	struct device *dv;
2851 	dev_t dev;
2852 	int bmajor, bminor, wedge;
2853 	int error;
2854 	int i;
2855 	RF_AutoConfig_t *ac_list;
2856 
2857 
2858 	/* initialize the AutoConfig list */
2859 	ac_list = NULL;
2860 
2861 	/* we begin by trolling through *all* the devices on the system */
2862 
2863 	for (dv = alldevs.tqh_first; dv != NULL;
2864 	     dv = dv->dv_list.tqe_next) {
2865 
2866 		/* we are only interested in disks... */
2867 		if (device_class(dv) != DV_DISK)
2868 			continue;
2869 
2870 		/* we don't care about floppies... */
2871 		if (device_is_a(dv, "fd")) {
2872 			continue;
2873 		}
2874 
2875 		/* we don't care about CD's... */
2876 		if (device_is_a(dv, "cd")) {
2877 			continue;
2878 		}
2879 
2880 		/* hdfd is the Atari/Hades floppy driver */
2881 		if (device_is_a(dv, "hdfd")) {
2882 			continue;
2883 		}
2884 
2885 		/* fdisa is the Atari/Milan floppy driver */
2886 		if (device_is_a(dv, "fdisa")) {
2887 			continue;
2888 		}
2889 
2890 		/* need to find the device_name_to_block_device_major stuff */
2891 		bmajor = devsw_name2blk(dv->dv_xname, NULL, 0);
2892 
2893 		/* get a vnode for the raw partition of this disk */
2894 
2895 		wedge = device_is_a(dv, "dk");
2896 		bminor = minor(device_unit(dv));
2897 		dev = wedge ? makedev(bmajor, bminor) :
2898 		    MAKEDISKDEV(bmajor, bminor, RAW_PART);
2899 		if (bdevvp(dev, &vp))
2900 			panic("RAID can't alloc vnode");
2901 
2902 		error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2903 
2904 		if (error) {
2905 			/* "Who cares."  Continue looking
2906 			   for something that exists */
2907 			vput(vp);
2908 			continue;
2909 		}
2910 
2911 		if (wedge) {
2912 			struct dkwedge_info dkw;
2913 			error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
2914 			    NOCRED, 0);
2915 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2916 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2917 			vput(vp);
2918 			if (error) {
2919 				printf("RAIDframe: can't get wedge info for "
2920 				    "dev %s (%d)\n", dv->dv_xname, error);
2921 				continue;
2922 			}
2923 
2924 			if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0)
2925 				continue;
2926 
2927 			ac_list = rf_get_component(ac_list, dev, vp,
2928 			    dv->dv_xname, dkw.dkw_size);
2929 			continue;
2930 		}
2931 
2932 		/* Ok, the disk exists.  Go get the disklabel. */
2933 		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED, 0);
2934 		if (error) {
2935 			/*
2936 			 * XXX can't happen - open() would
2937 			 * have errored out (or faked up one)
2938 			 */
2939 			if (error != ENOTTY)
2940 				printf("RAIDframe: can't get label for dev "
2941 				    "%s (%d)\n", dv->dv_xname, error);
2942 		}
2943 
2944 		/* don't need this any more.  We'll allocate it again
2945 		   a little later if we really do... */
2946 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2947 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2948 		vput(vp);
2949 
2950 		if (error)
2951 			continue;
2952 
2953 		for (i = 0; i < label.d_npartitions; i++) {
2954 			char cname[sizeof(ac_list->devname)];
2955 
2956 			/* We only support partitions marked as RAID */
2957 			if (label.d_partitions[i].p_fstype != FS_RAID)
2958 				continue;
2959 
2960 			dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
2961 			if (bdevvp(dev, &vp))
2962 				panic("RAID can't alloc vnode");
2963 
2964 			error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2965 			if (error) {
2966 				/* Whatever... */
2967 				vput(vp);
2968 				continue;
2969 			}
2970 			snprintf(cname, sizeof(cname), "%s%c",
2971 			    dv->dv_xname, 'a' + i);
2972 			ac_list = rf_get_component(ac_list, dev, vp, cname,
2973 				label.d_partitions[i].p_size);
2974 		}
2975 	}
2976 	return ac_list;
2977 }
2978 
2979 
2980 static int
2981 rf_reasonable_label(RF_ComponentLabel_t *clabel)
2982 {
2983 
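	/*
	 * Only internal consistency is checked here; whether the label's
	 * partitionSize actually fits the component it was read from is
	 * checked by the caller (see rf_get_component() above).
	 */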
2984 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
2985 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
2986 	    ((clabel->clean == RF_RAID_CLEAN) ||
2987 	     (clabel->clean == RF_RAID_DIRTY)) &&
2988 	    clabel->row >=0 &&
2989 	    clabel->column >= 0 &&
2990 	    clabel->num_rows > 0 &&
2991 	    clabel->num_columns > 0 &&
2992 	    clabel->row < clabel->num_rows &&
2993 	    clabel->column < clabel->num_columns &&
2994 	    clabel->blockSize > 0 &&
2995 	    clabel->numBlocks > 0) {
2996 		/* label looks reasonable enough... */
2997 		return(1);
2998 	}
2999 	return(0);
3000 }
3001 
3002 
3003 #ifdef DEBUG
3004 void
3005 rf_print_component_label(RF_ComponentLabel_t *clabel)
3006 {
3007 	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
3008 	       clabel->row, clabel->column,
3009 	       clabel->num_rows, clabel->num_columns);
3010 	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
3011 	       clabel->version, clabel->serial_number,
3012 	       clabel->mod_counter);
3013 	printf("   Clean: %s Status: %d\n",
3014 	       clabel->clean ? "Yes" : "No", clabel->status );
3015 	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
3016 	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
3017 	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
3018 	       (char) clabel->parityConfig, clabel->blockSize,
3019 	       clabel->numBlocks);
3020 	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
3021 	printf("   Contains root partition: %s\n",
3022 	       clabel->root_partition ? "Yes" : "No" );
3023 	printf("   Last configured as: raid%d\n", clabel->last_unit );
3024 #if 0
3025 	   printf("   Config order: %d\n", clabel->config_order);
3026 #endif
3027 
3028 }
3029 #endif
3030 
3031 RF_ConfigSet_t *
3032 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
3033 {
3034 	RF_AutoConfig_t *ac;
3035 	RF_ConfigSet_t *config_sets;
3036 	RF_ConfigSet_t *cset;
3037 	RF_AutoConfig_t *ac_next;
3038 
3039 
3040 	config_sets = NULL;
3041 
3042 	/* Go through the AutoConfig list, and figure out which components
3043 	   belong to what sets.  */
3044 	ac = ac_list;
3045 	while(ac!=NULL) {
3046 		/* we're going to putz with ac->next, so save it here
3047 		   for use at the end of the loop */
3048 		ac_next = ac->next;
3049 
3050 		if (config_sets == NULL) {
3051 			/* will need at least this one... */
3052 			config_sets = (RF_ConfigSet_t *)
3053 				malloc(sizeof(RF_ConfigSet_t),
3054 				       M_RAIDFRAME, M_NOWAIT);
3055 			if (config_sets == NULL) {
3056 				panic("rf_create_auto_sets: No memory!");
3057 			}
3058 			/* this one is easy :) */
3059 			config_sets->ac = ac;
3060 			config_sets->next = NULL;
3061 			config_sets->rootable = 0;
3062 			ac->next = NULL;
3063 		} else {
3064 			/* which set does this component fit into? */
3065 			cset = config_sets;
3066 			while(cset!=NULL) {
3067 				if (rf_does_it_fit(cset, ac)) {
3068 					/* looks like it matches... */
3069 					ac->next = cset->ac;
3070 					cset->ac = ac;
3071 					break;
3072 				}
3073 				cset = cset->next;
3074 			}
3075 			if (cset==NULL) {
3076 				/* didn't find a match above... new set..*/
3077 				cset = (RF_ConfigSet_t *)
3078 					malloc(sizeof(RF_ConfigSet_t),
3079 					       M_RAIDFRAME, M_NOWAIT);
3080 				if (cset == NULL) {
3081 					panic("rf_create_auto_sets: No memory!");
3082 				}
3083 				cset->ac = ac;
3084 				ac->next = NULL;
3085 				cset->next = config_sets;
3086 				cset->rootable = 0;
3087 				config_sets = cset;
3088 			}
3089 		}
3090 		ac = ac_next;
3091 	}
3092 
3093 
3094 	return(config_sets);
3095 }
3096 
3097 static int
3098 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
3099 {
3100 	RF_ComponentLabel_t *clabel1, *clabel2;
3101 
3102 	/* If this one matches the *first* one in the set, that's good
3103 	   enough, since the other members of the set would have been
3104 	   through here too... */
3105 	/* note that we are not checking partitionSize here..
3106 
3107 	   Note that we are also not checking the mod_counters here.
3108 	   If everything else matches except the mod_counter, that's
3109 	   good enough for this test.  We will deal with the mod_counters
3110 	   a little later in the autoconfiguration process.
3111 
3112 	    (clabel1->mod_counter == clabel2->mod_counter) &&
3113 
3114 	   The reason we don't check for this is that failed disks
3115 	   will have lower modification counts.  If those disks are
3116 	   not added to the set they used to belong to, then they will
3117 	   form their own set, which may result in 2 different sets,
3118 	   for example, competing to be configured at raid0, and
3119 	   perhaps competing to be the root filesystem set.  If the
3120 	   wrong ones get configured, or both attempt to become /,
3121 	   weird behaviour and/or serious lossage will occur.  Thus we
3122 	   need to bring them into the fold here, and kick them out at
3123 	   a later point.
3124 
3125 	*/
3126 
3127 	clabel1 = cset->ac->clabel;
3128 	clabel2 = ac->clabel;
3129 	if ((clabel1->version == clabel2->version) &&
3130 	    (clabel1->serial_number == clabel2->serial_number) &&
3131 	    (clabel1->num_rows == clabel2->num_rows) &&
3132 	    (clabel1->num_columns == clabel2->num_columns) &&
3133 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
3134 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
3135 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
3136 	    (clabel1->parityConfig == clabel2->parityConfig) &&
3137 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
3138 	    (clabel1->blockSize == clabel2->blockSize) &&
3139 	    (clabel1->numBlocks == clabel2->numBlocks) &&
3140 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
3141 	    (clabel1->root_partition == clabel2->root_partition) &&
3142 	    (clabel1->last_unit == clabel2->last_unit) &&
3143 	    (clabel1->config_order == clabel2->config_order)) {
3144 		/* if it gets here, it almost *has* to be a match */
3145 	} else {
3146 		/* it's not consistent with somebody in the set..
3147 		   punt */
3148 		return(0);
3149 	}
3150 	/* all was fine.. it must fit... */
3151 	return(1);
3152 }
3153 
3154 int
3155 rf_have_enough_components(RF_ConfigSet_t *cset)
3156 {
3157 	RF_AutoConfig_t *ac;
3158 	RF_AutoConfig_t *auto_config;
3159 	RF_ComponentLabel_t *clabel;
3160 	int c;
3161 	int num_cols;
3162 	int num_missing;
3163 	int mod_counter;
3164 	int mod_counter_found;
3165 	int even_pair_failed;
3166 	char parity_type;
3167 
3168 
3169 	/* check to see that we have enough 'live' components
3170 	   of this set.  If so, we can configure it if necessary */
3171 
3172 	num_cols = cset->ac->clabel->num_columns;
3173 	parity_type = cset->ac->clabel->parityConfig;
3174 
3175 	/* XXX Check for duplicate components!?!?!? */
3176 
3177 	/* Determine what the mod_counter is supposed to be for this set. */
3178 
3179 	mod_counter_found = 0;
3180 	mod_counter = 0;
3181 	ac = cset->ac;
3182 	while(ac!=NULL) {
3183 		if (mod_counter_found==0) {
3184 			mod_counter = ac->clabel->mod_counter;
3185 			mod_counter_found = 1;
3186 		} else {
3187 			if (ac->clabel->mod_counter > mod_counter) {
3188 				mod_counter = ac->clabel->mod_counter;
3189 			}
3190 		}
3191 		ac = ac->next;
3192 	}
3193 
3194 	num_missing = 0;
3195 	auto_config = cset->ac;
3196 
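	/*
	 * Walk the columns and see which ones have a live component at the
	 * current mod_counter.  Illustration of the RAID 1 special case
	 * below: with 4 columns the mirrors are the pairs (0,1) and (2,3);
	 * losing both halves of one pair is fatal, while losing one
	 * component from each pair still leaves a working set.
	 */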
3197 	even_pair_failed = 0;
3198 	for(c=0; c<num_cols; c++) {
3199 		ac = auto_config;
3200 		while(ac!=NULL) {
3201 			if ((ac->clabel->column == c) &&
3202 			    (ac->clabel->mod_counter == mod_counter)) {
3203 				/* it's this one... */
3204 #ifdef DEBUG
3205 				printf("Found: %s at %d\n",
3206 				       ac->devname,c);
3207 #endif
3208 				break;
3209 			}
3210 			ac=ac->next;
3211 		}
3212 		if (ac==NULL) {
3213 				/* Didn't find one here! */
3214 				/* special case for RAID 1, especially
3215 				   where there are more than 2
3216 				   components (where RAIDframe treats
3217 				   things a little differently :( ) */
3218 			if (parity_type == '1') {
3219 				if (c%2 == 0) { /* even component */
3220 					even_pair_failed = 1;
3221 				} else { /* odd component.  If
3222 					    we're failed, and
3223 					    so is the even
3224 					    component, it's
3225 					    "Good Night, Charlie" */
3226 					if (even_pair_failed == 1) {
3227 						return(0);
3228 					}
3229 				}
3230 			} else {
3231 				/* normal accounting */
3232 				num_missing++;
3233 			}
3234 		}
3235 		if ((parity_type == '1') && (c%2 == 1)) {
3236 				/* Just did an even component, and we didn't
3237 				   bail.. reset the even_pair_failed flag,
3238 				   and go on to the next component.... */
3239 			even_pair_failed = 0;
3240 		}
3241 	}
3242 
3243 	clabel = cset->ac->clabel;
3244 
3245 	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3246 	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3247 	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
3248 		/* XXX this needs to be made *much* more general */
3249 		/* Too many failures */
3250 		return(0);
3251 	}
3252 	/* otherwise, all is well, and we've got enough to take a kick
3253 	   at autoconfiguring this set */
3254 	return(1);
3255 }
3256 
3257 void
3258 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
3259 			RF_Raid_t *raidPtr)
3260 {
3261 	RF_ComponentLabel_t *clabel;
3262 	int i;
3263 
3264 	clabel = ac->clabel;
3265 
3266 	/* 1. Fill in the common stuff */
3267 	config->numRow = clabel->num_rows = 1;
3268 	config->numCol = clabel->num_columns;
3269 	config->numSpare = 0; /* XXX should this be set here? */
3270 	config->sectPerSU = clabel->sectPerSU;
3271 	config->SUsPerPU = clabel->SUsPerPU;
3272 	config->SUsPerRU = clabel->SUsPerRU;
3273 	config->parityConfig = clabel->parityConfig;
3274 	/* XXX... */
3275 	strcpy(config->diskQueueType,"fifo");
3276 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3277 	config->layoutSpecificSize = 0; /* XXX ?? */
3278 
3279 	while(ac!=NULL) {
3280 		/* row/col values will be in range due to the checks
3281 		   in reasonable_label() */
3282 		strcpy(config->devnames[0][ac->clabel->column],
3283 		       ac->devname);
3284 		ac = ac->next;
3285 	}
3286 
3287 	for(i=0;i<RF_MAXDBGV;i++) {
3288 		config->debugVars[i][0] = 0;
3289 	}
3290 }
3291 
3292 int
3293 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3294 {
3295 	RF_ComponentLabel_t clabel;
3296 	struct vnode *vp;
3297 	dev_t dev;
3298 	int column;
3299 	int sparecol;
3300 
3301 	raidPtr->autoconfigure = new_value;
3302 
3303 	for(column=0; column<raidPtr->numCol; column++) {
3304 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
3305 			dev = raidPtr->Disks[column].dev;
3306 			vp = raidPtr->raid_cinfo[column].ci_vp;
3307 			raidread_component_label(dev, vp, &clabel);
3308 			clabel.autoconfigure = new_value;
3309 			raidwrite_component_label(dev, vp, &clabel);
3310 		}
3311 	}
3312 	for(column = 0; column < raidPtr->numSpare ; column++) {
3313 		sparecol = raidPtr->numCol + column;
3314 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3315 			dev = raidPtr->Disks[sparecol].dev;
3316 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
3317 			raidread_component_label(dev, vp, &clabel);
3318 			clabel.autoconfigure = new_value;
3319 			raidwrite_component_label(dev, vp, &clabel);
3320 		}
3321 	}
3322 	return(new_value);
3323 }
3324 
3325 int
3326 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3327 {
3328 	RF_ComponentLabel_t clabel;
3329 	struct vnode *vp;
3330 	dev_t dev;
3331 	int column;
3332 	int sparecol;
3333 
3334 	raidPtr->root_partition = new_value;
3335 	for(column=0; column<raidPtr->numCol; column++) {
3336 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
3337 			dev = raidPtr->Disks[column].dev;
3338 			vp = raidPtr->raid_cinfo[column].ci_vp;
3339 			raidread_component_label(dev, vp, &clabel);
3340 			clabel.root_partition = new_value;
3341 			raidwrite_component_label(dev, vp, &clabel);
3342 		}
3343 	}
3344 	for(column = 0; column < raidPtr->numSpare ; column++) {
3345 		sparecol = raidPtr->numCol + column;
3346 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3347 			dev = raidPtr->Disks[sparecol].dev;
3348 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
3349 			raidread_component_label(dev, vp, &clabel);
3350 			clabel.root_partition = new_value;
3351 			raidwrite_component_label(dev, vp, &clabel);
3352 		}
3353 	}
3354 	return(new_value);
3355 }
3356 
3357 void
3358 rf_release_all_vps(RF_ConfigSet_t *cset)
3359 {
3360 	RF_AutoConfig_t *ac;
3361 
3362 	ac = cset->ac;
3363 	while(ac!=NULL) {
3364 		/* Close the vp, and give it back */
3365 		if (ac->vp) {
3366 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3367 			VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
3368 			vput(ac->vp);
3369 			ac->vp = NULL;
3370 		}
3371 		ac = ac->next;
3372 	}
3373 }
3374 
3375 
3376 void
3377 rf_cleanup_config_set(RF_ConfigSet_t *cset)
3378 {
3379 	RF_AutoConfig_t *ac;
3380 	RF_AutoConfig_t *next_ac;
3381 
3382 	ac = cset->ac;
3383 	while(ac!=NULL) {
3384 		next_ac = ac->next;
3385 		/* nuke the label */
3386 		free(ac->clabel, M_RAIDFRAME);
3387 		/* cleanup the config structure */
3388 		free(ac, M_RAIDFRAME);
3389 		/* "next.." */
3390 		ac = next_ac;
3391 	}
3392 	/* and, finally, nuke the config set */
3393 	free(cset, M_RAIDFRAME);
3394 }
3395 
3396 
3397 void
3398 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
3399 {
3400 	/* current version number */
3401 	clabel->version = RF_COMPONENT_LABEL_VERSION;
3402 	clabel->serial_number = raidPtr->serial_number;
3403 	clabel->mod_counter = raidPtr->mod_counter;
3404 	clabel->num_rows = 1;
3405 	clabel->num_columns = raidPtr->numCol;
3406 	clabel->clean = RF_RAID_DIRTY; /* not clean */
3407 	clabel->status = rf_ds_optimal; /* "It's good!" */
3408 
3409 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3410 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3411 	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3412 
3413 	clabel->blockSize = raidPtr->bytesPerSector;
3414 	clabel->numBlocks = raidPtr->sectorsPerDisk;
3415 
3416 	/* XXX not portable */
3417 	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3418 	clabel->maxOutstanding = raidPtr->maxOutstanding;
3419 	clabel->autoconfigure = raidPtr->autoconfigure;
3420 	clabel->root_partition = raidPtr->root_partition;
3421 	clabel->last_unit = raidPtr->raidid;
3422 	clabel->config_order = raidPtr->config_order;
3423 }
3424 
3425 int
3426 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
3427 {
3428 	RF_Raid_t *raidPtr;
3429 	RF_Config_t *config;
3430 	int raidID;
3431 	int retcode;
3432 
3433 #ifdef DEBUG
3434 	printf("RAID autoconfigure\n");
3435 #endif
3436 
3437 	retcode = 0;
3438 	*unit = -1;
3439 
3440 	/* 1. Create a config structure */
3441 
3442 	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3443 				       M_RAIDFRAME,
3444 				       M_NOWAIT);
3445 	if (config==NULL) {
3446 		printf("Out of mem!?!?\n");
3447 				/* XXX do something more intelligent here. */
3448 		return(1);
3449 	}
3450 
3451 	memset(config, 0, sizeof(RF_Config_t));
3452 
3453 	/*
3454 	   2. Figure out what RAID ID this one is supposed to live at
3455 	   See if we can get the same RAID dev that it was configured
3456 	   on last time..
3457 	*/
3458 
3459 	raidID = cset->ac->clabel->last_unit;
3460 	if ((raidID < 0) || (raidID >= numraid)) {
3461 		/* let's not wander off into lala land. */
3462 		raidID = numraid - 1;
3463 	}
3464 	if (raidPtrs[raidID]->valid != 0) {
3465 
3466 		/*
3467 		   Nope... Go looking for an alternative...
3468 		   Start high so we don't immediately use raid0 if that's
3469 		   not taken.
3470 		*/
3471 
3472 		for(raidID = numraid - 1; raidID >= 0; raidID--) {
3473 			if (raidPtrs[raidID]->valid == 0) {
3474 				/* can use this one! */
3475 				break;
3476 			}
3477 		}
3478 	}
3479 
3480 	if (raidID < 0) {
3481 		/* punt... */
3482 		printf("Unable to auto configure this set!\n");
3483 		printf("(Out of RAID devs!)\n");
3484 		free(config, M_RAIDFRAME);
3485 		return(1);
3486 	}
3487 
3488 #ifdef DEBUG
3489 	printf("Configuring raid%d:\n",raidID);
3490 #endif
3491 
3492 	raidPtr = raidPtrs[raidID];
3493 
3494 	/* XXX all this stuff should be done SOMEWHERE ELSE! */
3495 	raidPtr->raidid = raidID;
3496 	raidPtr->openings = RAIDOUTSTANDING;
3497 
3498 	/* 3. Build the configuration structure */
3499 	rf_create_configuration(cset->ac, config, raidPtr);
3500 
3501 	/* 4. Do the configuration */
3502 	retcode = rf_Configure(raidPtr, config, cset->ac);
3503 
3504 	if (retcode == 0) {
3505 
3506 		raidinit(raidPtrs[raidID]);
3507 
3508 		rf_markalldirty(raidPtrs[raidID]);
3509 		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3510 		if (cset->ac->clabel->root_partition==1) {
3511 			/* everything configured just fine.  Make a note
3512 			   that this set is eligible to be root. */
3513 			cset->rootable = 1;
3514 			/* XXX do this here? */
3515 			raidPtrs[raidID]->root_partition = 1;
3516 		}
3517 	}
3518 
3519 	/* 5. Cleanup */
3520 	free(config, M_RAIDFRAME);
3521 
3522 	*unit = raidID;
3523 	return(retcode);
3524 }
3525 
3526 void
3527 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
3528 {
3529 	struct buf *bp;
3530 
3531 	bp = (struct buf *)desc->bp;
3532 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3533 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
3534 }
3535 
3536 void
3537 rf_pool_init(struct pool *p, size_t size, const char *w_chan,
3538 	     size_t xmin, size_t xmax)
3539 {
3540 	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
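	/* Roughly: pre-allocate xmin items and try to keep at least that
	   many on hand (low water mark), while capping how many idle items
	   the pool hoards at xmax (high water mark). */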
3541 	pool_sethiwat(p, xmax);
3542 	pool_prime(p, xmin);
3543 	pool_setlowat(p, xmin);
3544 }
3545 
3546 /*
3547  * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
3548  * if there is IO pending and if that IO could possibly be done for a
3549  * given RAID set.  Returns 0 if IO is waiting and can be done, 1
3550  * otherwise.
3551  *
3552  */
3553 
3554 int
3555 rf_buf_queue_check(int raidid)
3556 {
3557 	if ((BUFQ_PEEK(raid_softc[raidid].buf_queue) != NULL) &&
3558 	    raidPtrs[raidid]->openings > 0) {
3559 		/* there is work to do */
3560 		return 0;
3561 	}
3562 	/* default is nothing to do */
3563 	return 1;
3564 }
3565 
3566 int
3567 rf_getdisksize(struct vnode *vp, struct lwp *l, RF_RaidDisk_t *diskPtr)
3568 {
3569 	struct partinfo dpart;
3570 	struct dkwedge_info dkw;
3571 	int error;
3572 
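	/*
	 * Try the disklabel path (DIOCGPART) first; for a wedge ("dk")
	 * component that ioctl typically fails, so fall back to
	 * DIOCGWEDGEINFO.  In both cases rf_protectedSectors are subtracted
	 * from the usable size -- that reserved area at the start of the
	 * component is where the component label lives.
	 */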
3573 	error = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred, l);
3574 	if (error == 0) {
3575 		diskPtr->blockSize = dpart.disklab->d_secsize;
3576 		diskPtr->numBlocks = dpart.part->p_size - rf_protectedSectors;
3577 		diskPtr->partitionSize = dpart.part->p_size;
3578 		return 0;
3579 	}
3580 
3581 	error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD, l->l_cred, l);
3582 	if (error == 0) {
3583 		diskPtr->blockSize = 512;	/* XXX */
3584 		diskPtr->numBlocks = dkw.dkw_size - rf_protectedSectors;
3585 		diskPtr->partitionSize = dkw.dkw_size;
3586 		return 0;
3587 	}
3588 	return error;
3589 }
3590 
3591 static int
3592 raid_match(struct device *self, struct cfdata *cfdata,
3593     void *aux)
3594 {
3595 	return 1;
3596 }
3597 
3598 static void
3599 raid_attach(struct device *parent, struct device *self,
3600     void *aux)
3601 {
3602 
3603 }
3604 
3605 
3606 static int
3607 raid_detach(struct device *self, int flags)
3608 {
3609 	struct raid_softc *rs = (struct raid_softc *)self;
3610 
3611 	if (rs->sc_flags & RAIDF_INITED)
3612 		return EBUSY;
3613 
3614 	return 0;
3615 }
3616 
3617 static void
3618 rf_set_properties(struct raid_softc *rs, RF_Raid_t *raidPtr)
3619 {
3620 	prop_dictionary_t disk_info, odisk_info, geom;
3621 	disk_info = prop_dictionary_create();
3622 	geom = prop_dictionary_create();
3623 	prop_dictionary_set_uint64(geom, "sectors-per-unit",
3624 				   raidPtr->totalSectors);
3625 	prop_dictionary_set_uint32(geom, "sector-size",
3626 				   raidPtr->bytesPerSector);
3627 
3628 	prop_dictionary_set_uint16(geom, "sectors-per-track",
3629 				   raidPtr->Layout.dataSectorsPerStripe);
3630 	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
3631 				   4 * raidPtr->numCol);
3632 
3633 	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
3634 	   raidPtr->totalSectors / (raidPtr->Layout.dataSectorsPerStripe *
3635 	   (4 * raidPtr->numCol)));
3636 
3637 	prop_dictionary_set(disk_info, "geometry", geom);
3638 	prop_object_release(geom);
3639 	prop_dictionary_set(device_properties(rs->sc_dev),
3640 			    "disk-info", disk_info);
3641 	odisk_info = rs->sc_dkdev.dk_info;
3642 	rs->sc_dkdev.dk_info = disk_info;
3643 	if (odisk_info)
3644 		prop_object_release(odisk_info);
3645 }
3646