xref: /netbsd-src/sys/dev/raidframe/rf_netbsdkintf.c (revision b1c86f5f087524e68db12794ee9c3e3da1ab17a0)
1 /*	$NetBSD: rf_netbsdkintf.c,v 1.274 2010/08/08 18:25:14 chs Exp $	*/
2 /*-
3  * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Greg Oster; Jason R. Thorpe.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * Copyright (c) 1990, 1993
33  *      The Regents of the University of California.  All rights reserved.
34  *
35  * This code is derived from software contributed to Berkeley by
36  * the Systems Programming Group of the University of Utah Computer
37  * Science Department.
38  *
39  * Redistribution and use in source and binary forms, with or without
40  * modification, are permitted provided that the following conditions
41  * are met:
42  * 1. Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  * 2. Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in the
46  *    documentation and/or other materials provided with the distribution.
47  * 3. Neither the name of the University nor the names of its contributors
48  *    may be used to endorse or promote products derived from this software
49  *    without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61  * SUCH DAMAGE.
62  *
63  * from: Utah $Hdr: cd.c 1.6 90/11/28$
64  *
65  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
66  */
67 
68 /*
69  * Copyright (c) 1988 University of Utah.
70  *
71  * This code is derived from software contributed to Berkeley by
72  * the Systems Programming Group of the University of Utah Computer
73  * Science Department.
74  *
75  * Redistribution and use in source and binary forms, with or without
76  * modification, are permitted provided that the following conditions
77  * are met:
78  * 1. Redistributions of source code must retain the above copyright
79  *    notice, this list of conditions and the following disclaimer.
80  * 2. Redistributions in binary form must reproduce the above copyright
81  *    notice, this list of conditions and the following disclaimer in the
82  *    documentation and/or other materials provided with the distribution.
83  * 3. All advertising materials mentioning features or use of this software
84  *    must display the following acknowledgement:
85  *      This product includes software developed by the University of
86  *      California, Berkeley and its contributors.
87  * 4. Neither the name of the University nor the names of its contributors
88  *    may be used to endorse or promote products derived from this software
89  *    without specific prior written permission.
90  *
91  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
92  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
93  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
94  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
95  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
96  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
97  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
98  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
99  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
100  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
101  * SUCH DAMAGE.
102  *
103  * from: Utah $Hdr: cd.c 1.6 90/11/28$
104  *
105  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
106  */
107 
108 /*
109  * Copyright (c) 1995 Carnegie-Mellon University.
110  * All rights reserved.
111  *
112  * Authors: Mark Holland, Jim Zelenka
113  *
114  * Permission to use, copy, modify and distribute this software and
115  * its documentation is hereby granted, provided that both the copyright
116  * notice and this permission notice appear in all copies of the
117  * software, derivative works or modified versions, and any portions
118  * thereof, and that both notices appear in supporting documentation.
119  *
120  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
121  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
122  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
123  *
124  * Carnegie Mellon requests users of this software to return to
125  *
126  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
127  *  School of Computer Science
128  *  Carnegie Mellon University
129  *  Pittsburgh PA 15213-3890
130  *
131  * any improvements or extensions that they make and grant Carnegie the
132  * rights to redistribute these changes.
133  */
134 
135 /***********************************************************
136  *
137  * rf_kintf.c -- the kernel interface routines for RAIDframe
138  *
139  ***********************************************************/
140 
141 #include <sys/cdefs.h>
142 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.274 2010/08/08 18:25:14 chs Exp $");
143 
144 #ifdef _KERNEL_OPT
145 #include "opt_compat_netbsd.h"
146 #include "opt_raid_autoconfig.h"
147 #include "raid.h"
148 #endif
149 
150 #include <sys/param.h>
151 #include <sys/errno.h>
152 #include <sys/pool.h>
153 #include <sys/proc.h>
154 #include <sys/queue.h>
155 #include <sys/disk.h>
156 #include <sys/device.h>
157 #include <sys/stat.h>
158 #include <sys/ioctl.h>
159 #include <sys/fcntl.h>
160 #include <sys/systm.h>
161 #include <sys/vnode.h>
162 #include <sys/disklabel.h>
163 #include <sys/conf.h>
164 #include <sys/buf.h>
165 #include <sys/bufq.h>
166 #include <sys/reboot.h>
167 #include <sys/kauth.h>
168 
169 #include <prop/proplib.h>
170 
171 #include <dev/raidframe/raidframevar.h>
172 #include <dev/raidframe/raidframeio.h>
173 #include <dev/raidframe/rf_paritymap.h>
174 
175 #include "rf_raid.h"
176 #include "rf_copyback.h"
177 #include "rf_dag.h"
178 #include "rf_dagflags.h"
179 #include "rf_desc.h"
180 #include "rf_diskqueue.h"
181 #include "rf_etimer.h"
182 #include "rf_general.h"
183 #include "rf_kintf.h"
184 #include "rf_options.h"
185 #include "rf_driver.h"
186 #include "rf_parityscan.h"
187 #include "rf_threadstuff.h"
188 
189 #ifdef COMPAT_50
190 #include "rf_compat50.h"
191 #endif
192 
193 #ifdef DEBUG
194 int     rf_kdebug_level = 0;
195 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
196 #else				/* DEBUG */
197 #define db1_printf(a) { }
198 #endif				/* DEBUG */
199 
200 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
201 
202 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
203 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
204 
205 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
206 						 * spare table */
207 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
208 						 * installation process */
209 #endif
210 
211 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
212 
213 /* prototypes */
214 static void KernelWakeupFunc(struct buf *);
215 static void InitBP(struct buf *, struct vnode *, unsigned,
216     dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
217     void *, int, struct proc *);
218 static void raidinit(RF_Raid_t *);
219 
220 void raidattach(int);
221 static int raid_match(device_t, cfdata_t, void *);
222 static void raid_attach(device_t, device_t, void *);
223 static int raid_detach(device_t, int);
224 
225 static int raidread_component_area(dev_t, struct vnode *, void *, size_t,
226     daddr_t, daddr_t);
227 static int raidwrite_component_area(dev_t, struct vnode *, void *, size_t,
228     daddr_t, daddr_t, int);
229 
230 static int raidwrite_component_label(dev_t, struct vnode *,
231     RF_ComponentLabel_t *);
232 static int raidread_component_label(dev_t, struct vnode *,
233     RF_ComponentLabel_t *);
234 
235 
236 dev_type_open(raidopen);
237 dev_type_close(raidclose);
238 dev_type_read(raidread);
239 dev_type_write(raidwrite);
240 dev_type_ioctl(raidioctl);
241 dev_type_strategy(raidstrategy);
242 dev_type_dump(raiddump);
243 dev_type_size(raidsize);
244 
245 const struct bdevsw raid_bdevsw = {
246 	raidopen, raidclose, raidstrategy, raidioctl,
247 	raiddump, raidsize, D_DISK
248 };
249 
250 const struct cdevsw raid_cdevsw = {
251 	raidopen, raidclose, raidread, raidwrite, raidioctl,
252 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
253 };
254 
255 static struct dkdriver rf_dkdriver = { raidstrategy, minphys };
256 
257 /* XXX Not sure if the following should be replacing the raidPtrs above,
258    or if it should be used in conjunction with that...
259 */
260 
/*
 * Per-unit software state for a RAID pseudo-device.  One entry per
 * configured unit lives in the global raid_softc[] array, indexed by
 * the device's minor/unit number.
 */
struct raid_softc {
	device_t sc_dev;	/* autoconf device handle for this unit */
	int     sc_flags;	/* flags */
	int     sc_cflags;	/* configuration flags */
	uint64_t sc_size;	/* size of the raid device */
	char    sc_xname[20];	/* XXX external name */
	struct disk sc_dkdev;	/* generic disk device info */
	struct bufq_state *buf_queue;	/* used for the device queue */
};
/* sc_flags */
#define RAIDF_INITED	0x01	/* unit has been initialized */
#define RAIDF_WLABEL	0x02	/* label area is writable */
#define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
#define RAIDF_SHUTDOWN	0x08	/* unit is being shutdown */
#define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
#define RAIDF_LOCKED	0x80	/* unit is locked */
277 
278 #define	raidunit(x)	DISKUNIT(x)
279 int numraid = 0;
280 
281 extern struct cfdriver raid_cd;
282 CFATTACH_DECL3_NEW(raid, sizeof(struct raid_softc),
283     raid_match, raid_attach, raid_detach, NULL, NULL, NULL,
284     DVF_DETACH_SHUTDOWN);
285 
286 /*
287  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
288  * Be aware that large numbers can allow the driver to consume a lot of
289  * kernel memory, especially on writes, and in degraded mode reads.
290  *
291  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
292  * a single 64K write will typically require 64K for the old data,
293  * 64K for the old parity, and 64K for the new parity, for a total
294  * of 192K (if the parity buffer is not re-used immediately).
295  * Even it if is used immediately, that's still 128K, which when multiplied
296  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
297  *
298  * Now in degraded mode, for example, a 64K read on the above setup may
299  * require data reconstruction, which will require *all* of the 4 remaining
300  * disks to participate -- 4 * 32K/disk == 128K again.
301  */
302 
303 #ifndef RAIDOUTSTANDING
304 #define RAIDOUTSTANDING   6
305 #endif
306 
307 #define RAIDLABELDEV(dev)	\
308 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
309 
310 /* declared here, and made public, for the benefit of KVM stuff.. */
311 struct raid_softc *raid_softc;
312 
313 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
314 				     struct disklabel *);
315 static void raidgetdisklabel(dev_t);
316 static void raidmakedisklabel(struct raid_softc *);
317 
318 static int raidlock(struct raid_softc *);
319 static void raidunlock(struct raid_softc *);
320 
321 static int raid_detach_unlocked(struct raid_softc *);
322 
323 static void rf_markalldirty(RF_Raid_t *);
324 static void rf_set_properties(struct raid_softc *, RF_Raid_t *);
325 
326 void rf_ReconThread(struct rf_recon_req *);
327 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
328 void rf_CopybackThread(RF_Raid_t *raidPtr);
329 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
330 int rf_autoconfig(device_t);
331 void rf_buildroothack(RF_ConfigSet_t *);
332 
333 RF_AutoConfig_t *rf_find_raid_components(void);
334 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
335 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
336 static int rf_reasonable_label(RF_ComponentLabel_t *);
337 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
338 int rf_set_autoconfig(RF_Raid_t *, int);
339 int rf_set_rootpartition(RF_Raid_t *, int);
340 void rf_release_all_vps(RF_ConfigSet_t *);
341 void rf_cleanup_config_set(RF_ConfigSet_t *);
342 int rf_have_enough_components(RF_ConfigSet_t *);
343 int rf_auto_config_set(RF_ConfigSet_t *, int *);
344 
345 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
346 				  allow autoconfig to take place.
347 				  Note that this is overridden by having
348 				  RAID_AUTOCONFIG as an option in the
349 				  kernel config file.  */
350 
351 struct RF_Pools_s rf_pools;
352 
/*
 * raidattach -- pseudo-device attach routine.
 *
 * Called once at boot with the number of RAID units requested in the
 * kernel configuration.  Allocates the global raidPtrs[] descriptor
 * array and the raid_softc[] array, boots the RAIDframe core, attaches
 * the autoconf cfattach, and registers a config_finalize(9) hook so
 * component auto-configuration runs after all real hardware has been
 * found.  There is no way to report an error to the caller; fatal
 * initialization problems panic, lesser ones just print a warning.
 */
void
raidattach(int num)
{
	int raidID;
	int i, rc;

	aprint_debug("raidattach: Asked for %d units\n", num);

	if (num <= 0) {
#ifdef DIAGNOSTIC
		panic("raidattach: count <= 0");
#endif
		return;
	}
	/* This is where all the initialization stuff gets done. */

	numraid = num;

	/* Make some space for requested number of units... */

	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
	if (raidPtrs == NULL) {
		panic("raidPtrs is NULL!!");
	}

#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
	rf_mutex_init(&rf_sparet_wait_mutex);

	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
#endif

	for (i = 0; i < num; i++)
		raidPtrs[i] = NULL;
	rc = rf_BootRaidframe();
	if (rc == 0)
		aprint_verbose("Kernelized RAIDframe activated\n");
	else
		panic("Serious error booting RAID!!");

	/* put together some datastructures like the CCD device does.. This
	 * lets us lock the device and what-not when it gets opened. */

	raid_softc = (struct raid_softc *)
		malloc(num * sizeof(struct raid_softc),
		       M_RAIDFRAME, M_NOWAIT);
	if (raid_softc == NULL) {
		aprint_error("WARNING: no memory for RAIDframe driver\n");
		return;
	}

	memset(raid_softc, 0, num * sizeof(struct raid_softc));

	for (raidID = 0; raidID < num; raidID++) {
		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);

		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
			  (RF_Raid_t *));
		if (raidPtrs[raidID] == NULL) {
			aprint_error("WARNING: raidPtrs[%d] is NULL\n", raidID);
			/* Shrink the visible unit count so later code never
			   touches the unallocated tail of raidPtrs[]. */
			numraid = raidID;
			return;
		}
	}

	if (config_cfattach_attach(raid_cd.cd_name, &raid_ca)) {
		aprint_error("raidattach: config_cfattach_attach failed?\n");
	}

#ifdef RAID_AUTOCONFIG
	raidautoconfig = 1;
#endif

	/*
	 * Register a finalizer which will be used to auto-config RAID
	 * sets once all real hardware devices have been found.
	 */
	if (config_finalize_register(NULL, rf_autoconfig) != 0)
		aprint_error("WARNING: unable to register RAIDframe finalizer\n");
}
432 
/*
 * rf_autoconfig -- config_finalize(9) hook that auto-configures RAID sets.
 *
 * Runs at most once (raidautoconfig is cleared on entry): scans the
 * system for components carrying RAIDframe labels, groups them into
 * configuration sets, and hands the sets to rf_buildroothack() for
 * actual configuration and possible root-device selection.  Returns
 * non-zero when work was done, 0 when autoconfiguration is disabled.
 */
int
rf_autoconfig(device_t self)
{
	RF_AutoConfig_t *ac_list;
	RF_ConfigSet_t *config_sets;

	if (raidautoconfig == 0)
		return (0);

	/* XXX This code can only be run once. */
	raidautoconfig = 0;

	/* 1. locate all RAID components on the system */
	aprint_debug("Searching for RAID components...\n");
	ac_list = rf_find_raid_components();

	/* 2. Sort them into their respective sets. */
	config_sets = rf_create_auto_sets(ac_list);

	/*
	 * 3. Evaluate each set and configure the valid ones.
	 * This gets done in rf_buildroothack().
	 */
	rf_buildroothack(config_sets);

	return 1;
}
460 
/*
 * rf_buildroothack -- configure auto-config sets and pick a root device.
 *
 * For each set that has enough components and whose label requests
 * autoconfiguration, attempt to bring the RAID set up; resources of
 * sets that are skipped or fail are released.  Afterwards, unless the
 * user hardwired a root device (rootspec != NULL), try to point
 * booted_device at a configured set: the unique rootable set if there
 * is exactly one, otherwise the set containing the device the kernel
 * actually booted from.  If no unique choice can be made, set
 * RB_ASKNAME so setroot() asks the user.
 */
void
rf_buildroothack(RF_ConfigSet_t *config_sets)
{
	RF_ConfigSet_t *cset;
	RF_ConfigSet_t *next_cset;
	int retcode;
	int raidID;
	int rootID;
	int col;
	int num_root;
	char *devname;

	rootID = 0;
	num_root = 0;
	cset = config_sets;
	while (cset != NULL) {
		/* rf_cleanup_config_set() frees cset; grab next first. */
		next_cset = cset->next;
		if (rf_have_enough_components(cset) &&
		    cset->ac->clabel->autoconfigure==1) {
			retcode = rf_auto_config_set(cset,&raidID);
			if (!retcode) {
				aprint_debug("raid%d: configured ok\n", raidID);
				if (cset->rootable) {
					rootID = raidID;
					num_root++;
				}
			} else {
				/* The autoconfig didn't work :( */
				aprint_debug("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
				rf_release_all_vps(cset);
			}
		} else {
			/* we're not autoconfiguring this set...
			   release the associated resources */
			rf_release_all_vps(cset);
		}
		/* cleanup */
		rf_cleanup_config_set(cset);
		cset = next_cset;
	}

	/* if the user has specified what the root device should be
	   then we don't touch booted_device or boothowto... */

	if (rootspec != NULL)
		return;

	/* we found something bootable... */

	if (num_root == 1) {
		booted_device = raid_softc[rootID].sc_dev;
	} else if (num_root > 1) {

		/*
		 * Maybe the MD code can help. If it cannot, then
		 * setroot() will discover that we have no
		 * booted_device and will ask the user if nothing was
		 * hardwired in the kernel config file
		 */

		if (booted_device == NULL)
			cpu_rootconf();
		if (booted_device == NULL)
			return;

		/* Recount, keeping only sets that contain the actual
		   boot device among their components. */
		num_root = 0;
		for (raidID = 0; raidID < numraid; raidID++) {
			if (raidPtrs[raidID]->valid == 0)
				continue;

			if (raidPtrs[raidID]->root_partition == 0)
				continue;

			for (col = 0; col < raidPtrs[raidID]->numCol; col++) {
				devname = raidPtrs[raidID]->Disks[col].devname;
				/* skip the leading "/dev/" of the component
				   name before comparing device names */
				devname += sizeof("/dev/") - 1;
				if (strncmp(devname, device_xname(booted_device),
					    strlen(device_xname(booted_device))) != 0)
					continue;
				aprint_debug("raid%d includes boot device %s\n",
				       raidID, devname);
				num_root++;
				rootID = raidID;
			}
		}

		if (num_root == 1) {
			booted_device = raid_softc[rootID].sc_dev;
		} else {
			/* we can't guess.. require the user to answer... */
			boothowto |= RB_ASKNAME;
		}
	}
}
555 
556 
557 int
558 raidsize(dev_t dev)
559 {
560 	struct raid_softc *rs;
561 	struct disklabel *lp;
562 	int     part, unit, omask, size;
563 
564 	unit = raidunit(dev);
565 	if (unit >= numraid)
566 		return (-1);
567 	rs = &raid_softc[unit];
568 
569 	if ((rs->sc_flags & RAIDF_INITED) == 0)
570 		return (-1);
571 
572 	part = DISKPART(dev);
573 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
574 	lp = rs->sc_dkdev.dk_label;
575 
576 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
577 		return (-1);
578 
579 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
580 		size = -1;
581 	else
582 		size = lp->d_partitions[part].p_size *
583 		    (lp->d_secsize / DEV_BSIZE);
584 
585 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
586 		return (-1);
587 
588 	return (size);
589 
590 }
591 
/*
 * raiddump -- crash-dump entry point (bdevsw d_dump).
 *
 * Dumping is only supported on RAID 1 sets (one data + one parity
 * column).  A single live component is chosen in order of preference
 * (master, used spare of the master, slave, used spare of the slave)
 * and the dump is passed straight through to that component's block
 * device.  blkno is in DEV_BSIZE blocks relative to the partition
 * being dumped to; size is in bytes and must be a multiple of
 * DEV_BSIZE.  Returns 0 on success or an errno.
 */
int
raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;
	const struct bdevsw *bdev;
	struct disklabel *lp;
	RF_Raid_t *raidPtr;
	daddr_t offset;
	int     part, c, sparecol, j, scol, dumpto;
	int     error = 0;

	if (unit >= numraid)
		return (ENXIO);

	rs = &raid_softc[unit];
	raidPtr = raidPtrs[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return ENXIO;

	/* we only support dumping to RAID 1 sets */
	if (raidPtr->Layout.numDataCol != 1 ||
	    raidPtr->Layout.numParityCol != 1)
		return EINVAL;


	if ((error = raidlock(rs)) != 0)
		return error;

	if (size % DEV_BSIZE != 0) {
		error = EINVAL;
		goto out;
	}

	if (blkno + size / DEV_BSIZE > rs->sc_size) {
		printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
		    "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
		    size / DEV_BSIZE, rs->sc_size);
		error = EINVAL;
		goto out;
	}

	part = DISKPART(dev);
	lp = rs->sc_dkdev.dk_label;
	offset = lp->d_partitions[part].p_offset + RF_PROTECTED_SECTORS;

	/* figure out what device is alive.. */

	/*
	   Look for a component to dump to.  The preference for the
	   component to dump to is as follows:
	   1) the master
	   2) a used_spare of the master
	   3) the slave
	   4) a used_spare of the slave
	*/

	dumpto = -1;
	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			/* this might be the one */
			dumpto = c;
			break;
		}
	}

	/*
	   At this point we have possibly selected a live master or a
	   live slave.  We now check to see if there is a spared
	   master (or a spared slave), if we didn't find a live master
	   or a live slave.
	*/

	for (c = 0; c < raidPtr->numSpare; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status ==  rf_ds_used_spare) {
			/* How about this one?  Find which column this
			   spare is standing in for. */
			scol = -1;
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}
			if (scol == 0) {
				/*
				   We must have found a spared master!
				   We'll take that over anything else
				   found so far.  (We couldn't have
				   found a real master before, since
				   this is a used spare, and it's
				   saying that it's replacing the
				   master.)  On reboot (with
				   autoconfiguration turned on)
				   sparecol will become the 1st
				   component (component0) of this set.
				*/
				dumpto = sparecol;
				break;
			} else if (scol != -1) {
				/*
				   Must be a spared slave.  We'll dump
				   to that if we havn't found anything
				   else so far.
				*/
				if (dumpto == -1)
					dumpto = sparecol;
			}
		}
	}

	if (dumpto == -1) {
		/* we couldn't find any live components to dump to!?!?
		 */
		error = EINVAL;
		goto out;
	}

	bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);

	/*
	   Note that blkno is relative to this particular partition.
	   By adding the offset of this partition in the RAID
	   set, and also adding RF_PROTECTED_SECTORS, we get a
	   value that is relative to the partition used for the
	   underlying component.
	*/

	error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
				blkno + offset, va, size);

out:
	raidunlock(rs);

	return error;
}
/* ARGSUSED */
/*
 * raidopen -- open entry point (both bdevsw and cdevsw).
 *
 * Takes the per-unit lock, refuses units that are shutting down,
 * reads the disklabel on the first open of a configured set, checks
 * that the requested partition exists, and records the open in the
 * char/block open mask as appropriate.  On the very first open of an
 * initialized set all component labels are marked dirty so an unclean
 * shutdown can be detected later.  Returns 0 or an errno.
 */
int
raidopen(dev_t dev, int flags, int fmt,
    struct lwp *l)
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;
	struct disklabel *lp;
	int     part, pmask;
	int     error = 0;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);

	if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0) {
		error = EBUSY;
		goto bad;
	}

	lp = rs->sc_dkdev.dk_label;

	part = DISKPART(dev);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (rs->sc_dkdev.dk_nwedges != 0 && part != RAW_PART) {
		error = EBUSY;
		goto bad;
	}
	pmask = (1 << part);

	/* First open of a configured set: (re)read the disklabel. */
	if ((rs->sc_flags & RAIDF_INITED) &&
	    (rs->sc_dkdev.dk_openmask == 0))
		raidgetdisklabel(dev);

	/* make sure that this partition exists */

	if (part != RAW_PART) {
		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
		    ((part >= lp->d_npartitions) ||
			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
			error = ENXIO;
			goto bad;
		}
	}
	/* Prevent this unit from being unconfigured while open. */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* First one... mark things as dirty... Note that we *MUST*
		 have done a configure before this.  I DO NOT WANT TO BE
		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
		 THAT THEY BELONG TOGETHER!!!!! */
		/* XXX should check to see if we're only open for reading
		   here... If so, we needn't do this, but then need some
		   other way of keeping track of what's happened.. */

		rf_markalldirty(raidPtrs[unit]);
	}


	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

bad:
	raidunlock(rs);

	return (error);


}
/* ARGSUSED */
/*
 * raidclose -- close entry point (both bdevsw and cdevsw).
 *
 * Clears the partition's bit in the relevant open mask under the
 * per-unit lock.  When the last partition of a configured set is
 * closed, the component labels get a final update so the set is
 * recorded as cleanly shut down.  Returns 0, or the error from
 * raidlock(), or ENXIO for a bad unit.
 */
int
raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;
	int     error = 0;
	int     part;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* Last one... device is not unconfigured yet.
		   Device shutdown has taken care of setting the
		   clean bits if RAIDF_INITED is not set
		   mark things as clean... */

		rf_update_component_labels(raidPtrs[unit],
						 RF_FINAL_COMPONENT_UPDATE);

		/* If the kernel is shutting down, it will detach
		 * this RAID set soon enough.
		 */
	}

	raidunlock(rs);
	return (0);

}
865 
866 void
867 raidstrategy(struct buf *bp)
868 {
869 	int s;
870 
871 	unsigned int raidID = raidunit(bp->b_dev);
872 	RF_Raid_t *raidPtr;
873 	struct raid_softc *rs = &raid_softc[raidID];
874 	int     wlabel;
875 
876 	if ((rs->sc_flags & RAIDF_INITED) ==0) {
877 		bp->b_error = ENXIO;
878 		goto done;
879 	}
880 	if (raidID >= numraid || !raidPtrs[raidID]) {
881 		bp->b_error = ENODEV;
882 		goto done;
883 	}
884 	raidPtr = raidPtrs[raidID];
885 	if (!raidPtr->valid) {
886 		bp->b_error = ENODEV;
887 		goto done;
888 	}
889 	if (bp->b_bcount == 0) {
890 		db1_printf(("b_bcount is zero..\n"));
891 		goto done;
892 	}
893 
894 	/*
895 	 * Do bounds checking and adjust transfer.  If there's an
896 	 * error, the bounds check will flag that for us.
897 	 */
898 
899 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
900 	if (DISKPART(bp->b_dev) == RAW_PART) {
901 		uint64_t size; /* device size in DEV_BSIZE unit */
902 
903 		if (raidPtr->logBytesPerSector > DEV_BSHIFT) {
904 			size = raidPtr->totalSectors <<
905 			    (raidPtr->logBytesPerSector - DEV_BSHIFT);
906 		} else {
907 			size = raidPtr->totalSectors >>
908 			    (DEV_BSHIFT - raidPtr->logBytesPerSector);
909 		}
910 		if (bounds_check_with_mediasize(bp, DEV_BSIZE, size) <= 0) {
911 			goto done;
912 		}
913 	} else {
914 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
915 			db1_printf(("Bounds check failed!!:%d %d\n",
916 				(int) bp->b_blkno, (int) wlabel));
917 			goto done;
918 		}
919 	}
920 	s = splbio();
921 
922 	bp->b_resid = 0;
923 
924 	/* stuff it onto our queue */
925 	bufq_put(rs->buf_queue, bp);
926 
927 	/* scheduled the IO to happen at the next convenient time */
928 	wakeup(&(raidPtrs[raidID]->iodone));
929 
930 	splx(s);
931 	return;
932 
933 done:
934 	bp->b_resid = bp->b_bcount;
935 	biodone(bp);
936 }
937 /* ARGSUSED */
938 int
939 raidread(dev_t dev, struct uio *uio, int flags)
940 {
941 	int     unit = raidunit(dev);
942 	struct raid_softc *rs;
943 
944 	if (unit >= numraid)
945 		return (ENXIO);
946 	rs = &raid_softc[unit];
947 
948 	if ((rs->sc_flags & RAIDF_INITED) == 0)
949 		return (ENXIO);
950 
951 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
952 
953 }
954 /* ARGSUSED */
955 int
956 raidwrite(dev_t dev, struct uio *uio, int flags)
957 {
958 	int     unit = raidunit(dev);
959 	struct raid_softc *rs;
960 
961 	if (unit >= numraid)
962 		return (ENXIO);
963 	rs = &raid_softc[unit];
964 
965 	if ((rs->sc_flags & RAIDF_INITED) == 0)
966 		return (ENXIO);
967 
968 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
969 
970 }
971 
/*
 * raid_detach_unlocked -- tear down one RAID unit.
 *
 * Caller must already hold the per-unit lock (raidlock).  Fails with
 * EBUSY while any partition is open.  For a configured set the
 * RAIDframe core is shut down first; on success the INITED/SHUTDOWN
 * flags are cleared and the disk(9) structure is detached and
 * destroyed.  Returns 0 on success or an errno.
 */
static int
raid_detach_unlocked(struct raid_softc *rs)
{
	int error;
	RF_Raid_t *raidPtr;

	raidPtr = raidPtrs[device_unit(rs->sc_dev)];

	/*
	 * If somebody has a partition mounted, we shouldn't
	 * shutdown.
	 */
	if (rs->sc_dkdev.dk_openmask != 0)
		return EBUSY;

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		;	/* not initialized: nothing to do */
	else if ((error = rf_Shutdown(raidPtr)) != 0)
		return error;
	else
		rs->sc_flags &= ~(RAIDF_INITED|RAIDF_SHUTDOWN);

	/* Detach the disk. */
	disk_detach(&rs->sc_dkdev);
	disk_destroy(&rs->sc_dkdev);

	return 0;
}
1000 
1001 int
1002 raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1003 {
1004 	int     unit = raidunit(dev);
1005 	int     error = 0;
1006 	int     part, pmask;
1007 	cfdata_t cf;
1008 	struct raid_softc *rs;
1009 	RF_Config_t *k_cfg, *u_cfg;
1010 	RF_Raid_t *raidPtr;
1011 	RF_RaidDisk_t *diskPtr;
1012 	RF_AccTotals_t *totals;
1013 	RF_DeviceConfig_t *d_cfg, **ucfgp;
1014 	u_char *specific_buf;
1015 	int retcode = 0;
1016 	int column;
1017 /*	int raidid; */
1018 	struct rf_recon_req *rrcopy, *rr;
1019 	RF_ComponentLabel_t *clabel;
1020 	RF_ComponentLabel_t *ci_label;
1021 	RF_ComponentLabel_t **clabel_ptr;
1022 	RF_SingleComponent_t *sparePtr,*componentPtr;
1023 	RF_SingleComponent_t component;
1024 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
1025 	int i, j, d;
1026 #ifdef __HAVE_OLD_DISKLABEL
1027 	struct disklabel newlabel;
1028 #endif
1029 	struct dkwedge_info *dkw;
1030 
1031 	if (unit >= numraid)
1032 		return (ENXIO);
1033 	rs = &raid_softc[unit];
1034 	raidPtr = raidPtrs[unit];
1035 
1036 	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
1037 		(int) DISKPART(dev), (int) unit, (int) cmd));
1038 
1039 	/* Must be open for writes for these commands... */
1040 	switch (cmd) {
1041 #ifdef DIOCGSECTORSIZE
1042 	case DIOCGSECTORSIZE:
1043 		*(u_int *)data = raidPtr->bytesPerSector;
1044 		return 0;
1045 	case DIOCGMEDIASIZE:
1046 		*(off_t *)data =
1047 		    (off_t)raidPtr->totalSectors * raidPtr->bytesPerSector;
1048 		return 0;
1049 #endif
1050 	case DIOCSDINFO:
1051 	case DIOCWDINFO:
1052 #ifdef __HAVE_OLD_DISKLABEL
1053 	case ODIOCWDINFO:
1054 	case ODIOCSDINFO:
1055 #endif
1056 	case DIOCWLABEL:
1057 	case DIOCAWEDGE:
1058 	case DIOCDWEDGE:
1059 		if ((flag & FWRITE) == 0)
1060 			return (EBADF);
1061 	}
1062 
1063 	/* Must be initialized for these... */
1064 	switch (cmd) {
1065 	case DIOCGDINFO:
1066 	case DIOCSDINFO:
1067 	case DIOCWDINFO:
1068 #ifdef __HAVE_OLD_DISKLABEL
1069 	case ODIOCGDINFO:
1070 	case ODIOCWDINFO:
1071 	case ODIOCSDINFO:
1072 	case ODIOCGDEFLABEL:
1073 #endif
1074 	case DIOCGPART:
1075 	case DIOCWLABEL:
1076 	case DIOCGDEFLABEL:
1077 	case DIOCAWEDGE:
1078 	case DIOCDWEDGE:
1079 	case DIOCLWEDGES:
1080 	case DIOCCACHESYNC:
1081 	case RAIDFRAME_SHUTDOWN:
1082 	case RAIDFRAME_REWRITEPARITY:
1083 	case RAIDFRAME_GET_INFO:
1084 	case RAIDFRAME_RESET_ACCTOTALS:
1085 	case RAIDFRAME_GET_ACCTOTALS:
1086 	case RAIDFRAME_KEEP_ACCTOTALS:
1087 	case RAIDFRAME_GET_SIZE:
1088 	case RAIDFRAME_FAIL_DISK:
1089 	case RAIDFRAME_COPYBACK:
1090 	case RAIDFRAME_CHECK_RECON_STATUS:
1091 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1092 	case RAIDFRAME_GET_COMPONENT_LABEL:
1093 	case RAIDFRAME_SET_COMPONENT_LABEL:
1094 	case RAIDFRAME_ADD_HOT_SPARE:
1095 	case RAIDFRAME_REMOVE_HOT_SPARE:
1096 	case RAIDFRAME_INIT_LABELS:
1097 	case RAIDFRAME_REBUILD_IN_PLACE:
1098 	case RAIDFRAME_CHECK_PARITY:
1099 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1100 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1101 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
1102 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1103 	case RAIDFRAME_SET_AUTOCONFIG:
1104 	case RAIDFRAME_SET_ROOT:
1105 	case RAIDFRAME_DELETE_COMPONENT:
1106 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
1107 	case RAIDFRAME_PARITYMAP_STATUS:
1108 	case RAIDFRAME_PARITYMAP_GET_DISABLE:
1109 	case RAIDFRAME_PARITYMAP_SET_DISABLE:
1110 	case RAIDFRAME_PARITYMAP_SET_PARAMS:
1111 		if ((rs->sc_flags & RAIDF_INITED) == 0)
1112 			return (ENXIO);
1113 	}
1114 
1115 	switch (cmd) {
1116 #ifdef COMPAT_50
1117 	case RAIDFRAME_GET_INFO50:
1118 		return rf_get_info50(raidPtr, data);
1119 
1120 	case RAIDFRAME_CONFIGURE50:
1121 		if ((retcode = rf_config50(raidPtr, unit, data, &k_cfg)) != 0)
1122 			return retcode;
1123 		goto config;
1124 #endif
1125 		/* configure the system */
1126 	case RAIDFRAME_CONFIGURE:
1127 
1128 		if (raidPtr->valid) {
1129 			/* There is a valid RAID set running on this unit! */
1130 			printf("raid%d: Device already configured!\n",unit);
1131 			return(EINVAL);
1132 		}
1133 
1134 		/* copy-in the configuration information */
1135 		/* data points to a pointer to the configuration structure */
1136 
1137 		u_cfg = *((RF_Config_t **) data);
1138 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
1139 		if (k_cfg == NULL) {
1140 			return (ENOMEM);
1141 		}
1142 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
1143 		if (retcode) {
1144 			RF_Free(k_cfg, sizeof(RF_Config_t));
1145 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
1146 				retcode));
1147 			return (retcode);
1148 		}
1149 		goto config;
1150 	config:
1151 		/* allocate a buffer for the layout-specific data, and copy it
1152 		 * in */
1153 		if (k_cfg->layoutSpecificSize) {
1154 			if (k_cfg->layoutSpecificSize > 10000) {
1155 				/* sanity check */
1156 				RF_Free(k_cfg, sizeof(RF_Config_t));
1157 				return (EINVAL);
1158 			}
1159 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
1160 			    (u_char *));
1161 			if (specific_buf == NULL) {
1162 				RF_Free(k_cfg, sizeof(RF_Config_t));
1163 				return (ENOMEM);
1164 			}
1165 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
1166 			    k_cfg->layoutSpecificSize);
1167 			if (retcode) {
1168 				RF_Free(k_cfg, sizeof(RF_Config_t));
1169 				RF_Free(specific_buf,
1170 					k_cfg->layoutSpecificSize);
1171 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
1172 					retcode));
1173 				return (retcode);
1174 			}
1175 		} else
1176 			specific_buf = NULL;
1177 		k_cfg->layoutSpecific = specific_buf;
1178 
1179 		/* should do some kind of sanity check on the configuration.
1180 		 * Store the sum of all the bytes in the last byte? */
1181 
1182 		/* configure the system */
1183 
1184 		/*
1185 		 * Clear the entire RAID descriptor, just to make sure
1186 		 *  there is no stale data left in the case of a
1187 		 *  reconfiguration
1188 		 */
1189 		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
1190 		raidPtr->raidid = unit;
1191 
1192 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
1193 
1194 		if (retcode == 0) {
1195 
1196 			/* allow this many simultaneous IO's to
1197 			   this RAID device */
1198 			raidPtr->openings = RAIDOUTSTANDING;
1199 
1200 			raidinit(raidPtr);
1201 			rf_markalldirty(raidPtr);
1202 		}
1203 		/* free the buffers.  No return code here. */
1204 		if (k_cfg->layoutSpecificSize) {
1205 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
1206 		}
1207 		RF_Free(k_cfg, sizeof(RF_Config_t));
1208 
1209 		return (retcode);
1210 
1211 		/* shutdown the system */
1212 	case RAIDFRAME_SHUTDOWN:
1213 
1214 		part = DISKPART(dev);
1215 		pmask = (1 << part);
1216 
1217 		if ((error = raidlock(rs)) != 0)
1218 			return (error);
1219 
1220 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
1221 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
1222 			(rs->sc_dkdev.dk_copenmask & pmask)))
1223 			retcode = EBUSY;
1224 		else {
1225 			rs->sc_flags |= RAIDF_SHUTDOWN;
1226 			rs->sc_dkdev.dk_copenmask &= ~pmask;
1227 			rs->sc_dkdev.dk_bopenmask &= ~pmask;
1228 			rs->sc_dkdev.dk_openmask &= ~pmask;
1229 			retcode = 0;
1230 		}
1231 
1232 		raidunlock(rs);
1233 
1234 		if (retcode != 0)
1235 			return retcode;
1236 
1237 		/* free the pseudo device attach bits */
1238 
1239 		cf = device_cfdata(rs->sc_dev);
1240 		if ((retcode = config_detach(rs->sc_dev, DETACH_QUIET)) == 0)
1241 			free(cf, M_RAIDFRAME);
1242 
1243 		return (retcode);
1244 	case RAIDFRAME_GET_COMPONENT_LABEL:
1245 		clabel_ptr = (RF_ComponentLabel_t **) data;
1246 		/* need to read the component label for the disk indicated
1247 		   by row,column in clabel */
1248 
1249 		/*
1250 		 * Perhaps there should be an option to skip the in-core
1251 		 * copy and hit the disk, as with disklabel(8).
1252 		 */
1253 		RF_Malloc(clabel, sizeof(*clabel), (RF_ComponentLabel_t *));
1254 
1255 		retcode = copyin( *clabel_ptr, clabel,
1256 				  sizeof(RF_ComponentLabel_t));
1257 
1258 		if (retcode) {
1259 			return(retcode);
1260 		}
1261 
1262 		clabel->row = 0; /* Don't allow looking at anything else.*/
1263 
1264 		column = clabel->column;
1265 
1266 		if ((column < 0) || (column >= raidPtr->numCol +
1267 				     raidPtr->numSpare)) {
1268 			return(EINVAL);
1269 		}
1270 
1271 		RF_Free(clabel, sizeof(*clabel));
1272 
1273 		clabel = raidget_component_label(raidPtr, column);
1274 
1275 		if (retcode == 0) {
1276 			retcode = copyout(clabel, *clabel_ptr,
1277 					  sizeof(RF_ComponentLabel_t));
1278 		}
1279 		return (retcode);
1280 
1281 #if 0
1282 	case RAIDFRAME_SET_COMPONENT_LABEL:
1283 		clabel = (RF_ComponentLabel_t *) data;
1284 
1285 		/* XXX check the label for valid stuff... */
1286 		/* Note that some things *should not* get modified --
1287 		   the user should be re-initing the labels instead of
1288 		   trying to patch things.
1289 		   */
1290 
1291 		raidid = raidPtr->raidid;
1292 #ifdef DEBUG
1293 		printf("raid%d: Got component label:\n", raidid);
1294 		printf("raid%d: Version: %d\n", raidid, clabel->version);
1295 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1296 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1297 		printf("raid%d: Column: %d\n", raidid, clabel->column);
1298 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1299 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1300 		printf("raid%d: Status: %d\n", raidid, clabel->status);
1301 #endif
1302 		clabel->row = 0;
1303 		column = clabel->column;
1304 
1305 		if ((column < 0) || (column >= raidPtr->numCol)) {
1306 			return(EINVAL);
1307 		}
1308 
1309 		/* XXX this isn't allowed to do anything for now :-) */
1310 
1311 		/* XXX and before it is, we need to fill in the rest
1312 		   of the fields!?!?!?! */
1313 		memcpy(raidget_component_label(raidPtr, column),
1314 		    clabel, sizeof(*clabel));
1315 		raidflush_component_label(raidPtr, column);
1316 		return (0);
1317 #endif
1318 
1319 	case RAIDFRAME_INIT_LABELS:
1320 		clabel = (RF_ComponentLabel_t *) data;
1321 		/*
1322 		   we only want the serial number from
1323 		   the above.  We get all the rest of the information
1324 		   from the config that was used to create this RAID
1325 		   set.
1326 		   */
1327 
1328 		raidPtr->serial_number = clabel->serial_number;
1329 
1330 		for(column=0;column<raidPtr->numCol;column++) {
1331 			diskPtr = &raidPtr->Disks[column];
1332 			if (!RF_DEAD_DISK(diskPtr->status)) {
1333 				ci_label = raidget_component_label(raidPtr,
1334 				    column);
1335 				/* Zeroing this is important. */
1336 				memset(ci_label, 0, sizeof(*ci_label));
1337 				raid_init_component_label(raidPtr, ci_label);
1338 				ci_label->serial_number =
1339 				    raidPtr->serial_number;
1340 				ci_label->row = 0; /* we dont' pretend to support more */
1341 				ci_label->partitionSize =
1342 				    diskPtr->partitionSize;
1343 				ci_label->column = column;
1344 				raidflush_component_label(raidPtr, column);
1345 			}
1346 			/* XXXjld what about the spares? */
1347 		}
1348 
1349 		return (retcode);
1350 	case RAIDFRAME_SET_AUTOCONFIG:
1351 		d = rf_set_autoconfig(raidPtr, *(int *) data);
1352 		printf("raid%d: New autoconfig value is: %d\n",
1353 		       raidPtr->raidid, d);
1354 		*(int *) data = d;
1355 		return (retcode);
1356 
1357 	case RAIDFRAME_SET_ROOT:
1358 		d = rf_set_rootpartition(raidPtr, *(int *) data);
1359 		printf("raid%d: New rootpartition value is: %d\n",
1360 		       raidPtr->raidid, d);
1361 		*(int *) data = d;
1362 		return (retcode);
1363 
1364 		/* initialize all parity */
1365 	case RAIDFRAME_REWRITEPARITY:
1366 
1367 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1368 			/* Parity for RAID 0 is trivially correct */
1369 			raidPtr->parity_good = RF_RAID_CLEAN;
1370 			return(0);
1371 		}
1372 
1373 		if (raidPtr->parity_rewrite_in_progress == 1) {
1374 			/* Re-write is already in progress! */
1375 			return(EINVAL);
1376 		}
1377 
1378 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1379 					   rf_RewriteParityThread,
1380 					   raidPtr,"raid_parity");
1381 		return (retcode);
1382 
1383 
1384 	case RAIDFRAME_ADD_HOT_SPARE:
1385 		sparePtr = (RF_SingleComponent_t *) data;
1386 		memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
1387 		retcode = rf_add_hot_spare(raidPtr, &component);
1388 		return(retcode);
1389 
1390 	case RAIDFRAME_REMOVE_HOT_SPARE:
1391 		return(retcode);
1392 
1393 	case RAIDFRAME_DELETE_COMPONENT:
1394 		componentPtr = (RF_SingleComponent_t *)data;
1395 		memcpy( &component, componentPtr,
1396 			sizeof(RF_SingleComponent_t));
1397 		retcode = rf_delete_component(raidPtr, &component);
1398 		return(retcode);
1399 
1400 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
1401 		componentPtr = (RF_SingleComponent_t *)data;
1402 		memcpy( &component, componentPtr,
1403 			sizeof(RF_SingleComponent_t));
1404 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
1405 		return(retcode);
1406 
1407 	case RAIDFRAME_REBUILD_IN_PLACE:
1408 
1409 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1410 			/* Can't do this on a RAID 0!! */
1411 			return(EINVAL);
1412 		}
1413 
1414 		if (raidPtr->recon_in_progress == 1) {
1415 			/* a reconstruct is already in progress! */
1416 			return(EINVAL);
1417 		}
1418 
1419 		componentPtr = (RF_SingleComponent_t *) data;
1420 		memcpy( &component, componentPtr,
1421 			sizeof(RF_SingleComponent_t));
1422 		component.row = 0; /* we don't support any more */
1423 		column = component.column;
1424 
1425 		if ((column < 0) || (column >= raidPtr->numCol)) {
1426 			return(EINVAL);
1427 		}
1428 
1429 		RF_LOCK_MUTEX(raidPtr->mutex);
1430 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1431 		    (raidPtr->numFailures > 0)) {
1432 			/* XXX 0 above shouldn't be constant!!! */
1433 			/* some component other than this has failed.
1434 			   Let's not make things worse than they already
1435 			   are... */
1436 			printf("raid%d: Unable to reconstruct to disk at:\n",
1437 			       raidPtr->raidid);
1438 			printf("raid%d:     Col: %d   Too many failures.\n",
1439 			       raidPtr->raidid, column);
1440 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1441 			return (EINVAL);
1442 		}
1443 		if (raidPtr->Disks[column].status ==
1444 		    rf_ds_reconstructing) {
1445 			printf("raid%d: Unable to reconstruct to disk at:\n",
1446 			       raidPtr->raidid);
1447 			printf("raid%d:    Col: %d   Reconstruction already occuring!\n", raidPtr->raidid, column);
1448 
1449 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1450 			return (EINVAL);
1451 		}
1452 		if (raidPtr->Disks[column].status == rf_ds_spared) {
1453 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1454 			return (EINVAL);
1455 		}
1456 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1457 
1458 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1459 		if (rrcopy == NULL)
1460 			return(ENOMEM);
1461 
1462 		rrcopy->raidPtr = (void *) raidPtr;
1463 		rrcopy->col = column;
1464 
1465 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1466 					   rf_ReconstructInPlaceThread,
1467 					   rrcopy,"raid_reconip");
1468 		return(retcode);
1469 
1470 	case RAIDFRAME_GET_INFO:
1471 		if (!raidPtr->valid)
1472 			return (ENODEV);
1473 		ucfgp = (RF_DeviceConfig_t **) data;
1474 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1475 			  (RF_DeviceConfig_t *));
1476 		if (d_cfg == NULL)
1477 			return (ENOMEM);
1478 		d_cfg->rows = 1; /* there is only 1 row now */
1479 		d_cfg->cols = raidPtr->numCol;
1480 		d_cfg->ndevs = raidPtr->numCol;
1481 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
1482 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1483 			return (ENOMEM);
1484 		}
1485 		d_cfg->nspares = raidPtr->numSpare;
1486 		if (d_cfg->nspares >= RF_MAX_DISKS) {
1487 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1488 			return (ENOMEM);
1489 		}
1490 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1491 		d = 0;
1492 		for (j = 0; j < d_cfg->cols; j++) {
1493 			d_cfg->devs[d] = raidPtr->Disks[j];
1494 			d++;
1495 		}
1496 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1497 			d_cfg->spares[i] = raidPtr->Disks[j];
1498 		}
1499 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
1500 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1501 
1502 		return (retcode);
1503 
1504 	case RAIDFRAME_CHECK_PARITY:
1505 		*(int *) data = raidPtr->parity_good;
1506 		return (0);
1507 
1508 	case RAIDFRAME_PARITYMAP_STATUS:
1509 		if (rf_paritymap_ineligible(raidPtr))
1510 			return EINVAL;
1511 		rf_paritymap_status(raidPtr->parity_map,
1512 		    (struct rf_pmstat *)data);
1513 		return 0;
1514 
1515 	case RAIDFRAME_PARITYMAP_SET_PARAMS:
1516 		if (rf_paritymap_ineligible(raidPtr))
1517 			return EINVAL;
1518 		if (raidPtr->parity_map == NULL)
1519 			return ENOENT; /* ??? */
1520 		if (0 != rf_paritymap_set_params(raidPtr->parity_map,
1521 			(struct rf_pmparams *)data, 1))
1522 			return EINVAL;
1523 		return 0;
1524 
1525 	case RAIDFRAME_PARITYMAP_GET_DISABLE:
1526 		if (rf_paritymap_ineligible(raidPtr))
1527 			return EINVAL;
1528 		*(int *) data = rf_paritymap_get_disable(raidPtr);
1529 		return 0;
1530 
1531 	case RAIDFRAME_PARITYMAP_SET_DISABLE:
1532 		if (rf_paritymap_ineligible(raidPtr))
1533 			return EINVAL;
1534 		rf_paritymap_set_disable(raidPtr, *(int *)data);
1535 		/* XXX should errors be passed up? */
1536 		return 0;
1537 
1538 	case RAIDFRAME_RESET_ACCTOTALS:
1539 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1540 		return (0);
1541 
1542 	case RAIDFRAME_GET_ACCTOTALS:
1543 		totals = (RF_AccTotals_t *) data;
1544 		*totals = raidPtr->acc_totals;
1545 		return (0);
1546 
1547 	case RAIDFRAME_KEEP_ACCTOTALS:
1548 		raidPtr->keep_acc_totals = *(int *)data;
1549 		return (0);
1550 
1551 	case RAIDFRAME_GET_SIZE:
1552 		*(int *) data = raidPtr->totalSectors;
1553 		return (0);
1554 
1555 		/* fail a disk & optionally start reconstruction */
1556 	case RAIDFRAME_FAIL_DISK:
1557 
1558 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1559 			/* Can't do this on a RAID 0!! */
1560 			return(EINVAL);
1561 		}
1562 
1563 		rr = (struct rf_recon_req *) data;
1564 		rr->row = 0;
1565 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
1566 			return (EINVAL);
1567 
1568 
1569 		RF_LOCK_MUTEX(raidPtr->mutex);
1570 		if (raidPtr->status == rf_rs_reconstructing) {
1571 			/* you can't fail a disk while we're reconstructing! */
1572 			/* XXX wrong for RAID6 */
1573 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1574 			return (EINVAL);
1575 		}
1576 		if ((raidPtr->Disks[rr->col].status ==
1577 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
1578 			/* some other component has failed.  Let's not make
1579 			   things worse. XXX wrong for RAID6 */
1580 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1581 			return (EINVAL);
1582 		}
1583 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1584 			/* Can't fail a spared disk! */
1585 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1586 			return (EINVAL);
1587 		}
1588 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1589 
1590 		/* make a copy of the recon request so that we don't rely on
1591 		 * the user's buffer */
1592 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1593 		if (rrcopy == NULL)
1594 			return(ENOMEM);
1595 		memcpy(rrcopy, rr, sizeof(*rr));
1596 		rrcopy->raidPtr = (void *) raidPtr;
1597 
1598 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1599 					   rf_ReconThread,
1600 					   rrcopy,"raid_recon");
1601 		return (0);
1602 
1603 		/* invoke a copyback operation after recon on whatever disk
1604 		 * needs it, if any */
1605 	case RAIDFRAME_COPYBACK:
1606 
1607 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1608 			/* This makes no sense on a RAID 0!! */
1609 			return(EINVAL);
1610 		}
1611 
1612 		if (raidPtr->copyback_in_progress == 1) {
1613 			/* Copyback is already in progress! */
1614 			return(EINVAL);
1615 		}
1616 
1617 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1618 					   rf_CopybackThread,
1619 					   raidPtr,"raid_copyback");
1620 		return (retcode);
1621 
1622 		/* return the percentage completion of reconstruction */
1623 	case RAIDFRAME_CHECK_RECON_STATUS:
1624 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1625 			/* This makes no sense on a RAID 0, so tell the
1626 			   user it's done. */
1627 			*(int *) data = 100;
1628 			return(0);
1629 		}
1630 		if (raidPtr->status != rf_rs_reconstructing)
1631 			*(int *) data = 100;
1632 		else {
1633 			if (raidPtr->reconControl->numRUsTotal > 0) {
1634 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
1635 			} else {
1636 				*(int *) data = 0;
1637 			}
1638 		}
1639 		return (0);
1640 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1641 		progressInfoPtr = (RF_ProgressInfo_t **) data;
1642 		if (raidPtr->status != rf_rs_reconstructing) {
1643 			progressInfo.remaining = 0;
1644 			progressInfo.completed = 100;
1645 			progressInfo.total = 100;
1646 		} else {
1647 			progressInfo.total =
1648 				raidPtr->reconControl->numRUsTotal;
1649 			progressInfo.completed =
1650 				raidPtr->reconControl->numRUsComplete;
1651 			progressInfo.remaining = progressInfo.total -
1652 				progressInfo.completed;
1653 		}
1654 		retcode = copyout(&progressInfo, *progressInfoPtr,
1655 				  sizeof(RF_ProgressInfo_t));
1656 		return (retcode);
1657 
1658 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1659 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1660 			/* This makes no sense on a RAID 0, so tell the
1661 			   user it's done. */
1662 			*(int *) data = 100;
1663 			return(0);
1664 		}
1665 		if (raidPtr->parity_rewrite_in_progress == 1) {
1666 			*(int *) data = 100 *
1667 				raidPtr->parity_rewrite_stripes_done /
1668 				raidPtr->Layout.numStripe;
1669 		} else {
1670 			*(int *) data = 100;
1671 		}
1672 		return (0);
1673 
1674 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1675 		progressInfoPtr = (RF_ProgressInfo_t **) data;
1676 		if (raidPtr->parity_rewrite_in_progress == 1) {
1677 			progressInfo.total = raidPtr->Layout.numStripe;
1678 			progressInfo.completed =
1679 				raidPtr->parity_rewrite_stripes_done;
1680 			progressInfo.remaining = progressInfo.total -
1681 				progressInfo.completed;
1682 		} else {
1683 			progressInfo.remaining = 0;
1684 			progressInfo.completed = 100;
1685 			progressInfo.total = 100;
1686 		}
1687 		retcode = copyout(&progressInfo, *progressInfoPtr,
1688 				  sizeof(RF_ProgressInfo_t));
1689 		return (retcode);
1690 
1691 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
1692 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1693 			/* This makes no sense on a RAID 0 */
1694 			*(int *) data = 100;
1695 			return(0);
1696 		}
1697 		if (raidPtr->copyback_in_progress == 1) {
1698 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
1699 				raidPtr->Layout.numStripe;
1700 		} else {
1701 			*(int *) data = 100;
1702 		}
1703 		return (0);
1704 
1705 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1706 		progressInfoPtr = (RF_ProgressInfo_t **) data;
1707 		if (raidPtr->copyback_in_progress == 1) {
1708 			progressInfo.total = raidPtr->Layout.numStripe;
1709 			progressInfo.completed =
1710 				raidPtr->copyback_stripes_done;
1711 			progressInfo.remaining = progressInfo.total -
1712 				progressInfo.completed;
1713 		} else {
1714 			progressInfo.remaining = 0;
1715 			progressInfo.completed = 100;
1716 			progressInfo.total = 100;
1717 		}
1718 		retcode = copyout(&progressInfo, *progressInfoPtr,
1719 				  sizeof(RF_ProgressInfo_t));
1720 		return (retcode);
1721 
1722 		/* the sparetable daemon calls this to wait for the kernel to
1723 		 * need a spare table. this ioctl does not return until a
1724 		 * spare table is needed. XXX -- calling mpsleep here in the
1725 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1726 		 * -- I should either compute the spare table in the kernel,
1727 		 * or have a different -- XXX XXX -- interface (a different
1728 		 * character device) for delivering the table     -- XXX */
1729 #if 0
1730 	case RAIDFRAME_SPARET_WAIT:
1731 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1732 		while (!rf_sparet_wait_queue)
1733 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1734 		waitreq = rf_sparet_wait_queue;
1735 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1736 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1737 
1738 		/* structure assignment */
1739 		*((RF_SparetWait_t *) data) = *waitreq;
1740 
1741 		RF_Free(waitreq, sizeof(*waitreq));
1742 		return (0);
1743 
1744 		/* wakes up a process waiting on SPARET_WAIT and puts an error
1745 		 * code in it that will cause the dameon to exit */
1746 	case RAIDFRAME_ABORT_SPARET_WAIT:
1747 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1748 		waitreq->fcol = -1;
1749 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1750 		waitreq->next = rf_sparet_wait_queue;
1751 		rf_sparet_wait_queue = waitreq;
1752 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1753 		wakeup(&rf_sparet_wait_queue);
1754 		return (0);
1755 
1756 		/* used by the spare table daemon to deliver a spare table
1757 		 * into the kernel */
1758 	case RAIDFRAME_SEND_SPARET:
1759 
1760 		/* install the spare table */
1761 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1762 
1763 		/* respond to the requestor.  the return status of the spare
1764 		 * table installation is passed in the "fcol" field */
1765 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1766 		waitreq->fcol = retcode;
1767 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1768 		waitreq->next = rf_sparet_resp_queue;
1769 		rf_sparet_resp_queue = waitreq;
1770 		wakeup(&rf_sparet_resp_queue);
1771 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1772 
1773 		return (retcode);
1774 #endif
1775 
1776 	default:
1777 		break; /* fall through to the os-specific code below */
1778 
1779 	}
1780 
1781 	if (!raidPtr->valid)
1782 		return (EINVAL);
1783 
1784 	/*
1785 	 * Add support for "regular" device ioctls here.
1786 	 */
1787 
1788 	error = disk_ioctl(&rs->sc_dkdev, cmd, data, flag, l);
1789 	if (error != EPASSTHROUGH)
1790 		return (error);
1791 
1792 	switch (cmd) {
1793 	case DIOCGDINFO:
1794 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1795 		break;
1796 #ifdef __HAVE_OLD_DISKLABEL
1797 	case ODIOCGDINFO:
1798 		newlabel = *(rs->sc_dkdev.dk_label);
1799 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1800 			return ENOTTY;
1801 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
1802 		break;
1803 #endif
1804 
1805 	case DIOCGPART:
1806 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1807 		((struct partinfo *) data)->part =
1808 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1809 		break;
1810 
1811 	case DIOCWDINFO:
1812 	case DIOCSDINFO:
1813 #ifdef __HAVE_OLD_DISKLABEL
1814 	case ODIOCWDINFO:
1815 	case ODIOCSDINFO:
1816 #endif
1817 	{
1818 		struct disklabel *lp;
1819 #ifdef __HAVE_OLD_DISKLABEL
1820 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1821 			memset(&newlabel, 0, sizeof newlabel);
1822 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
1823 			lp = &newlabel;
1824 		} else
1825 #endif
1826 		lp = (struct disklabel *)data;
1827 
1828 		if ((error = raidlock(rs)) != 0)
1829 			return (error);
1830 
1831 		rs->sc_flags |= RAIDF_LABELLING;
1832 
1833 		error = setdisklabel(rs->sc_dkdev.dk_label,
1834 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
1835 		if (error == 0) {
1836 			if (cmd == DIOCWDINFO
1837 #ifdef __HAVE_OLD_DISKLABEL
1838 			    || cmd == ODIOCWDINFO
1839 #endif
1840 			   )
1841 				error = writedisklabel(RAIDLABELDEV(dev),
1842 				    raidstrategy, rs->sc_dkdev.dk_label,
1843 				    rs->sc_dkdev.dk_cpulabel);
1844 		}
1845 		rs->sc_flags &= ~RAIDF_LABELLING;
1846 
1847 		raidunlock(rs);
1848 
1849 		if (error)
1850 			return (error);
1851 		break;
1852 	}
1853 
1854 	case DIOCWLABEL:
1855 		if (*(int *) data != 0)
1856 			rs->sc_flags |= RAIDF_WLABEL;
1857 		else
1858 			rs->sc_flags &= ~RAIDF_WLABEL;
1859 		break;
1860 
1861 	case DIOCGDEFLABEL:
1862 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1863 		break;
1864 
1865 #ifdef __HAVE_OLD_DISKLABEL
1866 	case ODIOCGDEFLABEL:
1867 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
1868 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1869 			return ENOTTY;
1870 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
1871 		break;
1872 #endif
1873 
1874 	case DIOCAWEDGE:
1875 	case DIOCDWEDGE:
1876 	    	dkw = (void *)data;
1877 
1878 		/* If the ioctl happens here, the parent is us. */
1879 		(void)strcpy(dkw->dkw_parent, rs->sc_xname);
1880 		return cmd == DIOCAWEDGE ? dkwedge_add(dkw) : dkwedge_del(dkw);
1881 
1882 	case DIOCLWEDGES:
1883 		return dkwedge_list(&rs->sc_dkdev,
1884 		    (struct dkwedge_list *)data, l);
1885 	case DIOCCACHESYNC:
1886 		return rf_sync_component_caches(raidPtr);
1887 	default:
1888 		retcode = ENOTTY;
1889 	}
1890 	return (retcode);
1891 
1892 }
1893 
1894 
1895 /* raidinit -- complete the rest of the initialization for the
1896    RAIDframe device.  */
1897 
1898 
1899 static void
1900 raidinit(RF_Raid_t *raidPtr)
1901 {
1902 	cfdata_t cf;
1903 	struct raid_softc *rs;
1904 	int     unit;
1905 
1906 	unit = raidPtr->raidid;
1907 
1908 	rs = &raid_softc[unit];
1909 
1910 	/* XXX should check return code first... */
1911 	rs->sc_flags |= RAIDF_INITED;
1912 
1913 	/* XXX doesn't check bounds. */
1914 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
1915 
1916 	/* attach the pseudo device */
1917 	cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
1918 	cf->cf_name = raid_cd.cd_name;
1919 	cf->cf_atname = raid_cd.cd_name;
1920 	cf->cf_unit = unit;
1921 	cf->cf_fstate = FSTATE_STAR;
1922 
1923 	rs->sc_dev = config_attach_pseudo(cf);
1924 
1925 	if (rs->sc_dev == NULL) {
1926 		printf("raid%d: config_attach_pseudo failed\n",
1927 		    raidPtr->raidid);
1928 		rs->sc_flags &= ~RAIDF_INITED;
1929 		free(cf, M_RAIDFRAME);
1930 		return;
1931 	}
1932 
1933 	/* disk_attach actually creates space for the CPU disklabel, among
1934 	 * other things, so it's critical to call this *BEFORE* we try putzing
1935 	 * with disklabels. */
1936 
1937 	disk_init(&rs->sc_dkdev, rs->sc_xname, &rf_dkdriver);
1938 	disk_attach(&rs->sc_dkdev);
1939 
1940 	/* XXX There may be a weird interaction here between this, and
1941 	 * protectedSectors, as used in RAIDframe.  */
1942 
1943 	rs->sc_size = raidPtr->totalSectors;
1944 
1945 	dkwedge_discover(&rs->sc_dkdev);
1946 
1947 	rf_set_properties(rs, raidPtr);
1948 
1949 }
1950 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1951 /* wake up the daemon & tell it to get us a spare table
1952  * XXX
1953  * the entries in the queues should be tagged with the raidPtr
1954  * so that in the extremely rare case that two recons happen at once,
1955  * we know for which device were requesting a spare table
1956  * XXX
1957  *
1958  * XXX This code is not currently used. GO
1959  */
int
rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
{
	int     retcode;

	/* Post the request on the wait queue and kick the daemon that
	 * is sleeping in RAIDFRAME_SPARET_WAIT. */
	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
	req->next = rf_sparet_wait_queue;
	rf_sparet_wait_queue = req;
	wakeup(&rf_sparet_wait_queue);

	/* mpsleep unlocks the mutex */
	/* NOTE(review): the comment above looks stale -- this now uses
	 * tsleep(9), which does not release rf_sparet_wait_mutex; the
	 * mutex appears to stay held across the sleep.  Confirm against
	 * the RF_LOCK_MUTEX implementation (code is unused per the
	 * header comment above). */
	while (!rf_sparet_resp_queue) {
		tsleep(&rf_sparet_resp_queue, PRIBIO,
		    "raidframe getsparetable", 0);
	}
	/* Pop the response entry that the daemon queued for us. */
	req = rf_sparet_resp_queue;
	rf_sparet_resp_queue = req->next;
	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);

	/* The daemon passes its status back in the "fcol" field. */
	retcode = req->fcol;
	RF_Free(req, sizeof(*req));	/* this is not the same req as we
					 * alloc'd */
	return (retcode);
}
1984 #endif
1985 
1986 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1987  * bp & passes it down.
1988  * any calls originating in the kernel must use non-blocking I/O
1989  * do some extra sanity checking to return "appropriate" error values for
1990  * certain conditions (to make some standard utilities work)
1991  *
1992  * Formerly known as: rf_DoAccessKernel
1993  */
1994 void
1995 raidstart(RF_Raid_t *raidPtr)
1996 {
1997 	RF_SectorCount_t num_blocks, pb, sum;
1998 	RF_RaidAddr_t raid_addr;
1999 	struct partition *pp;
2000 	daddr_t blocknum;
2001 	int     unit;
2002 	struct raid_softc *rs;
2003 	int     do_async;
2004 	struct buf *bp;
2005 	int rc;
2006 
2007 	unit = raidPtr->raidid;
2008 	rs = &raid_softc[unit];
2009 
2010 	/* quick check to see if anything has died recently */
2011 	RF_LOCK_MUTEX(raidPtr->mutex);
2012 	if (raidPtr->numNewFailures > 0) {
2013 		RF_UNLOCK_MUTEX(raidPtr->mutex);
2014 		rf_update_component_labels(raidPtr,
2015 					   RF_NORMAL_COMPONENT_UPDATE);
2016 		RF_LOCK_MUTEX(raidPtr->mutex);
2017 		raidPtr->numNewFailures--;
2018 	}
2019 
2020 	/* Check to see if we're at the limit... */
2021 	while (raidPtr->openings > 0) {
2022 		RF_UNLOCK_MUTEX(raidPtr->mutex);
2023 
2024 		/* get the next item, if any, from the queue */
2025 		if ((bp = bufq_get(rs->buf_queue)) == NULL) {
2026 			/* nothing more to do */
2027 			return;
2028 		}
2029 
2030 		/* Ok, for the bp we have here, bp->b_blkno is relative to the
2031 		 * partition.. Need to make it absolute to the underlying
2032 		 * device.. */
2033 
2034 		blocknum = bp->b_blkno;
2035 		if (DISKPART(bp->b_dev) != RAW_PART) {
2036 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
2037 			blocknum += pp->p_offset;
2038 		}
2039 
2040 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
2041 			    (int) blocknum));
2042 
2043 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
2044 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
2045 
2046 		/* *THIS* is where we adjust what block we're going to...
2047 		 * but DO NOT TOUCH bp->b_blkno!!! */
2048 		raid_addr = blocknum;
2049 
2050 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
2051 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
2052 		sum = raid_addr + num_blocks + pb;
2053 		if (1 || rf_debugKernelAccess) {
2054 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
2055 				    (int) raid_addr, (int) sum, (int) num_blocks,
2056 				    (int) pb, (int) bp->b_resid));
2057 		}
2058 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
2059 		    || (sum < num_blocks) || (sum < pb)) {
2060 			bp->b_error = ENOSPC;
2061 			bp->b_resid = bp->b_bcount;
2062 			biodone(bp);
2063 			RF_LOCK_MUTEX(raidPtr->mutex);
2064 			continue;
2065 		}
2066 		/*
2067 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
2068 		 */
2069 
2070 		if (bp->b_bcount & raidPtr->sectorMask) {
2071 			bp->b_error = EINVAL;
2072 			bp->b_resid = bp->b_bcount;
2073 			biodone(bp);
2074 			RF_LOCK_MUTEX(raidPtr->mutex);
2075 			continue;
2076 
2077 		}
2078 		db1_printf(("Calling DoAccess..\n"));
2079 
2080 
2081 		RF_LOCK_MUTEX(raidPtr->mutex);
2082 		raidPtr->openings--;
2083 		RF_UNLOCK_MUTEX(raidPtr->mutex);
2084 
2085 		/*
2086 		 * Everything is async.
2087 		 */
2088 		do_async = 1;
2089 
2090 		disk_busy(&rs->sc_dkdev);
2091 
2092 		/* XXX we're still at splbio() here... do we *really*
2093 		   need to be? */
2094 
2095 		/* don't ever condition on bp->b_flags & B_WRITE.
2096 		 * always condition on B_READ instead */
2097 
2098 		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
2099 				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
2100 				 do_async, raid_addr, num_blocks,
2101 				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
2102 
2103 		if (rc) {
2104 			bp->b_error = rc;
2105 			bp->b_resid = bp->b_bcount;
2106 			biodone(bp);
2107 			/* continue loop */
2108 		}
2109 
2110 		RF_LOCK_MUTEX(raidPtr->mutex);
2111 	}
2112 	RF_UNLOCK_MUTEX(raidPtr->mutex);
2113 }
2114 
2115 
2116 
2117 
2118 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
2119 
2120 int
2121 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
2122 {
2123 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
2124 	struct buf *bp;
2125 
2126 	req->queue = queue;
2127 	bp = req->bp;
2128 
2129 	switch (req->type) {
2130 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
2131 		/* XXX need to do something extra here.. */
2132 		/* I'm leaving this in, as I've never actually seen it used,
2133 		 * and I'd like folks to report it... GO */
2134 		printf(("WAKEUP CALLED\n"));
2135 		queue->numOutstanding++;
2136 
2137 		bp->b_flags = 0;
2138 		bp->b_private = req;
2139 
2140 		KernelWakeupFunc(bp);
2141 		break;
2142 
2143 	case RF_IO_TYPE_READ:
2144 	case RF_IO_TYPE_WRITE:
2145 #if RF_ACC_TRACE > 0
2146 		if (req->tracerec) {
2147 			RF_ETIMER_START(req->tracerec->timer);
2148 		}
2149 #endif
2150 		InitBP(bp, queue->rf_cinfo->ci_vp,
2151 		    op, queue->rf_cinfo->ci_dev,
2152 		    req->sectorOffset, req->numSector,
2153 		    req->buf, KernelWakeupFunc, (void *) req,
2154 		    queue->raidPtr->logBytesPerSector, req->b_proc);
2155 
2156 		if (rf_debugKernelAccess) {
2157 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
2158 				(long) bp->b_blkno));
2159 		}
2160 		queue->numOutstanding++;
2161 		queue->last_deq_sector = req->sectorOffset;
2162 		/* acc wouldn't have been let in if there were any pending
2163 		 * reqs at any other priority */
2164 		queue->curPriority = req->priority;
2165 
2166 		db1_printf(("Going for %c to unit %d col %d\n",
2167 			    req->type, queue->raidPtr->raidid,
2168 			    queue->col));
2169 		db1_printf(("sector %d count %d (%d bytes) %d\n",
2170 			(int) req->sectorOffset, (int) req->numSector,
2171 			(int) (req->numSector <<
2172 			    queue->raidPtr->logBytesPerSector),
2173 			(int) queue->raidPtr->logBytesPerSector));
2174 
2175 		/*
2176 		 * XXX: drop lock here since this can block at
2177 		 * least with backing SCSI devices.  Retake it
2178 		 * to minimize fuss with calling interfaces.
2179 		 */
2180 
2181 		RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
2182 		bdev_strategy(bp);
2183 		RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
2184 		break;
2185 
2186 	default:
2187 		panic("bad req->type in rf_DispatchKernelIO");
2188 	}
2189 	db1_printf(("Exiting from DispatchKernelIO\n"));
2190 
2191 	return (0);
2192 }
/* this is the callback function associated with a I/O invoked from
   kernel code.

   Runs at biodone time: records trace timing, marks the component as
   failed on error (when the set can tolerate it), and hands the request
   to the raidio thread via the raidPtr->iodone queue.
 */
static void
KernelWakeupFunc(struct buf *bp)
{
	RF_DiskQueueData_t *req = NULL;
	RF_DiskQueue_t *queue;
	int s;

	s = splbio();
	db1_printf(("recovering the request queue:\n"));
	/* the request was stashed in b_private by InitBP()/dispatch */
	req = bp->b_private;

	queue = (RF_DiskQueue_t *) req->queue;

#if RF_ACC_TRACE > 0
	/* account the elapsed disk time to the access trace record */
	if (req->tracerec) {
		RF_ETIMER_STOP(req->tracerec->timer);
		RF_ETIMER_EVAL(req->tracerec->timer);
		RF_LOCK_MUTEX(rf_tracing_mutex);
		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->num_phys_ios++;
		RF_UNLOCK_MUTEX(rf_tracing_mutex);
	}
#endif

	/* XXX Ok, let's get aggressive... If b_error is set, let's go
	 * ballistic, and mark the component as hosed... */

	if (bp->b_error != 0) {
		/* Mark the disk as dead */
		/* but only mark it once... */
		/* and only if it wouldn't leave this RAID set
		   completely broken */
		if (((queue->raidPtr->Disks[queue->col].status ==
		      rf_ds_optimal) ||
		     (queue->raidPtr->Disks[queue->col].status ==
		      rf_ds_used_spare)) &&
		     (queue->raidPtr->numFailures <
		      queue->raidPtr->Layout.map->faultsTolerated)) {
			printf("raid%d: IO Error.  Marking %s as failed.\n",
			       queue->raidPtr->raidid,
			       queue->raidPtr->Disks[queue->col].devname);
			queue->raidPtr->Disks[queue->col].status =
			    rf_ds_failed;
			queue->raidPtr->status = rf_rs_degraded;
			queue->raidPtr->numFailures++;
			queue->raidPtr->numNewFailures++;
		} else {	/* Disk is already dead... */
			/* printf("Disk already marked as dead!\n"); */
		}

	}

	/* Fill in the error value */

	req->error = bp->b_error;

	simple_lock(&queue->raidPtr->iodone_lock);

	/* Drop this one on the "finished" queue... */
	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);

	/* Let the raidio thread know there is work to be done. */
	wakeup(&(queue->raidPtr->iodone));

	simple_unlock(&queue->raidPtr->iodone_lock);

	splx(s);
}
2265 
2266 
2267 
2268 /*
2269  * initialize a buf structure for doing an I/O in the kernel.
2270  */
2271 static void
2272 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
2273        RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
2274        void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
2275        struct proc *b_proc)
2276 {
2277 	/* bp->b_flags       = B_PHYS | rw_flag; */
2278 	bp->b_flags = rw_flag;	/* XXX need B_PHYS here too??? */
2279 	bp->b_oflags = 0;
2280 	bp->b_cflags = 0;
2281 	bp->b_bcount = numSect << logBytesPerSector;
2282 	bp->b_bufsize = bp->b_bcount;
2283 	bp->b_error = 0;
2284 	bp->b_dev = dev;
2285 	bp->b_data = bf;
2286 	bp->b_blkno = startSect;
2287 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
2288 	if (bp->b_bcount == 0) {
2289 		panic("bp->b_bcount is zero in InitBP!!");
2290 	}
2291 	bp->b_proc = b_proc;
2292 	bp->b_iodone = cbFunc;
2293 	bp->b_private = cbArg;
2294 }
2295 
2296 static void
2297 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
2298 		    struct disklabel *lp)
2299 {
2300 	memset(lp, 0, sizeof(*lp));
2301 
2302 	/* fabricate a label... */
2303 	lp->d_secperunit = raidPtr->totalSectors;
2304 	lp->d_secsize = raidPtr->bytesPerSector;
2305 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
2306 	lp->d_ntracks = 4 * raidPtr->numCol;
2307 	lp->d_ncylinders = raidPtr->totalSectors /
2308 		(lp->d_nsectors * lp->d_ntracks);
2309 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2310 
2311 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2312 	lp->d_type = DTYPE_RAID;
2313 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2314 	lp->d_rpm = 3600;
2315 	lp->d_interleave = 1;
2316 	lp->d_flags = 0;
2317 
2318 	lp->d_partitions[RAW_PART].p_offset = 0;
2319 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2320 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2321 	lp->d_npartitions = RAW_PART + 1;
2322 
2323 	lp->d_magic = DISKMAGIC;
2324 	lp->d_magic2 = DISKMAGIC;
2325 	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
2326 
2327 }
2328 /*
2329  * Read the disklabel from the raid device.  If one is not present, fake one
2330  * up.
2331  */
2332 static void
2333 raidgetdisklabel(dev_t dev)
2334 {
2335 	int     unit = raidunit(dev);
2336 	struct raid_softc *rs = &raid_softc[unit];
2337 	const char   *errstring;
2338 	struct disklabel *lp = rs->sc_dkdev.dk_label;
2339 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
2340 	RF_Raid_t *raidPtr;
2341 
2342 	db1_printf(("Getting the disklabel...\n"));
2343 
2344 	memset(clp, 0, sizeof(*clp));
2345 
2346 	raidPtr = raidPtrs[unit];
2347 
2348 	raidgetdefaultlabel(raidPtr, rs, lp);
2349 
2350 	/*
2351 	 * Call the generic disklabel extraction routine.
2352 	 */
2353 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
2354 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
2355 	if (errstring)
2356 		raidmakedisklabel(rs);
2357 	else {
2358 		int     i;
2359 		struct partition *pp;
2360 
2361 		/*
2362 		 * Sanity check whether the found disklabel is valid.
2363 		 *
2364 		 * This is necessary since total size of the raid device
2365 		 * may vary when an interleave is changed even though exactly
2366 		 * same components are used, and old disklabel may used
2367 		 * if that is found.
2368 		 */
2369 		if (lp->d_secperunit != rs->sc_size)
2370 			printf("raid%d: WARNING: %s: "
2371 			    "total sector size in disklabel (%" PRIu32 ") != "
2372 			    "the size of raid (%" PRIu64 ")\n", unit, rs->sc_xname,
2373 			    lp->d_secperunit, rs->sc_size);
2374 		for (i = 0; i < lp->d_npartitions; i++) {
2375 			pp = &lp->d_partitions[i];
2376 			if (pp->p_offset + pp->p_size > rs->sc_size)
2377 				printf("raid%d: WARNING: %s: end of partition `%c' "
2378 				       "exceeds the size of raid (%" PRIu64 ")\n",
2379 				       unit, rs->sc_xname, 'a' + i, rs->sc_size);
2380 		}
2381 	}
2382 
2383 }
2384 /*
2385  * Take care of things one might want to take care of in the event
2386  * that a disklabel isn't present.
2387  */
2388 static void
2389 raidmakedisklabel(struct raid_softc *rs)
2390 {
2391 	struct disklabel *lp = rs->sc_dkdev.dk_label;
2392 	db1_printf(("Making a label..\n"));
2393 
2394 	/*
2395 	 * For historical reasons, if there's no disklabel present
2396 	 * the raw partition must be marked FS_BSDFFS.
2397 	 */
2398 
2399 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2400 
2401 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2402 
2403 	lp->d_checksum = dkcksum(lp);
2404 }
2405 /*
2406  * Wait interruptibly for an exclusive lock.
2407  *
2408  * XXX
2409  * Several drivers do this; it should be abstracted and made MP-safe.
2410  * (Hmm... where have we seen this warning before :->  GO )
2411  */
2412 static int
2413 raidlock(struct raid_softc *rs)
2414 {
2415 	int     error;
2416 
2417 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2418 		rs->sc_flags |= RAIDF_WANTED;
2419 		if ((error =
2420 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2421 			return (error);
2422 	}
2423 	rs->sc_flags |= RAIDF_LOCKED;
2424 	return (0);
2425 }
2426 /*
2427  * Unlock and wake up any waiters.
2428  */
2429 static void
2430 raidunlock(struct raid_softc *rs)
2431 {
2432 
2433 	rs->sc_flags &= ~RAIDF_LOCKED;
2434 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2435 		rs->sc_flags &= ~RAIDF_WANTED;
2436 		wakeup(rs);
2437 	}
2438 }
2439 
2440 
2441 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
2442 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
2443 #define RF_PARITY_MAP_OFFSET \
2444 	(RF_COMPONENT_INFO_OFFSET + RF_COMPONENT_INFO_SIZE)
2445 #define RF_PARITY_MAP_SIZE   RF_PARITYMAP_NBYTE
2446 
2447 int
2448 raidmarkclean(RF_Raid_t *raidPtr, RF_RowCol_t col)
2449 {
2450 	RF_ComponentLabel_t *clabel;
2451 
2452 	clabel = raidget_component_label(raidPtr, col);
2453 	clabel->clean = RF_RAID_CLEAN;
2454 	raidflush_component_label(raidPtr, col);
2455 	return(0);
2456 }
2457 
2458 
2459 int
2460 raidmarkdirty(RF_Raid_t *raidPtr, RF_RowCol_t col)
2461 {
2462 	RF_ComponentLabel_t *clabel;
2463 
2464 	clabel = raidget_component_label(raidPtr, col);
2465 	clabel->clean = RF_RAID_DIRTY;
2466 	raidflush_component_label(raidPtr, col);
2467 	return(0);
2468 }
2469 
2470 int
2471 raidfetch_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2472 {
2473 	return raidread_component_label(raidPtr->Disks[col].dev,
2474 	    raidPtr->raid_cinfo[col].ci_vp,
2475 	    &raidPtr->raid_cinfo[col].ci_label);
2476 }
2477 
2478 RF_ComponentLabel_t *
2479 raidget_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2480 {
2481 	return &raidPtr->raid_cinfo[col].ci_label;
2482 }
2483 
2484 int
2485 raidflush_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2486 {
2487 	RF_ComponentLabel_t *label;
2488 
2489 	label = &raidPtr->raid_cinfo[col].ci_label;
2490 	label->mod_counter = raidPtr->mod_counter;
2491 #ifndef RF_NO_PARITY_MAP
2492 	label->parity_map_modcount = label->mod_counter;
2493 #endif
2494 	return raidwrite_component_label(raidPtr->Disks[col].dev,
2495 	    raidPtr->raid_cinfo[col].ci_vp, label);
2496 }
2497 
2498 
2499 static int
2500 raidread_component_label(dev_t dev, struct vnode *b_vp,
2501     RF_ComponentLabel_t *clabel)
2502 {
2503 	return raidread_component_area(dev, b_vp, clabel,
2504 	    sizeof(RF_ComponentLabel_t),
2505 	    RF_COMPONENT_INFO_OFFSET, RF_COMPONENT_INFO_SIZE);
2506 }
2507 
2508 /* ARGSUSED */
2509 static int
2510 raidread_component_area(dev_t dev, struct vnode *b_vp, void *data,
2511     size_t msize, daddr_t offset, daddr_t dsize)
2512 {
2513 	struct buf *bp;
2514 	const struct bdevsw *bdev;
2515 	int error;
2516 
2517 	/* XXX should probably ensure that we don't try to do this if
2518 	   someone has changed rf_protected_sectors. */
2519 
2520 	if (b_vp == NULL) {
2521 		/* For whatever reason, this component is not valid.
2522 		   Don't try to read a component label from it. */
2523 		return(EINVAL);
2524 	}
2525 
2526 	/* get a block of the appropriate size... */
2527 	bp = geteblk((int)dsize);
2528 	bp->b_dev = dev;
2529 
2530 	/* get our ducks in a row for the read */
2531 	bp->b_blkno = offset / DEV_BSIZE;
2532 	bp->b_bcount = dsize;
2533 	bp->b_flags |= B_READ;
2534  	bp->b_resid = dsize;
2535 
2536 	bdev = bdevsw_lookup(bp->b_dev);
2537 	if (bdev == NULL)
2538 		return (ENXIO);
2539 	(*bdev->d_strategy)(bp);
2540 
2541 	error = biowait(bp);
2542 
2543 	if (!error) {
2544 		memcpy(data, bp->b_data, msize);
2545 	}
2546 
2547 	brelse(bp, 0);
2548 	return(error);
2549 }
2550 
2551 
2552 static int
2553 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
2554 	RF_ComponentLabel_t *clabel)
2555 {
2556 	return raidwrite_component_area(dev, b_vp, clabel,
2557 	    sizeof(RF_ComponentLabel_t),
2558 	    RF_COMPONENT_INFO_OFFSET, RF_COMPONENT_INFO_SIZE, 0);
2559 }
2560 
2561 /* ARGSUSED */
2562 static int
2563 raidwrite_component_area(dev_t dev, struct vnode *b_vp, void *data,
2564     size_t msize, daddr_t offset, daddr_t dsize, int asyncp)
2565 {
2566 	struct buf *bp;
2567 	const struct bdevsw *bdev;
2568 	int error;
2569 
2570 	/* get a block of the appropriate size... */
2571 	bp = geteblk((int)dsize);
2572 	bp->b_dev = dev;
2573 
2574 	/* get our ducks in a row for the write */
2575 	bp->b_blkno = offset / DEV_BSIZE;
2576 	bp->b_bcount = dsize;
2577 	bp->b_flags |= B_WRITE | (asyncp ? B_ASYNC : 0);
2578  	bp->b_resid = dsize;
2579 
2580 	memset(bp->b_data, 0, dsize);
2581 	memcpy(bp->b_data, data, msize);
2582 
2583 	bdev = bdevsw_lookup(bp->b_dev);
2584 	if (bdev == NULL)
2585 		return (ENXIO);
2586 	(*bdev->d_strategy)(bp);
2587 	if (asyncp)
2588 		return 0;
2589 	error = biowait(bp);
2590 	brelse(bp, 0);
2591 	if (error) {
2592 #if 1
2593 		printf("Failed to write RAID component info!\n");
2594 #endif
2595 	}
2596 
2597 	return(error);
2598 }
2599 
2600 void
2601 rf_paritymap_kern_write(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2602 {
2603 	int c;
2604 
2605 	for (c = 0; c < raidPtr->numCol; c++) {
2606 		/* Skip dead disks. */
2607 		if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2608 			continue;
2609 		/* XXXjld: what if an error occurs here? */
2610 		raidwrite_component_area(raidPtr->Disks[c].dev,
2611 		    raidPtr->raid_cinfo[c].ci_vp, map,
2612 		    RF_PARITYMAP_NBYTE,
2613 		    RF_PARITY_MAP_OFFSET, RF_PARITY_MAP_SIZE, 0);
2614 	}
2615 }
2616 
2617 void
2618 rf_paritymap_kern_read(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2619 {
2620 	struct rf_paritymap_ondisk tmp;
2621 	int c,first;
2622 
2623 	first=1;
2624 	for (c = 0; c < raidPtr->numCol; c++) {
2625 		/* Skip dead disks. */
2626 		if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2627 			continue;
2628 		raidread_component_area(raidPtr->Disks[c].dev,
2629 		    raidPtr->raid_cinfo[c].ci_vp, &tmp,
2630 		    RF_PARITYMAP_NBYTE,
2631 		    RF_PARITY_MAP_OFFSET, RF_PARITY_MAP_SIZE);
2632 		if (first) {
2633 			memcpy(map, &tmp, sizeof(*map));
2634 			first = 0;
2635 		} else {
2636 			rf_paritymap_merge(map, &tmp);
2637 		}
2638 	}
2639 }
2640 
/*
 * Mark the component labels of every live component (and every in-use
 * spare) as dirty, bumping the set's modification counter first so the
 * rewritten labels supersede older ones.
 */
void
rf_markalldirty(RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t *clabel;
	int sparecol;
	int c;
	int j;
	int scol = -1;

	raidPtr->mod_counter++;
	for (c = 0; c < raidPtr->numCol; c++) {
		/* we don't want to touch (at all) a disk that has
		   failed */
		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
			clabel = raidget_component_label(raidPtr, c);
			if (clabel->status == rf_ds_spared) {
				/* XXX do something special...
				   but whatever you do, don't
				   try to access it!! */
			} else {
				raidmarkdirty(raidPtr, c);
			}
		}
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* find which column this spare stands in for */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			clabel = raidget_component_label(raidPtr, sparecol);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, clabel);

			clabel->row = 0;
			clabel->column = scol;
			/* Note: we *don't* change status from rf_ds_used_spare
			   to rf_ds_optimal */
			/* clabel.status = rf_ds_optimal; */

			raidmarkdirty(raidPtr, sparecol);
		}
	}
}
2700 
2701 
/*
 * Rewrite the component labels of all optimal components (and in-use
 * spares) with the current modification counter and status.  When
 * `final' is RF_FINAL_COMPONENT_UPDATE and parity is known good, the
 * labels are additionally marked clean.
 */
void
rf_update_component_labels(RF_Raid_t *raidPtr, int final)
{
	RF_ComponentLabel_t *clabel;
	int sparecol;
	int c;
	int j;
	int scol;

	scol = -1;

	/* XXX should do extra checks to make sure things really are clean,
	   rather than blindly setting the clean bit... */

	raidPtr->mod_counter++;

	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			clabel = raidget_component_label(raidPtr, c);
			/* make sure status is noted */
			clabel->status = rf_ds_optimal;

			/* note what unit we are configured as */
			clabel->last_unit = raidPtr->raidid;

			raidflush_component_label(raidPtr, c);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(raidPtr, c);
				}
			}
		}
		/* else we don't touch it.. */
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		/* Need to ensure that the reconstruct actually completed! */
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* find which column this spare stands in for */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			/* XXX shouldn't *really* need this... */
			clabel = raidget_component_label(raidPtr, sparecol);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, clabel);

			clabel->column = scol;
			clabel->status = rf_ds_optimal;
			clabel->last_unit = raidPtr->raidid;

			raidflush_component_label(raidPtr, sparecol);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(raidPtr, sparecol);
				}
			}
		}
	}
}
2776 
2777 void
2778 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2779 {
2780 
2781 	if (vp != NULL) {
2782 		if (auto_configured == 1) {
2783 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2784 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
2785 			vput(vp);
2786 
2787 		} else {
2788 			(void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
2789 		}
2790 	}
2791 }
2792 
2793 
2794 void
2795 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2796 {
2797 	int r,c;
2798 	struct vnode *vp;
2799 	int acd;
2800 
2801 
2802 	/* We take this opportunity to close the vnodes like we should.. */
2803 
2804 	for (c = 0; c < raidPtr->numCol; c++) {
2805 		vp = raidPtr->raid_cinfo[c].ci_vp;
2806 		acd = raidPtr->Disks[c].auto_configured;
2807 		rf_close_component(raidPtr, vp, acd);
2808 		raidPtr->raid_cinfo[c].ci_vp = NULL;
2809 		raidPtr->Disks[c].auto_configured = 0;
2810 	}
2811 
2812 	for (r = 0; r < raidPtr->numSpare; r++) {
2813 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2814 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2815 		rf_close_component(raidPtr, vp, acd);
2816 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2817 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2818 	}
2819 }
2820 
2821 
2822 void
2823 rf_ReconThread(struct rf_recon_req *req)
2824 {
2825 	int     s;
2826 	RF_Raid_t *raidPtr;
2827 
2828 	s = splbio();
2829 	raidPtr = (RF_Raid_t *) req->raidPtr;
2830 	raidPtr->recon_in_progress = 1;
2831 
2832 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
2833 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2834 
2835 	RF_Free(req, sizeof(*req));
2836 
2837 	raidPtr->recon_in_progress = 0;
2838 	splx(s);
2839 
2840 	/* That's all... */
2841 	kthread_exit(0);	/* does not return */
2842 }
2843 
2844 void
2845 rf_RewriteParityThread(RF_Raid_t *raidPtr)
2846 {
2847 	int retcode;
2848 	int s;
2849 
2850 	raidPtr->parity_rewrite_stripes_done = 0;
2851 	raidPtr->parity_rewrite_in_progress = 1;
2852 	s = splbio();
2853 	retcode = rf_RewriteParity(raidPtr);
2854 	splx(s);
2855 	if (retcode) {
2856 		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2857 	} else {
2858 		/* set the clean bit!  If we shutdown correctly,
2859 		   the clean bit on each component label will get
2860 		   set */
2861 		raidPtr->parity_good = RF_RAID_CLEAN;
2862 	}
2863 	raidPtr->parity_rewrite_in_progress = 0;
2864 
2865 	/* Anyone waiting for us to stop?  If so, inform them... */
2866 	if (raidPtr->waitShutdown) {
2867 		wakeup(&raidPtr->parity_rewrite_in_progress);
2868 	}
2869 
2870 	/* That's all... */
2871 	kthread_exit(0);	/* does not return */
2872 }
2873 
2874 
2875 void
2876 rf_CopybackThread(RF_Raid_t *raidPtr)
2877 {
2878 	int s;
2879 
2880 	raidPtr->copyback_in_progress = 1;
2881 	s = splbio();
2882 	rf_CopybackReconstructedData(raidPtr);
2883 	splx(s);
2884 	raidPtr->copyback_in_progress = 0;
2885 
2886 	/* That's all... */
2887 	kthread_exit(0);	/* does not return */
2888 }
2889 
2890 
2891 void
2892 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
2893 {
2894 	int s;
2895 	RF_Raid_t *raidPtr;
2896 
2897 	s = splbio();
2898 	raidPtr = req->raidPtr;
2899 	raidPtr->recon_in_progress = 1;
2900 	rf_ReconstructInPlace(raidPtr, req->col);
2901 	RF_Free(req, sizeof(*req));
2902 	raidPtr->recon_in_progress = 0;
2903 	splx(s);
2904 
2905 	/* That's all... */
2906 	kthread_exit(0);	/* does not return */
2907 }
2908 
/*
 * Try to read a component label from (dev, vp); when it looks sane and
 * fits inside `size', prepend a new RF_AutoConfig_t to ac_list.
 * Ownership: on success the vnode and label are kept in the list entry;
 * on failure (bad label or allocation) the vnode is closed and released.
 * Returns the (possibly extended) list, or NULL after freeing the whole
 * list when memory runs out.
 */
static RF_AutoConfig_t *
rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
    const char *cname, RF_SectorCount_t size)
{
	int good_one = 0;
	RF_ComponentLabel_t *clabel;
	RF_AutoConfig_t *ac;

	clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
	if (clabel == NULL) {
oomem:
		    /* out of memory: tear down everything collected so far */
		    while(ac_list) {
			    ac = ac_list;
			    if (ac->clabel)
				    free(ac->clabel, M_RAIDFRAME);
			    ac_list = ac_list->next;
			    free(ac, M_RAIDFRAME);
		    }
		    printf("RAID auto config: out of memory!\n");
		    return NULL; /* XXX probably should panic? */
	}

	if (!raidread_component_label(dev, vp, clabel)) {
		    /* Got the label.  Does it look reasonable? */
		    if (rf_reasonable_label(clabel) &&
			(clabel->partitionSize <= size)) {
#ifdef DEBUG
			    printf("Component on: %s: %llu\n",
				cname, (unsigned long long)size);
			    rf_print_component_label(clabel);
#endif
			    /* if it's reasonable, add it, else ignore it. */
			    ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
				M_NOWAIT);
			    if (ac == NULL) {
				    free(clabel, M_RAIDFRAME);
				    goto oomem;
			    }
			    strlcpy(ac->devname, cname, sizeof(ac->devname));
			    ac->dev = dev;
			    ac->vp = vp;
			    ac->clabel = clabel;
			    ac->next = ac_list;
			    ac_list = ac;
			    good_one = 1;
		    }
	}
	if (!good_one) {
		/* cleanup: this component isn't joining the list, so the
		 * label memory and the vnode reference go back */
		free(clabel, M_RAIDFRAME);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
		vput(vp);
	}
	return ac_list;
}
2965 
/*
 * Walk every disk-class device in the system looking for RAIDframe
 * components: wedges whose partition type is raidframe, and disklabel
 * partitions marked FS_RAID.  Each candidate is probed with
 * rf_get_component(); the collected candidates are returned as a linked
 * RF_AutoConfig_t list (NULL when none were found or memory ran out).
 */
RF_AutoConfig_t *
rf_find_raid_components(void)
{
	struct vnode *vp;
	struct disklabel label;
	device_t dv;
	deviter_t di;
	dev_t dev;
	int bmajor, bminor, wedge;
	int error;
	int i;
	RF_AutoConfig_t *ac_list;


	/* initialize the AutoConfig list */
	ac_list = NULL;

	/* we begin by trolling through *all* the devices on the system */

	for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
	     dv = deviter_next(&di)) {

		/* we are only interested in disks... */
		if (device_class(dv) != DV_DISK)
			continue;

		/* we don't care about floppies... */
		if (device_is_a(dv, "fd")) {
			continue;
		}

		/* we don't care about CD's... */
		if (device_is_a(dv, "cd")) {
			continue;
		}

		/* we don't care about md's... */
		if (device_is_a(dv, "md")) {
			continue;
		}

		/* hdfd is the Atari/Hades floppy driver */
		if (device_is_a(dv, "hdfd")) {
			continue;
		}

		/* fdisa is the Atari/Milan floppy driver */
		if (device_is_a(dv, "fdisa")) {
			continue;
		}

		/* need to find the device_name_to_block_device_major stuff */
		bmajor = devsw_name2blk(device_xname(dv), NULL, 0);

		/* get a vnode for the raw partition of this disk */

		wedge = device_is_a(dv, "dk");
		bminor = minor(device_unit(dv));
		dev = wedge ? makedev(bmajor, bminor) :
		    MAKEDISKDEV(bmajor, bminor, RAW_PART);
		if (bdevvp(dev, &vp))
			panic("RAID can't alloc vnode");

		error = VOP_OPEN(vp, FREAD, NOCRED);

		if (error) {
			/* "Who cares."  Continue looking
			   for something that exists*/
			vput(vp);
			continue;
		}

		if (wedge) {
			/* wedges carry their own partition type; only
			 * raidframe wedges are candidates */
			struct dkwedge_info dkw;
			error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
			    NOCRED);
			if (error) {
				printf("RAIDframe: can't get wedge info for "
				    "dev %s (%d)\n", device_xname(dv), error);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
				vput(vp);
				continue;
			}

			if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
				vput(vp);
				continue;
			}

			/* rf_get_component takes over the vnode reference */
			ac_list = rf_get_component(ac_list, dev, vp,
			    device_xname(dv), dkw.dkw_size);
			continue;
		}

		/* Ok, the disk exists.  Go get the disklabel. */
		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
		if (error) {
			/*
			 * XXX can't happen - open() would
			 * have errored out (or faked up one)
			 */
			if (error != ENOTTY)
				printf("RAIDframe: can't get label for dev "
				    "%s (%d)\n", device_xname(dv), error);
		}

		/* don't need this any more.  We'll allocate it again
		   a little later if we really do... */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
		vput(vp);

		if (error)
			continue;

		/* probe each FS_RAID partition in the disklabel */
		for (i = 0; i < label.d_npartitions; i++) {
			char cname[sizeof(ac_list->devname)];

			/* We only support partitions marked as RAID */
			if (label.d_partitions[i].p_fstype != FS_RAID)
				continue;

			dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
			if (bdevvp(dev, &vp))
				panic("RAID can't alloc vnode");

			error = VOP_OPEN(vp, FREAD, NOCRED);
			if (error) {
				/* Whatever... */
				vput(vp);
				continue;
			}
			snprintf(cname, sizeof(cname), "%s%c",
			    device_xname(dv), 'a' + i);
			ac_list = rf_get_component(ac_list, dev, vp, cname,
				label.d_partitions[i].p_size);
		}
	}
	deviter_release(&di);
	return ac_list;
}
3110 
3111 
3112 static int
3113 rf_reasonable_label(RF_ComponentLabel_t *clabel)
3114 {
3115 
3116 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
3117 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
3118 	    ((clabel->clean == RF_RAID_CLEAN) ||
3119 	     (clabel->clean == RF_RAID_DIRTY)) &&
3120 	    clabel->row >=0 &&
3121 	    clabel->column >= 0 &&
3122 	    clabel->num_rows > 0 &&
3123 	    clabel->num_columns > 0 &&
3124 	    clabel->row < clabel->num_rows &&
3125 	    clabel->column < clabel->num_columns &&
3126 	    clabel->blockSize > 0 &&
3127 	    clabel->numBlocks > 0) {
3128 		/* label looks reasonable enough... */
3129 		return(1);
3130 	}
3131 	return(0);
3132 }
3133 
3134 
#ifdef DEBUG
/*
 * Dump the contents of a component label to the console.
 * Debug-only helper, compiled in when DEBUG is defined.
 */
void
rf_print_component_label(RF_ComponentLabel_t *clabel)
{
	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
	       clabel->row, clabel->column,
	       clabel->num_rows, clabel->num_columns);
	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
	       clabel->version, clabel->serial_number,
	       clabel->mod_counter);
	printf("   Clean: %s Status: %d\n",
	       clabel->clean ? "Yes" : "No", clabel->status);
	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
	       (char) clabel->parityConfig, clabel->blockSize,
	       clabel->numBlocks);
	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No");
	printf("   Contains root partition: %s\n",
	       clabel->root_partition ? "Yes" : "No");
	printf("   Last configured as: raid%d\n", clabel->last_unit);
#if 0
	   printf("   Config order: %d\n", clabel->config_order);
#endif

}
#endif
3162 
3163 RF_ConfigSet_t *
3164 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
3165 {
3166 	RF_AutoConfig_t *ac;
3167 	RF_ConfigSet_t *config_sets;
3168 	RF_ConfigSet_t *cset;
3169 	RF_AutoConfig_t *ac_next;
3170 
3171 
3172 	config_sets = NULL;
3173 
3174 	/* Go through the AutoConfig list, and figure out which components
3175 	   belong to what sets.  */
3176 	ac = ac_list;
3177 	while(ac!=NULL) {
3178 		/* we're going to putz with ac->next, so save it here
3179 		   for use at the end of the loop */
3180 		ac_next = ac->next;
3181 
3182 		if (config_sets == NULL) {
3183 			/* will need at least this one... */
3184 			config_sets = (RF_ConfigSet_t *)
3185 				malloc(sizeof(RF_ConfigSet_t),
3186 				       M_RAIDFRAME, M_NOWAIT);
3187 			if (config_sets == NULL) {
3188 				panic("rf_create_auto_sets: No memory!");
3189 			}
3190 			/* this one is easy :) */
3191 			config_sets->ac = ac;
3192 			config_sets->next = NULL;
3193 			config_sets->rootable = 0;
3194 			ac->next = NULL;
3195 		} else {
3196 			/* which set does this component fit into? */
3197 			cset = config_sets;
3198 			while(cset!=NULL) {
3199 				if (rf_does_it_fit(cset, ac)) {
3200 					/* looks like it matches... */
3201 					ac->next = cset->ac;
3202 					cset->ac = ac;
3203 					break;
3204 				}
3205 				cset = cset->next;
3206 			}
3207 			if (cset==NULL) {
3208 				/* didn't find a match above... new set..*/
3209 				cset = (RF_ConfigSet_t *)
3210 					malloc(sizeof(RF_ConfigSet_t),
3211 					       M_RAIDFRAME, M_NOWAIT);
3212 				if (cset == NULL) {
3213 					panic("rf_create_auto_sets: No memory!");
3214 				}
3215 				cset->ac = ac;
3216 				ac->next = NULL;
3217 				cset->next = config_sets;
3218 				cset->rootable = 0;
3219 				config_sets = cset;
3220 			}
3221 		}
3222 		ac = ac_next;
3223 	}
3224 
3225 
3226 	return(config_sets);
3227 }
3228 
3229 static int
3230 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
3231 {
3232 	RF_ComponentLabel_t *clabel1, *clabel2;
3233 
3234 	/* If this one matches the *first* one in the set, that's good
3235 	   enough, since the other members of the set would have been
3236 	   through here too... */
3237 	/* note that we are not checking partitionSize here..
3238 
3239 	   Note that we are also not checking the mod_counters here.
3240 	   If everything else matches execpt the mod_counter, that's
3241 	   good enough for this test.  We will deal with the mod_counters
3242 	   a little later in the autoconfiguration process.
3243 
3244 	    (clabel1->mod_counter == clabel2->mod_counter) &&
3245 
3246 	   The reason we don't check for this is that failed disks
3247 	   will have lower modification counts.  If those disks are
3248 	   not added to the set they used to belong to, then they will
3249 	   form their own set, which may result in 2 different sets,
3250 	   for example, competing to be configured at raid0, and
3251 	   perhaps competing to be the root filesystem set.  If the
3252 	   wrong ones get configured, or both attempt to become /,
3253 	   weird behaviour and or serious lossage will occur.  Thus we
3254 	   need to bring them into the fold here, and kick them out at
3255 	   a later point.
3256 
3257 	*/
3258 
3259 	clabel1 = cset->ac->clabel;
3260 	clabel2 = ac->clabel;
3261 	if ((clabel1->version == clabel2->version) &&
3262 	    (clabel1->serial_number == clabel2->serial_number) &&
3263 	    (clabel1->num_rows == clabel2->num_rows) &&
3264 	    (clabel1->num_columns == clabel2->num_columns) &&
3265 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
3266 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
3267 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
3268 	    (clabel1->parityConfig == clabel2->parityConfig) &&
3269 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
3270 	    (clabel1->blockSize == clabel2->blockSize) &&
3271 	    (clabel1->numBlocks == clabel2->numBlocks) &&
3272 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
3273 	    (clabel1->root_partition == clabel2->root_partition) &&
3274 	    (clabel1->last_unit == clabel2->last_unit) &&
3275 	    (clabel1->config_order == clabel2->config_order)) {
3276 		/* if it get's here, it almost *has* to be a match */
3277 	} else {
3278 		/* it's not consistent with somebody in the set..
3279 		   punt */
3280 		return(0);
3281 	}
3282 	/* all was fine.. it must fit... */
3283 	return(1);
3284 }
3285 
/*
 * Decide whether the config set has enough live components to be
 * configured.  Components whose mod_counter is lower than the newest
 * mod_counter in the set are treated as stale/failed and ignored.
 * Returns 1 if the set can be configured, 0 if too many components
 * are missing for the set's parity configuration.
 */
int
rf_have_enough_components(RF_ConfigSet_t *cset)
{
	RF_AutoConfig_t *ac;
	RF_AutoConfig_t *auto_config;
	RF_ComponentLabel_t *clabel;
	int c;
	int num_cols;
	int num_missing;
	int mod_counter;
	int mod_counter_found;
	int even_pair_failed;
	char parity_type;


	/* check to see that we have enough 'live' components
	   of this set.  If so, we can configure it if necessary */

	num_cols = cset->ac->clabel->num_columns;
	parity_type = cset->ac->clabel->parityConfig;

	/* XXX Check for duplicate components!?!?!? */

	/* Determine what the mod_counter is supposed to be for this set.
	   The correct value is the maximum over all members: failed
	   components stopped being updated and so carry lower counts. */

	mod_counter_found = 0;
	mod_counter = 0;
	ac = cset->ac;
	while(ac!=NULL) {
		if (mod_counter_found==0) {
			mod_counter = ac->clabel->mod_counter;
			mod_counter_found = 1;
		} else {
			if (ac->clabel->mod_counter > mod_counter) {
				mod_counter = ac->clabel->mod_counter;
			}
		}
		ac = ac->next;
	}

	num_missing = 0;
	auto_config = cset->ac;

	/* For each column, look for a component with the current
	   mod_counter.  Anything else in that column is stale. */
	even_pair_failed = 0;
	for(c=0; c<num_cols; c++) {
		ac = auto_config;
		while(ac!=NULL) {
			if ((ac->clabel->column == c) &&
			    (ac->clabel->mod_counter == mod_counter)) {
				/* it's this one... */
#ifdef DEBUG
				printf("Found: %s at %d\n",
				       ac->devname,c);
#endif
				break;
			}
			ac=ac->next;
		}
		if (ac==NULL) {
				/* Didn't find one here! */
				/* special case for RAID 1, especially
				   where there are more than 2
				   components (where RAIDframe treats
				   things a little differently :( ) */
			if (parity_type == '1') {
				if (c%2 == 0) { /* even component */
					even_pair_failed = 1;
				} else { /* odd component.  If
					    we're failed, and
					    so is the even
					    component, it's
					    "Good Night, Charlie" */
					if (even_pair_failed == 1) {
						return(0);
					}
				}
			} else {
				/* normal accounting */
				num_missing++;
			}
		}
		if ((parity_type == '1') && (c%2 == 1)) {
				/* Just finished the odd component of a
				   mirror pair without bailing, so the
				   pair survives.  Reset the
				   even_pair_failed flag and go on to
				   the next pair.... */
			even_pair_failed = 0;
		}
	}

	clabel = cset->ac->clabel;

	/* RAID 0 tolerates no missing components; RAID 4/5 tolerate
	   at most one.  (RAID 1 was fully handled above.) */
	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
		/* XXX this needs to be made *much* more general */
		/* Too many failures */
		return(0);
	}
	/* otherwise, all is well, and we've got enough to take a kick
	   at autoconfiguring this set */
	return(1);
}
3388 
3389 void
3390 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
3391 			RF_Raid_t *raidPtr)
3392 {
3393 	RF_ComponentLabel_t *clabel;
3394 	int i;
3395 
3396 	clabel = ac->clabel;
3397 
3398 	/* 1. Fill in the common stuff */
3399 	config->numRow = clabel->num_rows = 1;
3400 	config->numCol = clabel->num_columns;
3401 	config->numSpare = 0; /* XXX should this be set here? */
3402 	config->sectPerSU = clabel->sectPerSU;
3403 	config->SUsPerPU = clabel->SUsPerPU;
3404 	config->SUsPerRU = clabel->SUsPerRU;
3405 	config->parityConfig = clabel->parityConfig;
3406 	/* XXX... */
3407 	strcpy(config->diskQueueType,"fifo");
3408 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3409 	config->layoutSpecificSize = 0; /* XXX ?? */
3410 
3411 	while(ac!=NULL) {
3412 		/* row/col values will be in range due to the checks
3413 		   in reasonable_label() */
3414 		strcpy(config->devnames[0][ac->clabel->column],
3415 		       ac->devname);
3416 		ac = ac->next;
3417 	}
3418 
3419 	for(i=0;i<RF_MAXDBGV;i++) {
3420 		config->debugVars[i][0] = 0;
3421 	}
3422 }
3423 
3424 int
3425 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3426 {
3427 	RF_ComponentLabel_t *clabel;
3428 	int column;
3429 	int sparecol;
3430 
3431 	raidPtr->autoconfigure = new_value;
3432 
3433 	for(column=0; column<raidPtr->numCol; column++) {
3434 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
3435 			clabel = raidget_component_label(raidPtr, column);
3436 			clabel->autoconfigure = new_value;
3437 			raidflush_component_label(raidPtr, column);
3438 		}
3439 	}
3440 	for(column = 0; column < raidPtr->numSpare ; column++) {
3441 		sparecol = raidPtr->numCol + column;
3442 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3443 			clabel = raidget_component_label(raidPtr, sparecol);
3444 			clabel->autoconfigure = new_value;
3445 			raidflush_component_label(raidPtr, sparecol);
3446 		}
3447 	}
3448 	return(new_value);
3449 }
3450 
3451 int
3452 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3453 {
3454 	RF_ComponentLabel_t *clabel;
3455 	int column;
3456 	int sparecol;
3457 
3458 	raidPtr->root_partition = new_value;
3459 	for(column=0; column<raidPtr->numCol; column++) {
3460 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
3461 			clabel = raidget_component_label(raidPtr, column);
3462 			clabel->root_partition = new_value;
3463 			raidflush_component_label(raidPtr, column);
3464 		}
3465 	}
3466 	for(column = 0; column < raidPtr->numSpare ; column++) {
3467 		sparecol = raidPtr->numCol + column;
3468 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3469 			clabel = raidget_component_label(raidPtr, sparecol);
3470 			clabel->root_partition = new_value;
3471 			raidflush_component_label(raidPtr, sparecol);
3472 		}
3473 	}
3474 	return(new_value);
3475 }
3476 
3477 void
3478 rf_release_all_vps(RF_ConfigSet_t *cset)
3479 {
3480 	RF_AutoConfig_t *ac;
3481 
3482 	ac = cset->ac;
3483 	while(ac!=NULL) {
3484 		/* Close the vp, and give it back */
3485 		if (ac->vp) {
3486 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3487 			VOP_CLOSE(ac->vp, FREAD, NOCRED);
3488 			vput(ac->vp);
3489 			ac->vp = NULL;
3490 		}
3491 		ac = ac->next;
3492 	}
3493 }
3494 
3495 
3496 void
3497 rf_cleanup_config_set(RF_ConfigSet_t *cset)
3498 {
3499 	RF_AutoConfig_t *ac;
3500 	RF_AutoConfig_t *next_ac;
3501 
3502 	ac = cset->ac;
3503 	while(ac!=NULL) {
3504 		next_ac = ac->next;
3505 		/* nuke the label */
3506 		free(ac->clabel, M_RAIDFRAME);
3507 		/* cleanup the config structure */
3508 		free(ac, M_RAIDFRAME);
3509 		/* "next.." */
3510 		ac = next_ac;
3511 	}
3512 	/* and, finally, nuke the config set */
3513 	free(cset, M_RAIDFRAME);
3514 }
3515 
3516 
/*
 * Initialize a component label from the current configuration of
 * raidPtr.  Everything needed to recognize and autoconfigure this
 * set again later is recorded here.
 */
void
raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
{
	/* current version number */
	clabel->version = RF_COMPONENT_LABEL_VERSION;
	clabel->serial_number = raidPtr->serial_number;
	clabel->mod_counter = raidPtr->mod_counter;

	/* set geometry */
	clabel->num_rows = 1;
	clabel->num_columns = raidPtr->numCol;
	clabel->clean = RF_RAID_DIRTY; /* not clean */
	clabel->status = rf_ds_optimal; /* "It's good!" */

	/* layout parameters */
	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;

	clabel->blockSize = raidPtr->bytesPerSector;
	clabel->numBlocks = raidPtr->sectorsPerDisk;

	/* XXX not portable */
	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
	clabel->maxOutstanding = raidPtr->maxOutstanding;
	clabel->autoconfigure = raidPtr->autoconfigure;
	clabel->root_partition = raidPtr->root_partition;
	clabel->last_unit = raidPtr->raidid;
	clabel->config_order = raidPtr->config_order;

#ifndef RF_NO_PARITY_MAP
	rf_paritymap_init_label(raidPtr->parity_map, clabel);
#endif
}
3549 
/*
 * Autoconfigure one config set: pick a raid unit, build an
 * RF_Config_t from the component labels, and configure the set.
 * On success *unit is set to the raid unit used and 0 is returned;
 * on failure a non-zero value is returned and *unit is -1 (or the
 * unit on which rf_Configure failed).
 */
int
rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
{
	RF_Raid_t *raidPtr;
	RF_Config_t *config;
	int raidID;
	int retcode;

#ifdef DEBUG
	printf("RAID autoconfigure\n");
#endif

	retcode = 0;
	*unit = -1;

	/* 1. Create a config structure */

	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
				       M_RAIDFRAME,
				       M_NOWAIT);
	if (config==NULL) {
		printf("Out of mem!?!?\n");
				/* XXX do something more intelligent here. */
		return(1);
	}

	memset(config, 0, sizeof(RF_Config_t));

	/*
	   2. Figure out what RAID ID this one is supposed to live at
	   See if we can get the same RAID dev that it was configured
	   on last time..
	*/

	raidID = cset->ac->clabel->last_unit;
	if ((raidID < 0) || (raidID >= numraid)) {
		/* let's not wander off into lala land. */
		raidID = numraid - 1;
	}
	if (raidPtrs[raidID]->valid != 0) {

		/*
		   Nope... Go looking for an alternative...
		   Start high so we don't immediately use raid0 if that's
		   not taken.
		*/

		for(raidID = numraid - 1; raidID >= 0; raidID--) {
			if (raidPtrs[raidID]->valid == 0) {
				/* can use this one! */
				break;
			}
		}
		/* if no unit was free, the loop leaves raidID == -1 */
	}

	if (raidID < 0) {
		/* punt... */
		printf("Unable to auto configure this set!\n");
		printf("(Out of RAID devs!)\n");
		free(config, M_RAIDFRAME);
		return(1);
	}

#ifdef DEBUG
	printf("Configuring raid%d:\n",raidID);
#endif

	raidPtr = raidPtrs[raidID];

	/* XXX all this stuff should be done SOMEWHERE ELSE! */
	raidPtr->raidid = raidID;
	raidPtr->openings = RAIDOUTSTANDING;

	/* 3. Build the configuration structure */
	rf_create_configuration(cset->ac, config, raidPtr);

	/* 4. Do the configuration */
	retcode = rf_Configure(raidPtr, config, cset->ac);

	if (retcode == 0) {

		raidinit(raidPtrs[raidID]);

		/* parity may be out of date until checked/rewritten */
		rf_markalldirty(raidPtrs[raidID]);
		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
		if (cset->ac->clabel->root_partition==1) {
			/* everything configured just fine.  Make a note
			   that this set is eligible to be root. */
			cset->rootable = 1;
			/* XXX do this here? */
			raidPtrs[raidID]->root_partition = 1;
		}
	}

	/* 5. Cleanup */
	free(config, M_RAIDFRAME);

	*unit = raidID;
	return(retcode);
}
3650 
3651 void
3652 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
3653 {
3654 	struct buf *bp;
3655 
3656 	bp = (struct buf *)desc->bp;
3657 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3658 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
3659 }
3660 
/*
 * Initialize a pool with RAIDframe's standard settings: prime it with
 * xmin preallocated items, keep at least xmin around (low watermark),
 * and cap the number of idle items at xmax (high watermark).  w_chan
 * is the wait-channel name shown to the pool subsystem.
 */
void
rf_pool_init(struct pool *p, size_t size, const char *w_chan,
	     size_t xmin, size_t xmax)
{
	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
	pool_sethiwat(p, xmax);
	pool_prime(p, xmin);
	pool_setlowat(p, xmin);
}
3670 
3671 /*
3672  * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
3673  * if there is IO pending and if that IO could possibly be done for a
3674  * given RAID set.  Returns 0 if IO is waiting and can be done, 1
3675  * otherwise.
3676  *
3677  */
3678 
3679 int
3680 rf_buf_queue_check(int raidid)
3681 {
3682 	if ((bufq_peek(raid_softc[raidid].buf_queue) != NULL) &&
3683 	    raidPtrs[raidid]->openings > 0) {
3684 		/* there is work to do */
3685 		return 0;
3686 	}
3687 	/* default is nothing to do */
3688 	return 1;
3689 }
3690 
/*
 * Determine the size and sector size of the component open on vp and
 * record them in diskPtr.  Try DIOCGPART first (disklabel partition);
 * if that fails, fall back to DIOCGWEDGEINFO (wedge).  The usable
 * size (numBlocks) excludes the rf_protectedSectors reserved at the
 * start of each component.  Returns 0 on success, or the error from
 * the wedge ioctl if both probes fail.
 */
int
rf_getdisksize(struct vnode *vp, struct lwp *l, RF_RaidDisk_t *diskPtr)
{
	struct partinfo dpart;
	struct dkwedge_info dkw;
	int error;

	error = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred);
	if (error == 0) {
		diskPtr->blockSize = dpart.disklab->d_secsize;
		diskPtr->numBlocks = dpart.part->p_size - rf_protectedSectors;
		diskPtr->partitionSize = dpart.part->p_size;
		return 0;
	}

	error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD, l->l_cred);
	if (error == 0) {
		/* XXX: dkwedge_info carries no sector size, so 512 is
		   assumed here -- NOTE(review): verify for devices with
		   other sector sizes */
		diskPtr->blockSize = 512;	/* XXX */
		diskPtr->numBlocks = dkw.dkw_size - rf_protectedSectors;
		diskPtr->partitionSize = dkw.dkw_size;
		return 0;
	}
	return error;
}
3715 
/*
 * Autoconf match function: raid(4) is a pseudo-device, so every
 * instance always matches.
 */
static int
raid_match(device_t self, cfdata_t cfdata, void *aux)
{
	return 1;
}
3721 
/*
 * Autoconf attach function.  Intentionally empty: no per-device
 * setup is done at attach time in this driver.
 */
static void
raid_attach(device_t parent, device_t self, void *aux)
{

}
3727 
3728 
3729 static int
3730 raid_detach(device_t self, int flags)
3731 {
3732 	int error;
3733 	struct raid_softc *rs = &raid_softc[device_unit(self)];
3734 
3735 	if ((error = raidlock(rs)) != 0)
3736 		return (error);
3737 
3738 	error = raid_detach_unlocked(rs);
3739 
3740 	raidunlock(rs);
3741 
3742 	return error;
3743 }
3744 
/*
 * Build a "disk-info" property dictionary describing the RAID set's
 * geometry and attach it to the device, releasing any previous one.
 * The geometry is synthetic: one "track" is a full data stripe and
 * each "cylinder" spans 4 * numCol tracks.
 */
static void
rf_set_properties(struct raid_softc *rs, RF_Raid_t *raidPtr)
{
	prop_dictionary_t disk_info, odisk_info, geom;
	disk_info = prop_dictionary_create();
	geom = prop_dictionary_create();
	prop_dictionary_set_uint64(geom, "sectors-per-unit",
				   raidPtr->totalSectors);
	prop_dictionary_set_uint32(geom, "sector-size",
				   raidPtr->bytesPerSector);

	prop_dictionary_set_uint16(geom, "sectors-per-track",
				   raidPtr->Layout.dataSectorsPerStripe);
	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
				   4 * raidPtr->numCol);

	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
	   raidPtr->totalSectors / (raidPtr->Layout.dataSectorsPerStripe *
	   (4 * raidPtr->numCol)));

	/* disk_info takes its own reference to geom; drop ours */
	prop_dictionary_set(disk_info, "geometry", geom);
	prop_object_release(geom);
	prop_dictionary_set(device_properties(rs->sc_dev),
			    "disk-info", disk_info);
	/* swap in the new dictionary and release the old one, if any */
	odisk_info = rs->sc_dkdev.dk_info;
	rs->sc_dkdev.dk_info = disk_info;
	if (odisk_info)
		prop_object_release(odisk_info);
}
3774 
3775 /*
3776  * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
3777  * We end up returning whatever error was returned by the first cache flush
3778  * that fails.
3779  */
3780 
3781 int
3782 rf_sync_component_caches(RF_Raid_t *raidPtr)
3783 {
3784 	int c, sparecol;
3785 	int e,error;
3786 	int force = 1;
3787 
3788 	error = 0;
3789 	for (c = 0; c < raidPtr->numCol; c++) {
3790 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
3791 			e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
3792 					  &force, FWRITE, NOCRED);
3793 			if (e) {
3794 				if (e != ENODEV)
3795 					printf("raid%d: cache flush to component %s failed.\n",
3796 					       raidPtr->raidid, raidPtr->Disks[c].devname);
3797 				if (error == 0) {
3798 					error = e;
3799 				}
3800 			}
3801 		}
3802 	}
3803 
3804 	for( c = 0; c < raidPtr->numSpare ; c++) {
3805 		sparecol = raidPtr->numCol + c;
3806 		/* Need to ensure that the reconstruct actually completed! */
3807 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3808 			e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
3809 					  DIOCCACHESYNC, &force, FWRITE, NOCRED);
3810 			if (e) {
3811 				if (e != ENODEV)
3812 					printf("raid%d: cache flush to component %s failed.\n",
3813 					       raidPtr->raidid, raidPtr->Disks[sparecol].devname);
3814 				if (error == 0) {
3815 					error = e;
3816 				}
3817 			}
3818 		}
3819 	}
3820 	return error;
3821 }
3822