1 /*	$NetBSD: rf_netbsdkintf.c,v 1.182 2004/10/28 07:07:44 yamt Exp $	*/
2 /*-
3  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Greg Oster; Jason R. Thorpe.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *        This product includes software developed by the NetBSD
20  *        Foundation, Inc. and its contributors.
21  * 4. Neither the name of The NetBSD Foundation nor the names of its
22  *    contributors may be used to endorse or promote products derived
23  *    from this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1990, 1993
40  *      The Regents of the University of California.  All rights reserved.
41  *
42  * This code is derived from software contributed to Berkeley by
43  * the Systems Programming Group of the University of Utah Computer
44  * Science Department.
45  *
46  * Redistribution and use in source and binary forms, with or without
47  * modification, are permitted provided that the following conditions
48  * are met:
49  * 1. Redistributions of source code must retain the above copyright
50  *    notice, this list of conditions and the following disclaimer.
51  * 2. Redistributions in binary form must reproduce the above copyright
52  *    notice, this list of conditions and the following disclaimer in the
53  *    documentation and/or other materials provided with the distribution.
54  * 3. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  * from: Utah $Hdr: cd.c 1.6 90/11/28$
71  *
72  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
73  */
74 
75 /*
76  * Copyright (c) 1988 University of Utah.
77  *
78  * This code is derived from software contributed to Berkeley by
79  * the Systems Programming Group of the University of Utah Computer
80  * Science Department.
81  *
82  * Redistribution and use in source and binary forms, with or without
83  * modification, are permitted provided that the following conditions
84  * are met:
85  * 1. Redistributions of source code must retain the above copyright
86  *    notice, this list of conditions and the following disclaimer.
87  * 2. Redistributions in binary form must reproduce the above copyright
88  *    notice, this list of conditions and the following disclaimer in the
89  *    documentation and/or other materials provided with the distribution.
90  * 3. All advertising materials mentioning features or use of this software
91  *    must display the following acknowledgement:
92  *      This product includes software developed by the University of
93  *      California, Berkeley and its contributors.
94  * 4. Neither the name of the University nor the names of its contributors
95  *    may be used to endorse or promote products derived from this software
96  *    without specific prior written permission.
97  *
98  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
99  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
100  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
101  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
102  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
103  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
104  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
105  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
106  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
107  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
108  * SUCH DAMAGE.
109  *
110  * from: Utah $Hdr: cd.c 1.6 90/11/28$
111  *
112  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
113  */
114 
115 /*
116  * Copyright (c) 1995 Carnegie-Mellon University.
117  * All rights reserved.
118  *
119  * Authors: Mark Holland, Jim Zelenka
120  *
121  * Permission to use, copy, modify and distribute this software and
122  * its documentation is hereby granted, provided that both the copyright
123  * notice and this permission notice appear in all copies of the
124  * software, derivative works or modified versions, and any portions
125  * thereof, and that both notices appear in supporting documentation.
126  *
127  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
128  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
129  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
130  *
131  * Carnegie Mellon requests users of this software to return to
132  *
133  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
134  *  School of Computer Science
135  *  Carnegie Mellon University
136  *  Pittsburgh PA 15213-3890
137  *
138  * any improvements or extensions that they make and grant Carnegie the
139  * rights to redistribute these changes.
140  */
141 
142 /***********************************************************
143  *
144  * rf_kintf.c -- the kernel interface routines for RAIDframe
145  *
146  ***********************************************************/
147 
148 #include <sys/cdefs.h>
149 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.182 2004/10/28 07:07:44 yamt Exp $");
150 
151 #include <sys/param.h>
152 #include <sys/errno.h>
153 #include <sys/pool.h>
154 #include <sys/proc.h>
155 #include <sys/queue.h>
156 #include <sys/disk.h>
157 #include <sys/device.h>
158 #include <sys/stat.h>
159 #include <sys/ioctl.h>
160 #include <sys/fcntl.h>
161 #include <sys/systm.h>
162 #include <sys/namei.h>
163 #include <sys/vnode.h>
164 #include <sys/disklabel.h>
165 #include <sys/conf.h>
166 #include <sys/lock.h>
167 #include <sys/buf.h>
168 #include <sys/bufq.h>
169 #include <sys/user.h>
170 #include <sys/reboot.h>
171 
172 #include <dev/raidframe/raidframevar.h>
173 #include <dev/raidframe/raidframeio.h>
174 #include "raid.h"
175 #include "opt_raid_autoconfig.h"
176 #include "rf_raid.h"
177 #include "rf_copyback.h"
178 #include "rf_dag.h"
179 #include "rf_dagflags.h"
180 #include "rf_desc.h"
181 #include "rf_diskqueue.h"
182 #include "rf_etimer.h"
183 #include "rf_general.h"
184 #include "rf_kintf.h"
185 #include "rf_options.h"
186 #include "rf_driver.h"
187 #include "rf_parityscan.h"
188 #include "rf_threadstuff.h"
189 
190 #ifdef DEBUG
191 int     rf_kdebug_level = 0;
192 #define db1_printf(a) do { if (rf_kdebug_level > 0) printf a; } while (0)
193 #else				/* DEBUG */
194 #define db1_printf(a) do { } while (0)
195 #endif				/* DEBUG */
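
/*
 * db1_printf() output is only compiled in for DEBUG kernels, and stays
 * silent until rf_kdebug_level is raised above zero (for example from
 * ddb or a kernel debugger at run time).
 */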
196 
197 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
198 
199 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
200 
201 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
202 						 * spare table */
203 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
204 						 * installation process */
205 
206 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
207 
208 /* prototypes */
209 static void KernelWakeupFunc(struct buf * bp);
210 static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
211 		   dev_t dev, RF_SectorNum_t startSect,
212 		   RF_SectorCount_t numSect, caddr_t buf,
213 		   void (*cbFunc) (struct buf *), void *cbArg,
214 		   int logBytesPerSector, struct proc * b_proc);
215 static void raidinit(RF_Raid_t *);
216 
217 void raidattach(int);
218 
219 dev_type_open(raidopen);
220 dev_type_close(raidclose);
221 dev_type_read(raidread);
222 dev_type_write(raidwrite);
223 dev_type_ioctl(raidioctl);
224 dev_type_strategy(raidstrategy);
225 dev_type_dump(raiddump);
226 dev_type_size(raidsize);
227 
228 const struct bdevsw raid_bdevsw = {
229 	raidopen, raidclose, raidstrategy, raidioctl,
230 	raiddump, raidsize, D_DISK
231 };
232 
233 const struct cdevsw raid_cdevsw = {
234 	raidopen, raidclose, raidread, raidwrite, raidioctl,
235 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
236 };
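
/*
 * These two switch tables back the block and character (raw) device
 * nodes for each RAID unit; the raid* entry points declared above do
 * the real work for both flavours.
 */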
237 
238 /*
239  * Pilfered from ccd.c
240  */
241 
242 struct raidbuf {
243 	struct buf rf_buf;	/* new I/O buf.  MUST BE FIRST!!! */
244 	struct buf *rf_obp;	/* ptr. to original I/O buf */
245 	RF_DiskQueueData_t *req;/* the request that this was part of.. */
246 };
247 
248 /* XXX Not sure if the following should be replacing the raidPtrs above,
249    or if it should be used in conjunction with that...
250 */
251 
252 struct raid_softc {
253 	int     sc_flags;	/* flags */
254 	int     sc_cflags;	/* configuration flags */
255 	size_t  sc_size;        /* size of the raid device */
256 	char    sc_xname[20];	/* XXX external name */
257 	struct disk sc_dkdev;	/* generic disk device info */
258 	struct bufq_state buf_queue;	/* used for the device queue */
259 };
260 /* sc_flags */
261 #define RAIDF_INITED	0x01	/* unit has been initialized */
262 #define RAIDF_WLABEL	0x02	/* label area is writable */
263 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
264 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
265 #define RAIDF_LOCKED	0x80	/* unit is locked */
266 
267 #define	raidunit(x)	DISKUNIT(x)
268 int numraid = 0;
269 
270 /*
271  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
272  * Be aware that large numbers can allow the driver to consume a lot of
273  * kernel memory, especially on writes, and in degraded mode reads.
274  *
275  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
276  * a single 64K write will typically require 64K for the old data,
277  * 64K for the old parity, and 64K for the new parity, for a total
278  * of 192K (if the parity buffer is not re-used immediately).
279  * Even if it is used immediately, that's still 128K, which when multiplied
280  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
281  *
282  * Now in degraded mode, for example, a 64K read on the above setup may
283  * require data reconstruction, which will require *all* of the 4 remaining
284  * disks to participate -- 4 * 32K/disk == 128K again.
285  */
286 
287 #ifndef RAIDOUTSTANDING
288 #define RAIDOUTSTANDING   6
289 #endif
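
/*
 * Worked example using the default above: with RAIDOUTSTANDING == 6 and
 * the 64K write case described earlier, the driver can transiently need
 * about 6 * 192K = 1152K of internal buffers, on top of 6 * 64K = 384K
 * of incoming data.
 */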
290 
291 #define RAIDLABELDEV(dev)	\
292 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
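/* i.e. the raw partition of the same unit, used when reading and writing
   the on-disk disklabel. */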
293 
294 /* declared here, and made public, for the benefit of KVM stuff.. */
295 struct raid_softc *raid_softc;
296 
297 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
298 				     struct disklabel *);
299 static void raidgetdisklabel(dev_t);
300 static void raidmakedisklabel(struct raid_softc *);
301 
302 static int raidlock(struct raid_softc *);
303 static void raidunlock(struct raid_softc *);
304 
305 static void rf_markalldirty(RF_Raid_t *);
306 
307 struct device *raidrootdev;
308 
309 void rf_ReconThread(struct rf_recon_req *);
310 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
311 void rf_CopybackThread(RF_Raid_t *raidPtr);
312 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
313 int rf_autoconfig(struct device *self);
314 void rf_buildroothack(RF_ConfigSet_t *);
315 
316 RF_AutoConfig_t *rf_find_raid_components(void);
317 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
318 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
319 static int rf_reasonable_label(RF_ComponentLabel_t *);
320 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
321 int rf_set_autoconfig(RF_Raid_t *, int);
322 int rf_set_rootpartition(RF_Raid_t *, int);
323 void rf_release_all_vps(RF_ConfigSet_t *);
324 void rf_cleanup_config_set(RF_ConfigSet_t *);
325 int rf_have_enough_components(RF_ConfigSet_t *);
326 int rf_auto_config_set(RF_ConfigSet_t *, int *);
327 
328 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
329 				  allow autoconfig to take place.
330 			          Note that this is overridden by having
331 			          RAID_AUTOCONFIG as an option in the
332 			          kernel config file.  */
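
/*
 * A kernel configuration that enables all of this typically contains
 * something like the following (the instance count is what ends up being
 * passed to raidattach()):
 *
 *	pseudo-device	raid		8	# RAIDframe disk driver
 *	options 	RAID_AUTOCONFIG		# auto-configure RAID sets
 */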
333 
334 struct RF_Pools_s rf_pools;
335 
336 void
337 raidattach(int num)
338 {
339 	int raidID;
340 	int i, rc;
341 
342 #ifdef DEBUG
343 	printf("raidattach: Asked for %d units\n", num);
344 #endif
345 
346 	if (num <= 0) {
347 #ifdef DIAGNOSTIC
348 		panic("raidattach: count <= 0");
349 #endif
350 		return;
351 	}
352 	/* This is where all the initialization stuff gets done. */
353 
354 	numraid = num;
355 
356 	/* Make some space for the requested number of units... */
357 
358 	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
359 	if (raidPtrs == NULL) {
360 		panic("raidPtrs is NULL!!");
361 	}
362 
363 	/* Initialize the component buffer pool. */
364 	rf_pool_init(&rf_pools.cbuf, sizeof(struct raidbuf),
365 		     "raidpl", num * RAIDOUTSTANDING,
366 		     2 * num * RAIDOUTSTANDING);
367 
368 	rf_mutex_init(&rf_sparet_wait_mutex);
369 
370 	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
371 
372 	for (i = 0; i < num; i++)
373 		raidPtrs[i] = NULL;
374 	rc = rf_BootRaidframe();
375 	if (rc == 0)
376 		printf("Kernelized RAIDframe activated\n");
377 	else
378 		panic("Serious error booting RAID!!");
379 
380 	/* Put together some data structures, like the CCD device does.  This
381 	 * lets us lock the device and what-not when it gets opened. */
382 
383 	raid_softc = (struct raid_softc *)
384 		malloc(num * sizeof(struct raid_softc),
385 		       M_RAIDFRAME, M_NOWAIT);
386 	if (raid_softc == NULL) {
387 		printf("WARNING: no memory for RAIDframe driver\n");
388 		return;
389 	}
390 
391 	memset(raid_softc, 0, num * sizeof(struct raid_softc));
392 
393 	raidrootdev = (struct device *)malloc(num * sizeof(struct device),
394 					      M_RAIDFRAME, M_NOWAIT);
395 	if (raidrootdev == NULL) {
396 		panic("No memory for RAIDframe driver!!?!?!");
397 	}
398 
399 	for (raidID = 0; raidID < num; raidID++) {
400 		bufq_alloc(&raid_softc[raidID].buf_queue, BUFQ_FCFS);
401 
402 		raidrootdev[raidID].dv_class  = DV_DISK;
403 		raidrootdev[raidID].dv_cfdata = NULL;
404 		raidrootdev[raidID].dv_unit   = raidID;
405 		raidrootdev[raidID].dv_parent = NULL;
406 		raidrootdev[raidID].dv_flags  = 0;
407 		snprintf(raidrootdev[raidID].dv_xname,
408 		    sizeof(raidrootdev[raidID].dv_xname), "raid%d", raidID);
409 
410 		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
411 			  (RF_Raid_t *));
412 		if (raidPtrs[raidID] == NULL) {
413 			printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
414 			numraid = raidID;
415 			return;
416 		}
417 	}
418 
419 #ifdef RAID_AUTOCONFIG
420 	raidautoconfig = 1;
421 #endif
422 
423 	/*
424 	 * Register a finalizer which will be used to auto-config RAID
425 	 * sets once all real hardware devices have been found.
426 	 */
427 	if (config_finalize_register(NULL, rf_autoconfig) != 0)
428 		printf("WARNING: unable to register RAIDframe finalizer\n");
429 }
430 
431 int
432 rf_autoconfig(struct device *self)
433 {
434 	RF_AutoConfig_t *ac_list;
435 	RF_ConfigSet_t *config_sets;
436 
437 	if (raidautoconfig == 0)
438 		return (0);
439 
440 	/* XXX This code can only be run once. */
441 	raidautoconfig = 0;
442 
443 	/* 1. locate all RAID components on the system */
444 #ifdef DEBUG
445 	printf("Searching for RAID components...\n");
446 #endif
447 	ac_list = rf_find_raid_components();
448 
449 	/* 2. Sort them into their respective sets. */
450 	config_sets = rf_create_auto_sets(ac_list);
451 
452 	/*
453  * 3. Evaluate each set and configure the valid ones.
454 	 * This gets done in rf_buildroothack().
455 	 */
456 	rf_buildroothack(config_sets);
457 
458 	return (1);
459 }
460 
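/*
 * rf_buildroothack -- walk the list of configuration sets, auto-configure
 * those that are complete and marked for autoconfiguration, and note which
 * one (if any) should become the root device.
 */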
461 void
462 rf_buildroothack(RF_ConfigSet_t *config_sets)
463 {
464 	RF_ConfigSet_t *cset;
465 	RF_ConfigSet_t *next_cset;
466 	int retcode;
467 	int raidID;
468 	int rootID;
469 	int num_root;
470 
471 	rootID = 0;
472 	num_root = 0;
473 	cset = config_sets;
474 	while(cset != NULL ) {
475 		next_cset = cset->next;
476 		if (rf_have_enough_components(cset) &&
477 		    cset->ac->clabel->autoconfigure==1) {
478 			retcode = rf_auto_config_set(cset,&raidID);
479 			if (!retcode) {
480 				if (cset->rootable) {
481 					rootID = raidID;
482 					num_root++;
483 				}
484 			} else {
485 				/* The autoconfig didn't work :( */
486 #if DEBUG
487 				printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
488 #endif
489 				rf_release_all_vps(cset);
490 			}
491 		} else {
492 			/* we're not autoconfiguring this set...
493 			   release the associated resources */
494 			rf_release_all_vps(cset);
495 		}
496 		/* cleanup */
497 		rf_cleanup_config_set(cset);
498 		cset = next_cset;
499 	}
500 
501 	/* If we found exactly one bootable set, use it; if several, ask the user. */
502 
503 	if (num_root == 1) {
504 		booted_device = &raidrootdev[rootID];
505 	} else if (num_root > 1) {
506 		/* we can't guess.. require the user to answer... */
507 		boothowto |= RB_ASKNAME;
508 	}
509 }
510 
511 
512 int
513 raidsize(dev_t dev)
514 {
515 	struct raid_softc *rs;
516 	struct disklabel *lp;
517 	int     part, unit, omask, size;
518 
519 	unit = raidunit(dev);
520 	if (unit >= numraid)
521 		return (-1);
522 	rs = &raid_softc[unit];
523 
524 	if ((rs->sc_flags & RAIDF_INITED) == 0)
525 		return (-1);
526 
527 	part = DISKPART(dev);
528 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
529 	lp = rs->sc_dkdev.dk_label;
530 
531 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
532 		return (-1);
533 
534 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
535 		size = -1;
536 	else
537 		size = lp->d_partitions[part].p_size *
538 		    (lp->d_secsize / DEV_BSIZE);
539 
540 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
541 		return (-1);
542 
543 	return (size);
544 
545 }
546 
547 int
548 raiddump(dev_t dev, daddr_t blkno, caddr_t va, size_t  size)
549 {
550 	/* Not implemented. */
551 	return ENXIO;
552 }
553 /* ARGSUSED */
554 int
555 raidopen(dev_t dev, int flags, int fmt, struct proc *p)
556 {
557 	int     unit = raidunit(dev);
558 	struct raid_softc *rs;
559 	struct disklabel *lp;
560 	int     part, pmask;
561 	int     error = 0;
562 
563 	if (unit >= numraid)
564 		return (ENXIO);
565 	rs = &raid_softc[unit];
566 
567 	if ((error = raidlock(rs)) != 0)
568 		return (error);
569 	lp = rs->sc_dkdev.dk_label;
570 
571 	part = DISKPART(dev);
572 	pmask = (1 << part);
573 
574 	if ((rs->sc_flags & RAIDF_INITED) &&
575 	    (rs->sc_dkdev.dk_openmask == 0))
576 		raidgetdisklabel(dev);
577 
578 	/* make sure that this partition exists */
579 
580 	if (part != RAW_PART) {
581 		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
582 		    ((part >= lp->d_npartitions) ||
583 			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
584 			error = ENXIO;
585 			raidunlock(rs);
586 			return (error);
587 		}
588 	}
589 	/* Prevent this unit from being unconfigured while open. */
590 	switch (fmt) {
591 	case S_IFCHR:
592 		rs->sc_dkdev.dk_copenmask |= pmask;
593 		break;
594 
595 	case S_IFBLK:
596 		rs->sc_dkdev.dk_bopenmask |= pmask;
597 		break;
598 	}
599 
600 	if ((rs->sc_dkdev.dk_openmask == 0) &&
601 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
602 		/* First one... mark things as dirty... Note that we *MUST*
603 		 have done a configure before this.  I DO NOT WANT TO BE
604 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
605 		 THAT THEY BELONG TOGETHER!!!!! */
606 		/* XXX should check to see if we're only open for reading
607 		   here... If so, we needn't do this, but then need some
608 		   other way of keeping track of what's happened.. */
609 
610 		rf_markalldirty( raidPtrs[unit] );
611 	}
612 
613 
614 	rs->sc_dkdev.dk_openmask =
615 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
616 
617 	raidunlock(rs);
618 
619 	return (error);
620 
621 
622 }
623 /* ARGSUSED */
624 int
625 raidclose(dev_t dev, int flags, int fmt, struct proc *p)
626 {
627 	int     unit = raidunit(dev);
628 	struct raid_softc *rs;
629 	int     error = 0;
630 	int     part;
631 
632 	if (unit >= numraid)
633 		return (ENXIO);
634 	rs = &raid_softc[unit];
635 
636 	if ((error = raidlock(rs)) != 0)
637 		return (error);
638 
639 	part = DISKPART(dev);
640 
641 	/* ...that much closer to allowing unconfiguration... */
642 	switch (fmt) {
643 	case S_IFCHR:
644 		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
645 		break;
646 
647 	case S_IFBLK:
648 		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
649 		break;
650 	}
651 	rs->sc_dkdev.dk_openmask =
652 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
653 
654 	if ((rs->sc_dkdev.dk_openmask == 0) &&
655 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
656 		/* Last one... the device is not unconfigured yet, so
657 		   mark things as clean here.  (Device shutdown takes
658 		   care of setting the clean bits when RAIDF_INITED is
659 		   not set.) */
660 
661 		rf_update_component_labels(raidPtrs[unit],
662 						 RF_FINAL_COMPONENT_UPDATE);
663 		if (doing_shutdown) {
664 			/* last one, and we're going down, so
665 			   lights out for this RAID set too. */
666 			error = rf_Shutdown(raidPtrs[unit]);
667 
668 			/* It's no longer initialized... */
669 			rs->sc_flags &= ~RAIDF_INITED;
670 
671 			/* Detach the disk. */
672 			disk_detach(&rs->sc_dkdev);
673 		}
674 	}
675 
676 	raidunlock(rs);
677 	return (0);
678 
679 }
680 
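/*
 * raidstrategy -- validate and bounds-check an incoming buffer, queue it
 * on the unit's buffer queue, and call raidstart() to push it down into
 * RAIDframe.
 */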
681 void
682 raidstrategy(struct buf *bp)
683 {
684 	int s;
685 
686 	unsigned int raidID = raidunit(bp->b_dev);
687 	RF_Raid_t *raidPtr;
688 	struct raid_softc *rs = &raid_softc[raidID];
689 	int     wlabel;
690 
691 	if ((rs->sc_flags & RAIDF_INITED) ==0) {
692 		bp->b_error = ENXIO;
693 		bp->b_flags |= B_ERROR;
694 		bp->b_resid = bp->b_bcount;
695 		biodone(bp);
696 		return;
697 	}
698 	if (raidID >= numraid || !raidPtrs[raidID]) {
699 		bp->b_error = ENODEV;
700 		bp->b_flags |= B_ERROR;
701 		bp->b_resid = bp->b_bcount;
702 		biodone(bp);
703 		return;
704 	}
705 	raidPtr = raidPtrs[raidID];
706 	if (!raidPtr->valid) {
707 		bp->b_error = ENODEV;
708 		bp->b_flags |= B_ERROR;
709 		bp->b_resid = bp->b_bcount;
710 		biodone(bp);
711 		return;
712 	}
713 	if (bp->b_bcount == 0) {
714 		db1_printf(("b_bcount is zero..\n"));
715 		biodone(bp);
716 		return;
717 	}
718 
719 	/*
720 	 * Do bounds checking and adjust transfer.  If there's an
721 	 * error, the bounds check will flag that for us.
722 	 */
723 
724 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
725 	if (DISKPART(bp->b_dev) != RAW_PART)
726 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
727 			db1_printf(("Bounds check failed!!:%d %d\n",
728 				(int) bp->b_blkno, (int) wlabel));
729 			biodone(bp);
730 			return;
731 		}
732 	s = splbio();
733 
734 	bp->b_resid = 0;
735 
736 	/* stuff it onto our queue */
737 	BUFQ_PUT(&rs->buf_queue, bp);
738 
739 	raidstart(raidPtrs[raidID]);
740 
741 	splx(s);
742 }
743 /* ARGSUSED */
744 int
745 raidread(dev_t dev, struct uio *uio, int flags)
746 {
747 	int     unit = raidunit(dev);
748 	struct raid_softc *rs;
749 
750 	if (unit >= numraid)
751 		return (ENXIO);
752 	rs = &raid_softc[unit];
753 
754 	if ((rs->sc_flags & RAIDF_INITED) == 0)
755 		return (ENXIO);
756 
757 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
758 
759 }
760 /* ARGSUSED */
761 int
762 raidwrite(dev_t dev, struct uio *uio, int flags)
763 {
764 	int     unit = raidunit(dev);
765 	struct raid_softc *rs;
766 
767 	if (unit >= numraid)
768 		return (ENXIO);
769 	rs = &raid_softc[unit];
770 
771 	if ((rs->sc_flags & RAIDF_INITED) == 0)
772 		return (ENXIO);
773 
774 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
775 
776 }
777 
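/*
 * raidioctl -- handle both the RAIDFRAME_* control operations and the
 * standard disk ioctls (disklabel handling and friends) for a unit.
 */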
778 int
779 raidioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
780 {
781 	int     unit = raidunit(dev);
782 	int     error = 0;
783 	int     part, pmask;
784 	struct raid_softc *rs;
785 	RF_Config_t *k_cfg, *u_cfg;
786 	RF_Raid_t *raidPtr;
787 	RF_RaidDisk_t *diskPtr;
788 	RF_AccTotals_t *totals;
789 	RF_DeviceConfig_t *d_cfg, **ucfgp;
790 	u_char *specific_buf;
791 	int retcode = 0;
792 	int column;
793 	int raidid;
794 	struct rf_recon_req *rrcopy, *rr;
795 	RF_ComponentLabel_t *clabel;
796 	RF_ComponentLabel_t ci_label;
797 	RF_ComponentLabel_t **clabel_ptr;
798 	RF_SingleComponent_t *sparePtr,*componentPtr;
799 	RF_SingleComponent_t hot_spare;
800 	RF_SingleComponent_t component;
801 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
802 	int i, j, d;
803 #ifdef __HAVE_OLD_DISKLABEL
804 	struct disklabel newlabel;
805 #endif
806 
807 	if (unit >= numraid)
808 		return (ENXIO);
809 	rs = &raid_softc[unit];
810 	raidPtr = raidPtrs[unit];
811 
812 	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
813 		(int) DISKPART(dev), (int) unit, (int) cmd));
814 
815 	/* Must be open for writes for these commands... */
816 	switch (cmd) {
817 	case DIOCSDINFO:
818 	case DIOCWDINFO:
819 #ifdef __HAVE_OLD_DISKLABEL
820 	case ODIOCWDINFO:
821 	case ODIOCSDINFO:
822 #endif
823 	case DIOCWLABEL:
824 		if ((flag & FWRITE) == 0)
825 			return (EBADF);
826 	}
827 
828 	/* Must be initialized for these... */
829 	switch (cmd) {
830 	case DIOCGDINFO:
831 	case DIOCSDINFO:
832 	case DIOCWDINFO:
833 #ifdef __HAVE_OLD_DISKLABEL
834 	case ODIOCGDINFO:
835 	case ODIOCWDINFO:
836 	case ODIOCSDINFO:
837 	case ODIOCGDEFLABEL:
838 #endif
839 	case DIOCGPART:
840 	case DIOCWLABEL:
841 	case DIOCGDEFLABEL:
842 	case RAIDFRAME_SHUTDOWN:
843 	case RAIDFRAME_REWRITEPARITY:
844 	case RAIDFRAME_GET_INFO:
845 	case RAIDFRAME_RESET_ACCTOTALS:
846 	case RAIDFRAME_GET_ACCTOTALS:
847 	case RAIDFRAME_KEEP_ACCTOTALS:
848 	case RAIDFRAME_GET_SIZE:
849 	case RAIDFRAME_FAIL_DISK:
850 	case RAIDFRAME_COPYBACK:
851 	case RAIDFRAME_CHECK_RECON_STATUS:
852 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
853 	case RAIDFRAME_GET_COMPONENT_LABEL:
854 	case RAIDFRAME_SET_COMPONENT_LABEL:
855 	case RAIDFRAME_ADD_HOT_SPARE:
856 	case RAIDFRAME_REMOVE_HOT_SPARE:
857 	case RAIDFRAME_INIT_LABELS:
858 	case RAIDFRAME_REBUILD_IN_PLACE:
859 	case RAIDFRAME_CHECK_PARITY:
860 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
861 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
862 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
863 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
864 	case RAIDFRAME_SET_AUTOCONFIG:
865 	case RAIDFRAME_SET_ROOT:
866 	case RAIDFRAME_DELETE_COMPONENT:
867 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
868 		if ((rs->sc_flags & RAIDF_INITED) == 0)
869 			return (ENXIO);
870 	}
871 
872 	switch (cmd) {
873 
874 		/* configure the system */
875 	case RAIDFRAME_CONFIGURE:
876 
877 		if (raidPtr->valid) {
878 			/* There is a valid RAID set running on this unit! */
879 			printf("raid%d: Device already configured!\n",unit);
880 			return(EINVAL);
881 		}
882 
883 		/* copy-in the configuration information */
884 		/* data points to a pointer to the configuration structure */
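		/*
		 * Roughly what a userland tool such as raidctl(8) is
		 * expected to do here (a sketch only, not lifted from
		 * raidctl itself):
		 *
		 *	RF_Config_t cfg;
		 *	RF_Config_t *cfgp = &cfg;
		 *
		 *	memset(&cfg, 0, sizeof(cfg));
		 *	... fill in cfg from the raid configuration file ...
		 *	if (ioctl(fd, RAIDFRAME_CONFIGURE, &cfgp) == -1)
		 *		err(1, "RAIDFRAME_CONFIGURE");
		 *
		 * hence the extra level of indirection when we pick up
		 * the user's pointer below.
		 */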
885 
886 		u_cfg = *((RF_Config_t **) data);
887 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
888 		if (k_cfg == NULL) {
889 			return (ENOMEM);
890 		}
891 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
892 		if (retcode) {
893 			RF_Free(k_cfg, sizeof(RF_Config_t));
894 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
895 				retcode));
896 			return (retcode);
897 		}
898 		/* allocate a buffer for the layout-specific data, and copy it
899 		 * in */
900 		if (k_cfg->layoutSpecificSize) {
901 			if (k_cfg->layoutSpecificSize > 10000) {
902 				/* sanity check */
903 				RF_Free(k_cfg, sizeof(RF_Config_t));
904 				return (EINVAL);
905 			}
906 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
907 			    (u_char *));
908 			if (specific_buf == NULL) {
909 				RF_Free(k_cfg, sizeof(RF_Config_t));
910 				return (ENOMEM);
911 			}
912 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
913 			    k_cfg->layoutSpecificSize);
914 			if (retcode) {
915 				RF_Free(k_cfg, sizeof(RF_Config_t));
916 				RF_Free(specific_buf,
917 					k_cfg->layoutSpecificSize);
918 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
919 					retcode));
920 				return (retcode);
921 			}
922 		} else
923 			specific_buf = NULL;
924 		k_cfg->layoutSpecific = specific_buf;
925 
926 		/* should do some kind of sanity check on the configuration.
927 		 * Store the sum of all the bytes in the last byte? */
928 
929 		/* configure the system */
930 
931 		/*
932 		 * Clear the entire RAID descriptor, just to make sure
933 		 *  there is no stale data left in the case of a
934 		 *  reconfiguration
935 		 */
936 		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
937 		raidPtr->raidid = unit;
938 
939 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
940 
941 		if (retcode == 0) {
942 
943 			/* allow this many simultaneous IO's to
944 			   this RAID device */
945 			raidPtr->openings = RAIDOUTSTANDING;
946 
947 			raidinit(raidPtr);
948 			rf_markalldirty(raidPtr);
949 		}
950 		/* free the buffers.  No return code here. */
951 		if (k_cfg->layoutSpecificSize) {
952 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
953 		}
954 		RF_Free(k_cfg, sizeof(RF_Config_t));
955 
956 		return (retcode);
957 
958 		/* shutdown the system */
959 	case RAIDFRAME_SHUTDOWN:
960 
961 		if ((error = raidlock(rs)) != 0)
962 			return (error);
963 
964 		/*
965 		 * If somebody has a partition mounted, we shouldn't
966 		 * shutdown.
967 		 */
968 
969 		part = DISKPART(dev);
970 		pmask = (1 << part);
971 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
972 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
973 			(rs->sc_dkdev.dk_copenmask & pmask))) {
974 			raidunlock(rs);
975 			return (EBUSY);
976 		}
977 
978 		retcode = rf_Shutdown(raidPtr);
979 
980 		/* It's no longer initialized... */
981 		rs->sc_flags &= ~RAIDF_INITED;
982 
983 		/* Detach the disk. */
984 		disk_detach(&rs->sc_dkdev);
985 
986 		raidunlock(rs);
987 
988 		return (retcode);
989 	case RAIDFRAME_GET_COMPONENT_LABEL:
990 		clabel_ptr = (RF_ComponentLabel_t **) data;
991 		/* need to read the component label for the disk indicated
992 		   by row,column in clabel */
993 
994 		/* For practice, let's get it directly from disk, rather
995 		   than from the in-core copy */
996 		RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
997 			   (RF_ComponentLabel_t *));
998 		if (clabel == NULL)
999 			return (ENOMEM);
1000 
1001 		memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
1002 
1003 		retcode = copyin( *clabel_ptr, clabel,
1004 				  sizeof(RF_ComponentLabel_t));
1005 
1006 		if (retcode) {
1007 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1008 			return(retcode);
1009 		}
1010 
1011 		clabel->row = 0; /* Don't allow looking at anything else.*/
1012 
1013 		column = clabel->column;
1014 
1015 		if ((column < 0) || (column >= raidPtr->numCol +
1016 				     raidPtr->numSpare)) {
1017 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1018 			return(EINVAL);
1019 		}
1020 
1021 		raidread_component_label(raidPtr->Disks[column].dev,
1022 				raidPtr->raid_cinfo[column].ci_vp,
1023 				clabel );
1024 
1025 		retcode = copyout(clabel, *clabel_ptr,
1026 				  sizeof(RF_ComponentLabel_t));
1027 		RF_Free(clabel, sizeof(RF_ComponentLabel_t));
1028 		return (retcode);
1029 
1030 	case RAIDFRAME_SET_COMPONENT_LABEL:
1031 		clabel = (RF_ComponentLabel_t *) data;
1032 
1033 		/* XXX check the label for valid stuff... */
1034 		/* Note that some things *should not* get modified --
1035 		   the user should be re-initing the labels instead of
1036 		   trying to patch things.
1037 		   */
1038 
1039 		raidid = raidPtr->raidid;
1040 #if DEBUG
1041 		printf("raid%d: Got component label:\n", raidid);
1042 		printf("raid%d: Version: %d\n", raidid, clabel->version);
1043 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1044 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1045 		printf("raid%d: Column: %d\n", raidid, clabel->column);
1046 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1047 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1048 		printf("raid%d: Status: %d\n", raidid, clabel->status);
1049 #endif
1050 		clabel->row = 0;
1051 		column = clabel->column;
1052 
1053 		if ((column < 0) || (column >= raidPtr->numCol)) {
1054 			return(EINVAL);
1055 		}
1056 
1057 		/* XXX this isn't allowed to do anything for now :-) */
1058 
1059 		/* XXX and before it is, we need to fill in the rest
1060 		   of the fields!?!?!?! */
1061 #if 0
1062 		raidwrite_component_label(
1063                             raidPtr->Disks[column].dev,
1064 			    raidPtr->raid_cinfo[column].ci_vp,
1065 			    clabel );
1066 #endif
1067 		return (0);
1068 
1069 	case RAIDFRAME_INIT_LABELS:
1070 		clabel = (RF_ComponentLabel_t *) data;
1071 		/*
1072 		   we only want the serial number from
1073 		   the above.  We get all the rest of the information
1074 		   from the config that was used to create this RAID
1075 		   set.
1076 		   */
1077 
1078 		raidPtr->serial_number = clabel->serial_number;
1079 
1080 		raid_init_component_label(raidPtr, &ci_label);
1081 		ci_label.serial_number = clabel->serial_number;
1082 		ci_label.row = 0; /* we don't pretend to support more */
1083 
1084 		for(column=0;column<raidPtr->numCol;column++) {
1085 			diskPtr = &raidPtr->Disks[column];
1086 			if (!RF_DEAD_DISK(diskPtr->status)) {
1087 				ci_label.partitionSize = diskPtr->partitionSize;
1088 				ci_label.column = column;
1089 				raidwrite_component_label(
1090 							  raidPtr->Disks[column].dev,
1091 							  raidPtr->raid_cinfo[column].ci_vp,
1092 							  &ci_label );
1093 			}
1094 		}
1095 
1096 		return (retcode);
1097 	case RAIDFRAME_SET_AUTOCONFIG:
1098 		d = rf_set_autoconfig(raidPtr, *(int *) data);
1099 		printf("raid%d: New autoconfig value is: %d\n",
1100 		       raidPtr->raidid, d);
1101 		*(int *) data = d;
1102 		return (retcode);
1103 
1104 	case RAIDFRAME_SET_ROOT:
1105 		d = rf_set_rootpartition(raidPtr, *(int *) data);
1106 		printf("raid%d: New rootpartition value is: %d\n",
1107 		       raidPtr->raidid, d);
1108 		*(int *) data = d;
1109 		return (retcode);
1110 
1111 		/* initialize all parity */
1112 	case RAIDFRAME_REWRITEPARITY:
1113 
1114 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1115 			/* Parity for RAID 0 is trivially correct */
1116 			raidPtr->parity_good = RF_RAID_CLEAN;
1117 			return(0);
1118 		}
1119 
1120 		if (raidPtr->parity_rewrite_in_progress == 1) {
1121 			/* Re-write is already in progress! */
1122 			return(EINVAL);
1123 		}
1124 
1125 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1126 					   rf_RewriteParityThread,
1127 					   raidPtr,"raid_parity");
1128 		return (retcode);
1129 
1130 
1131 	case RAIDFRAME_ADD_HOT_SPARE:
1132 		sparePtr = (RF_SingleComponent_t *) data;
1133 		memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
1134 		retcode = rf_add_hot_spare(raidPtr, &hot_spare);
1135 		return(retcode);
1136 
1137 	case RAIDFRAME_REMOVE_HOT_SPARE:
1138 		return(retcode);
1139 
1140 	case RAIDFRAME_DELETE_COMPONENT:
1141 		componentPtr = (RF_SingleComponent_t *)data;
1142 		memcpy( &component, componentPtr,
1143 			sizeof(RF_SingleComponent_t));
1144 		retcode = rf_delete_component(raidPtr, &component);
1145 		return(retcode);
1146 
1147 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
1148 		componentPtr = (RF_SingleComponent_t *)data;
1149 		memcpy( &component, componentPtr,
1150 			sizeof(RF_SingleComponent_t));
1151 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
1152 		return(retcode);
1153 
1154 	case RAIDFRAME_REBUILD_IN_PLACE:
1155 
1156 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1157 			/* Can't do this on a RAID 0!! */
1158 			return(EINVAL);
1159 		}
1160 
1161 		if (raidPtr->recon_in_progress == 1) {
1162 			/* a reconstruct is already in progress! */
1163 			return(EINVAL);
1164 		}
1165 
1166 		componentPtr = (RF_SingleComponent_t *) data;
1167 		memcpy( &component, componentPtr,
1168 			sizeof(RF_SingleComponent_t));
1169 		component.row = 0; /* we don't support any more */
1170 		column = component.column;
1171 
1172 		if ((column < 0) || (column >= raidPtr->numCol)) {
1173 			return(EINVAL);
1174 		}
1175 
1176 		RF_LOCK_MUTEX(raidPtr->mutex);
1177 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1178 		    (raidPtr->numFailures > 0)) {
1179 			/* XXX 0 above shouldn't be constant!!! */
1180 			/* some component other than this has failed.
1181 			   Let's not make things worse than they already
1182 			   are... */
1183 			printf("raid%d: Unable to reconstruct to disk at:\n",
1184 			       raidPtr->raidid);
1185 			printf("raid%d:     Col: %d   Too many failures.\n",
1186 			       raidPtr->raidid, column);
1187 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1188 			return (EINVAL);
1189 		}
1190 		if (raidPtr->Disks[column].status ==
1191 		    rf_ds_reconstructing) {
1192 			printf("raid%d: Unable to reconstruct to disk at:\n",
1193 			       raidPtr->raidid);
1194 			printf("raid%d:    Col: %d   Reconstruction already occurring!\n", raidPtr->raidid, column);
1195 
1196 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1197 			return (EINVAL);
1198 		}
1199 		if (raidPtr->Disks[column].status == rf_ds_spared) {
1200 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1201 			return (EINVAL);
1202 		}
1203 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1204 
1205 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1206 		if (rrcopy == NULL)
1207 			return(ENOMEM);
1208 
1209 		rrcopy->raidPtr = (void *) raidPtr;
1210 		rrcopy->col = column;
1211 
1212 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1213 					   rf_ReconstructInPlaceThread,
1214 					   rrcopy,"raid_reconip");
1215 		return(retcode);
1216 
1217 	case RAIDFRAME_GET_INFO:
1218 		if (!raidPtr->valid)
1219 			return (ENODEV);
1220 		ucfgp = (RF_DeviceConfig_t **) data;
1221 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1222 			  (RF_DeviceConfig_t *));
1223 		if (d_cfg == NULL)
1224 			return (ENOMEM);
1225 		memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
1226 		d_cfg->rows = 1; /* there is only 1 row now */
1227 		d_cfg->cols = raidPtr->numCol;
1228 		d_cfg->ndevs = raidPtr->numCol;
1229 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
1230 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1231 			return (ENOMEM);
1232 		}
1233 		d_cfg->nspares = raidPtr->numSpare;
1234 		if (d_cfg->nspares >= RF_MAX_DISKS) {
1235 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1236 			return (ENOMEM);
1237 		}
1238 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1239 		d = 0;
1240 		for (j = 0; j < d_cfg->cols; j++) {
1241 			d_cfg->devs[d] = raidPtr->Disks[j];
1242 			d++;
1243 		}
1244 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1245 			d_cfg->spares[i] = raidPtr->Disks[j];
1246 		}
1247 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
1248 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1249 
1250 		return (retcode);
1251 
1252 	case RAIDFRAME_CHECK_PARITY:
1253 		*(int *) data = raidPtr->parity_good;
1254 		return (0);
1255 
1256 	case RAIDFRAME_RESET_ACCTOTALS:
1257 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1258 		return (0);
1259 
1260 	case RAIDFRAME_GET_ACCTOTALS:
1261 		totals = (RF_AccTotals_t *) data;
1262 		*totals = raidPtr->acc_totals;
1263 		return (0);
1264 
1265 	case RAIDFRAME_KEEP_ACCTOTALS:
1266 		raidPtr->keep_acc_totals = *(int *)data;
1267 		return (0);
1268 
1269 	case RAIDFRAME_GET_SIZE:
1270 		*(int *) data = raidPtr->totalSectors;
1271 		return (0);
1272 
1273 		/* fail a disk & optionally start reconstruction */
1274 	case RAIDFRAME_FAIL_DISK:
1275 
1276 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1277 			/* Can't do this on a RAID 0!! */
1278 			return(EINVAL);
1279 		}
1280 
1281 		rr = (struct rf_recon_req *) data;
1282 		rr->row = 0;
1283 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
1284 			return (EINVAL);
1285 
1286 
1287 		RF_LOCK_MUTEX(raidPtr->mutex);
1288 		if ((raidPtr->Disks[rr->col].status ==
1289 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
1290 			/* some other component has failed.  Let's not make
1291 			   things worse. XXX wrong for RAID6 */
1292 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1293 			return (EINVAL);
1294 		}
1295 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1296 			/* Can't fail a spared disk! */
1297 			RF_UNLOCK_MUTEX(raidPtr->mutex);
1298 			return (EINVAL);
1299 		}
1300 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1301 
1302 		/* make a copy of the recon request so that we don't rely on
1303 		 * the user's buffer */
1304 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1305 		if (rrcopy == NULL)
1306 			return(ENOMEM);
1307 		memcpy(rrcopy, rr, sizeof(*rr));
1308 		rrcopy->raidPtr = (void *) raidPtr;
1309 
1310 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1311 					   rf_ReconThread,
1312 					   rrcopy,"raid_recon");
1313 		return (0);
1314 
1315 		/* invoke a copyback operation after recon on whatever disk
1316 		 * needs it, if any */
1317 	case RAIDFRAME_COPYBACK:
1318 
1319 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1320 			/* This makes no sense on a RAID 0!! */
1321 			return(EINVAL);
1322 		}
1323 
1324 		if (raidPtr->copyback_in_progress == 1) {
1325 			/* Copyback is already in progress! */
1326 			return(EINVAL);
1327 		}
1328 
1329 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1330 					   rf_CopybackThread,
1331 					   raidPtr,"raid_copyback");
1332 		return (retcode);
1333 
1334 		/* return the percentage completion of reconstruction */
1335 	case RAIDFRAME_CHECK_RECON_STATUS:
1336 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1337 			/* This makes no sense on a RAID 0, so tell the
1338 			   user it's done. */
1339 			*(int *) data = 100;
1340 			return(0);
1341 		}
1342 		if (raidPtr->status != rf_rs_reconstructing)
1343 			*(int *) data = 100;
1344 		else {
1345 			if (raidPtr->reconControl->numRUsTotal > 0) {
1346 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
1347 			} else {
1348 				*(int *) data = 0;
1349 			}
1350 		}
1351 		return (0);
1352 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1353 		progressInfoPtr = (RF_ProgressInfo_t **) data;
1354 		if (raidPtr->status != rf_rs_reconstructing) {
1355 			progressInfo.remaining = 0;
1356 			progressInfo.completed = 100;
1357 			progressInfo.total = 100;
1358 		} else {
1359 			progressInfo.total =
1360 				raidPtr->reconControl->numRUsTotal;
1361 			progressInfo.completed =
1362 				raidPtr->reconControl->numRUsComplete;
1363 			progressInfo.remaining = progressInfo.total -
1364 				progressInfo.completed;
1365 		}
1366 		retcode = copyout(&progressInfo, *progressInfoPtr,
1367 				  sizeof(RF_ProgressInfo_t));
1368 		return (retcode);
1369 
1370 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1371 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1372 			/* This makes no sense on a RAID 0, so tell the
1373 			   user it's done. */
1374 			*(int *) data = 100;
1375 			return(0);
1376 		}
1377 		if (raidPtr->parity_rewrite_in_progress == 1) {
1378 			*(int *) data = 100 *
1379 				raidPtr->parity_rewrite_stripes_done /
1380 				raidPtr->Layout.numStripe;
1381 		} else {
1382 			*(int *) data = 100;
1383 		}
1384 		return (0);
1385 
1386 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1387 		progressInfoPtr = (RF_ProgressInfo_t **) data;
1388 		if (raidPtr->parity_rewrite_in_progress == 1) {
1389 			progressInfo.total = raidPtr->Layout.numStripe;
1390 			progressInfo.completed =
1391 				raidPtr->parity_rewrite_stripes_done;
1392 			progressInfo.remaining = progressInfo.total -
1393 				progressInfo.completed;
1394 		} else {
1395 			progressInfo.remaining = 0;
1396 			progressInfo.completed = 100;
1397 			progressInfo.total = 100;
1398 		}
1399 		retcode = copyout(&progressInfo, *progressInfoPtr,
1400 				  sizeof(RF_ProgressInfo_t));
1401 		return (retcode);
1402 
1403 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
1404 		if (raidPtr->Layout.map->faultsTolerated == 0) {
1405 			/* This makes no sense on a RAID 0 */
1406 			*(int *) data = 100;
1407 			return(0);
1408 		}
1409 		if (raidPtr->copyback_in_progress == 1) {
1410 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
1411 				raidPtr->Layout.numStripe;
1412 		} else {
1413 			*(int *) data = 100;
1414 		}
1415 		return (0);
1416 
1417 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1418 		progressInfoPtr = (RF_ProgressInfo_t **) data;
1419 		if (raidPtr->copyback_in_progress == 1) {
1420 			progressInfo.total = raidPtr->Layout.numStripe;
1421 			progressInfo.completed =
1422 				raidPtr->copyback_stripes_done;
1423 			progressInfo.remaining = progressInfo.total -
1424 				progressInfo.completed;
1425 		} else {
1426 			progressInfo.remaining = 0;
1427 			progressInfo.completed = 100;
1428 			progressInfo.total = 100;
1429 		}
1430 		retcode = copyout(&progressInfo, *progressInfoPtr,
1431 				  sizeof(RF_ProgressInfo_t));
1432 		return (retcode);
1433 
1434 		/* the sparetable daemon calls this to wait for the kernel to
1435 		 * need a spare table. this ioctl does not return until a
1436 		 * spare table is needed. XXX -- calling mpsleep here in the
1437 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1438 		 * -- I should either compute the spare table in the kernel,
1439 		 * or have a different -- XXX XXX -- interface (a different
1440 		 * character device) for delivering the table     -- XXX */
1441 #if 0
1442 	case RAIDFRAME_SPARET_WAIT:
1443 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1444 		while (!rf_sparet_wait_queue)
1445 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1446 		waitreq = rf_sparet_wait_queue;
1447 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1448 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1449 
1450 		/* structure assignment */
1451 		*((RF_SparetWait_t *) data) = *waitreq;
1452 
1453 		RF_Free(waitreq, sizeof(*waitreq));
1454 		return (0);
1455 
1456 		/* wakes up a process waiting on SPARET_WAIT and puts an error
1457 		 * code in it that will cause the daemon to exit */
1458 	case RAIDFRAME_ABORT_SPARET_WAIT:
1459 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1460 		waitreq->fcol = -1;
1461 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1462 		waitreq->next = rf_sparet_wait_queue;
1463 		rf_sparet_wait_queue = waitreq;
1464 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1465 		wakeup(&rf_sparet_wait_queue);
1466 		return (0);
1467 
1468 		/* used by the spare table daemon to deliver a spare table
1469 		 * into the kernel */
1470 	case RAIDFRAME_SEND_SPARET:
1471 
1472 		/* install the spare table */
1473 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1474 
1475 		/* respond to the requestor.  the return status of the spare
1476 		 * table installation is passed in the "fcol" field */
1477 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1478 		waitreq->fcol = retcode;
1479 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1480 		waitreq->next = rf_sparet_resp_queue;
1481 		rf_sparet_resp_queue = waitreq;
1482 		wakeup(&rf_sparet_resp_queue);
1483 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1484 
1485 		return (retcode);
1486 #endif
1487 
1488 	default:
1489 		break; /* fall through to the os-specific code below */
1490 
1491 	}
1492 
1493 	if (!raidPtr->valid)
1494 		return (EINVAL);
1495 
1496 	/*
1497 	 * Add support for "regular" device ioctls here.
1498 	 */
1499 
1500 	switch (cmd) {
1501 	case DIOCGDINFO:
1502 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1503 		break;
1504 #ifdef __HAVE_OLD_DISKLABEL
1505 	case ODIOCGDINFO:
1506 		newlabel = *(rs->sc_dkdev.dk_label);
1507 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1508 			return ENOTTY;
1509 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
1510 		break;
1511 #endif
1512 
1513 	case DIOCGPART:
1514 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1515 		((struct partinfo *) data)->part =
1516 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1517 		break;
1518 
1519 	case DIOCWDINFO:
1520 	case DIOCSDINFO:
1521 #ifdef __HAVE_OLD_DISKLABEL
1522 	case ODIOCWDINFO:
1523 	case ODIOCSDINFO:
1524 #endif
1525 	{
1526 		struct disklabel *lp;
1527 #ifdef __HAVE_OLD_DISKLABEL
1528 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1529 			memset(&newlabel, 0, sizeof newlabel);
1530 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
1531 			lp = &newlabel;
1532 		} else
1533 #endif
1534 		lp = (struct disklabel *)data;
1535 
1536 		if ((error = raidlock(rs)) != 0)
1537 			return (error);
1538 
1539 		rs->sc_flags |= RAIDF_LABELLING;
1540 
1541 		error = setdisklabel(rs->sc_dkdev.dk_label,
1542 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
1543 		if (error == 0) {
1544 			if (cmd == DIOCWDINFO
1545 #ifdef __HAVE_OLD_DISKLABEL
1546 			    || cmd == ODIOCWDINFO
1547 #endif
1548 			   )
1549 				error = writedisklabel(RAIDLABELDEV(dev),
1550 				    raidstrategy, rs->sc_dkdev.dk_label,
1551 				    rs->sc_dkdev.dk_cpulabel);
1552 		}
1553 		rs->sc_flags &= ~RAIDF_LABELLING;
1554 
1555 		raidunlock(rs);
1556 
1557 		if (error)
1558 			return (error);
1559 		break;
1560 	}
1561 
1562 	case DIOCWLABEL:
1563 		if (*(int *) data != 0)
1564 			rs->sc_flags |= RAIDF_WLABEL;
1565 		else
1566 			rs->sc_flags &= ~RAIDF_WLABEL;
1567 		break;
1568 
1569 	case DIOCGDEFLABEL:
1570 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1571 		break;
1572 
1573 #ifdef __HAVE_OLD_DISKLABEL
1574 	case ODIOCGDEFLABEL:
1575 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
1576 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1577 			return ENOTTY;
1578 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
1579 		break;
1580 #endif
1581 
1582 	default:
1583 		retcode = ENOTTY;
1584 	}
1585 	return (retcode);
1586 
1587 }
1588 
1589 
1590 /* raidinit -- complete the rest of the initialization for the
1591    RAIDframe device.  */
1592 
1593 
1594 static void
1595 raidinit(RF_Raid_t *raidPtr)
1596 {
1597 	struct raid_softc *rs;
1598 	int     unit;
1599 
1600 	unit = raidPtr->raidid;
1601 
1602 	rs = &raid_softc[unit];
1603 
1604 	/* XXX should check return code first... */
1605 	rs->sc_flags |= RAIDF_INITED;
1606 
1607 	/* XXX doesn't check bounds. */
1608 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
1609 
1610 	rs->sc_dkdev.dk_name = rs->sc_xname;
1611 
1612 	/* disk_attach actually creates space for the CPU disklabel, among
1613 	 * other things, so it's critical to call this *BEFORE* we try putzing
1614 	 * with disklabels. */
1615 
1616 	disk_attach(&rs->sc_dkdev);
1617 
1618 	/* XXX There may be a weird interaction here between this, and
1619 	 * protectedSectors, as used in RAIDframe.  */
1620 
1621 	rs->sc_size = raidPtr->totalSectors;
1622 }
1623 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1624 /* wake up the daemon & tell it to get us a spare table
1625  * XXX
1626  * the entries in the queues should be tagged with the raidPtr
1627  * so that in the extremely rare case that two recons happen at once,
1628  * we know for which device we're requesting a spare table
1629  * XXX
1630  *
1631  * XXX This code is not currently used. GO
1632  */
1633 int
1634 rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
1635 {
1636 	int     retcode;
1637 
1638 	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1639 	req->next = rf_sparet_wait_queue;
1640 	rf_sparet_wait_queue = req;
1641 	wakeup(&rf_sparet_wait_queue);
1642 
1643 	/* mpsleep unlocks the mutex */
1644 	while (!rf_sparet_resp_queue) {
1645 		tsleep(&rf_sparet_resp_queue, PRIBIO,
1646 		    "raidframe getsparetable", 0);
1647 	}
1648 	req = rf_sparet_resp_queue;
1649 	rf_sparet_resp_queue = req->next;
1650 	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1651 
1652 	retcode = req->fcol;
1653 	RF_Free(req, sizeof(*req));	/* this is not the same req as we
1654 					 * alloc'd */
1655 	return (retcode);
1656 }
1657 #endif
1658 
1659 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1660  * bp & passes it down.
1661  * Any calls originating in the kernel must use non-blocking I/O.  Do some
1662  * extra sanity checking to return "appropriate" error values for certain
1663  * conditions (to make some standard utilities work).
1664  *
1665  * Formerly known as: rf_DoAccessKernel
1666  */
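
/*
 * For example, on a set with 512-byte sectors a 64K buffer below works out
 * to num_blocks = 128 and pb = 0, while a transfer that is not a multiple
 * of the sector size picks up pb = 1 and is rejected with EINVAL before it
 * ever reaches rf_DoAccess().
 */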
1667 void
1668 raidstart(RF_Raid_t *raidPtr)
1669 {
1670 	RF_SectorCount_t num_blocks, pb, sum;
1671 	RF_RaidAddr_t raid_addr;
1672 	struct partition *pp;
1673 	daddr_t blocknum;
1674 	int     unit;
1675 	struct raid_softc *rs;
1676 	int     do_async;
1677 	struct buf *bp;
1678 	int rc;
1679 
1680 	unit = raidPtr->raidid;
1681 	rs = &raid_softc[unit];
1682 
1683 	/* quick check to see if anything has died recently */
1684 	RF_LOCK_MUTEX(raidPtr->mutex);
1685 	if (raidPtr->numNewFailures > 0) {
1686 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1687 		rf_update_component_labels(raidPtr,
1688 					   RF_NORMAL_COMPONENT_UPDATE);
1689 		RF_LOCK_MUTEX(raidPtr->mutex);
1690 		raidPtr->numNewFailures--;
1691 	}
1692 
1693 	/* Check to see if we're at the limit... */
1694 	while (raidPtr->openings > 0) {
1695 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1696 
1697 		/* get the next item, if any, from the queue */
1698 		if ((bp = BUFQ_GET(&rs->buf_queue)) == NULL) {
1699 			/* nothing more to do */
1700 			return;
1701 		}
1702 
1703 		/* OK, for the bp we have here, bp->b_blkno is relative to the
1704 		 * partition.  We need to make it absolute to the underlying
1705 		 * device. */
1706 
1707 		blocknum = bp->b_blkno;
1708 		if (DISKPART(bp->b_dev) != RAW_PART) {
1709 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
1710 			blocknum += pp->p_offset;
1711 		}
1712 
1713 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
1714 			    (int) blocknum));
1715 
1716 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1717 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1718 
1719 		/* *THIS* is where we adjust what block we're going to...
1720 		 * but DO NOT TOUCH bp->b_blkno!!! */
1721 		raid_addr = blocknum;
1722 
1723 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1724 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1725 		sum = raid_addr + num_blocks + pb;
1726 		if (1 || rf_debugKernelAccess) {
1727 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1728 				    (int) raid_addr, (int) sum, (int) num_blocks,
1729 				    (int) pb, (int) bp->b_resid));
1730 		}
1731 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1732 		    || (sum < num_blocks) || (sum < pb)) {
1733 			bp->b_error = ENOSPC;
1734 			bp->b_flags |= B_ERROR;
1735 			bp->b_resid = bp->b_bcount;
1736 			biodone(bp);
1737 			RF_LOCK_MUTEX(raidPtr->mutex);
1738 			continue;
1739 		}
1740 		/*
1741 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1742 		 */
1743 
1744 		if (bp->b_bcount & raidPtr->sectorMask) {
1745 			bp->b_error = EINVAL;
1746 			bp->b_flags |= B_ERROR;
1747 			bp->b_resid = bp->b_bcount;
1748 			biodone(bp);
1749 			RF_LOCK_MUTEX(raidPtr->mutex);
1750 			continue;
1751 
1752 		}
1753 		db1_printf(("Calling DoAccess..\n"));
1754 
1755 
1756 		RF_LOCK_MUTEX(raidPtr->mutex);
1757 		raidPtr->openings--;
1758 		RF_UNLOCK_MUTEX(raidPtr->mutex);
1759 
1760 		/*
1761 		 * Everything is async.
1762 		 */
1763 		do_async = 1;
1764 
1765 		disk_busy(&rs->sc_dkdev);
1766 
1767 		/* XXX we're still at splbio() here... do we *really*
1768 		   need to be? */
1769 
1770 		/* don't ever condition on bp->b_flags & B_WRITE.
1771 		 * always condition on B_READ instead */
1772 
1773 		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1774 				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
1775 				 do_async, raid_addr, num_blocks,
1776 				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
1777 
1778 		if (rc) {
1779 			bp->b_error = rc;
1780 			bp->b_flags |= B_ERROR;
1781 			bp->b_resid = bp->b_bcount;
1782 			biodone(bp);
1783 			/* continue loop */
1784 		}
1785 
1786 		RF_LOCK_MUTEX(raidPtr->mutex);
1787 	}
1788 	RF_UNLOCK_MUTEX(raidPtr->mutex);
1789 }
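/*
 * Illustrative sketch only (not part of the driver): a minimal
 * strategy-style caller that feeds raidstart().  The real raidstrategy()
 * in this file also validates the request against the disklabel and open
 * state; BUFQ_PUT is assumed to be the enqueue counterpart of the
 * BUFQ_GET used above.
 */
#if 0
static void
example_strategy(struct buf *bp)
{
	int     unit = raidunit(bp->b_dev);
	struct raid_softc *rs = &raid_softc[unit];
	int     s;

	s = splbio();			/* the buf queue is manipulated at splbio */
	BUFQ_PUT(&rs->buf_queue, bp);	/* raidstart() drains this queue */
	raidstart(raidPtrs[unit]);	/* issue I/O while openings remain */
	splx(s);
}
#endif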
1790 
1791 
1792 
1793 
1794 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
1795 
1796 int
1797 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
1798 {
1799 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
1800 	struct buf *bp;
1801 	struct raidbuf *raidbp = NULL;
1802 
1803 	req->queue = queue;
1804 
1805 #if DIAGNOSTIC
1806 	if (queue->raidPtr->raidid >= numraid) {
1807 		printf("Invalid unit number: %d %d\n", queue->raidPtr->raidid,
1808 		    numraid);
1809 		panic("Invalid Unit number in rf_DispatchKernelIO");
1810 	}
1811 #endif
1812 
1813 	bp = req->bp;
1814 #if 1
1815 	/* XXX when there is a physical disk failure, someone is passing us a
1816 	 * buffer that contains old stuff!!  Attempt to deal with this problem
1817 	 * without taking a performance hit... (not sure where the real bug
1818 	 * is.  It's buried in RAIDframe somewhere) :-(  GO ) */
1819 
1820 	if (bp->b_flags & B_ERROR) {
1821 		bp->b_flags &= ~B_ERROR;
1822 	}
1823 	if (bp->b_error != 0) {
1824 		bp->b_error = 0;
1825 	}
1826 #endif
1827 	raidbp = pool_get(&rf_pools.cbuf, PR_NOWAIT);
1828 	if (raidbp == NULL) {
1829 		bp->b_flags |= B_ERROR;
1830 		bp->b_error = ENOMEM;
1831 		return (ENOMEM);
1832 	}
1833 	BUF_INIT(&raidbp->rf_buf);
1834 
1835 	/*
1836 	 * context for raidiodone
1837 	 */
1838 	raidbp->rf_obp = bp;
1839 	raidbp->req = req;
1840 
1841 	BIO_COPYPRIO(&raidbp->rf_buf, bp);
1842 
1843 	switch (req->type) {
1844 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
1845 		/* XXX need to do something extra here.. */
1846 		/* I'm leaving this in, as I've never actually seen it used,
1847 		 * and I'd like folks to report it... GO */
1848 		printf("WAKEUP CALLED\n");
1849 		queue->numOutstanding++;
1850 
1851 		/* XXX need to glue the original buffer into this??  */
1852 
1853 		KernelWakeupFunc(&raidbp->rf_buf);
1854 		break;
1855 
1856 	case RF_IO_TYPE_READ:
1857 	case RF_IO_TYPE_WRITE:
1858 #if RF_ACC_TRACE > 0
1859 		if (req->tracerec) {
1860 			RF_ETIMER_START(req->tracerec->timer);
1861 		}
1862 #endif
1863 		InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
1864 		    op | bp->b_flags, queue->rf_cinfo->ci_dev,
1865 		    req->sectorOffset, req->numSector,
1866 		    req->buf, KernelWakeupFunc, (void *) req,
1867 		    queue->raidPtr->logBytesPerSector, req->b_proc);
1868 
1869 		if (rf_debugKernelAccess) {
1870 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
1871 				(long) bp->b_blkno));
1872 		}
1873 		queue->numOutstanding++;
1874 		queue->last_deq_sector = req->sectorOffset;
1875 		/* acc wouldn't have been let in if there were any pending
1876 		 * reqs at any other priority */
1877 		queue->curPriority = req->priority;
1878 
1879 		db1_printf(("Going for %c to unit %d col %d\n",
1880 			    req->type, queue->raidPtr->raidid,
1881 			    queue->col));
1882 		db1_printf(("sector %d count %d (%d bytes) %d\n",
1883 			(int) req->sectorOffset, (int) req->numSector,
1884 			(int) (req->numSector <<
1885 			    queue->raidPtr->logBytesPerSector),
1886 			(int) queue->raidPtr->logBytesPerSector));
1887 		if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
1888 			raidbp->rf_buf.b_vp->v_numoutput++;
1889 		}
1890 		VOP_STRATEGY(raidbp->rf_buf.b_vp, &raidbp->rf_buf);
1891 
1892 		break;
1893 
1894 	default:
1895 		panic("bad req->type in rf_DispatchKernelIO");
1896 	}
1897 	db1_printf(("Exiting from DispatchKernelIO\n"));
1898 
1899 	return (0);
1900 }
1901 /* This is the callback function associated with an I/O invoked from
1902    kernel code.
1903  */
1904 static void
1905 KernelWakeupFunc(struct buf *vbp)
1906 {
1907 	RF_DiskQueueData_t *req = NULL;
1908 	RF_DiskQueue_t *queue;
1909 	struct raidbuf *raidbp = (struct raidbuf *) vbp;
1910 	struct buf *bp;
1911 	int s;
1912 
1913 	s = splbio();
1914 	db1_printf(("recovering the request queue:\n"));
1915 	req = raidbp->req;
1916 
1917 	bp = raidbp->rf_obp;
1918 
1919 	queue = (RF_DiskQueue_t *) req->queue;
1920 
1921 	if (raidbp->rf_buf.b_flags & B_ERROR) {
1922 		bp->b_flags |= B_ERROR;
1923 		bp->b_error = raidbp->rf_buf.b_error ?
1924 		    raidbp->rf_buf.b_error : EIO;
1925 	}
1926 
1927 	/* XXX methinks this could be wrong... */
1928 #if 1
1929 	bp->b_resid = raidbp->rf_buf.b_resid;
1930 #endif
1931 #if RF_ACC_TRACE > 0
1932 	if (req->tracerec) {
1933 		RF_ETIMER_STOP(req->tracerec->timer);
1934 		RF_ETIMER_EVAL(req->tracerec->timer);
1935 		RF_LOCK_MUTEX(rf_tracing_mutex);
1936 		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1937 		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1938 		req->tracerec->num_phys_ios++;
1939 		RF_UNLOCK_MUTEX(rf_tracing_mutex);
1940 	}
1941 #endif
1942 	bp->b_bcount = raidbp->rf_buf.b_bcount;	/* XXXX ?? */
1943 
1944 	/* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
1945 	 * ballistic, and mark the component as hosed... */
1946 
1947 	if (bp->b_flags & B_ERROR) {
1948 		/* Mark the disk as dead */
1949 		/* but only mark it once... */
1950 		if (queue->raidPtr->Disks[queue->col].status ==
1951 		    rf_ds_optimal) {
1952 			printf("raid%d: IO Error.  Marking %s as failed.\n",
1953 			       queue->raidPtr->raidid,
1954 			       queue->raidPtr->Disks[queue->col].devname);
1955 			queue->raidPtr->Disks[queue->col].status =
1956 			    rf_ds_failed;
1957 			queue->raidPtr->status = rf_rs_degraded;
1958 			queue->raidPtr->numFailures++;
1959 			queue->raidPtr->numNewFailures++;
1960 		} else {	/* Disk is already dead... */
1961 			/* printf("Disk already marked as dead!\n"); */
1962 		}
1963 
1964 	}
1965 
1966 	pool_put(&rf_pools.cbuf, raidbp);
1967 
1968 	/* Fill in the error value */
1969 
1970 	req->error = (bp->b_flags & B_ERROR) ? bp->b_error : 0;
1971 
1972 	simple_lock(&queue->raidPtr->iodone_lock);
1973 
1974 	/* Drop this one on the "finished" queue... */
1975 	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
1976 
1977 	/* Let the raidio thread know there is work to be done. */
1978 	wakeup(&(queue->raidPtr->iodone));
1979 
1980 	simple_unlock(&queue->raidPtr->iodone_lock);
1981 
1982 	splx(s);
1983 }
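/*
 * Sketch of the consumer side of the iodone queue filled above (assumption:
 * the raidio worker thread elsewhere in this file does roughly this;
 * rf_DiskIOComplete() is the RAIDframe disk-queue completion hook, and its
 * exact use here should be treated as illustrative).
 */
#if 0
static void
example_iodone_drain(RF_Raid_t *raidPtr)
{
	RF_DiskQueueData_t *req;

	simple_lock(&raidPtr->iodone_lock);
	while ((req = TAILQ_FIRST(&raidPtr->iodone)) != NULL) {
		TAILQ_REMOVE(&raidPtr->iodone, req, iodone_entries);
		simple_unlock(&raidPtr->iodone_lock);
		/* hand the finished I/O (and req->error) back to RAIDframe */
		rf_DiskIOComplete(req->queue, req, req->error);
		simple_lock(&raidPtr->iodone_lock);
	}
	simple_unlock(&raidPtr->iodone_lock);
}
#endif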
1984 
1985 
1986 
1987 /*
1988  * initialize a buf structure for doing an I/O in the kernel.
1989  */
1990 static void
1991 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
1992        RF_SectorNum_t startSect, RF_SectorCount_t numSect, caddr_t buf,
1993        void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
1994        struct proc *b_proc)
1995 {
1996 	/* bp->b_flags       = B_PHYS | rw_flag; */
1997 	bp->b_flags = B_CALL | rw_flag;	/* XXX need B_PHYS here too??? */
1998 	bp->b_bcount = numSect << logBytesPerSector;
1999 	bp->b_bufsize = bp->b_bcount;
2000 	bp->b_error = 0;
2001 	bp->b_dev = dev;
2002 	bp->b_data = buf;
2003 	bp->b_blkno = startSect;
2004 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
2005 	if (bp->b_bcount == 0) {
2006 		panic("bp->b_bcount is zero in InitBP!!");
2007 	}
2008 	bp->b_proc = b_proc;
2009 	bp->b_iodone = cbFunc;
2010 	bp->b_vp = b_vp;
2011 
2012 }
2013 
2014 static void
2015 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
2016 		    struct disklabel *lp)
2017 {
2018 	memset(lp, 0, sizeof(*lp));
2019 
2020 	/* fabricate a label... */
2021 	lp->d_secperunit = raidPtr->totalSectors;
2022 	lp->d_secsize = raidPtr->bytesPerSector;
2023 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
2024 	lp->d_ntracks = 4 * raidPtr->numCol;
2025 	lp->d_ncylinders = raidPtr->totalSectors /
2026 		(lp->d_nsectors * lp->d_ntracks);
2027 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2028 
2029 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2030 	lp->d_type = DTYPE_RAID;
2031 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2032 	lp->d_rpm = 3600;
2033 	lp->d_interleave = 1;
2034 	lp->d_flags = 0;
2035 
2036 	lp->d_partitions[RAW_PART].p_offset = 0;
2037 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2038 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2039 	lp->d_npartitions = RAW_PART + 1;
2040 
2041 	lp->d_magic = DISKMAGIC;
2042 	lp->d_magic2 = DISKMAGIC;
2043 	lp->d_checksum = dkcksum(lp);	/* checksum the label just built */
2044 
2045 }
2046 /*
2047  * Read the disklabel from the raid device.  If one is not present, fake one
2048  * up.
2049  */
2050 static void
2051 raidgetdisklabel(dev_t dev)
2052 {
2053 	int     unit = raidunit(dev);
2054 	struct raid_softc *rs = &raid_softc[unit];
2055 	const char   *errstring;
2056 	struct disklabel *lp = rs->sc_dkdev.dk_label;
2057 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
2058 	RF_Raid_t *raidPtr;
2059 
2060 	db1_printf(("Getting the disklabel...\n"));
2061 
2062 	memset(clp, 0, sizeof(*clp));
2063 
2064 	raidPtr = raidPtrs[unit];
2065 
2066 	raidgetdefaultlabel(raidPtr, rs, lp);
2067 
2068 	/*
2069 	 * Call the generic disklabel extraction routine.
2070 	 */
2071 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
2072 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
2073 	if (errstring)
2074 		raidmakedisklabel(rs);
2075 	else {
2076 		int     i;
2077 		struct partition *pp;
2078 
2079 		/*
2080 		 * Sanity check whether the found disklabel is valid.
2081 		 *
2082 		 * This is necessary since total size of the raid device
2083 		 * may vary when an interleave is changed even though exactly
2084 		 * same componets are used, and old disklabel may used
2085 		 * if that is found.
2086 		 */
2087 		if (lp->d_secperunit != rs->sc_size)
2088 			printf("raid%d: WARNING: %s: "
2089 			    "total sector size in disklabel (%d) != "
2090 			    "the size of raid (%ld)\n", unit, rs->sc_xname,
2091 			    lp->d_secperunit, (long) rs->sc_size);
2092 		for (i = 0; i < lp->d_npartitions; i++) {
2093 			pp = &lp->d_partitions[i];
2094 			if (pp->p_offset + pp->p_size > rs->sc_size)
2095 				printf("raid%d: WARNING: %s: end of partition `%c' "
2096 				       "exceeds the size of raid (%ld)\n",
2097 				       unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
2098 		}
2099 	}
2100 
2101 }
2102 /*
2103  * Take care of things one might want to take care of in the event
2104  * that a disklabel isn't present.
2105  */
2106 static void
2107 raidmakedisklabel(struct raid_softc *rs)
2108 {
2109 	struct disklabel *lp = rs->sc_dkdev.dk_label;
2110 	db1_printf(("Making a label..\n"));
2111 
2112 	/*
2113 	 * For historical reasons, if there's no disklabel present
2114 	 * the raw partition must be marked FS_BSDFFS.
2115 	 */
2116 
2117 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2118 
2119 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2120 
2121 	lp->d_checksum = dkcksum(lp);
2122 }
2123 /*
2124  * Lookup the provided name in the filesystem.  If the file exists,
2125  * is a valid block device, and isn't being used by anyone else,
2126  * set *vpp to the file's vnode.
2127  * You'll find the original of this in ccd.c
2128  */
2129 int
2130 raidlookup(char *path, struct proc *p, struct vnode **vpp)
2131 {
2132 	struct nameidata nd;
2133 	struct vnode *vp;
2134 	struct vattr va;
2135 	int     error;
2136 
2137 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
2138 	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
2139 		return (error);
2140 	}
2141 	vp = nd.ni_vp;
2142 	if (vp->v_usecount > 1) {
2143 		VOP_UNLOCK(vp, 0);
2144 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2145 		return (EBUSY);
2146 	}
2147 	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
2148 		VOP_UNLOCK(vp, 0);
2149 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2150 		return (error);
2151 	}
2152 	/* XXX: eventually we should handle VREG, too. */
2153 	if (va.va_type != VBLK) {
2154 		VOP_UNLOCK(vp, 0);
2155 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2156 		return (ENOTBLK);
2157 	}
2158 	VOP_UNLOCK(vp, 0);
2159 	*vpp = vp;
2160 	return (0);
2161 }
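/*
 * Illustrative use of raidlookup() (a sketch; the wrapper name is
 * hypothetical and error handling is trimmed): resolve a component
 * pathname to an open, writable block-device vnode.
 */
#if 0
static int
example_open_component(char *path, struct proc *p, struct vnode **vpp)
{
	int     error;

	/* on success *vpp is open FREAD|FWRITE and unlocked */
	error = raidlookup(path, p, vpp);
	if (error)
		printf("raidlookup failed with %d\n", error);
	return (error);
}
#endif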
2162 /*
2163  * Wait interruptibly for an exclusive lock.
2164  *
2165  * XXX
2166  * Several drivers do this; it should be abstracted and made MP-safe.
2167  * (Hmm... where have we seen this warning before :->  GO )
2168  */
2169 static int
2170 raidlock(struct raid_softc *rs)
2171 {
2172 	int     error;
2173 
2174 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2175 		rs->sc_flags |= RAIDF_WANTED;
2176 		if ((error =
2177 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2178 			return (error);
2179 	}
2180 	rs->sc_flags |= RAIDF_LOCKED;
2181 	return (0);
2182 }
2183 /*
2184  * Unlock and wake up any waiters.
2185  */
2186 static void
2187 raidunlock(struct raid_softc *rs)
2188 {
2189 
2190 	rs->sc_flags &= ~RAIDF_LOCKED;
2191 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2192 		rs->sc_flags &= ~RAIDF_WANTED;
2193 		wakeup(rs);
2194 	}
2195 }
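/*
 * Typical use of the softc lock above (sketch; the configure/unconfigure
 * ioctl paths in this file follow this pattern to serialize changes).
 */
#if 0
static int
example_locked_op(struct raid_softc *rs)
{
	int     error;

	if ((error = raidlock(rs)) != 0)	/* interruptible (PCATCH) */
		return (error);
	/* ... state changes that must be serialized go here ... */
	raidunlock(rs);
	return (0);
}
#endif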
2196 
2197 
2198 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
2199 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
2200 
2201 int
2202 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
2203 {
2204 	RF_ComponentLabel_t clabel;
2205 	raidread_component_label(dev, b_vp, &clabel);
2206 	clabel.mod_counter = mod_counter;
2207 	clabel.clean = RF_RAID_CLEAN;
2208 	raidwrite_component_label(dev, b_vp, &clabel);
2209 	return(0);
2210 }
2211 
2212 
2213 int
2214 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
2215 {
2216 	RF_ComponentLabel_t clabel;
2217 	raidread_component_label(dev, b_vp, &clabel);
2218 	clabel.mod_counter = mod_counter;
2219 	clabel.clean = RF_RAID_DIRTY;
2220 	raidwrite_component_label(dev, b_vp, &clabel);
2221 	return(0);
2222 }
2223 
2224 /* ARGSUSED */
2225 int
2226 raidread_component_label(dev_t dev, struct vnode *b_vp,
2227 			 RF_ComponentLabel_t *clabel)
2228 {
2229 	struct buf *bp;
2230 	const struct bdevsw *bdev;
2231 	int error;
2232 
2233 	/* XXX should probably ensure that we don't try to do this if
2234 	   someone has changed rf_protected_sectors. */
2235 
2236 	if (b_vp == NULL) {
2237 		/* For whatever reason, this component is not valid.
2238 		   Don't try to read a component label from it. */
2239 		return(EINVAL);
2240 	}
2241 
2242 	/* get a block of the appropriate size... */
2243 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2244 	bp->b_dev = dev;
2245 
2246 	/* get our ducks in a row for the read */
2247 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2248 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2249 	bp->b_flags |= B_READ;
2250  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2251 
2252 	bdev = bdevsw_lookup(bp->b_dev);
2253 	if (bdev == NULL)
2254 		{ brelse(bp); return (ENXIO); }	/* don't leak the geteblk() buf */
2255 	(*bdev->d_strategy)(bp);
2256 
2257 	error = biowait(bp);
2258 
2259 	if (!error) {
2260 		memcpy(clabel, bp->b_data,
2261 		       sizeof(RF_ComponentLabel_t));
2262 	}
2263 
2264 	brelse(bp);
2265 	return(error);
2266 }
2267 /* ARGSUSED */
2268 int
2269 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
2270 			  RF_ComponentLabel_t *clabel)
2271 {
2272 	struct buf *bp;
2273 	const struct bdevsw *bdev;
2274 	int error;
2275 
2276 	/* get a block of the appropriate size... */
2277 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2278 	bp->b_dev = dev;
2279 
2280 	/* get our ducks in a row for the write */
2281 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2282 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2283 	bp->b_flags |= B_WRITE;
2284  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2285 
2286 	memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
2287 
2288 	memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
2289 
2290 	bdev = bdevsw_lookup(bp->b_dev);
2291 	if (bdev == NULL)
2292 		{ brelse(bp); return (ENXIO); }	/* don't leak the geteblk() buf */
2293 	(*bdev->d_strategy)(bp);
2294 	error = biowait(bp);
2295 	brelse(bp);
2296 	if (error) {
2297 #if 1
2298 		printf("Failed to write RAID component info!\n");
2299 #endif
2300 	}
2301 
2302 	return(error);
2303 }
2304 
2305 void
2306 rf_markalldirty(RF_Raid_t *raidPtr)
2307 {
2308 	RF_ComponentLabel_t clabel;
2309 	int sparecol;
2310 	int c;
2311 	int j;
2312 	int scol = -1;
2313 
2314 	raidPtr->mod_counter++;
2315 	for (c = 0; c < raidPtr->numCol; c++) {
2316 		/* we don't want to touch (at all) a disk that has
2317 		   failed */
2318 		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
2319 			raidread_component_label(
2320 						 raidPtr->Disks[c].dev,
2321 						 raidPtr->raid_cinfo[c].ci_vp,
2322 						 &clabel);
2323 			if (clabel.status == rf_ds_spared) {
2324 				/* XXX do something special...
2325 				   but whatever you do, don't
2326 				   try to access it!! */
2327 			} else {
2328 				raidmarkdirty(
2329 					      raidPtr->Disks[c].dev,
2330 					      raidPtr->raid_cinfo[c].ci_vp,
2331 					      raidPtr->mod_counter);
2332 			}
2333 		}
2334 	}
2335 
2336 	for( c = 0; c < raidPtr->numSpare ; c++) {
2337 		sparecol = raidPtr->numCol + c;
2338 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2339 			/*
2340 
2341 			   we claim this disk is "optimal" if it's
2342 			   rf_ds_used_spare, as that means it should be
2343 			   directly substitutable for the disk it replaced.
2344 			   We note that too...
2345 
2346 			 */
2347 
2348 			for(j=0;j<raidPtr->numCol;j++) {
2349 				if (raidPtr->Disks[j].spareCol == sparecol) {
2350 					scol = j;
2351 					break;
2352 				}
2353 			}
2354 
2355 			raidread_component_label(
2356 				 raidPtr->Disks[sparecol].dev,
2357 				 raidPtr->raid_cinfo[sparecol].ci_vp,
2358 				 &clabel);
2359 			/* make sure status is noted */
2360 
2361 			raid_init_component_label(raidPtr, &clabel);
2362 
2363 			clabel.row = 0;
2364 			clabel.column = scol;
2365 			/* Note: we *don't* change status from rf_ds_used_spare
2366 			   to rf_ds_optimal */
2367 			/* clabel.status = rf_ds_optimal; */
2368 
2369 			raidmarkdirty(raidPtr->Disks[sparecol].dev,
2370 				      raidPtr->raid_cinfo[sparecol].ci_vp,
2371 				      raidPtr->mod_counter);
2372 		}
2373 	}
2374 }
2375 
2376 
2377 void
2378 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
2379 {
2380 	RF_ComponentLabel_t clabel;
2381 	int sparecol;
2382 	int c;
2383 	int j;
2384 	int scol;
2385 
2386 	scol = -1;
2387 
2388 	/* XXX should do extra checks to make sure things really are clean,
2389 	   rather than blindly setting the clean bit... */
2390 
2391 	raidPtr->mod_counter++;
2392 
2393 	for (c = 0; c < raidPtr->numCol; c++) {
2394 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
2395 			raidread_component_label(
2396 						 raidPtr->Disks[c].dev,
2397 						 raidPtr->raid_cinfo[c].ci_vp,
2398 						 &clabel);
2399 				/* make sure status is noted */
2400 			clabel.status = rf_ds_optimal;
2401 				/* bump the counter */
2402 			clabel.mod_counter = raidPtr->mod_counter;
2403 
2404 			raidwrite_component_label(
2405 						  raidPtr->Disks[c].dev,
2406 						  raidPtr->raid_cinfo[c].ci_vp,
2407 						  &clabel);
2408 			if (final == RF_FINAL_COMPONENT_UPDATE) {
2409 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
2410 					raidmarkclean(
2411 						      raidPtr->Disks[c].dev,
2412 						      raidPtr->raid_cinfo[c].ci_vp,
2413 						      raidPtr->mod_counter);
2414 				}
2415 			}
2416 		}
2417 		/* else we don't touch it.. */
2418 	}
2419 
2420 	for( c = 0; c < raidPtr->numSpare ; c++) {
2421 		sparecol = raidPtr->numCol + c;
2422 		/* Need to ensure that the reconstruct actually completed! */
2423 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2424 			/*
2425 
2426 			   we claim this disk is "optimal" if it's
2427 			   rf_ds_used_spare, as that means it should be
2428 			   directly substitutable for the disk it replaced.
2429 			   We note that too...
2430 
2431 			 */
2432 
2433 			for(j=0;j<raidPtr->numCol;j++) {
2434 				if (raidPtr->Disks[j].spareCol == sparecol) {
2435 					scol = j;
2436 					break;
2437 				}
2438 			}
2439 
2440 			/* XXX shouldn't *really* need this... */
2441 			raidread_component_label(
2442 				      raidPtr->Disks[sparecol].dev,
2443 				      raidPtr->raid_cinfo[sparecol].ci_vp,
2444 				      &clabel);
2445 			/* make sure status is noted */
2446 
2447 			raid_init_component_label(raidPtr, &clabel);
2448 
2449 			clabel.mod_counter = raidPtr->mod_counter;
2450 			clabel.column = scol;
2451 			clabel.status = rf_ds_optimal;
2452 
2453 			raidwrite_component_label(
2454 				      raidPtr->Disks[sparecol].dev,
2455 				      raidPtr->raid_cinfo[sparecol].ci_vp,
2456 				      &clabel);
2457 			if (final == RF_FINAL_COMPONENT_UPDATE) {
2458 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
2459 					raidmarkclean( raidPtr->Disks[sparecol].dev,
2460 						       raidPtr->raid_cinfo[sparecol].ci_vp,
2461 						       raidPtr->mod_counter);
2462 				}
2463 			}
2464 		}
2465 	}
2466 }
2467 
2468 void
2469 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2470 {
2471 	struct proc *p;
2472 
2473 	p = raidPtr->engine_thread;
2474 
2475 	if (vp != NULL) {
2476 		if (auto_configured == 1) {
2477 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2478 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2479 			vput(vp);
2480 
2481 		} else {
2482 			(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2483 		}
2484 	}
2485 }
2486 
2487 
2488 void
2489 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2490 {
2491 	int r,c;
2492 	struct vnode *vp;
2493 	int acd;
2494 
2495 
2496 	/* We take this opportunity to close the vnodes like we should.. */
2497 
2498 	for (c = 0; c < raidPtr->numCol; c++) {
2499 		vp = raidPtr->raid_cinfo[c].ci_vp;
2500 		acd = raidPtr->Disks[c].auto_configured;
2501 		rf_close_component(raidPtr, vp, acd);
2502 		raidPtr->raid_cinfo[c].ci_vp = NULL;
2503 		raidPtr->Disks[c].auto_configured = 0;
2504 	}
2505 
2506 	for (r = 0; r < raidPtr->numSpare; r++) {
2507 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2508 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2509 		rf_close_component(raidPtr, vp, acd);
2510 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2511 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2512 	}
2513 }
2514 
2515 
2516 void
2517 rf_ReconThread(struct rf_recon_req *req)
2518 {
2519 	int     s;
2520 	RF_Raid_t *raidPtr;
2521 
2522 	s = splbio();
2523 	raidPtr = (RF_Raid_t *) req->raidPtr;
2524 	raidPtr->recon_in_progress = 1;
2525 
2526 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
2527 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2528 
2529 	RF_Free(req, sizeof(*req));
2530 
2531 	raidPtr->recon_in_progress = 0;
2532 	splx(s);
2533 
2534 	/* That's all... */
2535 	kthread_exit(0);        /* does not return */
2536 }
2537 
2538 void
2539 rf_RewriteParityThread(RF_Raid_t *raidPtr)
2540 {
2541 	int retcode;
2542 	int s;
2543 
2544 	raidPtr->parity_rewrite_in_progress = 1;
2545 	s = splbio();
2546 	retcode = rf_RewriteParity(raidPtr);
2547 	splx(s);
2548 	if (retcode) {
2549 		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2550 	} else {
2551 		/* set the clean bit!  If we shutdown correctly,
2552 		   the clean bit on each component label will get
2553 		   set */
2554 		raidPtr->parity_good = RF_RAID_CLEAN;
2555 	}
2556 	raidPtr->parity_rewrite_in_progress = 0;
2557 
2558 	/* Anyone waiting for us to stop?  If so, inform them... */
2559 	if (raidPtr->waitShutdown) {
2560 		wakeup(&raidPtr->parity_rewrite_in_progress);
2561 	}
2562 
2563 	/* That's all... */
2564 	kthread_exit(0);        /* does not return */
2565 }
2566 
2567 
2568 void
2569 rf_CopybackThread(RF_Raid_t *raidPtr)
2570 {
2571 	int s;
2572 
2573 	raidPtr->copyback_in_progress = 1;
2574 	s = splbio();
2575 	rf_CopybackReconstructedData(raidPtr);
2576 	splx(s);
2577 	raidPtr->copyback_in_progress = 0;
2578 
2579 	/* That's all... */
2580 	kthread_exit(0);        /* does not return */
2581 }
2582 
2583 
2584 void
2585 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
2586 {
2587 	int s;
2588 	RF_Raid_t *raidPtr;
2589 
2590 	s = splbio();
2591 	raidPtr = req->raidPtr;
2592 	raidPtr->recon_in_progress = 1;
2593 	rf_ReconstructInPlace(raidPtr, req->col);
2594 	RF_Free(req, sizeof(*req));
2595 	raidPtr->recon_in_progress = 0;
2596 	splx(s);
2597 
2598 	/* That's all... */
2599 	kthread_exit(0);        /* does not return */
2600 }
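/*
 * Sketch of how the worker threads above are typically started (assumption:
 * the disk-failure/reconstruction ioctl paths elsewhere in this file do this
 * through an RF_CREATE_THREAD-style wrapper; "rrcopy" is only an example
 * name for a malloc'd struct rf_recon_req handed to the thread).
 */
#if 0
	struct proc *recon_proc;	/* hypothetical holder for the new thread */
	int     retcode;

	retcode = kthread_create1((void (*)(void *))rf_ReconThread,
	    rrcopy, &recon_proc, "raid_recon");
#endif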
2601 
2602 RF_AutoConfig_t *
2603 rf_find_raid_components(void)
2604 {
2605 	struct vnode *vp;
2606 	struct disklabel label;
2607 	struct device *dv;
2608 	dev_t dev;
2609 	int bmajor;
2610 	int error;
2611 	int i;
2612 	int good_one;
2613 	RF_ComponentLabel_t *clabel;
2614 	RF_AutoConfig_t *ac_list;
2615 	RF_AutoConfig_t *ac;
2616 
2617 
2618 	/* initialize the AutoConfig list */
2619 	ac_list = NULL;
2620 
2621 	/* we begin by trolling through *all* the devices on the system */
2622 
2623 	for (dv = alldevs.tqh_first; dv != NULL;
2624 	     dv = dv->dv_list.tqe_next) {
2625 
2626 		/* we are only interested in disks... */
2627 		if (dv->dv_class != DV_DISK)
2628 			continue;
2629 
2630 		/* we don't care about floppies... */
2631 		if (!strcmp(dv->dv_cfdata->cf_name,"fd")) {
2632 			continue;
2633 		}
2634 
2635 		/* we don't care about CD's... */
2636 		if (!strcmp(dv->dv_cfdata->cf_name,"cd")) {
2637 			continue;
2638 		}
2639 
2640 		/* hdfd is the Atari/Hades floppy driver */
2641 		if (!strcmp(dv->dv_cfdata->cf_name,"hdfd")) {
2642 			continue;
2643 		}
2644 		/* fdisa is the Atari/Milan floppy driver */
2645 		if (!strcmp(dv->dv_cfdata->cf_name,"fdisa")) {
2646 			continue;
2647 		}
2648 
2649 		/* need to find the device_name_to_block_device_major stuff */
2650 		bmajor = devsw_name2blk(dv->dv_xname, NULL, 0);
2651 
2652 		/* get a vnode for the raw partition of this disk */
2653 
2654 		dev = MAKEDISKDEV(bmajor, dv->dv_unit, RAW_PART);
2655 		if (bdevvp(dev, &vp))
2656 			panic("RAID can't alloc vnode");
2657 
2658 		error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2659 
2660 		if (error) {
2661 			/* "Who cares."  Continue looking
2662 			   for something that exists. */
2663 			vput(vp);
2664 			continue;
2665 		}
2666 
2667 		/* Ok, the disk exists.  Go get the disklabel. */
2668 		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED, 0);
2669 		if (error) {
2670 			/*
2671 			 * XXX can't happen - open() would
2672 			 * have errored out (or faked up one)
2673 			 */
2674 			if (error != ENOTTY)
2675 				printf("RAIDframe: can't get label for dev "
2676 				    "%s (%d)\n", dv->dv_xname, error);
2677 		}
2678 
2679 		/* don't need this any more.  We'll allocate it again
2680 		   a little later if we really do... */
2681 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2682 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2683 		vput(vp);
2684 
2685 		if (error)
2686 			continue;
2687 
2688 		for (i=0; i < label.d_npartitions; i++) {
2689 			/* We only support partitions marked as RAID */
2690 			if (label.d_partitions[i].p_fstype != FS_RAID)
2691 				continue;
2692 
2693 			dev = MAKEDISKDEV(bmajor, dv->dv_unit, i);
2694 			if (bdevvp(dev, &vp))
2695 				panic("RAID can't alloc vnode");
2696 
2697 			error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2698 			if (error) {
2699 				/* Whatever... */
2700 				vput(vp);
2701 				continue;
2702 			}
2703 
2704 			good_one = 0;
2705 
2706 			clabel = (RF_ComponentLabel_t *)
2707 				malloc(sizeof(RF_ComponentLabel_t),
2708 				       M_RAIDFRAME, M_NOWAIT);
2709 			if (clabel == NULL) {
2710 				/* XXX CLEANUP HERE */
2711 				printf("RAID auto config: out of memory!\n");
2712 				return(NULL); /* XXX probably should panic? */
2713 			}
2714 
2715 			if (!raidread_component_label(dev, vp, clabel)) {
2716 				/* Got the label.  Does it look reasonable? */
2717 				if (rf_reasonable_label(clabel) &&
2718 				    (clabel->partitionSize <=
2719 				     label.d_partitions[i].p_size)) {
2720 #if DEBUG
2721 					printf("Component on: %s%c: %d\n",
2722 					       dv->dv_xname, 'a'+i,
2723 					       label.d_partitions[i].p_size);
2724 					rf_print_component_label(clabel);
2725 #endif
2726 					/* if it's reasonable, add it,
2727 					   else ignore it. */
2728 					ac = (RF_AutoConfig_t *)
2729 						malloc(sizeof(RF_AutoConfig_t),
2730 						       M_RAIDFRAME,
2731 						       M_NOWAIT);
2732 					if (ac == NULL) {
2733 						/* XXX should panic?? */
2734 						return(NULL);
2735 					}
2736 
2737 					snprintf(ac->devname,
2738 					    sizeof(ac->devname), "%s%c",
2739 					    dv->dv_xname, 'a'+i);
2740 					ac->dev = dev;
2741 					ac->vp = vp;
2742 					ac->clabel = clabel;
2743 					ac->next = ac_list;
2744 					ac_list = ac;
2745 					good_one = 1;
2746 				}
2747 			}
2748 			if (!good_one) {
2749 				/* cleanup */
2750 				free(clabel, M_RAIDFRAME);
2751 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2752 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2753 				vput(vp);
2754 			}
2755 		}
2756 	}
2757 	return(ac_list);
2758 }
2759 
2760 static int
2761 rf_reasonable_label(RF_ComponentLabel_t *clabel)
2762 {
2763 
2764 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
2765 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
2766 	    ((clabel->clean == RF_RAID_CLEAN) ||
2767 	     (clabel->clean == RF_RAID_DIRTY)) &&
2768 	    clabel->row >=0 &&
2769 	    clabel->column >= 0 &&
2770 	    clabel->num_rows > 0 &&
2771 	    clabel->num_columns > 0 &&
2772 	    clabel->row < clabel->num_rows &&
2773 	    clabel->column < clabel->num_columns &&
2774 	    clabel->blockSize > 0 &&
2775 	    clabel->numBlocks > 0) {
2776 		/* label looks reasonable enough... */
2777 		return(1);
2778 	}
2779 	return(0);
2780 }
2781 
2782 
2783 #if DEBUG
2784 void
2785 rf_print_component_label(RF_ComponentLabel_t *clabel)
2786 {
2787 	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
2788 	       clabel->row, clabel->column,
2789 	       clabel->num_rows, clabel->num_columns);
2790 	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
2791 	       clabel->version, clabel->serial_number,
2792 	       clabel->mod_counter);
2793 	printf("   Clean: %s Status: %d\n",
2794 	       clabel->clean ? "Yes" : "No", clabel->status );
2795 	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
2796 	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
2797 	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
2798 	       (char) clabel->parityConfig, clabel->blockSize,
2799 	       clabel->numBlocks);
2800 	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
2801 	printf("   Contains root partition: %s\n",
2802 	       clabel->root_partition ? "Yes" : "No" );
2803 	printf("   Last configured as: raid%d\n", clabel->last_unit );
2804 #if 0
2805 	   printf("   Config order: %d\n", clabel->config_order);
2806 #endif
2807 
2808 }
2809 #endif
2810 
2811 RF_ConfigSet_t *
2812 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
2813 {
2814 	RF_AutoConfig_t *ac;
2815 	RF_ConfigSet_t *config_sets;
2816 	RF_ConfigSet_t *cset;
2817 	RF_AutoConfig_t *ac_next;
2818 
2819 
2820 	config_sets = NULL;
2821 
2822 	/* Go through the AutoConfig list, and figure out which components
2823 	   belong to what sets.  */
2824 	ac = ac_list;
2825 	while(ac!=NULL) {
2826 		/* we're going to putz with ac->next, so save it here
2827 		   for use at the end of the loop */
2828 		ac_next = ac->next;
2829 
2830 		if (config_sets == NULL) {
2831 			/* will need at least this one... */
2832 			config_sets = (RF_ConfigSet_t *)
2833 				malloc(sizeof(RF_ConfigSet_t),
2834 				       M_RAIDFRAME, M_NOWAIT);
2835 			if (config_sets == NULL) {
2836 				panic("rf_create_auto_sets: No memory!");
2837 			}
2838 			/* this one is easy :) */
2839 			config_sets->ac = ac;
2840 			config_sets->next = NULL;
2841 			config_sets->rootable = 0;
2842 			ac->next = NULL;
2843 		} else {
2844 			/* which set does this component fit into? */
2845 			cset = config_sets;
2846 			while(cset!=NULL) {
2847 				if (rf_does_it_fit(cset, ac)) {
2848 					/* looks like it matches... */
2849 					ac->next = cset->ac;
2850 					cset->ac = ac;
2851 					break;
2852 				}
2853 				cset = cset->next;
2854 			}
2855 			if (cset==NULL) {
2856 				/* didn't find a match above... new set..*/
2857 				cset = (RF_ConfigSet_t *)
2858 					malloc(sizeof(RF_ConfigSet_t),
2859 					       M_RAIDFRAME, M_NOWAIT);
2860 				if (cset == NULL) {
2861 					panic("rf_create_auto_sets: No memory!");
2862 				}
2863 				cset->ac = ac;
2864 				ac->next = NULL;
2865 				cset->next = config_sets;
2866 				cset->rootable = 0;
2867 				config_sets = cset;
2868 			}
2869 		}
2870 		ac = ac_next;
2871 	}
2872 
2873 
2874 	return(config_sets);
2875 }
2876 
2877 static int
2878 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
2879 {
2880 	RF_ComponentLabel_t *clabel1, *clabel2;
2881 
2882 	/* If this one matches the *first* one in the set, that's good
2883 	   enough, since the other members of the set would have been
2884 	   through here too... */
2885 	/* note that we are not checking partitionSize here..
2886 
2887 	   Note that we are also not checking the mod_counters here.
2888 	   If everything else matches except the mod_counter, that's
2889 	   good enough for this test.  We will deal with the mod_counters
2890 	   a little later in the autoconfiguration process.
2891 
2892 	    (clabel1->mod_counter == clabel2->mod_counter) &&
2893 
2894 	   The reason we don't check for this is that failed disks
2895 	   will have lower modification counts.  If those disks are
2896 	   not added to the set they used to belong to, then they will
2897 	   form their own set, which may result in 2 different sets,
2898 	   for example, competing to be configured at raid0, and
2899 	   perhaps competing to be the root filesystem set.  If the
2900 	   wrong ones get configured, or both attempt to become /,
2901 	   weird behaviour and or serious lossage will occur.  Thus we
2902 	   need to bring them into the fold here, and kick them out at
2903 	   a later point.
2904 
2905 	*/
2906 
2907 	clabel1 = cset->ac->clabel;
2908 	clabel2 = ac->clabel;
2909 	if ((clabel1->version == clabel2->version) &&
2910 	    (clabel1->serial_number == clabel2->serial_number) &&
2911 	    (clabel1->num_rows == clabel2->num_rows) &&
2912 	    (clabel1->num_columns == clabel2->num_columns) &&
2913 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
2914 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
2915 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
2916 	    (clabel1->parityConfig == clabel2->parityConfig) &&
2917 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
2918 	    (clabel1->blockSize == clabel2->blockSize) &&
2919 	    (clabel1->numBlocks == clabel2->numBlocks) &&
2920 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
2921 	    (clabel1->root_partition == clabel2->root_partition) &&
2922 	    (clabel1->last_unit == clabel2->last_unit) &&
2923 	    (clabel1->config_order == clabel2->config_order)) {
2924 		/* if it gets here, it almost *has* to be a match */
2925 	} else {
2926 		/* it's not consistent with somebody in the set..
2927 		   punt */
2928 		return(0);
2929 	}
2930 	/* all was fine.. it must fit... */
2931 	return(1);
2932 }
2933 
2934 int
2935 rf_have_enough_components(RF_ConfigSet_t *cset)
2936 {
2937 	RF_AutoConfig_t *ac;
2938 	RF_AutoConfig_t *auto_config;
2939 	RF_ComponentLabel_t *clabel;
2940 	int c;
2941 	int num_cols;
2942 	int num_missing;
2943 	int mod_counter;
2944 	int mod_counter_found;
2945 	int even_pair_failed;
2946 	char parity_type;
2947 
2948 
2949 	/* check to see that we have enough 'live' components
2950 	   of this set.  If so, we can configure it if necessary */
2951 
2952 	num_cols = cset->ac->clabel->num_columns;
2953 	parity_type = cset->ac->clabel->parityConfig;
2954 
2955 	/* XXX Check for duplicate components!?!?!? */
2956 
2957 	/* Determine what the mod_counter is supposed to be for this set. */
2958 
2959 	mod_counter_found = 0;
2960 	mod_counter = 0;
2961 	ac = cset->ac;
2962 	while(ac!=NULL) {
2963 		if (mod_counter_found==0) {
2964 			mod_counter = ac->clabel->mod_counter;
2965 			mod_counter_found = 1;
2966 		} else {
2967 			if (ac->clabel->mod_counter > mod_counter) {
2968 				mod_counter = ac->clabel->mod_counter;
2969 			}
2970 		}
2971 		ac = ac->next;
2972 	}
2973 
2974 	num_missing = 0;
2975 	auto_config = cset->ac;
2976 
2977 	even_pair_failed = 0;
2978 	for(c=0; c<num_cols; c++) {
2979 		ac = auto_config;
2980 		while(ac!=NULL) {
2981 			if ((ac->clabel->column == c) &&
2982 			    (ac->clabel->mod_counter == mod_counter)) {
2983 				/* it's this one... */
2984 #if DEBUG
2985 				printf("Found: %s at %d\n",
2986 				       ac->devname,c);
2987 #endif
2988 				break;
2989 			}
2990 			ac=ac->next;
2991 		}
2992 		if (ac==NULL) {
2993 				/* Didn't find one here! */
2994 				/* special case for RAID 1, especially
2995 				   where there are more than 2
2996 				   components (where RAIDframe treats
2997 				   things a little differently :( ) */
2998 			if (parity_type == '1') {
2999 				if (c%2 == 0) { /* even component */
3000 					even_pair_failed = 1;
3001 				} else { /* odd component.  If
3002 					    we're failed, and
3003 					    so is the even
3004 					    component, it's
3005 					    "Good Night, Charlie" */
3006 					if (even_pair_failed == 1) {
3007 						return(0);
3008 					}
3009 				}
3010 			} else {
3011 				/* normal accounting */
3012 				num_missing++;
3013 			}
3014 		}
3015 		if ((parity_type == '1') && (c%2 == 1)) {
3016 				/* Just did an even component, and we didn't
3017 				   bail.. reset the even_pair_failed flag,
3018 				   and go on to the next component.... */
3019 			even_pair_failed = 0;
3020 		}
3021 	}
3022 
3023 	clabel = cset->ac->clabel;
3024 
3025 	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3026 	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3027 	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
3028 		/* XXX this needs to be made *much* more general */
3029 		/* Too many failures */
3030 		return(0);
3031 	}
3032 	/* otherwise, all is well, and we've got enough to take a kick
3033 	   at autoconfiguring this set */
3034 	return(1);
3035 }
3036 
3037 void
3038 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
3039 			RF_Raid_t *raidPtr)
3040 {
3041 	RF_ComponentLabel_t *clabel;
3042 	int i;
3043 
3044 	clabel = ac->clabel;
3045 
3046 	/* 1. Fill in the common stuff */
3047 	config->numRow = clabel->num_rows = 1;
3048 	config->numCol = clabel->num_columns;
3049 	config->numSpare = 0; /* XXX should this be set here? */
3050 	config->sectPerSU = clabel->sectPerSU;
3051 	config->SUsPerPU = clabel->SUsPerPU;
3052 	config->SUsPerRU = clabel->SUsPerRU;
3053 	config->parityConfig = clabel->parityConfig;
3054 	/* XXX... */
3055 	strcpy(config->diskQueueType,"fifo");
3056 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3057 	config->layoutSpecificSize = 0; /* XXX ?? */
3058 
3059 	while(ac!=NULL) {
3060 		/* row/col values will be in range due to the checks
3061 		   in rf_reasonable_label() */
3062 		strcpy(config->devnames[0][ac->clabel->column],
3063 		       ac->devname);
3064 		ac = ac->next;
3065 	}
3066 
3067 	for(i=0;i<RF_MAXDBGV;i++) {
3068 		config->debugVars[i][0] = 0;
3069 	}
3070 }
3071 
3072 int
3073 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3074 {
3075 	RF_ComponentLabel_t clabel;
3076 	struct vnode *vp;
3077 	dev_t dev;
3078 	int column;
3079 	int sparecol;
3080 
3081 	raidPtr->autoconfigure = new_value;
3082 
3083 	for(column=0; column<raidPtr->numCol; column++) {
3084 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
3085 			dev = raidPtr->Disks[column].dev;
3086 			vp = raidPtr->raid_cinfo[column].ci_vp;
3087 			raidread_component_label(dev, vp, &clabel);
3088 			clabel.autoconfigure = new_value;
3089 			raidwrite_component_label(dev, vp, &clabel);
3090 		}
3091 	}
3092 	for(column = 0; column < raidPtr->numSpare ; column++) {
3093 		sparecol = raidPtr->numCol + column;
3094 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3095 			dev = raidPtr->Disks[sparecol].dev;
3096 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
3097 			raidread_component_label(dev, vp, &clabel);
3098 			clabel.autoconfigure = new_value;
3099 			raidwrite_component_label(dev, vp, &clabel);
3100 		}
3101 	}
3102 	return(new_value);
3103 }
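/*
 * Illustrative caller (assumption: mirrors the RAIDFRAME_SET_AUTOCONFIG
 * ioctl handling earlier in this file; "data" stands in for the ioctl
 * argument buffer).
 */
#if 0
	int     d;

	d = rf_set_autoconfig(raidPtr, *(int *) data);
	printf("raid%d: new autoconfig value is: %d\n", raidPtr->raidid, d);
#endif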
3104 
3105 int
3106 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3107 {
3108 	RF_ComponentLabel_t clabel;
3109 	struct vnode *vp;
3110 	dev_t dev;
3111 	int column;
3112 	int sparecol;
3113 
3114 	raidPtr->root_partition = new_value;
3115 	for(column=0; column<raidPtr->numCol; column++) {
3116 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
3117 			dev = raidPtr->Disks[column].dev;
3118 			vp = raidPtr->raid_cinfo[column].ci_vp;
3119 			raidread_component_label(dev, vp, &clabel);
3120 			clabel.root_partition = new_value;
3121 			raidwrite_component_label(dev, vp, &clabel);
3122 		}
3123 	}
3124 	for(column = 0; column < raidPtr->numSpare ; column++) {
3125 		sparecol = raidPtr->numCol + column;
3126 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3127 			dev = raidPtr->Disks[sparecol].dev;
3128 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
3129 			raidread_component_label(dev, vp, &clabel);
3130 			clabel.root_partition = new_value;
3131 			raidwrite_component_label(dev, vp, &clabel);
3132 		}
3133 	}
3134 	return(new_value);
3135 }
3136 
3137 void
3138 rf_release_all_vps(RF_ConfigSet_t *cset)
3139 {
3140 	RF_AutoConfig_t *ac;
3141 
3142 	ac = cset->ac;
3143 	while(ac!=NULL) {
3144 		/* Close the vp, and give it back */
3145 		if (ac->vp) {
3146 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3147 			VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
3148 			vput(ac->vp);
3149 			ac->vp = NULL;
3150 		}
3151 		ac = ac->next;
3152 	}
3153 }
3154 
3155 
3156 void
3157 rf_cleanup_config_set(RF_ConfigSet_t *cset)
3158 {
3159 	RF_AutoConfig_t *ac;
3160 	RF_AutoConfig_t *next_ac;
3161 
3162 	ac = cset->ac;
3163 	while(ac!=NULL) {
3164 		next_ac = ac->next;
3165 		/* nuke the label */
3166 		free(ac->clabel, M_RAIDFRAME);
3167 		/* cleanup the config structure */
3168 		free(ac, M_RAIDFRAME);
3169 		/* "next.." */
3170 		ac = next_ac;
3171 	}
3172 	/* and, finally, nuke the config set */
3173 	free(cset, M_RAIDFRAME);
3174 }
3175 
3176 
3177 void
3178 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
3179 {
3180 	/* current version number */
3181 	clabel->version = RF_COMPONENT_LABEL_VERSION;
3182 	clabel->serial_number = raidPtr->serial_number;
3183 	clabel->mod_counter = raidPtr->mod_counter;
3184 	clabel->num_rows = 1;
3185 	clabel->num_columns = raidPtr->numCol;
3186 	clabel->clean = RF_RAID_DIRTY; /* not clean */
3187 	clabel->status = rf_ds_optimal; /* "It's good!" */
3188 
3189 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3190 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3191 	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3192 
3193 	clabel->blockSize = raidPtr->bytesPerSector;
3194 	clabel->numBlocks = raidPtr->sectorsPerDisk;
3195 
3196 	/* XXX not portable */
3197 	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3198 	clabel->maxOutstanding = raidPtr->maxOutstanding;
3199 	clabel->autoconfigure = raidPtr->autoconfigure;
3200 	clabel->root_partition = raidPtr->root_partition;
3201 	clabel->last_unit = raidPtr->raidid;
3202 	clabel->config_order = raidPtr->config_order;
3203 }
3204 
3205 int
3206 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
3207 {
3208 	RF_Raid_t *raidPtr;
3209 	RF_Config_t *config;
3210 	int raidID;
3211 	int retcode;
3212 
3213 #if DEBUG
3214 	printf("RAID autoconfigure\n");
3215 #endif
3216 
3217 	retcode = 0;
3218 	*unit = -1;
3219 
3220 	/* 1. Create a config structure */
3221 
3222 	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3223 				       M_RAIDFRAME,
3224 				       M_NOWAIT);
3225 	if (config==NULL) {
3226 		printf("Out of mem!?!?\n");
3227 				/* XXX do something more intelligent here. */
3228 		return(1);
3229 	}
3230 
3231 	memset(config, 0, sizeof(RF_Config_t));
3232 
3233 	/*
3234 	   2. Figure out what RAID ID this one is supposed to live at
3235 	   See if we can get the same RAID dev that it was configured
3236 	   on last time..
3237 	*/
3238 
3239 	raidID = cset->ac->clabel->last_unit;
3240 	if ((raidID < 0) || (raidID >= numraid)) {
3241 		/* let's not wander off into lala land. */
3242 		raidID = numraid - 1;
3243 	}
3244 	if (raidPtrs[raidID]->valid != 0) {
3245 
3246 		/*
3247 		   Nope... Go looking for an alternative...
3248 		   Start high so we don't immediately use raid0 if that's
3249 		   not taken.
3250 		*/
3251 
3252 		for(raidID = numraid - 1; raidID >= 0; raidID--) {
3253 			if (raidPtrs[raidID]->valid == 0) {
3254 				/* can use this one! */
3255 				break;
3256 			}
3257 		}
3258 	}
3259 
3260 	if (raidID < 0) {
3261 		/* punt... */
3262 		printf("Unable to auto configure this set!\n");
3263 		printf("(Out of RAID devs!)\n");
3264 		return(1);
3265 	}
3266 
3267 #if DEBUG
3268 	printf("Configuring raid%d:\n",raidID);
3269 #endif
3270 
3271 	raidPtr = raidPtrs[raidID];
3272 
3273 	/* XXX all this stuff should be done SOMEWHERE ELSE! */
3274 	raidPtr->raidid = raidID;
3275 	raidPtr->openings = RAIDOUTSTANDING;
3276 
3277 	/* 3. Build the configuration structure */
3278 	rf_create_configuration(cset->ac, config, raidPtr);
3279 
3280 	/* 4. Do the configuration */
3281 	retcode = rf_Configure(raidPtr, config, cset->ac);
3282 
3283 	if (retcode == 0) {
3284 
3285 		raidinit(raidPtrs[raidID]);
3286 
3287 		rf_markalldirty(raidPtrs[raidID]);
3288 		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3289 		if (cset->ac->clabel->root_partition==1) {
3290 			/* everything configured just fine.  Make a note
3291 			   that this set is eligible to be root. */
3292 			cset->rootable = 1;
3293 			/* XXX do this here? */
3294 			raidPtrs[raidID]->root_partition = 1;
3295 		}
3296 	}
3297 
3298 	/* 5. Cleanup */
3299 	free(config, M_RAIDFRAME);
3300 
3301 	*unit = raidID;
3302 	return(retcode);
3303 }
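/*
 * Sketch of how the autoconfiguration helpers above fit together
 * (assumption: simplified from the boot-time autoconfiguration code
 * elsewhere in this file; root-device selection and error paths omitted).
 */
#if 0
static void
example_autoconfigure(void)
{
	RF_AutoConfig_t *ac_list;
	RF_ConfigSet_t *cset, *next_cset;
	int     unit;

	ac_list = rf_find_raid_components();	/* scan disks for FS_RAID parts */
	cset = rf_create_auto_sets(ac_list);	/* group components into sets */

	while (cset != NULL) {
		next_cset = cset->next;
		if (rf_have_enough_components(cset) &&
		    cset->ac->clabel->autoconfigure == 1) {
			if (rf_auto_config_set(cset, &unit) == 0)
				printf("raid%d auto-configured\n", unit);
			else
				rf_release_all_vps(cset);
		} else {
			/* not enough members, or not marked for autoconfig */
			rf_release_all_vps(cset);
		}
		rf_cleanup_config_set(cset);
		cset = next_cset;
	}
}
#endif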
3304 
3305 void
3306 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
3307 {
3308 	struct buf *bp;
3309 
3310 	bp = (struct buf *)desc->bp;
3311 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3312 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
3313 }
3314 
3315 void
3316 rf_pool_init(struct pool *p, size_t size, char *w_chan,
3317 	     size_t min, size_t max)
3318 {
3319 	pool_init(p, size, 0, 0, 0, w_chan, NULL);
3320 	pool_sethiwat(p, max);
3321 	pool_prime(p, min);
3322 	pool_setlowat(p, min);
3323 }
3324 }
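/*
 * Illustrative call (assumption: the pool, element size, wait channel and
 * water marks are examples only; the real initialization elsewhere in this
 * file sets up rf_pools.cbuf and friends with driver-specific limits).
 */
#if 0
	rf_pool_init(&rf_pools.cbuf, sizeof(struct raidbuf), "raidpl",
		     RAIDOUTSTANDING, 3 * RAIDOUTSTANDING);
#endif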