xref: /netbsd-src/sys/dev/dkwedge/dk.c (revision 9573673d78c64ea1eac42d7f2e9521be89932ae5)
1 /*	$NetBSD: dk.c,v 1.88 2016/01/15 07:48:22 mlelstv Exp $	*/
2 
3 /*-
4  * Copyright (c) 2004, 2005, 2006, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: dk.c,v 1.88 2016/01/15 07:48:22 mlelstv Exp $");
34 
35 #ifdef _KERNEL_OPT
36 #include "opt_dkwedge.h"
37 #endif
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/proc.h>
42 #include <sys/errno.h>
43 #include <sys/pool.h>
44 #include <sys/ioctl.h>
45 #include <sys/disklabel.h>
46 #include <sys/disk.h>
47 #include <sys/fcntl.h>
48 #include <sys/buf.h>
49 #include <sys/bufq.h>
50 #include <sys/vnode.h>
51 #include <sys/stat.h>
52 #include <sys/conf.h>
53 #include <sys/callout.h>
54 #include <sys/kernel.h>
55 #include <sys/malloc.h>
56 #include <sys/device.h>
57 #include <sys/kauth.h>
58 
59 #include <miscfs/specfs/specdev.h>
60 
61 MALLOC_DEFINE(M_DKWEDGE, "dkwedge", "Disk wedge structures");
62 
/* Lifecycle states of a wedge; transitions happen in dkwedge_add()/dkwedge_detach(). */
typedef enum {
	DKW_STATE_LARVAL	= 0,	/* being created; not yet usable for I/O */
	DKW_STATE_RUNNING	= 1,	/* fully attached; open/I/O permitted */
	DKW_STATE_DYING		= 2,	/* detach in progress; new I/O rejected */
	DKW_STATE_DEAD		= 666	/* detached; slot about to be freed */
} dkwedge_state_t;
69 
/*
 * Per-wedge software state.  A wedge is a contiguous block range on a
 * parent disk, exposed as its own disk device.
 */
struct dkwedge_softc {
	device_t	sc_dev;	/* pointer to our pseudo-device */
	struct cfdata	sc_cfdata;	/* our cfdata structure */
	uint8_t		sc_wname[128];	/* wedge name (Unicode, UTF-8) */

	dkwedge_state_t sc_state;	/* state this wedge is in */

	struct disk	*sc_parent;	/* parent disk */
	daddr_t		sc_offset;	/* LBA offset of wedge in parent */
	uint64_t	sc_size;	/* size of wedge in blocks */
	char		sc_ptype[32];	/* partition type */
	dev_t		sc_pdev;	/* cached parent's dev_t */
					/* link on parent's wedge list */
	LIST_ENTRY(dkwedge_softc) sc_plink;

	struct disk	sc_dk;		/* our own disk structure */
	struct bufq_state *sc_bufq;	/* buffer queue */
	struct callout	sc_restart_ch;	/* callout to restart I/O */

	u_int		sc_iopend;	/* I/Os pending */
	int		sc_flags;	/* flags (splbio) */
};
92 
93 #define	DK_F_WAIT_DRAIN		0x0001	/* waiting for I/O to drain */
94 
95 static void	dkstart(struct dkwedge_softc *);
96 static void	dkiodone(struct buf *);
97 static void	dkrestart(void *);
98 static void	dkminphys(struct buf *);
99 
100 static int	dklastclose(struct dkwedge_softc *);
101 static int	dkwedge_cleanup_parent(struct dkwedge_softc *, int);
102 static int	dkwedge_detach(device_t, int);
103 static void	dkwedge_delall1(struct disk *, bool);
104 static int	dkwedge_del1(struct dkwedge_info *, int);
105 static int	dk_open_parent(dev_t, int, struct vnode **);
106 static int	dk_close_parent(struct vnode *, int);
107 
108 static dev_type_open(dkopen);
109 static dev_type_close(dkclose);
110 static dev_type_read(dkread);
111 static dev_type_write(dkwrite);
112 static dev_type_ioctl(dkioctl);
113 static dev_type_strategy(dkstrategy);
114 static dev_type_dump(dkdump);
115 static dev_type_size(dksize);
116 static dev_type_discard(dkdiscard);
117 
/* Block device switch entry points for wedge devices. */
const struct bdevsw dk_bdevsw = {
	.d_open = dkopen,
	.d_close = dkclose,
	.d_strategy = dkstrategy,
	.d_ioctl = dkioctl,
	.d_dump = dkdump,
	.d_psize = dksize,
	.d_discard = dkdiscard,
	.d_flag = D_DISK
};
128 
/* Character device switch entry points for wedge devices. */
const struct cdevsw dk_cdevsw = {
	.d_open = dkopen,
	.d_close = dkclose,
	.d_read = dkread,
	.d_write = dkwrite,
	.d_ioctl = dkioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = dkdiscard,
	.d_flag = D_DISK
};
143 
/* Global table of all wedges, indexed by unit number; guarded by dkwedges_lock. */
static struct dkwedge_softc **dkwedges;
static u_int ndkwedges;		/* current capacity of dkwedges[] */
static krwlock_t dkwedges_lock;

/* Registered partition-map discovery methods, sorted by priority. */
static LIST_HEAD(, dkwedge_discovery_method) dkwedge_discovery_methods;
static krwlock_t dkwedge_discovery_methods_lock;
150 
151 /*
152  * dkwedge_match:
153  *
154  *	Autoconfiguration match function for pseudo-device glue.
155  */
156 static int
157 dkwedge_match(device_t parent, cfdata_t match,
158     void *aux)
159 {
160 
161 	/* Pseudo-device; always present. */
162 	return (1);
163 }
164 
165 /*
166  * dkwedge_attach:
167  *
168  *	Autoconfiguration attach function for pseudo-device glue.
169  */
170 static void
171 dkwedge_attach(device_t parent, device_t self,
172     void *aux)
173 {
174 
175 	if (!pmf_device_register(self, NULL, NULL))
176 		aprint_error_dev(self, "couldn't establish power handler\n");
177 }
178 
179 CFDRIVER_DECL(dk, DV_DISK, NULL);
180 CFATTACH_DECL3_NEW(dk, 0,
181     dkwedge_match, dkwedge_attach, dkwedge_detach, NULL, NULL, NULL,
182     DVF_DETACH_SHUTDOWN);
183 
184 /*
185  * dkwedge_wait_drain:
186  *
187  *	Wait for I/O on the wedge to drain.
188  *	NOTE: Must be called at splbio()!
189  */
190 static void
191 dkwedge_wait_drain(struct dkwedge_softc *sc)
192 {
193 
194 	while (sc->sc_iopend != 0) {
195 		sc->sc_flags |= DK_F_WAIT_DRAIN;
196 		(void) tsleep(&sc->sc_iopend, PRIBIO, "dkdrn", 0);
197 	}
198 }
199 
200 /*
201  * dkwedge_compute_pdev:
202  *
203  *	Compute the parent disk's dev_t.
204  */
205 static int
206 dkwedge_compute_pdev(const char *pname, dev_t *pdevp, enum vtype type)
207 {
208 	const char *name, *cp;
209 	devmajor_t pmaj;
210 	int punit;
211 	char devname[16];
212 
213 	name = pname;
214 	switch (type) {
215 	case VBLK:
216 		pmaj = devsw_name2blk(name, devname, sizeof(devname));
217 		break;
218 	case VCHR:
219 		pmaj = devsw_name2chr(name, devname, sizeof(devname));
220 		break;
221 	default:
222 		pmaj = NODEVMAJOR;
223 		break;
224 	}
225 	if (pmaj == NODEVMAJOR)
226 		return (ENODEV);
227 
228 	name += strlen(devname);
229 	for (cp = name, punit = 0; *cp >= '0' && *cp <= '9'; cp++)
230 		punit = (punit * 10) + (*cp - '0');
231 	if (cp == name) {
232 		/* Invalid parent disk name. */
233 		return (ENODEV);
234 	}
235 
236 	*pdevp = MAKEDISKDEV(pmaj, punit, RAW_PART);
237 
238 	return (0);
239 }
240 
241 /*
242  * dkwedge_array_expand:
243  *
244  *	Expand the dkwedges array.
245  */
246 static void
247 dkwedge_array_expand(void)
248 {
249 	int newcnt = ndkwedges + 16;
250 	struct dkwedge_softc **newarray, **oldarray;
251 
252 	newarray = malloc(newcnt * sizeof(*newarray), M_DKWEDGE,
253 	    M_WAITOK|M_ZERO);
254 	if ((oldarray = dkwedges) != NULL)
255 		memcpy(newarray, dkwedges, ndkwedges * sizeof(*newarray));
256 	dkwedges = newarray;
257 	ndkwedges = newcnt;
258 	if (oldarray != NULL)
259 		free(oldarray, M_DKWEDGE);
260 }
261 
262 static void
263 dk_set_geometry(struct dkwedge_softc *sc, struct disk *pdk)
264 {
265 	struct disk *dk = &sc->sc_dk;
266 	struct disk_geom *dg = &dk->dk_geom;
267 
268 	memset(dg, 0, sizeof(*dg));
269 
270 	dg->dg_secperunit = sc->sc_size;
271 	dg->dg_secsize = DEV_BSIZE << pdk->dk_blkshift;
272 
273 	/* fake numbers, 1 cylinder is 1 MB with default sector size */
274 	dg->dg_nsectors = 32;
275 	dg->dg_ntracks = 64;
276 	dg->dg_ncylinders = dg->dg_secperunit / (dg->dg_nsectors * dg->dg_ntracks);
277 
278 	disk_set_info(sc->sc_dev, dk, NULL);
279 }
280 
281 /*
282  * dkwedge_add:		[exported function]
283  *
284  *	Add a disk wedge based on the provided information.
285  *
286  *	The incoming dkw_devname[] is ignored, instead being
287  *	filled in and returned to the caller.
288  */
int
dkwedge_add(struct dkwedge_info *dkw)
{
	struct dkwedge_softc *sc, *lsc;
	struct disk *pdk;
	u_int unit;
	int error;
	dev_t pdev;

	/* Force NUL-termination of caller-supplied strings before use. */
	dkw->dkw_parent[sizeof(dkw->dkw_parent) - 1] = '\0';
	pdk = disk_find(dkw->dkw_parent);
	if (pdk == NULL)
		return (ENODEV);

	error = dkwedge_compute_pdev(pdk->dk_name, &pdev, VBLK);
	if (error)
		return (error);

	if (dkw->dkw_offset < 0)
		return (EINVAL);

	/* Allocate and pre-fill the softc before taking any locks. */
	sc = malloc(sizeof(*sc), M_DKWEDGE, M_WAITOK|M_ZERO);
	sc->sc_state = DKW_STATE_LARVAL;
	sc->sc_parent = pdk;
	sc->sc_pdev = pdev;
	sc->sc_offset = dkw->dkw_offset;
	sc->sc_size = dkw->dkw_size;

	memcpy(sc->sc_wname, dkw->dkw_wname, sizeof(sc->sc_wname));
	sc->sc_wname[sizeof(sc->sc_wname) - 1] = '\0';

	memcpy(sc->sc_ptype, dkw->dkw_ptype, sizeof(sc->sc_ptype));
	sc->sc_ptype[sizeof(sc->sc_ptype) - 1] = '\0';

	bufq_alloc(&sc->sc_bufq, "fcfs", 0);

	callout_init(&sc->sc_restart_ch, 0);
	callout_setfunc(&sc->sc_restart_ch, dkrestart, sc);

	/*
	 * Wedge will be added; increment the wedge count for the parent.
	 * Only allow this to happen if RAW_PART is the only thing open.
	 */
	mutex_enter(&pdk->dk_openlock);
	if (pdk->dk_openmask & ~(1 << RAW_PART))
		error = EBUSY;
	else {
		/* Check for wedge overlap. */
		LIST_FOREACH(lsc, &pdk->dk_wedges, sc_plink) {
			daddr_t lastblk = sc->sc_offset + sc->sc_size - 1;
			daddr_t llastblk = lsc->sc_offset + lsc->sc_size - 1;

			if (sc->sc_offset >= lsc->sc_offset &&
			    sc->sc_offset <= llastblk) {
				/* Overlaps the tail of the existing wedge. */
				break;
			}
			if (lastblk >= lsc->sc_offset &&
			    lastblk <= llastblk) {
				/* Overlaps the head of the existing wedge. */
			    	break;
			}
		}
		/*
		 * lsc != NULL means the loop broke on an overlap.  An exact
		 * duplicate (same offset/size/name) is reported as EEXIST,
		 * any other overlap as EINVAL.
		 */
		if (lsc != NULL) {
			if (sc->sc_offset == lsc->sc_offset &&
			    sc->sc_size == lsc->sc_size &&
			    strcmp(sc->sc_wname, lsc->sc_wname) == 0)
				error = EEXIST;
			else
				error = EINVAL;
		} else {
			pdk->dk_nwedges++;
			LIST_INSERT_HEAD(&pdk->dk_wedges, sc, sc_plink);
		}
	}
	mutex_exit(&pdk->dk_openlock);
	if (error) {
		bufq_free(sc->sc_bufq);
		free(sc, M_DKWEDGE);
		return (error);
	}

	/* Fill in our cfdata for the pseudo-device glue. */
	sc->sc_cfdata.cf_name = dk_cd.cd_name;
	sc->sc_cfdata.cf_atname = dk_ca.ca_name;
	/* sc->sc_cfdata.cf_unit set below */
	sc->sc_cfdata.cf_fstate = FSTATE_STAR;

	/* Insert the larval wedge into the array. */
	rw_enter(&dkwedges_lock, RW_WRITER);
	for (error = 0;;) {
		struct dkwedge_softc **scpp;

		/*
		 * Check for a duplicate wname while searching for
		 * a slot.
		 */
		for (scpp = NULL, unit = 0; unit < ndkwedges; unit++) {
			if (dkwedges[unit] == NULL) {
				if (scpp == NULL) {
					scpp = &dkwedges[unit];
					sc->sc_cfdata.cf_unit = unit;
				}
			} else {
				/* XXX Unicode. */
				if (strcmp(dkwedges[unit]->sc_wname,
					   sc->sc_wname) == 0) {
					error = EEXIST;
					break;
				}
			}
		}
		if (error)
			break;
		KASSERT(unit == ndkwedges);
		/* No free slot: grow the array and rescan from the top. */
		if (scpp == NULL)
			dkwedge_array_expand();
		else {
			KASSERT(scpp == &dkwedges[sc->sc_cfdata.cf_unit]);
			*scpp = sc;
			break;
		}
	}
	rw_exit(&dkwedges_lock);
	if (error) {
		/* Roll back the parent bookkeeping done above. */
		mutex_enter(&pdk->dk_openlock);
		pdk->dk_nwedges--;
		LIST_REMOVE(sc, sc_plink);
		mutex_exit(&pdk->dk_openlock);

		bufq_free(sc->sc_bufq);
		free(sc, M_DKWEDGE);
		return (error);
	}

	/*
	 * Now that we know the unit #, attach a pseudo-device for
	 * this wedge instance.  This will provide us with the
	 * device_t necessary for glue to other parts of the system.
	 *
	 * This should never fail, unless we're almost totally out of
	 * memory.
	 */
	if ((sc->sc_dev = config_attach_pseudo(&sc->sc_cfdata)) == NULL) {
		aprint_error("%s%u: unable to attach pseudo-device\n",
		    sc->sc_cfdata.cf_name, sc->sc_cfdata.cf_unit);

		/* Undo the array slot and parent bookkeeping. */
		rw_enter(&dkwedges_lock, RW_WRITER);
		dkwedges[sc->sc_cfdata.cf_unit] = NULL;
		rw_exit(&dkwedges_lock);

		mutex_enter(&pdk->dk_openlock);
		pdk->dk_nwedges--;
		LIST_REMOVE(sc, sc_plink);
		mutex_exit(&pdk->dk_openlock);

		bufq_free(sc->sc_bufq);
		free(sc, M_DKWEDGE);
		return (ENOMEM);
	}

	/* Return the devname to the caller. */
	strlcpy(dkw->dkw_devname, device_xname(sc->sc_dev),
		sizeof(dkw->dkw_devname));

	/*
	 * XXX Really ought to make the disk_attach() and the changing
	 * of state to RUNNING atomic.
	 */

	disk_init(&sc->sc_dk, device_xname(sc->sc_dev), NULL);
	dk_set_geometry(sc, pdk);
	disk_attach(&sc->sc_dk);

	/* Disk wedge is ready for use! */
	sc->sc_state = DKW_STATE_RUNNING;

	/* Announce our arrival. */
	aprint_normal(
	    "%s at %s: \"%s\", %"PRIu64" blocks at %"PRId64", type: %s\n",
	    device_xname(sc->sc_dev), pdk->dk_name,
	    sc->sc_wname,	/* XXX Unicode */
	    sc->sc_size, sc->sc_offset,
	    sc->sc_ptype[0] == '\0' ? "<unknown>" : sc->sc_ptype);

	return (0);
}
476 
477 /*
478  * dkwedge_find:
479  *
480  *	Lookup a disk wedge based on the provided information.
481  *	NOTE: We look up the wedge based on the wedge devname,
482  *	not wname.
483  *
484  *	Return NULL if the wedge is not found, otherwise return
485  *	the wedge's softc.  Assign the wedge's unit number to unitp
486  *	if unitp is not NULL.
487  */
488 static struct dkwedge_softc *
489 dkwedge_find(struct dkwedge_info *dkw, u_int *unitp)
490 {
491 	struct dkwedge_softc *sc = NULL;
492 	u_int unit;
493 
494 	/* Find our softc. */
495 	dkw->dkw_devname[sizeof(dkw->dkw_devname) - 1] = '\0';
496 	rw_enter(&dkwedges_lock, RW_READER);
497 	for (unit = 0; unit < ndkwedges; unit++) {
498 		if ((sc = dkwedges[unit]) != NULL &&
499 		    strcmp(device_xname(sc->sc_dev), dkw->dkw_devname) == 0 &&
500 		    strcmp(sc->sc_parent->dk_name, dkw->dkw_parent) == 0) {
501 			break;
502 		}
503 	}
504 	rw_exit(&dkwedges_lock);
505 	if (unit == ndkwedges)
506 		return NULL;
507 
508 	if (unitp != NULL)
509 		*unitp = unit;
510 
511 	return sc;
512 }
513 
514 /*
515  * dkwedge_del:		[exported function]
516  *
517  *	Delete a disk wedge based on the provided information.
518  *	NOTE: We look up the wedge based on the wedge devname,
519  *	not wname.
520  */
int
dkwedge_del(struct dkwedge_info *dkw)
{

	/* Plain deletion: no special detach flags. */
	return (dkwedge_del1(dkw, 0));
}
526 
527 int
528 dkwedge_del1(struct dkwedge_info *dkw, int flags)
529 {
530 	struct dkwedge_softc *sc = NULL;
531 
532 	/* Find our softc. */
533 	if ((sc = dkwedge_find(dkw, NULL)) == NULL)
534 		return (ESRCH);
535 
536 	return config_detach(sc->sc_dev, flags);
537 }
538 
/*
 * dkwedge_cleanup_parent:
 *
 *	Release the wedge's hold on its parent if the wedge is idle, or
 *	force it closed when DETACH_FORCE is set.  Returns EBUSY when the
 *	wedge is still open and no force was requested.
 */
static int
dkwedge_cleanup_parent(struct dkwedge_softc *sc, int flags)
{
	struct disk *dk = &sc->sc_dk;
	int rc;

	rc = 0;
	mutex_enter(&dk->dk_openlock);
	if (dk->dk_openmask == 0)
		;	/* nothing to do */
	else if ((flags & DETACH_FORCE) == 0)
		rc = EBUSY;
	else {
		/* NB: dklastclose() drops dk_rawlock before returning. */
		mutex_enter(&sc->sc_parent->dk_rawlock);
		rc = dklastclose(sc); /* releases dk_rawlock */
	}
	mutex_exit(&dk->dk_openlock);

	return rc;
}
559 
560 /*
561  * dkwedge_detach:
562  *
563  *	Autoconfiguration detach function for pseudo-device glue.
564  */
static int
dkwedge_detach(device_t self, int flags)
{
	struct dkwedge_softc *sc = NULL;
	u_int unit;
	int bmaj, cmaj, rc, s;

	/* Find this device in the wedge table and mark it dying. */
	rw_enter(&dkwedges_lock, RW_WRITER);
	for (unit = 0; unit < ndkwedges; unit++) {
		if ((sc = dkwedges[unit]) != NULL && sc->sc_dev == self)
			break;
	}
	if (unit == ndkwedges)
		rc = ENXIO;
	else if ((rc = dkwedge_cleanup_parent(sc, flags)) == 0) {
		/* Mark the wedge as dying. */
		sc->sc_state = DKW_STATE_DYING;
	}
	rw_exit(&dkwedges_lock);

	if (rc != 0)
		return rc;

	pmf_device_deregister(self);

	/* Locate the wedge major numbers. */
	bmaj = bdevsw_lookup_major(&dk_bdevsw);
	cmaj = cdevsw_lookup_major(&dk_cdevsw);

	/* Kill any pending restart. */
	callout_stop(&sc->sc_restart_ch);

	/*
	 * dkstart() will kill any queued buffers now that the
	 * state of the wedge is not RUNNING.  Once we've done
	 * that, wait for any other pending I/O to complete.
	 */
	s = splbio();
	dkstart(sc);
	dkwedge_wait_drain(sc);
	splx(s);

	/* Nuke the vnodes for any open instances. */
	vdevgone(bmaj, unit, unit, VBLK);
	vdevgone(cmaj, unit, unit, VCHR);

	/* Clean up the parent; force-close since we are going away. */
	dkwedge_cleanup_parent(sc, flags | DETACH_FORCE);

	/* Announce our departure. */
	aprint_normal("%s at %s (%s) deleted\n", device_xname(sc->sc_dev),
	    sc->sc_parent->dk_name,
	    sc->sc_wname);	/* XXX Unicode */

	/* Unlink from the parent's wedge list. */
	mutex_enter(&sc->sc_parent->dk_openlock);
	sc->sc_parent->dk_nwedges--;
	LIST_REMOVE(sc, sc_plink);
	mutex_exit(&sc->sc_parent->dk_openlock);

	/* Delete our buffer queue. */
	bufq_free(sc->sc_bufq);

	/* Detach from the disk list. */
	disk_detach(&sc->sc_dk);
	disk_destroy(&sc->sc_dk);

	/* Poof.  Free the table slot last, then the softc itself. */
	rw_enter(&dkwedges_lock, RW_WRITER);
	dkwedges[unit] = NULL;
	sc->sc_state = DKW_STATE_DEAD;
	rw_exit(&dkwedges_lock);

	free(sc, M_DKWEDGE);

	return 0;
}
641 
642 /*
643  * dkwedge_delall:	[exported function]
644  *
645  *	Delete all of the wedges on the specified disk.  Used when
646  *	a disk is being detached.
647  */
648 void
649 dkwedge_delall(struct disk *pdk)
650 {
651 	dkwedge_delall1(pdk, false);
652 }
653 
654 static void
655 dkwedge_delall1(struct disk *pdk, bool idleonly)
656 {
657 	struct dkwedge_info dkw;
658 	struct dkwedge_softc *sc;
659 	int flags;
660 
661 	flags = DETACH_QUIET;
662 	if (!idleonly) flags |= DETACH_FORCE;
663 
664 	for (;;) {
665 		mutex_enter(&pdk->dk_openlock);
666 		LIST_FOREACH(sc, &pdk->dk_wedges, sc_plink) {
667 			if (!idleonly || sc->sc_dk.dk_openmask == 0)
668 				break;
669 		}
670 		if (sc == NULL) {
671 			KASSERT(idleonly || pdk->dk_nwedges == 0);
672 			mutex_exit(&pdk->dk_openlock);
673 			return;
674 		}
675 		strcpy(dkw.dkw_parent, pdk->dk_name);
676 		strlcpy(dkw.dkw_devname, device_xname(sc->sc_dev),
677 			sizeof(dkw.dkw_devname));
678 		mutex_exit(&pdk->dk_openlock);
679 		(void) dkwedge_del1(&dkw, flags);
680 	}
681 }
682 
683 /*
684  * dkwedge_list:	[exported function]
685  *
686  *	List all of the wedges on a particular disk.
687  */
688 int
689 dkwedge_list(struct disk *pdk, struct dkwedge_list *dkwl, struct lwp *l)
690 {
691 	struct uio uio;
692 	struct iovec iov;
693 	struct dkwedge_softc *sc;
694 	struct dkwedge_info dkw;
695 	int error = 0;
696 
697 	iov.iov_base = dkwl->dkwl_buf;
698 	iov.iov_len = dkwl->dkwl_bufsize;
699 
700 	uio.uio_iov = &iov;
701 	uio.uio_iovcnt = 1;
702 	uio.uio_offset = 0;
703 	uio.uio_resid = dkwl->dkwl_bufsize;
704 	uio.uio_rw = UIO_READ;
705 	KASSERT(l == curlwp);
706 	uio.uio_vmspace = l->l_proc->p_vmspace;
707 
708 	dkwl->dkwl_ncopied = 0;
709 
710 	mutex_enter(&pdk->dk_openlock);
711 	LIST_FOREACH(sc, &pdk->dk_wedges, sc_plink) {
712 		if (uio.uio_resid < sizeof(dkw))
713 			break;
714 
715 		if (sc->sc_state != DKW_STATE_RUNNING)
716 			continue;
717 
718 		strlcpy(dkw.dkw_devname, device_xname(sc->sc_dev),
719 			sizeof(dkw.dkw_devname));
720 		memcpy(dkw.dkw_wname, sc->sc_wname, sizeof(dkw.dkw_wname));
721 		dkw.dkw_wname[sizeof(dkw.dkw_wname) - 1] = '\0';
722 		strcpy(dkw.dkw_parent, sc->sc_parent->dk_name);
723 		dkw.dkw_offset = sc->sc_offset;
724 		dkw.dkw_size = sc->sc_size;
725 		strcpy(dkw.dkw_ptype, sc->sc_ptype);
726 
727 		error = uiomove(&dkw, sizeof(dkw), &uio);
728 		if (error)
729 			break;
730 		dkwl->dkwl_ncopied++;
731 	}
732 	dkwl->dkwl_nwedges = pdk->dk_nwedges;
733 	mutex_exit(&pdk->dk_openlock);
734 
735 	return (error);
736 }
737 
738 device_t
739 dkwedge_find_by_wname(const char *wname)
740 {
741 	device_t dv = NULL;
742 	struct dkwedge_softc *sc;
743 	int i;
744 
745 	rw_enter(&dkwedges_lock, RW_WRITER);
746 	for (i = 0; i < ndkwedges; i++) {
747 		if ((sc = dkwedges[i]) == NULL)
748 			continue;
749 		if (strcmp(sc->sc_wname, wname) == 0) {
750 			if (dv != NULL) {
751 				printf(
752 				    "WARNING: double match for wedge name %s "
753 				    "(%s, %s)\n", wname, device_xname(dv),
754 				    device_xname(sc->sc_dev));
755 				continue;
756 			}
757 			dv = sc->sc_dev;
758 		}
759 	}
760 	rw_exit(&dkwedges_lock);
761 	return dv;
762 }
763 
764 void
765 dkwedge_print_wnames(void)
766 {
767 	struct dkwedge_softc *sc;
768 	int i;
769 
770 	rw_enter(&dkwedges_lock, RW_WRITER);
771 	for (i = 0; i < ndkwedges; i++) {
772 		if ((sc = dkwedges[i]) == NULL)
773 			continue;
774 		printf(" wedge:%s", sc->sc_wname);
775 	}
776 	rw_exit(&dkwedges_lock);
777 }
778 
779 /*
780  * We need a dummy object to stuff into the dkwedge discovery method link
781  * set to ensure that there is always at least one object in the set.
782  */
783 static struct dkwedge_discovery_method dummy_discovery_method;
784 __link_set_add_bss(dkwedge_methods, dummy_discovery_method);
785 
786 /*
787  * dkwedge_init:
788  *
789  *	Initialize the disk wedge subsystem.
790  */
void
dkwedge_init(void)
{
	__link_set_decl(dkwedge_methods, struct dkwedge_discovery_method);
	struct dkwedge_discovery_method * const *ddmp;
	struct dkwedge_discovery_method *lddm, *ddm;

	rw_init(&dkwedges_lock);
	rw_init(&dkwedge_discovery_methods_lock);

	/* Register the autoconf glue for the "dk" pseudo-device. */
	if (config_cfdriver_attach(&dk_cd) != 0)
		panic("dkwedge: unable to attach cfdriver");
	if (config_cfattach_attach(dk_cd.cd_name, &dk_ca) != 0)
		panic("dkwedge: unable to attach cfattach");

	rw_enter(&dkwedge_discovery_methods_lock, RW_WRITER);

	LIST_INIT(&dkwedge_discovery_methods);

	/*
	 * Collect all discovery methods from the link set and insert
	 * them into the list sorted by ascending priority value
	 * (lower value = higher priority).
	 */
	__link_set_foreach(ddmp, dkwedge_methods) {
		ddm = *ddmp;
		if (ddm == &dummy_discovery_method)
			continue;
		if (LIST_EMPTY(&dkwedge_discovery_methods)) {
			LIST_INSERT_HEAD(&dkwedge_discovery_methods,
					 ddm, ddm_list);
			continue;
		}
		LIST_FOREACH(lddm, &dkwedge_discovery_methods, ddm_list) {
			/* Duplicate priorities are rejected, not inserted. */
			if (ddm->ddm_priority == lddm->ddm_priority) {
				aprint_error("dk-method-%s: method \"%s\" "
				    "already exists at priority %d\n",
				    ddm->ddm_name, lddm->ddm_name,
				    lddm->ddm_priority);
				/* Not inserted. */
				break;
			}
			if (ddm->ddm_priority < lddm->ddm_priority) {
				/* Higher priority; insert before. */
				LIST_INSERT_BEFORE(lddm, ddm, ddm_list);
				break;
			}
			if (LIST_NEXT(lddm, ddm_list) == NULL) {
				/* Last one; insert after. */
				KASSERT(lddm->ddm_priority < ddm->ddm_priority);
				LIST_INSERT_AFTER(lddm, ddm, ddm_list);
				break;
			}
		}
	}

	rw_exit(&dkwedge_discovery_methods_lock);
}
844 
845 #ifdef DKWEDGE_AUTODISCOVER
846 int	dkwedge_autodiscover = 1;
847 #else
848 int	dkwedge_autodiscover = 0;
849 #endif
850 
851 /*
852  * dkwedge_discover:	[exported function]
853  *
854  *	Discover the wedges on a newly attached disk.
855  *	Remove all unused wedges on the disk first.
856  */
void
dkwedge_discover(struct disk *pdk)
{
	struct dkwedge_discovery_method *ddm;
	struct vnode *vp;
	int error;
	dev_t pdev;

	/*
	 * Require people playing with wedges to enable this explicitly.
	 */
	if (dkwedge_autodiscover == 0)
		return;

	rw_enter(&dkwedge_discovery_methods_lock, RW_READER);

	/*
	 * Use the character device for scanning, the block device
	 * is busy if there are already wedges attached.
	 */
	error = dkwedge_compute_pdev(pdk->dk_name, &pdev, VCHR);
	if (error) {
		aprint_error("%s: unable to compute pdev, error = %d\n",
		    pdk->dk_name, error);
		goto out;
	}

	error = cdevvp(pdev, &vp);
	if (error) {
		aprint_error("%s: unable to find vnode for pdev, error = %d\n",
		    pdk->dk_name, error);
		goto out;
	}

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error) {
		aprint_error("%s: unable to lock vnode for pdev, error = %d\n",
		    pdk->dk_name, error);
		/* Lock failed: only the reference needs releasing. */
		vrele(vp);
		goto out;
	}

	/* FSILENT: probing; don't log "device not configured" noise. */
	error = VOP_OPEN(vp, FREAD | FSILENT, NOCRED);
	if (error) {
		if (error != ENODEV)
			aprint_error("%s: unable to open device, error = %d\n",
			    pdk->dk_name, error);
		/* vput: drop both the vnode lock and the reference. */
		vput(vp);
		goto out;
	}
	VOP_UNLOCK(vp);

	/*
	 * Remove unused wedges
	 */
	dkwedge_delall1(pdk, true);

	/*
	 * For each supported partition map type, look to see if
	 * this map type exists.  If so, parse it and add the
	 * corresponding wedges.
	 */
	LIST_FOREACH(ddm, &dkwedge_discovery_methods, ddm_list) {
		error = (*ddm->ddm_discover)(pdk, vp);
		if (error == 0) {
			/* Successfully created wedges; we're done. */
			break;
		}
	}

	error = vn_close(vp, FREAD, NOCRED);
	if (error) {
		aprint_error("%s: unable to close device, error = %d\n",
		    pdk->dk_name, error);
		/* We'll just assume the vnode has been cleaned up. */
	}

 out:
	rw_exit(&dkwedge_discovery_methods_lock);
}
937 
938 /*
939  * dkwedge_read:
940  *
941  *	Read some data from the specified disk, used for
942  *	partition discovery.
943  */
int
dkwedge_read(struct disk *pdk, struct vnode *vp, daddr_t blkno,
    void *tbuf, size_t len)
{
	buf_t *bp;
	int error;
	bool isopen;
	dev_t bdev;
	struct vnode *bdvp;

	/*
	 * The kernel cannot read from a character device vnode
	 * as physio() only handles user memory.
	 *
	 * If the block device has already been opened by a wedge
	 * use that vnode and temporarily bump the open counter.
	 *
	 * Otherwise try to open the block device.
	 */

	bdev = devsw_chr2blk(vp->v_rdev);

	/* Borrow the parent's raw block vnode, or open one ourselves. */
	mutex_enter(&pdk->dk_rawlock);
	if (pdk->dk_rawopens != 0) {
		KASSERT(pdk->dk_rawvp != NULL);
		isopen = true;
		++pdk->dk_rawopens;
		bdvp = pdk->dk_rawvp;
		error = 0;
	} else {
		isopen = false;
		error = dk_open_parent(bdev, FREAD, &bdvp);
	}
	mutex_exit(&pdk->dk_rawlock);

	if (error)
		return error;

	/* Issue a synchronous raw read of len bytes at blkno. */
	bp = getiobuf(bdvp, true);
	bp->b_flags = B_READ;
	bp->b_cflags = BC_BUSY;
	bp->b_dev = bdev;
	bp->b_data = tbuf;
	bp->b_bufsize = bp->b_bcount = len;
	bp->b_blkno = blkno;
	bp->b_cylinder = 0;
	bp->b_error = 0;

	VOP_STRATEGY(bdvp, bp);
	error = biowait(bp);
	putiobuf(bp);

	/* Return the borrowed open count, or close our private open. */
	mutex_enter(&pdk->dk_rawlock);
	if (isopen) {
		--pdk->dk_rawopens;
	} else {
		dk_close_parent(bdvp, FREAD);
	}
	mutex_exit(&pdk->dk_rawlock);

	return error;
}
1006 
1007 /*
1008  * dkwedge_lookup:
1009  *
1010  *	Look up a dkwedge_softc based on the provided dev_t.
1011  */
static struct dkwedge_softc *
dkwedge_lookup(dev_t dev)
{
	/* The wedge unit number is the device minor number. */
	int unit = minor(dev);

	/*
	 * NOTE(review): dkwedges/ndkwedges are read here without taking
	 * dkwedges_lock — presumably relied on being safe against the
	 * grow-only array; confirm against the locking rules elsewhere.
	 */
	if (unit >= ndkwedges)
		return (NULL);

	KASSERT(dkwedges != NULL);

	return (dkwedges[unit]);
}
1024 
/*
 * dk_open_parent:
 *
 *	Open the parent disk's block device by dev_t and return a
 *	referenced, unlocked vnode in *vpp.  Callers undo this with
 *	dk_close_parent().
 */
static int
dk_open_parent(dev_t dev, int mode, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = bdevvp(dev, &vp);
	if (error)
		return error;

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error) {
		/* Not locked yet: release only the reference. */
		vrele(vp);
		return error;
	}
	error = VOP_OPEN(vp, mode, NOCRED);
	if (error) {
		/* Locked: vput drops the lock and the reference. */
		vput(vp);
		return error;
	}

	/* VOP_OPEN() doesn't do this for us. */
	if (mode & FWRITE) {
		mutex_enter(vp->v_interlock);
		vp->v_writecount++;
		mutex_exit(vp->v_interlock);
	}

	VOP_UNLOCK(vp);

	*vpp = vp;

	return 0;
}
1059 
1060 static int
1061 dk_close_parent(struct vnode *vp, int mode)
1062 {
1063 	int error;
1064 
1065 	error = vn_close(vp, mode, NOCRED);
1066 	return error;
1067 }
1068 
1069 /*
1070  * dkopen:		[devsw entry point]
1071  *
1072  *	Open a wedge.
1073  */
static int
dkopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct dkwedge_softc *sc = dkwedge_lookup(dev);
	struct vnode *vp;
	int error = 0;

	if (sc == NULL)
		return (ENODEV);
	if (sc->sc_state != DKW_STATE_RUNNING)
		return (ENXIO);

	/*
	 * We go through a complicated little dance to only open the parent
	 * vnode once per wedge, no matter how many times the wedge is
	 * opened.  The reason?  We see one dkopen() per open call, but
	 * only dkclose() on the last close.
	 */
	mutex_enter(&sc->sc_dk.dk_openlock);
	mutex_enter(&sc->sc_parent->dk_rawlock);
	if (sc->sc_dk.dk_openmask == 0) {
		/* First open of this wedge: ensure the parent is open. */
		if (sc->sc_parent->dk_rawopens == 0) {
			KASSERT(sc->sc_parent->dk_rawvp == NULL);
			error = dk_open_parent(sc->sc_pdev, FREAD | FWRITE, &vp);
			if (error)
				goto popen_fail;
			sc->sc_parent->dk_rawvp = vp;
		}
		sc->sc_parent->dk_rawopens++;
	}
	/* Record the open in the char or block mask as appropriate. */
	if (fmt == S_IFCHR)
		sc->sc_dk.dk_copenmask |= 1;
	else
		sc->sc_dk.dk_bopenmask |= 1;
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

 popen_fail:
	mutex_exit(&sc->sc_parent->dk_rawlock);
	mutex_exit(&sc->sc_dk.dk_openlock);
	return (error);
}
1116 
1117 /*
1118  * Caller must hold sc->sc_dk.dk_openlock and sc->sc_parent->dk_rawlock.
1119  */
/*
 * Caller must hold sc->sc_dk.dk_openlock and sc->sc_parent->dk_rawlock.
 * NB: this function RELEASES dk_rawlock before returning; the parent
 * vnode close must happen without that lock held.
 */
static int
dklastclose(struct dkwedge_softc *sc)
{
	int error = 0, doclose;

	/* Drop our reference on the parent; close it if we were the last. */
	doclose = 0;
	if (sc->sc_parent->dk_rawopens > 0) {
		if (--sc->sc_parent->dk_rawopens == 0)
			doclose = 1;
	}

	mutex_exit(&sc->sc_parent->dk_rawlock);

	if (doclose) {
		KASSERT(sc->sc_parent->dk_rawvp != NULL);
		dk_close_parent(sc->sc_parent->dk_rawvp, FREAD | FWRITE);
		sc->sc_parent->dk_rawvp = NULL;
	}

	return error;
}
1141 
1142 /*
1143  * dkclose:		[devsw entry point]
1144  *
1145  *	Close a wedge.
1146  */
static int
dkclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct dkwedge_softc *sc = dkwedge_lookup(dev);
	int error = 0;

	if (sc == NULL)
		return (ENODEV);
	if (sc->sc_state != DKW_STATE_RUNNING)
		return (ENXIO);

	KASSERT(sc->sc_dk.dk_openmask != 0);

	mutex_enter(&sc->sc_dk.dk_openlock);
	mutex_enter(&sc->sc_parent->dk_rawlock);

	/* Clear the open bit for the char or block side being closed. */
	if (fmt == S_IFCHR)
		sc->sc_dk.dk_copenmask &= ~1;
	else
		sc->sc_dk.dk_bopenmask &= ~1;
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	/* Last close of this wedge: drop our hold on the parent. */
	if (sc->sc_dk.dk_openmask == 0)
		error = dklastclose(sc); /* releases dk_rawlock */
	else
		mutex_exit(&sc->sc_parent->dk_rawlock);

	mutex_exit(&sc->sc_dk.dk_openlock);

	return (error);
}
1179 
1180 /*
 * dkstrategy:		[devsw entry point]
1182  *
1183  *	Perform I/O based on the wedge I/O strategy.
1184  */
static void
dkstrategy(struct buf *bp)
{
	struct dkwedge_softc *sc = dkwedge_lookup(bp->b_dev);
	uint64_t p_size, p_offset;	/* wedge extent in DEV_BSIZE units */
	int s;

	if (sc == NULL) {
		bp->b_error = ENODEV;
		goto done;
	}

	/*
	 * Refuse I/O if the wedge is not fully configured or the
	 * parent's raw vnode is gone (e.g. wedge being torn down).
	 */
	if (sc->sc_state != DKW_STATE_RUNNING ||
	    sc->sc_parent->dk_rawvp == NULL) {
		bp->b_error = ENXIO;
		goto done;
	}

	/* If it's an empty transfer, wake up the top half now. */
	if (bp->b_bcount == 0)
		goto done;

	/* Convert wedge offset/size from native blocks to DEV_BSIZE units. */
	p_offset = sc->sc_offset << sc->sc_parent->dk_blkshift;
	p_size   = sc->sc_size << sc->sc_parent->dk_blkshift;

	/* Make sure it's in-range. */
	if (bounds_check_with_mediasize(bp, DEV_BSIZE, p_size) <= 0)
		goto done;

	/* Translate it to the parent's raw LBA. */
	bp->b_rawblkno = bp->b_blkno + p_offset;

	/* Place it in the queue and start I/O on the unit. */
	s = splbio();
	sc->sc_iopend++;
	bufq_put(sc->sc_bufq, bp);
	dkstart(sc);
	splx(s);
	return;

 done:
	/* Error or zero-length transfer: complete the buffer here. */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
1229 
1230 /*
1231  * dkstart:
1232  *
1233  *	Start I/O that has been enqueued on the wedge.
1234  *	NOTE: Must be called at splbio()!
1235  */
1236 static void
1237 dkstart(struct dkwedge_softc *sc)
1238 {
1239 	struct vnode *vp;
1240 	struct buf *bp, *nbp;
1241 
1242 	/* Do as much work as has been enqueued. */
1243 	while ((bp = bufq_peek(sc->sc_bufq)) != NULL) {
1244 		if (sc->sc_state != DKW_STATE_RUNNING) {
1245 			(void) bufq_get(sc->sc_bufq);
1246 			if (sc->sc_iopend-- == 1 &&
1247 			    (sc->sc_flags & DK_F_WAIT_DRAIN) != 0) {
1248 				sc->sc_flags &= ~DK_F_WAIT_DRAIN;
1249 				wakeup(&sc->sc_iopend);
1250 			}
1251 			bp->b_error = ENXIO;
1252 			bp->b_resid = bp->b_bcount;
1253 			biodone(bp);
1254 		}
1255 
1256 		/* Instrumentation. */
1257 		disk_busy(&sc->sc_dk);
1258 
1259 		nbp = getiobuf(sc->sc_parent->dk_rawvp, false);
1260 		if (nbp == NULL) {
1261 			/*
1262 			 * No resources to run this request; leave the
1263 			 * buffer queued up, and schedule a timer to
1264 			 * restart the queue in 1/2 a second.
1265 			 */
1266 			disk_unbusy(&sc->sc_dk, 0, bp->b_flags & B_READ);
1267 			callout_schedule(&sc->sc_restart_ch, hz / 2);
1268 			return;
1269 		}
1270 
1271 		(void) bufq_get(sc->sc_bufq);
1272 
1273 		nbp->b_data = bp->b_data;
1274 		nbp->b_flags = bp->b_flags;
1275 		nbp->b_oflags = bp->b_oflags;
1276 		nbp->b_cflags = bp->b_cflags;
1277 		nbp->b_iodone = dkiodone;
1278 		nbp->b_proc = bp->b_proc;
1279 		nbp->b_blkno = bp->b_rawblkno;
1280 		nbp->b_dev = sc->sc_parent->dk_rawvp->v_rdev;
1281 		nbp->b_bcount = bp->b_bcount;
1282 		nbp->b_private = bp;
1283 		BIO_COPYPRIO(nbp, bp);
1284 
1285 		vp = nbp->b_vp;
1286 		if ((nbp->b_flags & B_READ) == 0) {
1287 			mutex_enter(vp->v_interlock);
1288 			vp->v_numoutput++;
1289 			mutex_exit(vp->v_interlock);
1290 		}
1291 		VOP_STRATEGY(vp, nbp);
1292 	}
1293 }
1294 
1295 /*
1296  * dkiodone:
1297  *
1298  *	I/O to a wedge has completed; alert the top half.
1299  */
static void
dkiodone(struct buf *bp)
{
	struct buf *obp = bp->b_private;	/* original wedge buffer */
	struct dkwedge_softc *sc = dkwedge_lookup(obp->b_dev);

	int s = splbio();

	/* Propagate completion status from the nested buffer, then free it. */
	if (bp->b_error != 0)
		obp->b_error = bp->b_error;
	obp->b_resid = bp->b_resid;
	putiobuf(bp);

	/* If this was the last pending I/O, wake anyone draining the queue. */
	if (sc->sc_iopend-- == 1 && (sc->sc_flags & DK_F_WAIT_DRAIN) != 0) {
		sc->sc_flags &= ~DK_F_WAIT_DRAIN;
		wakeup(&sc->sc_iopend);
	}

	/* Instrumentation: account for the bytes actually transferred. */
	disk_unbusy(&sc->sc_dk, obp->b_bcount - obp->b_resid,
	    obp->b_flags & B_READ);

	biodone(obp);

	/* Kick the queue in case there is more work we can do. */
	dkstart(sc);
	splx(s);
}
1327 
1328 /*
1329  * dkrestart:
1330  *
1331  *	Restart the work queue after it was stalled due to
1332  *	a resource shortage.  Invoked via a callout.
1333  */
static void
dkrestart(void *v)
{
	struct dkwedge_softc *sc = v;
	int spl;

	/* Re-run the queue at splbio, as dkstart() requires. */
	spl = splbio();
	dkstart(sc);
	splx(spl);
}
1344 
1345 /*
1346  * dkminphys:
1347  *
1348  *	Call parent's minphys function.
1349  */
1350 static void
1351 dkminphys(struct buf *bp)
1352 {
1353 	struct dkwedge_softc *sc = dkwedge_lookup(bp->b_dev);
1354 	dev_t dev;
1355 
1356 	dev = bp->b_dev;
1357 	bp->b_dev = sc->sc_pdev;
1358 	(*sc->sc_parent->dk_driver->d_minphys)(bp);
1359 	bp->b_dev = dev;
1360 }
1361 
1362 /*
1363  * dkread:		[devsw entry point]
1364  *
1365  *	Read from a wedge.
1366  */
1367 static int
1368 dkread(dev_t dev, struct uio *uio, int flags)
1369 {
1370 	struct dkwedge_softc *sc = dkwedge_lookup(dev);
1371 
1372 	if (sc == NULL)
1373 		return (ENODEV);
1374 	if (sc->sc_state != DKW_STATE_RUNNING)
1375 		return (ENXIO);
1376 
1377 	return (physio(dkstrategy, NULL, dev, B_READ, dkminphys, uio));
1378 }
1379 
1380 /*
1381  * dkwrite:		[devsw entry point]
1382  *
1383  *	Write to a wedge.
1384  */
1385 static int
1386 dkwrite(dev_t dev, struct uio *uio, int flags)
1387 {
1388 	struct dkwedge_softc *sc = dkwedge_lookup(dev);
1389 
1390 	if (sc == NULL)
1391 		return (ENODEV);
1392 	if (sc->sc_state != DKW_STATE_RUNNING)
1393 		return (ENXIO);
1394 
1395 	return (physio(dkstrategy, NULL, dev, B_WRITE, dkminphys, uio));
1396 }
1397 
1398 /*
1399  * dkioctl:		[devsw entry point]
1400  *
1401  *	Perform an ioctl request on a wedge.
1402  */
1403 static int
1404 dkioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1405 {
1406 	struct dkwedge_softc *sc = dkwedge_lookup(dev);
1407 	int error = 0;
1408 
1409 	if (sc == NULL)
1410 		return (ENODEV);
1411 	if (sc->sc_state != DKW_STATE_RUNNING)
1412 		return (ENXIO);
1413 	if (sc->sc_parent->dk_rawvp == NULL)
1414 		return (ENXIO);
1415 
1416 	/*
1417 	 * We pass NODEV instead of our device to indicate we don't
1418 	 * want to handle disklabel ioctls
1419 	 */
1420 	error = disk_ioctl(&sc->sc_dk, NODEV, cmd, data, flag, l);
1421 	if (error != EPASSTHROUGH)
1422 		return (error);
1423 
1424 	error = 0;
1425 
1426 	switch (cmd) {
1427 	case DIOCCACHESYNC:
1428 		/*
1429 		 * XXX Do we really need to care about having a writable
1430 		 * file descriptor here?
1431 		 */
1432 		if ((flag & FWRITE) == 0)
1433 			error = EBADF;
1434 		else
1435 			error = VOP_IOCTL(sc->sc_parent->dk_rawvp,
1436 					  cmd, data, flag,
1437 					  l != NULL ? l->l_cred : NOCRED);
1438 		break;
1439 	case DIOCGWEDGEINFO:
1440 	    {
1441 		struct dkwedge_info *dkw = (void *) data;
1442 
1443 		strlcpy(dkw->dkw_devname, device_xname(sc->sc_dev),
1444 			sizeof(dkw->dkw_devname));
1445 	    	memcpy(dkw->dkw_wname, sc->sc_wname, sizeof(dkw->dkw_wname));
1446 		dkw->dkw_wname[sizeof(dkw->dkw_wname) - 1] = '\0';
1447 		strcpy(dkw->dkw_parent, sc->sc_parent->dk_name);
1448 		dkw->dkw_offset = sc->sc_offset;
1449 		dkw->dkw_size = sc->sc_size;
1450 		strcpy(dkw->dkw_ptype, sc->sc_ptype);
1451 
1452 		break;
1453 	    }
1454 
1455 	default:
1456 		error = ENOTTY;
1457 	}
1458 
1459 	return (error);
1460 }
1461 
1462 /*
1463  * dkdiscard:		[devsw entry point]
1464  *
1465  *	Perform a discard-range request on a wedge.
1466  */
static int
dkdiscard(dev_t dev, off_t pos, off_t len)
{
	struct dkwedge_softc *sc = dkwedge_lookup(dev);
	unsigned shift;
	off_t offset, maxlen;	/* wedge start and size, in bytes */

	if (sc == NULL)
		return (ENODEV);
	if (sc->sc_state != DKW_STATE_RUNNING)
		return (ENXIO);
	if (sc->sc_parent->dk_rawvp == NULL)
		return (ENXIO);

	/* Shift to convert parent-native blocks to bytes. */
	shift = (sc->sc_parent->dk_blkshift + DEV_BSHIFT);
	/* The shifts below must not overflow off_t. */
	KASSERT(__type_fit(off_t, sc->sc_size));
	KASSERT(__type_fit(off_t, sc->sc_offset));
	KASSERT(0 <= sc->sc_offset);
	KASSERT(sc->sc_size <= (__type_max(off_t) >> shift));
	KASSERT(sc->sc_offset <= ((__type_max(off_t) >> shift) - sc->sc_size));
	offset = ((off_t)sc->sc_offset << shift);
	maxlen = ((off_t)sc->sc_size << shift);

	/*
	 * Reject ranges that extend past the end of the wedge.  The
	 * two-step comparison avoids overflow in pos + len.
	 */
	if (len > maxlen)
		return (EINVAL);
	if (pos > (maxlen - len))
		return (EINVAL);

	/* Translate to the parent's byte address space and pass down. */
	pos += offset;
	return VOP_FDISCARD(sc->sc_parent->dk_rawvp, pos, len);
}
1498 
1499 /*
1500  * dksize:		[devsw entry point]
1501  *
1502  *	Query the size of a wedge for the purpose of performing a dump
1503  *	or for swapping to.
1504  */
1505 static int
1506 dksize(dev_t dev)
1507 {
1508 	struct dkwedge_softc *sc = dkwedge_lookup(dev);
1509 	int rv = -1;
1510 
1511 	if (sc == NULL)
1512 		return (-1);
1513 	if (sc->sc_state != DKW_STATE_RUNNING)
1514 		return (-1);
1515 
1516 	mutex_enter(&sc->sc_dk.dk_openlock);
1517 	mutex_enter(&sc->sc_parent->dk_rawlock);
1518 
1519 	/* Our content type is static, no need to open the device. */
1520 
1521 	if (strcmp(sc->sc_ptype, DKW_PTYPE_SWAP) == 0) {
1522 		/* Saturate if we are larger than INT_MAX. */
1523 		if (sc->sc_size > INT_MAX)
1524 			rv = INT_MAX;
1525 		else
1526 			rv = (int) sc->sc_size;
1527 	}
1528 
1529 	mutex_exit(&sc->sc_parent->dk_rawlock);
1530 	mutex_exit(&sc->sc_dk.dk_openlock);
1531 
1532 	return (rv);
1533 }
1534 
1535 /*
1536  * dkdump:		[devsw entry point]
1537  *
1538  *	Perform a crash dump to a wedge.
1539  */
static int
dkdump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct dkwedge_softc *sc = dkwedge_lookup(dev);
	const struct bdevsw *bdev;
	int rv = 0;

	if (sc == NULL)
		return (ENODEV);
	if (sc->sc_state != DKW_STATE_RUNNING)
		return (ENXIO);

	mutex_enter(&sc->sc_dk.dk_openlock);
	mutex_enter(&sc->sc_parent->dk_rawlock);

	/* Our content type is static, no need to open the device. */

	/* Only dump onto wedges typed as swap or RAID partitions. */
	if (strcmp(sc->sc_ptype, DKW_PTYPE_SWAP) != 0 &&
	    strcmp(sc->sc_ptype, DKW_PTYPE_RAID) != 0) {
		rv = ENXIO;
		goto out;
	}
	/* The dump must be a whole number of DEV_BSIZE blocks. */
	if (size % DEV_BSIZE != 0) {
		rv = EINVAL;
		goto out;
	}
	/* The dump must fit entirely within the wedge. */
	if (blkno + size / DEV_BSIZE > sc->sc_size) {
		printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
		    "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
		    size / DEV_BSIZE, sc->sc_size);
		rv = EINVAL;
		goto out;
	}

	/* Translate into the parent's block space and use its dump entry. */
	bdev = bdevsw_lookup(sc->sc_pdev);
	rv = (*bdev->d_dump)(sc->sc_pdev, blkno + sc->sc_offset, va, size);

out:
	mutex_exit(&sc->sc_parent->dk_rawlock);
	mutex_exit(&sc->sc_dk.dk_openlock);

	return rv;
}
1583 
1584 /*
1585  * config glue
1586  */
1587 
1588 /*
1589  * dkwedge_find_partition
1590  *
1591  *	Find wedge corresponding to the specified parent name
1592  *	and offset/length.
1593  */
1594 device_t
1595 dkwedge_find_partition(device_t parent, daddr_t startblk, uint64_t nblks)
1596 {
1597 	struct dkwedge_softc *sc;
1598 	int i;
1599 	device_t wedge = NULL;
1600 
1601 	rw_enter(&dkwedges_lock, RW_READER);
1602 	for (i = 0; i < ndkwedges; i++) {
1603 		if ((sc = dkwedges[i]) == NULL)
1604 			continue;
1605 		if (strcmp(sc->sc_parent->dk_name, device_xname(parent)) == 0 &&
1606 		    sc->sc_offset == startblk &&
1607 		    sc->sc_size == nblks) {
1608 			if (wedge) {
1609 				printf("WARNING: double match for boot wedge "
1610 				    "(%s, %s)\n",
1611 				    device_xname(wedge),
1612 				    device_xname(sc->sc_dev));
1613 				continue;
1614 			}
1615 			wedge = sc->sc_dev;
1616 		}
1617 	}
1618 	rw_exit(&dkwedges_lock);
1619 
1620 	return wedge;
1621 }
1622 
1623 const char *
1624 dkwedge_get_parent_name(dev_t dev)
1625 {
1626 	/* XXX: perhaps do this in lookup? */
1627 	int bmaj = bdevsw_lookup_major(&dk_bdevsw);
1628 	int cmaj = cdevsw_lookup_major(&dk_cdevsw);
1629 	if (major(dev) != bmaj && major(dev) != cmaj)
1630 		return NULL;
1631 	struct dkwedge_softc *sc = dkwedge_lookup(dev);
1632 	if (sc == NULL)
1633 		return NULL;
1634 	return sc->sc_parent->dk_name;
1635 }
1636 
1637