/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 *	Device Strategy
 */
#include <sys/dktp/cm.h>
#include <sys/kstat.h>

#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/tgcom.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/param.h>
#include <vm/page.h>
#include <sys/modctl.h>

/*
 *	Object Management
 */

static struct buf *qmerge_nextbp(struct que_data *qfp, struct buf *bp_merge,
    int *can_merge);

static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"Device Strategy Objects %I%"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modlmisc,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

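/*
 * Overview: this file implements the flow-control ("flc") and queue
 * ("que") strategy objects used by the dktp disk stack.  Each flavor
 * is built by a *_create() routine (dsngl, dmult, duplx, adapt for
 * flow control; qfifo, qsort, qmerge, qtag for queueing) and is then
 * driven through its ops vector.  A hypothetical consumer (sketch
 * only; the exact wrapper macros live in the flowctrl/queue headers,
 * not in this file) would look roughly like:
 *
 *	struct flc_obj *flcobjp = dmult_create();
 *	...initialize it with its tgcom and que objects at attach time,
 *	...enque each buf from the strategy(9E) entry point,
 *	...deque on command completion to kick off the next request.
 */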

/*
 *	Common Flow Control functions
 */

/*
 * Local static data
 */
#ifdef	FLC_DEBUG
#define	DENT	0x0001
#define	DERR	0x0002
#define	DIO	0x0004
static	int	flc_debug = DENT|DERR|DIO;

#include <sys/thread.h>
static	int	flc_malloc_intr = 0;
#endif	/* FLC_DEBUG */

static	int	flc_kstat = 1;

static struct flc_obj *fc_create(struct flc_objops *fcopsp);
static int fc_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp,
    void *lkarg);
static int fc_free(struct flc_obj *flcobjp);
static int fc_start_kstat(opaque_t queuep, char *devtype, int instance);
static int fc_stop_kstat(opaque_t queuep);

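/*
 * fc_create() allocates the flc_obj header and its fc_data soft state
 * in a single kmem_zalloc(); fcdp points just past the header, so one
 * kmem_free() of the combined size releases both (see fc_free()).
 */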
static struct flc_obj *
fc_create(struct flc_objops *fcopsp)
{
	struct	flc_obj *flcobjp;
	struct	fc_data *fcdp;

	flcobjp = kmem_zalloc((sizeof (*flcobjp) + sizeof (*fcdp)), KM_NOSLEEP);
	if (!flcobjp)
		return (NULL);

	fcdp = (struct fc_data *)(flcobjp+1);
	flcobjp->flc_data = (opaque_t)fcdp;
	flcobjp->flc_ops  = fcopsp;

	return ((opaque_t)flcobjp);
}

static int dmult_maxcnt = DMULT_MAXCNT;

static int
fc_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp, void *lkarg)
{
	struct fc_data *fcdp = (struct fc_data *)queuep;

	mutex_init(&fcdp->ds_mutex, NULL, MUTEX_DRIVER, lkarg);

	fcdp->ds_queobjp   = que_objp;
	fcdp->ds_tgcomobjp = tgcom_objp;
	fcdp->ds_waitcnt   = dmult_maxcnt;

	QUE_INIT(que_objp, lkarg);
	TGCOM_INIT(tgcom_objp);
	return (DDI_SUCCESS);
}

static int
fc_free(struct flc_obj *flcobjp)
{
	struct fc_data *fcdp;

	fcdp = (struct fc_data *)flcobjp->flc_data;
	if (fcdp->ds_queobjp)
		QUE_FREE(fcdp->ds_queobjp);
	if (fcdp->ds_tgcomobjp) {
		TGCOM_FREE(fcdp->ds_tgcomobjp);
		mutex_destroy(&fcdp->ds_mutex);
	}
	kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
	return (0);
}

/*ARGSUSED*/
static int
fc_start_kstat(opaque_t queuep, char *devtype, int instance)
{
	struct fc_data *fcdp = (struct fc_data *)queuep;
	if (!flc_kstat)
		return (0);

	if (!fcdp->ds_kstat) {
		if (fcdp->ds_kstat = kstat_create("cmdk", instance, NULL,
		    "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT)) {
			kstat_install(fcdp->ds_kstat);
		}
	}
	return (0);
}

static int
fc_stop_kstat(opaque_t queuep)
{
	struct fc_data *fcdp = (struct fc_data *)queuep;

	if (fcdp->ds_kstat) {
		kstat_delete(fcdp->ds_kstat);
		fcdp->ds_kstat = NULL;
	}
	return (0);
}


/*
 *	Single Command per Device
 */
/*
 * Local Function Prototypes
 */
static int dsngl_restart(struct fc_data *dsnglp);

static int dsngl_enque(opaque_t, struct buf *);
static int dsngl_deque(opaque_t, struct buf *);

struct flc_objops dsngl_ops = {
	fc_init,
	fc_free,
	dsngl_enque,
	dsngl_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};

struct flc_obj *
dsngl_create()
{
	return (fc_create((struct flc_objops *)&dsngl_ops));
}

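/*
 * dsngl_enque(): single-command flow control.  If a request is already
 * outstanding (ds_outcnt) or held back (ds_bp), the new buf just goes
 * onto the queue object and waits.  Otherwise we try to build a packet
 * with TGCOM_PKT(); on resource failure the buf is parked in ds_bp and
 * dsngl_restart() is called back later to retry.
 */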
static int
dsngl_enque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dsnglp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;

	que_objp   = dsnglp->ds_queobjp;
	tgcom_objp = dsnglp->ds_tgcomobjp;

	if (!in_bp)
		return (0);
	mutex_enter(&dsnglp->ds_mutex);
	if (dsnglp->ds_bp || dsnglp->ds_outcnt) {
		QUE_ADD(que_objp, in_bp);
		if (dsnglp->ds_kstat) {
			kstat_waitq_enter(KSTAT_IO_PTR(dsnglp->ds_kstat));
		}
		mutex_exit(&dsnglp->ds_mutex);
		return (0);
	}
	if (dsnglp->ds_kstat) {
		kstat_waitq_enter(KSTAT_IO_PTR(dsnglp->ds_kstat));
	}
	if (TGCOM_PKT(tgcom_objp, in_bp, dsngl_restart,
		(caddr_t)dsnglp) != DDI_SUCCESS) {

		dsnglp->ds_bp = in_bp;
		mutex_exit(&dsnglp->ds_mutex);
		return (0);
	}
	dsnglp->ds_outcnt++;
	if (dsnglp->ds_kstat)
		kstat_waitq_to_runq(KSTAT_IO_PTR(dsnglp->ds_kstat));
	mutex_exit(&dsnglp->ds_mutex);
	TGCOM_TRANSPORT(tgcom_objp, in_bp);
	return (0);
}

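/*
 * dsngl_deque(): called with the completed buf (or NULL via
 * dsngl_restart()).  After updating the I/O kstats it keeps pulling
 * the next request from the queue and transporting it.  The
 * mutex_tryenter() at the bottom of the loop lets a concurrent thread
 * that already holds ds_mutex take over the scan instead of blocking
 * here.
 */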
static int
dsngl_deque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dsnglp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;
	struct	 buf *bp;

	que_objp   = dsnglp->ds_queobjp;
	tgcom_objp = dsnglp->ds_tgcomobjp;

	mutex_enter(&dsnglp->ds_mutex);
	if (in_bp) {
		dsnglp->ds_outcnt--;
		if (dsnglp->ds_kstat) {
			if (in_bp->b_flags & B_READ) {
				KSTAT_IO_PTR(dsnglp->ds_kstat)->reads++;
				KSTAT_IO_PTR(dsnglp->ds_kstat)->nread +=
				    (in_bp->b_bcount - in_bp->b_resid);
			} else {
				KSTAT_IO_PTR(dsnglp->ds_kstat)->writes++;
				KSTAT_IO_PTR(dsnglp->ds_kstat)->nwritten +=
				    (in_bp->b_bcount - in_bp->b_resid);
			}
			kstat_runq_exit(KSTAT_IO_PTR(dsnglp->ds_kstat));
		}
	}
	for (;;) {
		if (!dsnglp->ds_bp)
			dsnglp->ds_bp = QUE_DEL(que_objp);
		if (!dsnglp->ds_bp ||
		    (TGCOM_PKT(tgcom_objp, dsnglp->ds_bp, dsngl_restart,
		    (caddr_t)dsnglp) != DDI_SUCCESS) ||
		    dsnglp->ds_outcnt) {
			mutex_exit(&dsnglp->ds_mutex);
			return (0);
		}
		dsnglp->ds_outcnt++;
		bp = dsnglp->ds_bp;
		dsnglp->ds_bp = QUE_DEL(que_objp);
		if (dsnglp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(dsnglp->ds_kstat));
		mutex_exit(&dsnglp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		if (!mutex_tryenter(&dsnglp->ds_mutex))
			return (0);
	}
}

static int
dsngl_restart(struct fc_data *dsnglp)
{
	(void) dsngl_deque(dsnglp, NULL);
	return (-1);
}


/*
 *	Multiple Commands per Device
 */
/*
 * Local Function Prototypes
 */
static int dmult_restart(struct fc_data *dmultp);

static int dmult_enque(opaque_t, struct buf *);
static int dmult_deque(opaque_t, struct buf *);

struct flc_objops dmult_ops = {
	fc_init,
	fc_free,
	dmult_enque,
	dmult_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};

struct flc_obj *
dmult_create()
{
	return (fc_create((struct flc_objops *)&dmult_ops));
}


/*
 * Some of the object management functions, QUE_ADD() and QUE_DEL(),
 * do not acquire locks.
 * They depend on dmult_enque() and dmult_deque() to do all locking.
 * If this changes we have to grab locks in qmerge_add() and qmerge_del().
 */
static int
dmult_enque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dmultp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;

	que_objp   = dmultp->ds_queobjp;
	tgcom_objp = dmultp->ds_tgcomobjp;

	if (!in_bp)
		return (0);
	mutex_enter(&dmultp->ds_mutex);
	if ((dmultp->ds_outcnt >= dmultp->ds_waitcnt) || dmultp->ds_bp) {
		QUE_ADD(que_objp, in_bp);
		if (dmultp->ds_kstat) {
			kstat_waitq_enter(KSTAT_IO_PTR(dmultp->ds_kstat));
		}
		mutex_exit(&dmultp->ds_mutex);
		return (0);
	}
	if (dmultp->ds_kstat) {
		kstat_waitq_enter(KSTAT_IO_PTR(dmultp->ds_kstat));
	}

	if (TGCOM_PKT(tgcom_objp, in_bp, dmult_restart,
		(caddr_t)dmultp) != DDI_SUCCESS) {

		dmultp->ds_bp = in_bp;
		mutex_exit(&dmultp->ds_mutex);
		return (0);
	}
	dmultp->ds_outcnt++;
	if (dmultp->ds_kstat)
		kstat_waitq_to_runq(KSTAT_IO_PTR(dmultp->ds_kstat));
	mutex_exit(&dmultp->ds_mutex);

	TGCOM_TRANSPORT(tgcom_objp, in_bp);
	return (0);
}

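/*
 * dmult_deque(): like dsngl_deque(), but up to ds_waitcnt
 * (DMULT_MAXCNT) commands may be outstanding at once; the issue loop
 * stops when that ceiling is reached, when the queue drains, or when
 * TGCOM_PKT() runs out of resources.
 */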
static int
dmult_deque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dmultp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;
	struct	 buf *bp;

	que_objp = dmultp->ds_queobjp;
	tgcom_objp = dmultp->ds_tgcomobjp;

	mutex_enter(&dmultp->ds_mutex);
	if (in_bp) {
		dmultp->ds_outcnt--;
		if (dmultp->ds_kstat) {
			if (in_bp->b_flags & B_READ) {
				KSTAT_IO_PTR(dmultp->ds_kstat)->reads++;
				KSTAT_IO_PTR(dmultp->ds_kstat)->nread +=
				    (in_bp->b_bcount - in_bp->b_resid);
			} else {
				KSTAT_IO_PTR(dmultp->ds_kstat)->writes++;
				KSTAT_IO_PTR(dmultp->ds_kstat)->nwritten +=
				    (in_bp->b_bcount - in_bp->b_resid);
			}
			kstat_runq_exit(KSTAT_IO_PTR(dmultp->ds_kstat));
		}
	}

	for (;;) {

#ifdef	FLC_DEBUG
		if ((curthread->t_intr) && (!dmultp->ds_bp) &&
		    (!dmultp->ds_outcnt))
			flc_malloc_intr++;
#endif

		if (!dmultp->ds_bp)
			dmultp->ds_bp = QUE_DEL(que_objp);
		if (!dmultp->ds_bp ||
		    (TGCOM_PKT(tgcom_objp, dmultp->ds_bp, dmult_restart,
		    (caddr_t)dmultp) != DDI_SUCCESS) ||
		    (dmultp->ds_outcnt >= dmultp->ds_waitcnt)) {
			mutex_exit(&dmultp->ds_mutex);
			return (0);
		}
		dmultp->ds_outcnt++;
		bp = dmultp->ds_bp;
		dmultp->ds_bp = QUE_DEL(que_objp);

		if (dmultp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(dmultp->ds_kstat));

		mutex_exit(&dmultp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		if (!mutex_tryenter(&dmultp->ds_mutex))
			return (0);
	}
}

static int
dmult_restart(struct fc_data *dmultp)
{
	(void) dmult_deque(dmultp, NULL);
	return (-1);
}

/*
 *	Duplexed Commands per Device: Read Queue and Write Queue
 */
/*
 * Local Function Prototypes
 */
static int duplx_restart(struct duplx_data *duplxp);

static int duplx_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp,
    void *lkarg);
static int duplx_free(struct flc_obj *flcobjp);
static int duplx_enque(opaque_t queuep, struct buf *bp);
static int duplx_deque(opaque_t queuep, struct buf *bp);

struct flc_objops duplx_ops = {
	duplx_init,
	duplx_free,
	duplx_enque,
	duplx_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};

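/*
 * The duplex flavor keeps two queues: ds_readq holds the caller's
 * queue object for reads, and a private FIFO (created here) holds
 * writes.  duplx_init() links the two fc_que structures to each other
 * so the issue loops can round-robin between them.
 */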
struct flc_obj *
duplx_create()
{
	struct	flc_obj *flcobjp;
	struct	duplx_data *fcdp;

	flcobjp = kmem_zalloc((sizeof (*flcobjp) + sizeof (*fcdp)), KM_NOSLEEP);
	if (!flcobjp)
		return (NULL);

	fcdp = (struct duplx_data *)(flcobjp+1);
	flcobjp->flc_data = (opaque_t)fcdp;
	flcobjp->flc_ops  = &duplx_ops;

	fcdp->ds_writeq.fc_qobjp = qfifo_create();
	if (!fcdp->ds_writeq.fc_qobjp) {
		kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
		return (NULL);
	}
	return (flcobjp);
}

static int
duplx_free(struct flc_obj *flcobjp)
{
	struct duplx_data *fcdp;

	fcdp = (struct duplx_data *)flcobjp->flc_data;
	if (fcdp->ds_writeq.fc_qobjp) {
		QUE_FREE(fcdp->ds_writeq.fc_qobjp);
	}
	if (fcdp->ds_readq.fc_qobjp)
		QUE_FREE(fcdp->ds_readq.fc_qobjp);
	if (fcdp->ds_tgcomobjp) {
		TGCOM_FREE(fcdp->ds_tgcomobjp);
		mutex_destroy(&fcdp->ds_mutex);
	}
	kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
	return (0);
}

static int
duplx_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp, void *lkarg)
{
	struct duplx_data *fcdp = (struct duplx_data *)queuep;
	fcdp->ds_tgcomobjp = tgcom_objp;
	fcdp->ds_readq.fc_qobjp = que_objp;

	QUE_INIT(que_objp, lkarg);
	QUE_INIT(fcdp->ds_writeq.fc_qobjp, lkarg);
	TGCOM_INIT(tgcom_objp);

	mutex_init(&fcdp->ds_mutex, NULL, MUTEX_DRIVER, lkarg);

	fcdp->ds_writeq.fc_maxcnt = DUPLX_MAXCNT;
	fcdp->ds_readq.fc_maxcnt  = DUPLX_MAXCNT;

	/* queues point to each other for round robin */
	fcdp->ds_readq.next = &fcdp->ds_writeq;
	fcdp->ds_writeq.next = &fcdp->ds_readq;

	return (DDI_SUCCESS);
}

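/*
 * duplx_enque(): queue the new buf on the read or write side, then
 * issue from the current queue until it empties, blocks on resources,
 * or hits fc_maxcnt; each trip through the loop switches sides so
 * neither reads nor writes can starve the other.
 */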
static int
duplx_enque(opaque_t queuep, struct buf *in_bp)
{
	struct duplx_data *duplxp = (struct duplx_data *)queuep;
	opaque_t tgcom_objp;
	struct fc_que *activeq;
	struct buf *bp;

	mutex_enter(&duplxp->ds_mutex);
	if (in_bp) {
		if (duplxp->ds_kstat) {
			kstat_waitq_enter(KSTAT_IO_PTR(duplxp->ds_kstat));
		}
		if (in_bp->b_flags & B_READ)
			activeq = &duplxp->ds_readq;
		else
			activeq = &duplxp->ds_writeq;

		QUE_ADD(activeq->fc_qobjp, in_bp);
	} else {
		activeq = &duplxp->ds_readq;
	}

	tgcom_objp = duplxp->ds_tgcomobjp;

	for (;;) {
		if (!activeq->fc_bp)
			activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
		if (!activeq->fc_bp ||
		    (TGCOM_PKT(tgcom_objp, activeq->fc_bp, duplx_restart,
		    (caddr_t)duplxp) != DDI_SUCCESS) ||
		    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {

			/* switch read/write queues */
			activeq = activeq->next;
			if (!activeq->fc_bp)
				activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
			if (!activeq->fc_bp ||
			    (TGCOM_PKT(tgcom_objp, activeq->fc_bp,
			    duplx_restart, (caddr_t)duplxp) != DDI_SUCCESS) ||
			    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
				mutex_exit(&duplxp->ds_mutex);
				return (0);
			}
		}

		activeq->fc_outcnt++;
		bp = activeq->fc_bp;
		activeq->fc_bp = NULL;

		if (duplxp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(duplxp->ds_kstat));
		mutex_exit(&duplxp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		if (!mutex_tryenter(&duplxp->ds_mutex))
			return (0);

		activeq = activeq->next;
	}
}

static int
duplx_deque(opaque_t queuep, struct buf *in_bp)
{
	struct duplx_data *duplxp = (struct duplx_data *)queuep;
	opaque_t tgcom_objp;
	struct fc_que *activeq;
	struct buf *bp;

	mutex_enter(&duplxp->ds_mutex);

	tgcom_objp = duplxp->ds_tgcomobjp;

	if (in_bp->b_flags & B_READ)
		activeq = &duplxp->ds_readq;
	else
		activeq = &duplxp->ds_writeq;
	activeq->fc_outcnt--;

	if (duplxp->ds_kstat) {
		if (in_bp->b_flags & B_READ) {
			KSTAT_IO_PTR(duplxp->ds_kstat)->reads++;
			KSTAT_IO_PTR(duplxp->ds_kstat)->nread +=
			    (in_bp->b_bcount - in_bp->b_resid);
		} else {
			KSTAT_IO_PTR(duplxp->ds_kstat)->writes++;
			KSTAT_IO_PTR(duplxp->ds_kstat)->nwritten +=
			    (in_bp->b_bcount - in_bp->b_resid);
		}
		kstat_runq_exit(KSTAT_IO_PTR(duplxp->ds_kstat));
	}

	for (;;) {

		/* if needed, try to pull request off a queue */
		if (!activeq->fc_bp)
			activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);

		if (!activeq->fc_bp ||
		    (TGCOM_PKT(tgcom_objp, activeq->fc_bp, duplx_restart,
		    (caddr_t)duplxp) != DDI_SUCCESS) ||
		    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {

			activeq = activeq->next;
			if (!activeq->fc_bp)
				activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);

			if (!activeq->fc_bp ||
			    (TGCOM_PKT(tgcom_objp, activeq->fc_bp,
			    duplx_restart, (caddr_t)duplxp) != DDI_SUCCESS) ||
			    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
				mutex_exit(&duplxp->ds_mutex);
				return (0);
			}
		}

		activeq->fc_outcnt++;
		bp = activeq->fc_bp;
		activeq->fc_bp = NULL;

		if (duplxp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(duplxp->ds_kstat));

		mutex_exit(&duplxp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		if (!mutex_tryenter(&duplxp->ds_mutex))
			return (0);

		activeq = activeq->next;
	}
}

static int
duplx_restart(struct duplx_data *duplxp)
{
	(void) duplx_enque(duplxp, NULL);
	return (-1);
}

/*
 *	Tagged queueing flow control
 */
/*
 * Local Function Prototypes
 */

struct flc_objops adapt_ops = {
	fc_init,
	fc_free,
	dmult_enque,
	dmult_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};

struct flc_obj *
adapt_create()
{
	return (fc_create((struct flc_objops *)&adapt_ops));
}

/*
 *	Common Queue functions
 */

/*
 * Local static data
 */
#ifdef	Q_DEBUG
#define	DENT	0x0001
#define	DERR	0x0002
#define	DIO	0x0004
static	int	que_debug = DENT|DERR|DIO;

#endif	/* Q_DEBUG */
/*
 * Local Function Prototypes
 */
static struct que_obj *que_create(struct que_objops *qopsp);
static int que_init(struct que_data *qfp, void *lkarg);
static int que_free(struct que_obj *queobjp);
static struct buf *que_del(struct que_data *qfp);

static struct que_obj *
que_create(struct que_objops *qopsp)
{
	struct	que_data *qfp;
	struct	que_obj *queobjp;

	queobjp = kmem_zalloc((sizeof (*queobjp) + sizeof (*qfp)), KM_NOSLEEP);
	if (!queobjp)
		return (NULL);

	queobjp->que_ops = qopsp;
	qfp = (struct que_data *)(queobjp+1);
	queobjp->que_data = (opaque_t)qfp;

	return ((opaque_t)queobjp);
}

static int
que_init(struct que_data *qfp, void *lkarg)
{
	mutex_init(&qfp->q_mutex, NULL, MUTEX_DRIVER, lkarg);
	return (DDI_SUCCESS);
}

static int
que_free(struct que_obj *queobjp)
{
	struct	que_data *qfp;

	qfp = (struct que_data *)queobjp->que_data;
	mutex_destroy(&qfp->q_mutex);
	kmem_free(queobjp, (sizeof (*queobjp) + sizeof (struct que_data)));
	return (0);
}

static struct buf *
que_del(struct que_data *qfp)
{
	struct buf *bp;

	bp = qfp->q_tab.b_actf;
	if (bp) {
		qfp->q_tab.b_actf = bp->av_forw;
		if (!qfp->q_tab.b_actf)
			qfp->q_tab.b_actl = NULL;
		bp->av_forw = 0;
	}
	return (bp);
}



/*
 *	Qmerge
 * Local Function Prototypes
 */
static int qmerge_add(struct que_data *qfp, struct buf *bp);
static int qmerge_free(struct que_obj *queobjp);
static struct buf *qmerge_del(struct que_data *qfp);

struct que_objops qmerge_ops = {
	que_init,
	qmerge_free,
	qmerge_add,
	qmerge_del,
	0, 0
};

/* fields in diskhd */
#define	hd_cnt			b_back
#define	hd_private		b_forw
#define	hd_flags		b_flags
#define	hd_sync_next		av_forw
#define	hd_async_next		av_back

#define	hd_sync2async		sync_async_ratio

#define	QNEAR_FORWARD		0x01
#define	QNEAR_BACKWARD		0x02
#define	QNEAR_ASYNCONLY		0x04
#define	QNEAR_ASYNCALSO		0x08

#define	DBLK(bp) ((unsigned long)(bp)->b_private)

#define	BP_LT_BP(a, b) (DBLK(a) < DBLK(b))
#define	BP_GT_BP(a, b) (DBLK(a) > DBLK(b))
#define	BP_LT_HD(a, b) (DBLK(a) < (unsigned long)((b)->hd_private))
#define	BP_GT_HD(a, b) (DBLK(a) > (unsigned long)((b)->hd_private))
#define	QNEAR_ASYNC	(QNEAR_ASYNCONLY|QNEAR_ASYNCALSO)

#define	SYNC2ASYNC(a) ((a)->q_tab.hd_cnt)

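/*
 * DBLK() reads the request's starting disk block number, which the
 * caller stashes in b_private; all of the sorting and merging
 * decisions below compare these block numbers.  hd_private tracks the
 * current "head position" of the elevator, and hd_cnt the remaining
 * sync-queue sweeps before the async queue must be serviced (see
 * SYNC2ASYNC()).
 */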

/*
 * qmerge implements a two-priority queue: the low-priority queue holds
 * ASYNC write requests and the rest are queued on the high-priority
 * sync queue.  Requests on the async queue are merged when possible.
 * By default qmerge2wayscan is 1, selecting an elevator algorithm.
 * Setting this variable to zero has the following side effects:
 * 1. Fairness is assumed to be the number one issue.
 * 2. The next request to be picked indicates the current head position.
 *
 * qmerge_sync2async is the ratio of scans of the high-priority
 * sync queue to scans of the low-priority async queue.
 *
 * With the following settings qmerge degenerates to qsort:
 *
 * qmerge1pri = 1, qmerge2wayscan = 0, qmerge_max_merge = 0
 */
static int	qmerge_max_merge = 128 * 1024;
static intptr_t	qmerge_sync2async = 4;
static int	qmerge2wayscan = 1;
static int	qmerge1pri = 0;
static int	qmerge_merge = 0;

/*
 * Local static data
 */
struct que_obj *
qmerge_create()
{
	struct que_data *qfp;
	struct que_obj *queobjp;

	queobjp = kmem_zalloc((sizeof (*queobjp) + sizeof (*qfp)), KM_NOSLEEP);
	if (!queobjp)
		return (NULL);

	queobjp->que_ops = &qmerge_ops;
	qfp = (struct que_data *)(queobjp+1);
	qfp->q_tab.hd_private = 0;
	qfp->q_tab.hd_sync_next = qfp->q_tab.hd_async_next = NULL;
	qfp->q_tab.hd_cnt = (void *)qmerge_sync2async;
	queobjp->que_data = (opaque_t)qfp;

	return ((opaque_t)queobjp);
}

static int
qmerge_free(struct que_obj *queobjp)
{
	struct	que_data *qfp;

	qfp = (struct que_data *)queobjp->que_data;
	mutex_destroy(&qfp->q_mutex);
	kmem_free(queobjp, (sizeof (*queobjp) + sizeof (*qfp)));
	return (0);
}

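/*
 * Two bufs may be merged only if both are unmapped, page-aligned
 * async page-I/O writes (B_PAGEIO|B_ASYNC|B_WRITE set, B_REMAPPED
 * clear, no kernel address), the combined size stays within
 * qmerge_max_merge, and they are adjacent on disk in either order.
 */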
static int
qmerge_can_merge(struct buf *bp1, struct buf *bp2)
{
	const int paw_flags = B_PAGEIO | B_ASYNC | B_WRITE;

	if ((bp1->b_un.b_addr != 0) || (bp2->b_un.b_addr != 0) ||
	    ((bp1->b_flags & (paw_flags | B_REMAPPED)) != paw_flags) ||
	    ((bp2->b_flags & (paw_flags | B_REMAPPED)) != paw_flags) ||
	    (bp1->b_bcount & PAGEOFFSET) || (bp2->b_bcount & PAGEOFFSET) ||
	    (bp1->b_bcount + bp2->b_bcount > qmerge_max_merge))
		return (0);

	if ((DBLK(bp2) + bp2->b_bcount / DEV_BSIZE == DBLK(bp1)) ||
	    (DBLK(bp1) + bp1->b_bcount / DEV_BSIZE == DBLK(bp2)))
		return (1);
	else
		return (0);
}

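/*
 * qmerge_mergesetup() grows bp_merge by bp: it splices bp's page list
 * into bp_merge's circular page list and links bp onto the ring of
 * constituent bufs hanging off bp_merge->b_forw.  If bp precedes
 * bp_merge on disk, bp also becomes the new front (b_forw, b_pages
 * and b_private all move to bp).
 */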
static void
qmerge_mergesetup(struct buf *bp_merge, struct buf *bp)
{
	struct	buf *bp1;
	struct	page *pp, *pp_merge, *pp_merge_prev;
	int	forward;

	qmerge_merge++;
	forward = DBLK(bp_merge) < DBLK(bp);

	bp_merge->b_bcount += bp->b_bcount;

	pp = bp->b_pages;
	pp_merge = bp_merge->b_pages;

	pp_merge_prev = pp_merge->p_prev;

	pp_merge->p_prev->p_next = pp;
	pp_merge->p_prev = pp->p_prev;
	pp->p_prev->p_next = pp_merge;
	pp->p_prev = pp_merge_prev;

	bp1 = bp_merge->b_forw;

	bp1->av_back->av_forw = bp;
	bp->av_back = bp1->av_back;
	bp1->av_back = bp;
	bp->av_forw = bp1;

	if (!forward) {
		bp_merge->b_forw = bp;
		bp_merge->b_pages = pp;
		bp_merge->b_private = bp->b_private;
	}
}

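/*
 * que_insert() places bp on the sync or async list, each kept as a
 * circular doubly-linked ring sorted by ascending block number, with
 * *bpp pointing at the next buf the scan will take.  The walk below
 * starts at *bpp and goes backward or forward depending on whether bp
 * sorts below or above it, tracking the lowest/highest entry in case
 * bp becomes the new end of the ring.
 */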
static void
que_insert(struct que_data *qfp, struct buf *bp)
{
	struct buf	*bp1, *bp_start, *lowest_bp, *highest_bp;
	uintptr_t	highest_blk, lowest_blk;
	struct buf	**async_bpp, **sync_bpp, **bpp;
	struct diskhd	*dp = &qfp->q_tab;

	sync_bpp = &dp->hd_sync_next;
	async_bpp = &dp->hd_async_next;
	/*
	 * The ioctl used by the format utility requires that bp->av_back be
	 * preserved.
	 */
	if (bp->av_back)
		bp->b_error = (intptr_t)bp->av_back;
	if (!qmerge1pri &&
	    ((bp->b_flags & (B_ASYNC|B_READ|B_FREE)) == B_ASYNC)) {
		bpp = &dp->hd_async_next;
	} else {
		bpp = &dp->hd_sync_next;
	}


	if ((bp1 = *bpp) == NULL) {
		*bpp = bp;
		bp->av_forw = bp->av_back = bp;
		if ((bpp == async_bpp) && (*sync_bpp == NULL)) {
			dp->hd_flags |= QNEAR_ASYNCONLY;
		} else if (bpp == sync_bpp) {
			dp->hd_flags &= ~QNEAR_ASYNCONLY;
			if (*async_bpp) {
				dp->hd_flags |= QNEAR_ASYNCALSO;
			}
		}
		return;
	}
	bp_start = bp1;
	if (DBLK(bp) < DBLK(bp1)) {
		lowest_blk = DBLK(bp1);
		lowest_bp = bp1;
		do {
			if (DBLK(bp) > DBLK(bp1)) {
				bp->av_forw = bp1->av_forw;
				bp1->av_forw->av_back = bp;
				bp1->av_forw = bp;
				bp->av_back = bp1;

				if (((bpp == async_bpp) &&
				    (dp->hd_flags & QNEAR_ASYNC)) ||
				    (bpp == sync_bpp)) {
					if (!(dp->hd_flags & QNEAR_BACKWARD) &&
					    BP_GT_HD(bp, dp)) {
						*bpp = bp;
					}
				}
				return;
			} else if (DBLK(bp1) < lowest_blk) {
				lowest_bp = bp1;
				lowest_blk = DBLK(bp1);
			}
		} while ((DBLK(bp1->av_back) < DBLK(bp1)) &&
		    ((bp1 = bp1->av_back) != bp_start));
		bp->av_forw = lowest_bp;
		lowest_bp->av_back->av_forw = bp;
		bp->av_back = lowest_bp->av_back;
		lowest_bp->av_back = bp;
		if ((bpp == async_bpp) && !(dp->hd_flags & QNEAR_ASYNC)) {
			*bpp = bp;
		} else if (!(dp->hd_flags & QNEAR_BACKWARD) &&
		    BP_GT_HD(bp, dp)) {
			*bpp = bp;
		}
	} else {
		highest_blk = DBLK(bp1);
		highest_bp = bp1;
		do {
			if (DBLK(bp) < DBLK(bp1)) {
				bp->av_forw = bp1;
				bp1->av_back->av_forw = bp;
				bp->av_back = bp1->av_back;
				bp1->av_back = bp;
				if (((bpp == async_bpp) &&
				    (dp->hd_flags & QNEAR_ASYNC)) ||
				    (bpp == sync_bpp)) {
					if ((dp->hd_flags & QNEAR_BACKWARD) &&
					    BP_LT_HD(bp, dp)) {
						*bpp = bp;
					}
				}
				return;
			} else if (DBLK(bp1) > highest_blk) {
				highest_bp = bp1;
				highest_blk = DBLK(bp1);
			}
		} while ((DBLK(bp1->av_forw) > DBLK(bp1)) &&
		    ((bp1 = bp1->av_forw) != bp_start));
		bp->av_back = highest_bp;
		highest_bp->av_forw->av_back = bp;
		bp->av_forw = highest_bp->av_forw;
		highest_bp->av_forw = bp;

		if (((bpp == sync_bpp) ||
		    ((bpp == async_bpp) && (dp->hd_flags & QNEAR_ASYNC))) &&
		    (dp->hd_flags & QNEAR_BACKWARD) && (BP_LT_HD(bp, dp)))
			*bpp = bp;
	}
}

/*
 * dmult_enque() holds the dmultp->ds_mutex lock, so we don't grab
 * the lock here.  If dmult_enque() changes, this function will have
 * to be revisited.
 */
static int
qmerge_add(struct que_data *qfp, struct buf *bp)
{

	que_insert(qfp, bp);
	return (++qfp->q_cnt);
}

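/*
 * qmerge_iodone(): completion handler for a merged buf built by
 * qmerge_del().  It unwinds the merge: each constituent buf gets its
 * own page list back, inherits any error, and is biodone()'d; the
 * temporary merge buf itself is then freed.
 */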
static int
qmerge_iodone(struct buf *bp)
{
	struct buf *bp1;
	struct	page *pp, *pp1, *tmp_pp;

	if (bp->b_flags & B_REMAPPED)
		bp_mapout(bp);

	bp1 = bp->b_forw;
	do {
		bp->b_forw = bp1->av_forw;
		bp1->av_forw->av_back = bp1->av_back;
		bp1->av_back->av_forw = bp1->av_forw;
		pp = (page_t *)bp1->b_pages;
		pp1 = bp->b_forw->b_pages;

		tmp_pp = pp->p_prev;
		pp->p_prev = pp1->p_prev;
		pp->p_prev->p_next = pp;

		pp1->p_prev = tmp_pp;
		pp1->p_prev->p_next = pp1;

		if (bp->b_flags & B_ERROR) {
			bp1->b_error = bp->b_error;
			bp1->b_flags |= B_ERROR;
		}

		biodone(bp1);
	} while ((bp1 = bp->b_forw) != bp->b_forw->av_forw);

	biodone(bp1);
	kmem_free(bp, sizeof (*bp));
	return (0);
}


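/*
 * qmerge_nextbp() is the elevator proper.  Driven by hd_flags it scans
 * the async ring only (QNEAR_ASYNCONLY), both rings (QNEAR_ASYNCALSO),
 * or the sync ring, moving forward until the block numbers wrap and
 * then, with qmerge2wayscan set, backward (QNEAR_BACKWARD).  When
 * bp_merge is non-NULL only a buf mergeable with it is returned, else
 * NULL.  "private" records the head position for the next call, and
 * *can_merge reports whether the returned buf is a write that is
 * disk-adjacent to its neighbor.
 */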
static struct buf *
qmerge_nextbp(struct que_data *qfp, struct buf *bp_merge, int *can_merge)
{
	intptr_t	private, cnt;
	int		flags;
	struct		buf *sync_bp, *async_bp, *bp;
	struct		buf **sync_bpp, **async_bpp, **bpp;
	struct		diskhd *dp = &qfp->q_tab;

	if (qfp->q_cnt == 0) {
		return (NULL);
	}
	flags = qfp->q_tab.hd_flags;
	sync_bpp = &qfp->q_tab.hd_sync_next;
	async_bpp = &qfp->q_tab.hd_async_next;

begin_nextbp:
	if (flags & QNEAR_ASYNCONLY) {
		bp = *async_bpp;
		private = DBLK(bp);
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			bp->av_forw = bp->av_back = NULL;
			flags &= ~(QNEAR_ASYNCONLY | QNEAR_BACKWARD);
			private = 0;
		} else if (flags & QNEAR_BACKWARD) {
			if (DBLK(bp) < DBLK(bp->av_back)) {
				flags &= ~QNEAR_BACKWARD;
				private = 0;
			}
		} else if (DBLK(bp) > DBLK(bp->av_forw)) {
			if (qmerge2wayscan) {
				flags |= QNEAR_BACKWARD;
			} else {
				private = 0;
			}
		} else if (qmerge2wayscan == 0) {
			private = DBLK(bp->av_forw);
		}
		bpp = async_bpp;

	} else if (flags & QNEAR_ASYNCALSO) {
		sync_bp = *sync_bpp;
		async_bp = *async_bpp;
		if (flags & QNEAR_BACKWARD) {
			if (BP_GT_HD(sync_bp, dp) && BP_GT_HD(async_bp, dp)) {
				flags &= ~(QNEAR_BACKWARD|QNEAR_ASYNCALSO);
				*sync_bpp = sync_bp->av_forw;
				*async_bpp = async_bp->av_forw;
				SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
				qfp->q_tab.hd_private = 0;
				goto begin_nextbp;
			}
			if (BP_LT_HD(async_bp, dp) && BP_LT_HD(sync_bp, dp)) {
				if (BP_GT_BP(async_bp, sync_bp)) {
					bpp = async_bpp;
					bp = *async_bpp;
				} else {
					bpp = sync_bpp;
					bp = *sync_bpp;
				}
			} else if (BP_LT_HD(async_bp, dp)) {
				bpp = async_bpp;
				bp = *async_bpp;
			} else {
				bpp = sync_bpp;
				bp = *sync_bpp;
			}
		} else {
			if (BP_LT_HD(sync_bp, dp) && BP_LT_HD(async_bp, dp)) {
				if (qmerge2wayscan) {
					flags |= QNEAR_BACKWARD;
					*sync_bpp = sync_bp->av_back;
					*async_bpp = async_bp->av_back;
					goto begin_nextbp;
				} else {
					flags &= ~QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
					qfp->q_tab.hd_private = 0;
					goto begin_nextbp;
				}
			}
			if (BP_GT_HD(async_bp, dp) && BP_GT_HD(sync_bp, dp)) {
				if (BP_LT_BP(async_bp, sync_bp)) {
					bpp = async_bpp;
					bp = *async_bpp;
				} else {
					bpp = sync_bpp;
					bp = *sync_bpp;
				}
			} else if (BP_GT_HD(async_bp, dp)) {
				bpp = async_bpp;
				bp = *async_bpp;
			} else {
				bpp = sync_bpp;
				bp = *sync_bpp;
			}
		}
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			bp->av_forw = bp->av_back = NULL;
			flags &= ~QNEAR_ASYNCALSO;
			if (bpp == async_bpp) {
				SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
			} else {
				flags |= QNEAR_ASYNCONLY;
			}
		}
		private = DBLK(bp);
	} else {
		bp = *sync_bpp;
		private = DBLK(bp);
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			private = 0;
			SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
			bp->av_forw = bp->av_back = NULL;
			flags &= ~QNEAR_BACKWARD;
			if (*async_bpp)
				flags |= QNEAR_ASYNCONLY;
		} else if (flags & QNEAR_BACKWARD) {
			if (DBLK(bp) < DBLK(bp->av_back)) {
				flags &= ~QNEAR_BACKWARD;
				cnt = (intptr_t)SYNC2ASYNC(qfp);
				if (cnt > 0) {
					cnt--;
					SYNC2ASYNC(qfp) = (void *)cnt;
				} else {
					if (*async_bpp)
						flags |= QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
				}
				private = 0;
			}
		} else if (DBLK(bp) > DBLK(bp->av_forw)) {
			private = 0;
			if (qmerge2wayscan) {
				flags |= QNEAR_BACKWARD;
				private = DBLK(bp);
			} else {
				cnt = (intptr_t)SYNC2ASYNC(qfp);
				if (cnt > 0) {
					cnt--;
					SYNC2ASYNC(qfp) = (void *)cnt;
				} else {
					if (*async_bpp)
						flags |= QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
				}
			}
		} else if (qmerge2wayscan == 0) {
			private = DBLK(bp->av_forw);
		}
		bpp = sync_bpp;
	}

	if (bp->av_forw) {
		*can_merge = !(bp->b_flags & B_READ);
		if (flags & QNEAR_BACKWARD) {
			*bpp = bp->av_back;
			if ((DBLK(bp->av_back) +
			    bp->av_back->b_bcount / DEV_BSIZE) != DBLK(bp))
				*can_merge = 0;
		} else {
			*bpp = bp->av_forw;
			if ((DBLK(bp) + bp->b_bcount / DEV_BSIZE) !=
			    DBLK(bp->av_forw))
				*can_merge = 0;
		}
		bp->av_forw->av_back = bp->av_back;
		bp->av_back->av_forw = bp->av_forw;
		bp->av_forw = bp->av_back = NULL;
	} else {
		*bpp = NULL;
		*can_merge = 0;
	}
	qfp->q_tab.hd_private = (void *)private;
	qfp->q_cnt--;
	qfp->q_tab.hd_flags = flags;
	if (bp->b_error) {
		bp->av_back = (void *)(intptr_t)bp->b_error;
		bp->b_error = 0;
	}
	return (bp);
}

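/*
 * qmerge_del(): pull the next request, then, while the elevator keeps
 * returning mergeable neighbors, clone the first buf into a temporary
 * merge buf (b_iodone = qmerge_iodone) and fold the others into it.
 * If the clone cannot be allocated the first buf is returned unmerged.
 */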
static struct buf *
qmerge_del(struct que_data *qfp)
{
	struct	buf *bp, *next_bp, *bp_merge;
	int	alloc_mergebp, merge;

	if (qfp->q_cnt == 0) {
		return (NULL);
	}

	bp_merge = bp = qmerge_nextbp(qfp, NULL, &merge);
	alloc_mergebp = 1;
	while (merge && (next_bp = qmerge_nextbp(qfp, bp_merge, &merge))) {
		if (alloc_mergebp) {
			bp_merge = kmem_alloc(sizeof (*bp_merge), KM_NOSLEEP);
			if (bp_merge == NULL) {
				mutex_exit(&qfp->q_mutex);
				return (bp);
			}
			bcopy(bp, bp_merge, sizeof (*bp_merge));
			bp_merge->b_iodone = qmerge_iodone;
			bp_merge->b_forw = bp;
			bp_merge->b_back = (struct buf *)qfp;
			bp->av_forw = bp->av_back = bp;
			alloc_mergebp = 0;
		}
		qmerge_mergesetup(bp_merge, next_bp);
	}
	return (bp_merge);
}


/*
 *	FIFO Queue functions
 */
/*
 * Local Function Prototypes
 */
static int qfifo_add(struct que_data *qfp, struct buf *bp);

struct que_objops qfifo_ops = {
	que_init,
	que_free,
	qfifo_add,
	que_del,
	0, 0
};

/*
 * Local static data
 */
struct que_obj *
qfifo_create()
{
	return (que_create((struct que_objops *)&qfifo_ops));
}

static int
qfifo_add(struct que_data *qfp, struct buf *bp)
{

	if (!qfp->q_tab.b_actf)
		qfp->q_tab.b_actf = bp;
	else
		qfp->q_tab.b_actl->av_forw = bp;
	qfp->q_tab.b_actl = bp;
	bp->av_forw = NULL;
	return (0);
}

/*
 *	One-Way-Scan Queue functions
 */
/*
 * Local Function Prototypes
 */
static int qsort_add(struct que_data *qfp, struct buf *bp);
static struct buf *qsort_del(struct que_data *qfp);
static void oneway_scan_binary(struct diskhd *dp, struct buf *bp);

struct que_objops qsort_ops = {
	que_init,
	que_free,
	qsort_add,
	qsort_del,
	0, 0
};

/*
 * Local static data
 */
struct que_obj *
qsort_create()
{
	return (que_create((struct que_objops *)&qsort_ops));
}

static int
qsort_add(struct que_data *qfp, struct buf *bp)
{
	qfp->q_cnt++;
	oneway_scan_binary(&qfp->q_tab, bp);
	return (0);
}


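/*
 * One-way scan: b_actf holds the list currently being swept in
 * ascending block order; requests that sort below the head of that
 * sweep are parked on a second list (b_pasf, overlaid on b_forw) and
 * promoted wholesale once the active list drains (see qsort_del()).
 */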
#define	b_pasf	b_forw
#define	b_pasl	b_back
static void
oneway_scan_binary(struct diskhd *dp, struct buf *bp)
{
	struct buf *ap;

	ap = dp->b_actf;
	if (ap == NULL) {
		dp->b_actf = bp;
		bp->av_forw = NULL;
		return;
	}
	if (DBLK(bp) < DBLK(ap)) {
		ap = dp->b_pasf;
		if ((ap == NULL) || (DBLK(bp) < DBLK(ap))) {
			dp->b_pasf = bp;
			bp->av_forw = ap;
			return;
		}
	}
	while (ap->av_forw) {
		if (DBLK(bp) < DBLK(ap->av_forw))
			break;
		ap = ap->av_forw;
	}
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;
}

static struct buf *
qsort_del(struct que_data *qfp)
{
	struct buf *bp;

	if (qfp->q_cnt == 0) {
		return (NULL);
	}
	qfp->q_cnt--;
	bp = qfp->q_tab.b_actf;
	qfp->q_tab.b_actf = bp->av_forw;
	bp->av_forw = 0;
	if (!qfp->q_tab.b_actf && qfp->q_tab.b_pasf) {
		qfp->q_tab.b_actf = qfp->q_tab.b_pasf;
		qfp->q_tab.b_pasf = NULL;
	}
	return (bp);
}

/*
 *	Tagged queueing
 */
/*
 * Local Function Prototypes
 */

struct que_objops qtag_ops = {
	que_init,
	que_free,
	qsort_add,
	qsort_del,
	0, 0
};

/*
 * Local static data
 */
struct que_obj *
qtag_create()
{
	return (que_create((struct que_objops *)&qtag_ops));
}