/*	$NetBSD: ld_iop.c,v 1.41 2024/09/08 09:36:50 rillig Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.41 2024/09/08 09:36:50 rillig Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>

#include <sys/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>

#define	LD_IOP_TIMEOUT		30*1000

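/* Values for sc_flags. */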
#define	LD_IOP_CLAIMED		0x01
#define	LD_IOP_NEW_EVTMASK	0x02

struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_flags;
};

static void	ld_iop_adjqparam(device_t, int);
static void	ld_iop_attach(device_t, device_t, void *);
static int	ld_iop_detach(device_t, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *, bool);
static int	ld_iop_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);
static void	ld_iop_intr(device_t, struct iop_msg *, void *);
static void	ld_iop_intr_event(device_t, struct iop_msg *, void *);
static int	ld_iop_match(device_t, cfdata_t, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL_NEW(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

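/*
 * Detailed RBS error strings, indexed by the reply's detail field in
 * ld_iop_intr().
 */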
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};

static int
ld_iop_match(device_t parent, cfdata_t match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(device_t parent, device_t self, void *aux)
{
	struct iop_attach_args *ia = aux;
	struct ld_iop_softc *sc = device_private(self);
	struct iop_softc *iop = device_private(parent);
	struct ld_softc *ld = &sc->sc_ld;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __packed param;

	ld->sc_dv = self;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(self, "unable to register for events\n");
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_ioctl = ld_iop_ioctl;
	ld->sc_start = ld_iop_start;
	ld->sc_flags = LDF_MPSAFE;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags |= LDF_REMOVABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

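	/*
	 * The bases are given in microseconds; the effective timeout is
	 * the base scaled by the per-message time multiplier, which is
	 * set to 1 in the frames built below.
	 */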
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		aprint_error_dev(self, "device not yet supported\n");

	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;

	iop = device_private(device_parent(sc->sc_ld.sc_dv));

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		mutex_spin_exit(&iop->sc_intrlock);

		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);

		mutex_spin_enter(&iop->sc_intrlock);
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			cv_timedwait(&sc->sc_eventii.ii_cv,
			    &iop->sc_intrlock, hz * 5);
		mutex_spin_exit(&iop->sc_intrlock);
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(device_t self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = device_private(self);
	iop = device_private(device_parent(self));

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
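	/*
	 * Bits 0-15 carry the cache control flags; the value at bit 16 is
	 * the time multiplier, which scales the timeout bases programmed
	 * at attach time (1 = use them as-is).
	 */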
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
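	/* Dumps run after a panic, so poll for completion rather than sleep. */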
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld, bool poll)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));
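	/* Poll when the caller cannot sleep; otherwise wait for the reply. */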
	im = iop_msg_alloc(iop, poll ? IM_POLL : IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag, bool poll)
{
	int error;

	switch (cmd) {
	case DIOCCACHESYNC:
		error = ld_iop_flush(ld, poll);
		break;

	default:
		error = EPASSTHROUGH;
		break;
	}

	return error;
}

static void
ld_iop_intr(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
	const char *errstr;

	rb = reply;
	bp = im->im_dvcontext;
	sc = device_private(dv);
	iop = device_private(device_parent(dv));

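	/*
	 * A set FAIL bit means the IOP could not deliver the message at
	 * all; otherwise, a non-successful request status carries a
	 * detailed error code that indexes ld_iop_errors[].
	 */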
	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= __arraycount(ld_iop_errors))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		aprint_error_dev(dv, "error 0x%04x: %s\n", detail, errstr);
		err = 1;
	}

	if (err) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

static void
ld_iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = device_private(dv);

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		iop = device_private(device_parent(dv));
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		cv_broadcast(&sc->sc_eventii.ii_cv);
		mutex_spin_exit(&iop->sc_intrlock);
		return;
	}

	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

static void
ld_iop_adjqparam(device_t dv, int mpi)
{
	struct ld_iop_softc *sc = device_private(dv);
	struct iop_softc *iop = device_private(device_parent(dv));
	struct ld_softc *ld = &sc->sc_ld;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam(ld, mpi);
}
565ebf51109Sad }
566