/*	$NetBSD: ld_iop.c,v 1.13 2002/10/02 16:33:53 thorpej Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.13 2002/10/02 16:33:53 thorpej Exp $");

#include "opt_i2o.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>
#define	LD_IOP_TIMEOUT		(30*1000)	/* command timeout, in ms */

#define	LD_IOP_CLAIMED		0x01	/* we hold a claim on the target */
#define	LD_IOP_NEW_EVTMASK	0x02	/* event mask change acknowledged */

struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_flags;
};

static void	ld_iop_adjqparam(struct device *, int);
static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

#ifdef I2OVERBOSE
/*
 * Error strings, indexed by the detail status code in an RBS reply.
 */
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};
#endif

static int
ld_iop_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __attribute__ ((__packed__)) param;

	sc = (struct ld_iop_softc *)self;
	ld = &sc->sc_ld;
	iop = (struct iop_softc *)parent;
	ia = (struct iop_attach_args *)aux;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n", self->dv_xname);
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

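	/*
	 * The device info parameter group reports capacity in bytes;
	 * ld(4) wants the sector size and the number of sectors.
	 */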
	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVEABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVEABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		printf("%s: device not yet supported\n", self->dv_xname);

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;
	int s;

	iop = (struct iop_softc *)sc->sc_ld.sc_dv.dv_parent;

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);
		s = splbio();
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			tsleep(&sc->sc_eventii, PRIBIO, "ld_iopevt", hz * 5);
		splx(s);
#ifdef I2ODEBUG
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			printf("%s: didn't reply to event unregister\n",
			    sc->sc_ld.sc_dv.dv_xname);
#endif
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = (struct ld_iop_softc *)self;
	iop = (struct iop_softc *)self->dv_parent;

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
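	/*
	 * RBS requests address the media by 64-bit byte offset rather
	 * than by block number.
	 */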
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
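	/*
	 * The upper half of the flags word holds the request's time
	 * multiplier; a multiplier of 1 leaves the timeout at the
	 * timeout base configured at attach time.
	 */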
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
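	/*
	 * Dumps run with interrupts disabled, so poll for completion
	 * rather than sleeping.
	 */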
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
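	/* Flushes are infrequent, so sleep while waiting for the reply. */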
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* XXX Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
#ifdef I2OVERBOSE
	const char *errstr;
#endif

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)dv->dv_parent;

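	/*
	 * A set FAIL bit in the message flags indicates a transport-level
	 * failure; otherwise, check the request status in the reply.
	 */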
	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
#ifdef I2OVERBOSE
		if (detail >= sizeof(ld_iop_errors) / sizeof(ld_iop_errors[0]))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		printf("%s: error 0x%04x: %s\n", dv->dv_xname, detail, errstr);
#else
		printf("%s: error 0x%04x\n", dv->dv_xname, detail);
#endif
		err = 1;
	}

	if (err) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = (struct ld_iop_softc *)dv;

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		wakeup(&sc->sc_eventii);
		/* Under I2ODEBUG, fall through and log the event. */
#ifndef I2ODEBUG
		return;
#endif
	}

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

static void
ld_iop_adjqparam(struct device *dv, int mpi)
{
	struct iop_softc *iop;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	iop = (struct iop_softc *)dv->dv_parent;
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam((struct ld_softc *)dv, mpi);
}
568