/*	$NetBSD: ld_iop.c,v 1.15 2004/10/28 07:07:40 yamt Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.15 2004/10/28 07:07:40 yamt Exp $");

#include "opt_i2o.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>

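/*
 * Command timeout, in milliseconds (30s).  The DDM timeout parameters
 * set at attach time scale this by a further factor of 1000, which
 * appears to put them in microseconds.
 */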
#define	LD_IOP_TIMEOUT		30*1000

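/*
 * Softc flag bits: LD_IOP_CLAIMED is set once we have successfully
 * claimed the target as its primary user, and LD_IOP_NEW_EVTMASK is set
 * by the event handler once the IOP acknowledges a modified event mask.
 */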
#define	LD_IOP_CLAIMED		0x01
#define	LD_IOP_NEW_EVTMASK	0x02

struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_flags;
};

static void	ld_iop_adjqparam(struct device *, int);
static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

#ifdef I2OVERBOSE
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};
#endif

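/*
 * Match any random block storage device; the attach routine decides
 * which device types we can actually drive.
 */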
static int
ld_iop_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
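	/*
	 * Scratch area for iop_field_get_all(): the reply carries the
	 * operation and read result headers ahead of the parameter
	 * payload, hence the packed wrapper mirroring the wire layout.
	 */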
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __attribute__ ((__packed__)) param;

	sc = (struct ld_iop_softc *)self;
	ld = &sc->sc_ld;
	iop = (struct iop_softc *)parent;
	ia = (struct iop_attach_args *)aux;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n", self->dv_xname);
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		printf("%s: device not yet supported\n", self->dv_xname);

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;
	int s;

	iop = (struct iop_softc *)sc->sc_ld.sc_dv.dv_parent;

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);
		s = splbio();
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			tsleep(&sc->sc_eventii, PRIBIO, "ld_iopevt", hz * 5);
		splx(s);
#ifdef I2ODEBUG
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			printf("%s: didn't reply to event unregister\n",
			    sc->sc_ld.sc_dv.dv_xname);
#endif
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = (struct ld_iop_softc *)self;
	iop = (struct iop_softc *)self->dv_parent;

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);
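	/*
	 * For example, with 512-byte sectors, b_rawblkno 0x01000000 gives
	 * ba = 0x200000000: lowoffset is then 0 and highoffset is 2.
	 */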

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
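
	/*
	 * Dumps run in a polled context (we may be crashing with interrupts
	 * disabled), so allocate the message with IM_POLL rather than
	 * sleeping for the reply.
	 */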
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
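
	/*
	 * Flushes are issued from process context, so IM_WAIT lets us
	 * sleep until the reply arrives instead of polling for it.
	 */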
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* XXX Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
#ifdef I2OVERBOSE
	const char *errstr;
#endif

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)dv->dv_parent;

	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
#ifdef I2OVERBOSE
		if (detail >= sizeof(ld_iop_errors) / sizeof(ld_iop_errors[0]))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		printf("%s: error 0x%04x: %s\n", dv->dv_xname, detail, errstr);
#else
		printf("%s: error 0x%04x\n", dv->dv_xname, detail);
#endif
		err = 1;
	}

	if (err) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = (struct ld_iop_softc *)dv;

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		wakeup(&sc->sc_eventii);
#ifndef I2ODEBUG
		return;
#endif
	}

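	/* With I2ODEBUG, mask-modified events fall through and are logged. */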
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

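/*
 * Queue-depth callback from the iop driver: `mpi' is the new maximum
 * number of commands that may be outstanding against this initiator.
 */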
static void
ld_iop_adjqparam(struct device *dv, int mpi)
{
	struct iop_softc *iop;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	iop = (struct iop_softc *)dv->dv_parent;
	if (le16toh(iop->sc_status.orgid) == I2O_ORG_AMI && mpi > 64)
		mpi = 64;

	ldadjqparam((struct ld_softc *)dv, mpi);
}
569