/*	$NetBSD: ld_iop.c,v 1.22 2006/10/12 01:30:58 christos Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */
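
/*
 * A minimal kernel configuration sketch for this attachment (the exact
 * bus attachment for iop(4) varies by port; lines as in GENERIC):
 *
 *	iop*	at pci? dev ? function ?	# I2O I/O processor
 *	ld*	at iop? tid ?			# logical disk
 */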

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.22 2006/10/12 01:30:58 christos Exp $");

#include "opt_i2o.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>

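/* Command timeout, in milliseconds. */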
#define	LD_IOP_TIMEOUT		(30*1000)

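/* Bits for ld_iop_softc sc_flags. */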
#define	LD_IOP_CLAIMED		0x01
#define	LD_IOP_NEW_EVTMASK	0x02

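/*
 * Per-device state.  The generic ld(4) softc must come first, since the
 * two structures are cast between.  Two initiators are registered with
 * the IOP: sc_ii receives block I/O replies, while sc_eventii is a
 * utility initiator that receives event notifications.
 */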
struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_flags;
};

static void	ld_iop_adjqparam(struct device *, int);
static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

#ifdef I2OVERBOSE
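/* I2O RBS detailed status codes, indexed by the reply's `detail' field. */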
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};
#endif

static int
ld_iop_match(struct device *parent __unused, struct cfdata *match __unused,
    void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __attribute__ ((__packed__)) param;

	sc = device_private(self);
	ld = &sc->sc_ld;
	iop = device_private(parent);
	ia = (struct iop_attach_args *)aux;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n", self->dv_xname);
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.  The timeout base parameters appear to be
	 * expressed in microseconds, hence the `* 1000' below.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		printf("%s: device not yet supported\n", self->dv_xname);

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;
	int s;

	iop = (struct iop_softc *)device_parent(&sc->sc_ld.sc_dv);

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);
		s = splbio();
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			tsleep(&sc->sc_eventii, PRIBIO, "ld_iopevt", hz * 5);
		splx(s);
#ifdef I2ODEBUG
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			printf("%s: didn't reply to event unregister\n",
			    sc->sc_ld.sc_dv.dv_xname);
#endif
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = device_private(self);
	iop = device_private(device_parent(self));

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* XXX Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
#ifdef I2OVERBOSE
	const char *errstr;
#endif

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)device_parent(dv);

	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
#ifdef I2OVERBOSE
		if (detail >= __arraycount(ld_iop_errors))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		printf("%s: error 0x%04x: %s\n", dv->dv_xname, detail, errstr);
#else
		printf("%s: error 0x%04x\n", dv->dv_xname, detail);
#endif
		err = 1;
	}

	if (err) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im __unused, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = (struct ld_iop_softc *)dv;

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		wakeup(&sc->sc_eventii);
#ifndef I2ODEBUG
		return;
#endif
	}

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

static void
ld_iop_adjqparam(struct device *dv, int mpi)
{
	struct iop_softc *iop;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	iop = (struct iop_softc *)device_parent(dv);
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam((struct ld_softc *)dv, mpi);
}