/*	$NetBSD: ld_iop.c,v 1.35 2015/04/13 16:33:24 riastradh Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.35 2015/04/13 16:33:24 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>

#include <sys/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>

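/*
 * Command timeout.  iop_msg_post() takes its timeout argument in
 * milliseconds, so this is 30 seconds; the DDM timeout parameters
 * programmed at attach time appear to be in microseconds, hence the
 * extra factor of 1000 there.
 */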
#define	LD_IOP_TIMEOUT		(30 * 1000)

#define	LD_IOP_CLAIMED		0x01	/* claimed the target device */
#define	LD_IOP_NEW_EVTMASK	0x02	/* event mask update acknowledged */

struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;		/* command initiator */
	struct	iop_initiator sc_eventii;	/* event initiator */
	int	sc_flags;
};

static void	ld_iop_adjqparam(device_t, int);
static void	ld_iop_attach(device_t, device_t, void *);
static int	ld_iop_detach(device_t, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *, int);
static void	ld_iop_intr(device_t, struct iop_msg *, void *);
static void	ld_iop_intr_event(device_t, struct iop_msg *, void *);
static int	ld_iop_match(device_t, cfdata_t, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL_NEW(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

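/*
 * Error strings, indexed by the detailed status code returned in an
 * RBS reply (see ld_iop_intr()).
 */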
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};

static int
ld_iop_match(device_t parent, cfdata_t match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(device_t parent, device_t self, void *aux)
{
	struct iop_attach_args *ia = aux;
	struct ld_iop_softc *sc = device_private(self);
	struct iop_softc *iop = device_private(parent);
	struct ld_softc *ld = &sc->sc_ld;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __packed param;

	ld->sc_dv = self;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(self, "unable to register for events\n");
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	/*
	 * XXX This writes the rwvtimeoutbase field a second time; the
	 * XXX rwvtimeout field was presumably intended.
	 */
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		aprint_error_dev(self, "device not yet supported\n");

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;

	iop = device_private(device_parent(sc->sc_ld.sc_dv));

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		mutex_spin_exit(&iop->sc_intrlock);

		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);

		mutex_spin_enter(&iop->sc_intrlock);
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			cv_timedwait(&sc->sc_eventii.ii_cv,
			    &iop->sc_intrlock, hz * 5);
		mutex_spin_exit(&iop->sc_intrlock);
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(device_t self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = device_private(self);
	iop = device_private(device_parent(self));

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
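	/* RBS messages address the media by byte offset, not block number. */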
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
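	/*
	 * The upper half of this word holds the I2O time multiplier
	 * applied to the DDM's timeout values; (1 << 16) selects a
	 * multiplier of one.
	 */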
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	} else
		iop_msg_free(iop, im);	/* don't leak the message wrapper */
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
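	/* Dumps run in polled mode (e.g. from crash context), so IM_POLL. */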
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld, int flags)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));
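	/* Flushes may take a while; sleep for the reply (IM_WAIT). */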
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static void
ld_iop_intr(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
	const char *errstr;

	rb = reply;
	bp = im->im_dvcontext;
	sc = device_private(dv);
	iop = device_private(device_parent(dv));

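	/* The IOP sets I2O_MSGFLAGS_FAIL on transport-level failures. */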
	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= __arraycount(ld_iop_errors))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		aprint_error_dev(dv, "error 0x%04x: %s\n", detail, errstr);
		err = 1;
	}

	if (err) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

static void
ld_iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = device_private(dv);

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		iop = device_private(device_parent(dv));
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		cv_broadcast(&sc->sc_eventii.ii_cv);
		mutex_spin_exit(&iop->sc_intrlock);
		return;
	}

	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

static void
ld_iop_adjqparam(device_t dv, int mpi)
{
	struct ld_iop_softc *sc = device_private(dv);
	struct iop_softc *iop = device_private(device_parent(dv));
	struct ld_softc *ld = &sc->sc_ld;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	if (le16toh(iop->sc_status.orgid) == I2O_ORG_AMI && mpi > 64)
		mpi = 64;

	ldadjqparam(ld, mpi);
}