xref: /openbsd-src/sys/dev/pci/ips.c (revision a0747c9f67a4ae71ccb71e62a28d1ea19e06a63c)
1 /*	$OpenBSD: ips.c,v 1.133 2020/10/15 13:22:13 krw Exp $	*/
2 
3 /*
4  * Copyright (c) 2006, 2007, 2009 Alexander Yurchenko <grange@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * IBM (Adaptec) ServeRAID controllers driver.
21  */
22 
23 #include "bio.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/device.h>
28 #include <sys/ioctl.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/sensors.h>
32 #include <sys/timeout.h>
33 #include <sys/queue.h>
34 
35 #include <machine/bus.h>
36 
37 #include <scsi/scsi_all.h>
38 #include <scsi/scsi_disk.h>
39 #include <scsi/scsiconf.h>
40 
41 #include <dev/biovar.h>
42 
43 #include <dev/pci/pcidevs.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 
47 /* Debug levels */
48 #define IPS_D_ERR	0x0001	/* errors */
49 #define IPS_D_INFO	0x0002	/* information */
50 #define IPS_D_XFER	0x0004	/* transfers */
51 
52 #ifdef IPS_DEBUG
53 #define DPRINTF(a, b)	do { if (ips_debug & (a)) printf b; } while (0)
54 int ips_debug = IPS_D_ERR;
55 #else
56 #define DPRINTF(a, b)
57 #endif
58 
59 #define IPS_MAXDRIVES		8
60 #define IPS_MAXCHANS		4
61 #define IPS_MAXTARGETS		16
62 #define IPS_MAXCHUNKS		16
63 #define IPS_MAXCMDS		128
64 
65 #define IPS_MAXSGS		16
66 #define IPS_MAXCDB		12
67 
68 #define IPS_SECSZ		512
69 #define IPS_NVRAMPGSZ		128
70 #define IPS_SQSZ		(IPS_MAXCMDS * sizeof(u_int32_t))
71 
72 #define	IPS_TIMEOUT		60000	/* ms */
73 
74 /* Command codes */
75 #define IPS_CMD_READ		0x02
76 #define IPS_CMD_WRITE		0x03
77 #define IPS_CMD_DCDB		0x04
78 #define IPS_CMD_GETADAPTERINFO	0x05
79 #define IPS_CMD_FLUSH		0x0a
80 #define IPS_CMD_REBUILDSTATUS	0x0c
81 #define IPS_CMD_SETSTATE	0x10
82 #define IPS_CMD_REBUILD		0x16
83 #define IPS_CMD_ERRORTABLE	0x17
84 #define IPS_CMD_GETDRIVEINFO	0x19
85 #define IPS_CMD_RESETCHAN	0x1a
86 #define IPS_CMD_DOWNLOAD	0x20
87 #define IPS_CMD_RWBIOSFW	0x22
88 #define IPS_CMD_READCONF	0x38
89 #define IPS_CMD_GETSUBSYS	0x40
90 #define IPS_CMD_CONFIGSYNC	0x58
91 #define IPS_CMD_READ_SG		0x82
92 #define IPS_CMD_WRITE_SG	0x83
93 #define IPS_CMD_DCDB_SG		0x84
94 #define IPS_CMD_EDCDB		0x95
95 #define IPS_CMD_EDCDB_SG	0x96
96 #define IPS_CMD_RWNVRAMPAGE	0xbc
97 #define IPS_CMD_GETVERINFO	0xc6
98 #define IPS_CMD_FFDC		0xd7
99 #define IPS_CMD_SG		0x80
100 #define IPS_CMD_RWNVRAM		0xbc
101 
102 /* DCDB attributes */
103 #define IPS_DCDB_DATAIN		0x01	/* data input */
104 #define IPS_DCDB_DATAOUT	0x02	/* data output */
105 #define IPS_DCDB_XFER64K	0x08	/* 64K transfer */
106 #define IPS_DCDB_TIMO10		0x10	/* 10 secs timeout */
107 #define IPS_DCDB_TIMO60		0x20	/* 60 secs timeout */
108 #define IPS_DCDB_TIMO20M	0x30	/* 20 mins timeout */
109 #define IPS_DCDB_NOAUTOREQSEN	0x40	/* no auto request sense */
110 #define IPS_DCDB_DISCON		0x80	/* disconnect allowed */
111 
112 /* Register definitions */
113 #define IPS_REG_HIS		0x08	/* host interrupt status */
114 #define IPS_REG_HIS_SCE			0x01	/* status channel enqueue */
115 #define IPS_REG_HIS_EN			0x80	/* enable interrupts */
116 #define IPS_REG_CCSA		0x10	/* command channel system address */
117 #define IPS_REG_CCC		0x14	/* command channel control */
118 #define IPS_REG_CCC_SEM			0x0008	/* semaphore */
119 #define IPS_REG_CCC_START		0x101a	/* start command */
120 #define IPS_REG_SQH		0x20	/* status queue head */
121 #define IPS_REG_SQT		0x24	/* status queue tail */
122 #define IPS_REG_SQE		0x28	/* status queue end */
123 #define IPS_REG_SQS		0x2c	/* status queue start */
124 
125 #define IPS_REG_OIS		0x30	/* outbound interrupt status */
126 #define IPS_REG_OIS_PEND		0x0008	/* interrupt is pending */
127 #define IPS_REG_OIM		0x34	/* outbound interrupt mask */
128 #define IPS_REG_OIM_DS			0x0008	/* disable interrupts */
129 #define IPS_REG_IQP		0x40	/* inbound queue port */
130 #define IPS_REG_OQP		0x44	/* outbound queue port */
131 
132 /* Status word fields */
133 #define IPS_STAT_ID(x)		(((x) >> 8) & 0xff)	/* command id */
134 #define IPS_STAT_BASIC(x)	(((x) >> 16) & 0xff)	/* basic status */
135 #define IPS_STAT_EXT(x)		(((x) >> 24) & 0xff)	/* ext status */
136 #define IPS_STAT_GSC(x)		((x) & 0x0f)
137 
138 /* Basic status codes */
139 #define IPS_STAT_OK		0x00	/* success */
140 #define IPS_STAT_RECOV		0x01	/* recovered error */
141 #define IPS_STAT_INVOP		0x03	/* invalid opcode */
142 #define IPS_STAT_INVCMD		0x04	/* invalid command block */
143 #define IPS_STAT_INVPARM	0x05	/* invalid parameters block */
144 #define IPS_STAT_BUSY		0x08	/* busy */
145 #define IPS_STAT_CMPLERR	0x0c	/* completed with error */
146 #define IPS_STAT_LDERR		0x0d	/* logical drive error */
147 #define IPS_STAT_TIMO		0x0e	/* timeout */
148 #define IPS_STAT_PDRVERR	0x0f	/* physical drive error */
149 
150 /* Extended status codes */
151 #define IPS_ESTAT_SELTIMO	0xf0	/* select timeout */
152 #define IPS_ESTAT_OURUN		0xf2	/* over/underrun */
153 #define IPS_ESTAT_HOSTRST	0xf7	/* host reset */
154 #define IPS_ESTAT_DEVRST	0xf8	/* device reset */
155 #define IPS_ESTAT_RECOV		0xfc	/* recovered error */
156 #define IPS_ESTAT_CKCOND	0xff	/* check condition */
157 
158 #define IPS_IOSIZE		128	/* max space size to map */
159 
160 /* Command frame */
struct ips_cmd {
	u_int8_t	code;		/* IPS_CMD_* opcode; IPS_CMD_SG or'ed in when sgcnt > 0 */
	u_int8_t	id;		/* command id, echoed back in status word */
	u_int8_t	drive;		/* logical drive number */
	u_int8_t	sgcnt;		/* number of ips_sg entries at sgaddr */
	u_int32_t	lba;		/* start sector, little-endian (htole32) */
	u_int32_t	sgaddr;		/* phys addr of data or SG list, little-endian */
	u_int16_t	seccnt;		/* sector count, little-endian (htole16) */
	u_int8_t	seg4g;
	u_int8_t	esg;
	u_int32_t	ccsar;
	u_int32_t	cccr;
};
174 
175 /* Direct CDB (SCSI pass-through) frame */
struct ips_dcdb {
	u_int8_t	device;		/* (channel << 4) | target */
	u_int8_t	attr;		/* IPS_DCDB_* attribute flags */
	u_int16_t	datalen;	/* transfer length, little-endian */
	u_int32_t	sgaddr;		/* phys addr of data or SG list */
	u_int8_t	cdblen;		/* valid bytes in cdb[] */
	u_int8_t	senselen;	/* sense bytes the controller may return */
	u_int8_t	sgcnt;		/* number of SG entries, 0 for direct */
	u_int8_t	__reserved1;
	u_int8_t	cdb[IPS_MAXCDB];	/* SCSI CDB passed through verbatim */
	u_int8_t	sense[64];	/* sense data filled in by controller */
	u_int8_t	status;		/* SCSI status byte from target */
	u_int8_t	__reserved2[3];
};
190 
191 /* Scatter-gather array element */
/* One scatter-gather list entry: physical address/length pair. */
struct ips_sg {
	u_int32_t	addr;
	u_int32_t	size;
};
196 
197 /* Command block */
/*
 * Per-CCB DMA-able command block: the command frame, an optional DCDB
 * (addressed via offsetof() from the block's phys addr) and the SG list.
 */
struct ips_cmdb {
	struct ips_cmd	cmd;
	struct ips_dcdb	dcdb;
	struct ips_sg	sg[IPS_MAXSGS];
};
203 
204 /* Data frames */
/* Data frames */
struct ips_adapterinfo {
	u_int8_t	drivecnt;
	u_int8_t	miscflag;
	u_int8_t	sltflag;
	u_int8_t	bstflag;
	u_int8_t	pwrchgcnt;
	u_int8_t	wrongaddrcnt;
	u_int8_t	unidentcnt;
	u_int8_t	nvramdevchgcnt;
	u_int8_t	firmware[8];	/* firmware version, printed as 7 chars */
	u_int8_t	bios[8];	/* BIOS version, printed as 7 chars */
	u_int32_t	drivesize[IPS_MAXDRIVES];
	u_int8_t	cmdcnt;		/* max outstanding commands; sizes CCB queue */
	u_int8_t	maxphysdevs;
	u_int16_t	flashrepgmcnt;
	u_int8_t	defunctdiskcnt;
	u_int8_t	rebuildflag;
	u_int8_t	offdrivecnt;
	u_int8_t	critdrivecnt;
	u_int16_t	confupdcnt;
	u_int8_t	blkflag;
	u_int8_t	__reserved;
	u_int16_t	deaddisk[IPS_MAXCHANS][IPS_MAXTARGETS];
};
229 
struct ips_driveinfo {
	u_int8_t	drivecnt;	/* number of logical drives (sc_nunits) */
	u_int8_t	__reserved[3];
	struct ips_drive {
		u_int8_t	id;
		u_int8_t	__reserved;
		u_int8_t	raid;	/* RAID level, reported via INQUIRY */
		u_int8_t	state;	/* IPS_DS_* logical drive state */
#define IPS_DS_FREE	0x00
#define IPS_DS_OFFLINE	0x02
#define IPS_DS_ONLINE	0x03
#define IPS_DS_DEGRADED	0x04
#define IPS_DS_SYS	0x06
#define IPS_DS_CRS	0x24

		u_int32_t	seccnt;	/* capacity in sectors, little-endian */
	}		drive[IPS_MAXDRIVES];
};
248 
/* Controller configuration as returned by IPS_CMD_READCONF. */
struct ips_conf {
	u_int8_t	ldcnt;		/* number of configured logical drives */
	u_int8_t	day;
	u_int8_t	month;
	u_int8_t	year;
	u_int8_t	initid[4];
	u_int8_t	hostid[12];
	u_int8_t	time[8];
	u_int32_t	useropt;
	u_int16_t	userfield;
	u_int8_t	rebuildrate;
	u_int8_t	__reserved1;

	struct ips_hw {
		u_int8_t	board[8];
		u_int8_t	cpu[8];
		u_int8_t	nchantype;
		u_int8_t	nhostinttype;
		u_int8_t	compression;
		u_int8_t	nvramtype;
		u_int32_t	nvramsize;
	}		hw;

	/* Per-logical-drive configuration. */
	struct ips_ld {
		u_int16_t	userfield;
		u_int8_t	state;		/* IPS_DS_* state */
		u_int8_t	raidcacheparam;
		u_int8_t	chunkcnt;	/* number of member chunks */
		u_int8_t	stripesize;
		u_int8_t	params;
		u_int8_t	__reserved;
		u_int32_t	size;

		/* One physical extent of a logical drive. */
		struct ips_chunk {
			u_int8_t	channel;
			u_int8_t	target;
			u_int16_t	__reserved;
			u_int32_t	startsec;
			u_int32_t	seccnt;
		}		chunk[IPS_MAXCHUNKS];
	}		ld[IPS_MAXDRIVES];

	/* Per physical device state, indexed by [channel][target]. */
	struct ips_dev {
		u_int8_t	initiator;
		u_int8_t	params;		/* low bits hold SID_TYPE device type */
		u_int8_t	miscflag;
		u_int8_t	state;		/* IPS_DVS_* flags, 0 if absent */
#define IPS_DVS_STANDBY	0x01
#define IPS_DVS_REBUILD	0x02
#define IPS_DVS_SPARE	0x04
#define IPS_DVS_MEMBER	0x08
#define IPS_DVS_ONLINE	0x80
#define IPS_DVS_READY	(IPS_DVS_STANDBY | IPS_DVS_ONLINE)

		u_int32_t	seccnt;
		u_int8_t	devid[28];
	}		dev[IPS_MAXCHANS][IPS_MAXTARGETS];

	u_int8_t	reserved[512];
};
309 
/* Rebuild status (IPS_CMD_REBUILDSTATUS): per-LD progress counters. */
struct ips_rblstat {
	u_int8_t	__unknown[20];
	struct {
		u_int8_t	__unknown[4];
		u_int32_t	total;	/* total sectors to rebuild */
		u_int32_t	remain;	/* sectors still remaining */
	}		ld[IPS_MAXDRIVES];
};
318 
/* NVRAM page 5: adapter identification data. */
struct ips_pg5 {
	u_int32_t	signature;
	u_int8_t	__reserved1;
	u_int8_t	slot;
	u_int16_t	type;		/* adapter model, index into ips_names[] */
	u_int8_t	bioshi[4];
	u_int8_t	bioslo[4];
	u_int16_t	__reserved2;
	u_int8_t	__reserved3;
	u_int8_t	os;
	u_int8_t	driverhi[4];
	u_int8_t	driverlo[4];
	u_int8_t	__reserved4[100];
};
333 
/*
 * All management data frames kept in one DMA-able allocation (sc_infom);
 * sc_info points at it.
 */
struct ips_info {
	struct ips_adapterinfo	adapter;
	struct ips_driveinfo	drive;
	struct ips_conf		conf;
	struct ips_rblstat	rblstat;
	struct ips_pg5		pg5;
};
341 
342 /* Command control block */
struct ips_softc;
struct ips_ccb {
	struct ips_softc *	c_sc;		/* driver softc */
	int			c_id;		/* command id */
	int			c_flags;	/* SCSI_* flags */
	enum {
		IPS_CCB_FREE,
		IPS_CCB_QUEUED,
		IPS_CCB_DONE
	}			c_state;	/* command state */

	void *			c_cmdbva;	/* command block virt addr */
	paddr_t			c_cmdbpa;	/* command block phys addr */
	bus_dmamap_t		c_dmam;		/* data buffer DMA map */

	struct scsi_xfer *	c_xfer;		/* corresponding SCSI xfer */

	u_int8_t		c_stat;		/* status byte copy */
	u_int8_t		c_estat;	/* ext status byte copy */
	int			c_error;	/* completion error */

	/* completion callback, e.g. ips_done_xs/ips_done_pt */
	void			(*c_done)(struct ips_softc *,	/* cmd done */
				    struct ips_ccb *);		/* callback */

	SLIST_ENTRY(ips_ccb)	c_link;		/* queue link */
};
369 
370 /* CCB queue */
371 SLIST_HEAD(ips_ccbq, ips_ccb);
372 
/* DMA-able chunk of memory: tag/map/segment plus both address views */
struct dmamem {
	bus_dma_tag_t		dm_tag;
	bus_dmamap_t		dm_map;
	bus_dma_segment_t	dm_seg;
	bus_size_t		dm_size;
	void *			dm_vaddr;	/* kernel virtual address */
#define dm_paddr dm_seg.ds_addr
};
382 
/* Per-controller state. */
struct ips_softc {
	struct device		sc_dev;

	struct scsibus_softc *	sc_scsibus;	/* logical drive bus */

	/* One pass-through bus per physical channel. */
	struct ips_pt {
		struct ips_pt *	pt_sc: use pt_sc to get back to softc */
		struct ips_softc *	pt_sc;
		int			pt_chan;	/* channel number */

		int			pt_proctgt;	/* processor/enclosure tgt, -1 if none */
		char			pt_procdev[16];	/* its device name once attached */
	}			sc_pt[IPS_MAXCHANS];

	struct ksensordev	sc_sensordev;
	struct ksensor *	sc_sensors;	/* one SENSOR_DRIVE per unit */

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;

	const struct ips_chipset *sc_chip;	/* chip ops (exec/intr/status) */

	struct ips_info *	sc_info;	/* virt addr of sc_infom */
	struct dmamem		sc_infom;

	int			sc_nunits;	/* number of logical drives */

	struct dmamem		sc_cmdbm;	/* array of ips_cmdb blocks */

	struct ips_ccb *	sc_ccb;
	int			sc_nccbs;
	struct ips_ccbq		sc_ccbq_free;
	struct mutex		sc_ccb_mtx;
	struct scsi_iopool	sc_iopool;

	/* Copperhead-only status queue state */
	struct dmamem		sc_sqm;
	paddr_t			sc_sqtail;
	u_int32_t *		sc_sqbuf;
	int			sc_sqidx;
};
423 
424 int	ips_match(struct device *, void *, void *);
425 void	ips_attach(struct device *, struct device *, void *);
426 
427 void	ips_scsi_cmd(struct scsi_xfer *);
428 void	ips_scsi_pt_cmd(struct scsi_xfer *);
429 int	ips_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
430 
431 #if NBIO > 0
432 int	ips_ioctl(struct device *, u_long, caddr_t);
433 int	ips_ioctl_inq(struct ips_softc *, struct bioc_inq *);
434 int	ips_ioctl_vol(struct ips_softc *, struct bioc_vol *);
435 int	ips_ioctl_disk(struct ips_softc *, struct bioc_disk *);
436 int	ips_ioctl_setstate(struct ips_softc *, struct bioc_setstate *);
437 #endif
438 
439 #ifndef SMALL_KERNEL
440 void	ips_sensors(void *);
441 #endif
442 
443 int	ips_load_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
444 void	ips_start_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
445 
446 int	ips_cmd(struct ips_softc *, struct ips_ccb *);
447 int	ips_poll(struct ips_softc *, struct ips_ccb *);
448 void	ips_done(struct ips_softc *, struct ips_ccb *);
449 void	ips_done_xs(struct ips_softc *, struct ips_ccb *);
450 void	ips_done_pt(struct ips_softc *, struct ips_ccb *);
451 void	ips_done_mgmt(struct ips_softc *, struct ips_ccb *);
452 int	ips_error(struct ips_softc *, struct ips_ccb *);
453 int	ips_error_xs(struct ips_softc *, struct ips_ccb *);
454 int	ips_intr(void *);
455 void	ips_timeout(void *);
456 
457 int	ips_getadapterinfo(struct ips_softc *, int);
458 int	ips_getdriveinfo(struct ips_softc *, int);
459 int	ips_getconf(struct ips_softc *, int);
460 int	ips_getpg5(struct ips_softc *, int);
461 
462 #if NBIO > 0
463 int	ips_getrblstat(struct ips_softc *, int);
464 int	ips_setstate(struct ips_softc *, int, int, int, int);
465 int	ips_rebuild(struct ips_softc *, int, int, int, int, int);
466 #endif
467 
468 void	ips_copperhead_exec(struct ips_softc *, struct ips_ccb *);
469 void	ips_copperhead_intren(struct ips_softc *);
470 int	ips_copperhead_isintr(struct ips_softc *);
471 u_int32_t ips_copperhead_status(struct ips_softc *);
472 
473 void	ips_morpheus_exec(struct ips_softc *, struct ips_ccb *);
474 void	ips_morpheus_intren(struct ips_softc *);
475 int	ips_morpheus_isintr(struct ips_softc *);
476 u_int32_t ips_morpheus_status(struct ips_softc *);
477 
478 struct ips_ccb *ips_ccb_alloc(struct ips_softc *, int);
479 void	ips_ccb_free(struct ips_softc *, struct ips_ccb *, int);
480 void	*ips_ccb_get(void *);
481 void	ips_ccb_put(void *, void *);
482 
483 int	ips_dmamem_alloc(struct dmamem *, bus_dma_tag_t, bus_size_t);
484 void	ips_dmamem_free(struct dmamem *);
485 
/* autoconf(9) attachment glue */
struct cfattach ips_ca = {
	sizeof(struct ips_softc),
	ips_match,
	ips_attach
};
491 
/* autoconf(9) driver definition */
struct cfdriver ips_cd = {
	NULL, "ips", DV_DULL
};
495 
/* scsi_adapter ops for the logical drive bus */
static struct scsi_adapter ips_switch = {
	ips_scsi_cmd, NULL, NULL, NULL, ips_scsi_ioctl
};
499 
/* scsi_adapter ops for the per-channel pass-through buses */
static struct scsi_adapter ips_pt_switch = {
	ips_scsi_pt_cmd, NULL, NULL, NULL, NULL
};
503 
/* PCI vendor/product pairs this driver attaches to */
static const struct pci_matchid ips_ids[] = {
	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID },
	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID2 },
	{ PCI_VENDOR_ADP2,	PCI_PRODUCT_ADP2_SERVERAID }
};
509 
/*
 * Chipset operations table.  NOTE: entries are indexed directly by ic_id
 * (ips_chips[IPS_CHIP_COPPERHEAD] etc. in ips_attach), so the array order
 * must match the enum order.
 */
static const struct ips_chipset {
	enum {
		IPS_CHIP_COPPERHEAD = 0,
		IPS_CHIP_MORPHEUS
	}		ic_id;

	int		ic_bar;		/* PCI BAR holding the registers */

	void		(*ic_exec)(struct ips_softc *, struct ips_ccb *);
	void		(*ic_intren)(struct ips_softc *);
	int		(*ic_isintr)(struct ips_softc *);
	u_int32_t	(*ic_status)(struct ips_softc *);
} ips_chips[] = {
	{
		IPS_CHIP_COPPERHEAD,
		0x14,
		ips_copperhead_exec,
		ips_copperhead_intren,
		ips_copperhead_isintr,
		ips_copperhead_status
	},
	{
		IPS_CHIP_MORPHEUS,
		0x10,
		ips_morpheus_exec,
		ips_morpheus_intren,
		ips_morpheus_isintr,
		ips_morpheus_status
	}
};
540 
541 #define ips_exec(s, c)	(s)->sc_chip->ic_exec((s), (c))
542 #define ips_intren(s)	(s)->sc_chip->ic_intren((s))
543 #define ips_isintr(s)	(s)->sc_chip->ic_isintr((s))
544 #define ips_status(s)	(s)->sc_chip->ic_status((s))
545 
/* Adapter model names, indexed by the NVRAM page 5 "type" field */
static const char *ips_names[] = {
	NULL,
	NULL,
	"II",
	"onboard",
	"onboard",
	"3H",
	"3L",
	"4H",
	"4M",
	"4L",
	"4Mx",
	"4Lx",
	"5i",
	"5i",
	"6M",
	"6i",
	"7t",
	"7k",
	"7M"
};
567 
568 int
569 ips_match(struct device *parent, void *match, void *aux)
570 {
571 	return (pci_matchbyid(aux, ips_ids,
572 	    sizeof(ips_ids) / sizeof(ips_ids[0])));
573 }
574 
/*
 * Attach the controller: map registers, set up DMA memory, query the
 * adapter with a single bootstrap CCB, then allocate the real CCB queue,
 * hook the interrupt and attach the logical-drive and pass-through SCSI
 * buses.  Error paths unwind via the fail* labels in reverse order of
 * acquisition.
 */
void
ips_attach(struct device *parent, struct device *self, void *aux)
{
	struct ips_softc *sc = (struct ips_softc *)self;
	struct pci_attach_args *pa = aux;
	struct ips_ccb ccb0;
	struct scsibus_attach_args saa;
	struct ips_adapterinfo *ai;
	struct ips_driveinfo *di;
	struct ips_pg5 *pg5;
	pcireg_t maptype;
	bus_size_t iosize;
	pci_intr_handle_t ih;
	const char *intrstr;
	int type, i;

	sc->sc_dmat = pa->pa_dmat;

	/* Identify chipset */
	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_IBM_SERVERAID)
		sc->sc_chip = &ips_chips[IPS_CHIP_COPPERHEAD];
	else
		sc->sc_chip = &ips_chips[IPS_CHIP_MORPHEUS];

	/* Map registers */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_chip->ic_bar);
	if (pci_mapreg_map(pa, sc->sc_chip->ic_bar, maptype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &iosize, IPS_IOSIZE)) {
		printf(": can't map regs\n");
		return;
	}

	/* Allocate command buffer */
	if (ips_dmamem_alloc(&sc->sc_cmdbm, sc->sc_dmat,
	    IPS_MAXCMDS * sizeof(struct ips_cmdb))) {
		printf(": can't alloc cmd buffer\n");
		goto fail1;
	}

	/* Allocate info buffer */
	if (ips_dmamem_alloc(&sc->sc_infom, sc->sc_dmat,
	    sizeof(struct ips_info))) {
		printf(": can't alloc info buffer\n");
		goto fail2;
	}
	sc->sc_info = sc->sc_infom.dm_vaddr;
	ai = &sc->sc_info->adapter;
	di = &sc->sc_info->drive;
	pg5 = &sc->sc_info->pg5;

	/* Allocate status queue for the Copperhead chipset */
	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD) {
		if (ips_dmamem_alloc(&sc->sc_sqm, sc->sc_dmat, IPS_SQSZ)) {
			printf(": can't alloc status queue\n");
			goto fail3;
		}
		sc->sc_sqtail = sc->sc_sqm.dm_paddr;
		sc->sc_sqbuf = sc->sc_sqm.dm_vaddr;
		sc->sc_sqidx = 0;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQS,
		    sc->sc_sqm.dm_paddr);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQE,
		    sc->sc_sqm.dm_paddr + IPS_SQSZ);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH,
		    sc->sc_sqm.dm_paddr + sizeof(u_int32_t));
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT,
		    sc->sc_sqm.dm_paddr);
	}

	/*
	 * Bootstrap CCB queue: a single stack-allocated CCB is enough for
	 * the polled management commands issued below, before we know how
	 * many commands the adapter supports.
	 */
	sc->sc_nccbs = 1;
	sc->sc_ccb = &ccb0;
	bzero(&ccb0, sizeof(ccb0));
	ccb0.c_cmdbva = sc->sc_cmdbm.dm_vaddr;
	ccb0.c_cmdbpa = sc->sc_cmdbm.dm_paddr;
	SLIST_INIT(&sc->sc_ccbq_free);
	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, &ccb0, c_link);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, ips_ccb_get, ips_ccb_put);

	/* Get adapter info */
	if (ips_getadapterinfo(sc, SCSI_NOSLEEP)) {
		printf(": can't get adapter info\n");
		goto fail4;
	}

	/* Get logical drives info */
	if (ips_getdriveinfo(sc, SCSI_NOSLEEP)) {
		printf(": can't get ld info\n");
		goto fail4;
	}
	sc->sc_nunits = di->drivecnt;

	/* Get configuration */
	if (ips_getconf(sc, SCSI_NOSLEEP)) {
		printf(": can't get config\n");
		goto fail4;
	}

	/* Read NVRAM page 5 for additional info; failure is non-fatal */
	(void)ips_getpg5(sc, SCSI_NOSLEEP);

	/* Initialize CCB queue, now sized from the adapter's command count */
	sc->sc_nccbs = ai->cmdcnt;
	if ((sc->sc_ccb = ips_ccb_alloc(sc, sc->sc_nccbs)) == NULL) {
		printf(": can't alloc ccb queue\n");
		goto fail4;
	}
	SLIST_INIT(&sc->sc_ccbq_free);
	for (i = 0; i < sc->sc_nccbs; i++)
		SLIST_INSERT_HEAD(&sc->sc_ccbq_free,
		    &sc->sc_ccb[i], c_link);

	/* Install interrupt handler */
	if (pci_intr_map(pa, &ih)) {
		printf(": can't map interrupt\n");
		goto fail5;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (pci_intr_establish(pa->pa_pc, ih, IPL_BIO, ips_intr, sc,
	    sc->sc_dev.dv_xname) == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail5;
	}
	printf(": %s\n", intrstr);

	/* Display adapter info */
	printf("%s: ServeRAID", sc->sc_dev.dv_xname);
	type = letoh16(pg5->type);
	if (type < sizeof(ips_names) / sizeof(ips_names[0]) && ips_names[type])
		printf(" %s", ips_names[type]);
	printf(", FW %c%c%c%c%c%c%c", ai->firmware[0], ai->firmware[1],
	    ai->firmware[2], ai->firmware[3], ai->firmware[4], ai->firmware[5],
	    ai->firmware[6]);
	printf(", BIOS %c%c%c%c%c%c%c", ai->bios[0], ai->bios[1], ai->bios[2],
	    ai->bios[3], ai->bios[4], ai->bios[5], ai->bios[6]);
	printf(", %d cmds, %d LD%s", sc->sc_nccbs, sc->sc_nunits,
	    (sc->sc_nunits == 1 ? "" : "s"));
	printf("\n");

	/* Attach the logical drive bus */
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = sc->sc_nunits;
	saa.saa_adapter = &ips_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_luns = 8;
	if (sc->sc_nunits > 0)
		saa.saa_openings = sc->sc_nccbs / sc->sc_nunits;
	else
		saa.saa_openings = 0;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(self, &saa,
	    scsiprint);

	/* For each channel attach SCSI pass-through bus */
	for (i = 0; i < IPS_MAXCHANS; i++) {
		struct ips_pt *pt;
		int target, lastarget;

		pt = &sc->sc_pt[i];
		pt->pt_sc = sc;
		pt->pt_chan = i;
		pt->pt_proctgt = -1;

		/* Check if channel has any devices besides disks */
		for (target = 0, lastarget = -1; target < IPS_MAXTARGETS;
		    target++) {
			struct ips_dev *idev;
			int type;

			idev = &sc->sc_info->conf.dev[i][target];
			type = idev->params & SID_TYPE;
			if (idev->state && type != T_DIRECT) {
				lastarget = target;
				if (type == T_PROCESSOR ||
				    type == T_ENCLOSURE)
					/* remember enclosure address */
					pt->pt_proctgt = target;
			}
		}
		if (lastarget == -1)
			continue;

		saa.saa_adapter = &ips_pt_switch;
		saa.saa_adapter_softc = pt;
		saa.saa_adapter_buswidth =  lastarget + 1;
		saa.saa_adapter_target = IPS_MAXTARGETS;
		saa.saa_luns = 8;
		saa.saa_openings = 1;
		saa.saa_pool = &sc->sc_iopool;
		saa.saa_quirks = saa.saa_flags = 0;
		saa.saa_wwpn = saa.saa_wwnn = 0;

		config_found(self, &saa, scsiprint);
	}

	/* Enable interrupts */
	ips_intren(sc);

#if NBIO > 0
	/* Install ioctl handler */
	if (bio_register(&sc->sc_dev, ips_ioctl))
		printf("%s: no ioctl support\n", sc->sc_dev.dv_xname);
#endif

#ifndef SMALL_KERNEL
	/* Add sensors; failures below are non-fatal, attach is complete */
	if ((sc->sc_sensors = mallocarray(sc->sc_nunits, sizeof(struct ksensor),
	    M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
		/* NOTE(review): ": " prefix looks stale here, the attach
		 * banner was already terminated above — confirm intent */
		printf(": can't alloc sensors\n");
		return;
	}
	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
	    sizeof(sc->sc_sensordev.xname));
	for (i = 0; i < sc->sc_nunits; i++) {
		struct device *dev;

		sc->sc_sensors[i].type = SENSOR_DRIVE;
		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
		dev = scsi_get_link(sc->sc_scsibus, i, 0)->device_softc;
		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
		    sizeof(sc->sc_sensors[i].desc));
		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
	}
	if (sensor_task_register(sc, ips_sensors, 10) == NULL) {
		printf(": no sensors support\n");
		free(sc->sc_sensors, M_DEVBUF,
		    sc->sc_nunits * sizeof(struct ksensor));
		return;
	}
	sensordev_install(&sc->sc_sensordev);
#endif	/* !SMALL_KERNEL */

	return;
fail5:
	ips_ccb_free(sc, sc->sc_ccb, sc->sc_nccbs);
fail4:
	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD)
		ips_dmamem_free(&sc->sc_sqm);
fail3:
	ips_dmamem_free(&sc->sc_infom);
fail2:
	ips_dmamem_free(&sc->sc_cmdbm);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
}
826 
827 void
828 ips_scsi_cmd(struct scsi_xfer *xs)
829 {
830 	struct scsi_link *link = xs->sc_link;
831 	struct ips_softc *sc = link->bus->sb_adapter_softc;
832 	struct ips_driveinfo *di = &sc->sc_info->drive;
833 	struct ips_drive *drive;
834 	struct scsi_inquiry_data inq;
835 	struct scsi_read_cap_data rcd;
836 	struct scsi_sense_data sd;
837 	struct scsi_rw *rw;
838 	struct scsi_rw_10 *rw10;
839 	struct ips_ccb *ccb = xs->io;
840 	struct ips_cmd *cmd;
841 	int target = link->target;
842 	u_int32_t blkno, blkcnt;
843 	int code;
844 
845 	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_cmd: xs %p, target %d, "
846 	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, target,
847 	    xs->cmd.opcode, xs->flags));
848 
849 	if (target >= sc->sc_nunits || link->lun != 0) {
850 		DPRINTF(IPS_D_INFO, ("%s: ips_scsi_cmd: invalid params "
851 		    "target %d, lun %d\n", sc->sc_dev.dv_xname,
852 		    target, link->lun));
853 		xs->error = XS_DRIVER_STUFFUP;
854 		scsi_done(xs);
855 		return;
856 	}
857 
858 	drive = &di->drive[target];
859 	xs->error = XS_NOERROR;
860 
861 	/* Fake SCSI commands */
862 	switch (xs->cmd.opcode) {
863 	case READ_10:
864 	case READ_COMMAND:
865 	case WRITE_10:
866 	case WRITE_COMMAND:
867 		if (xs->cmdlen == sizeof(struct scsi_rw)) {
868 			rw = (void *)&xs->cmd;
869 			blkno = _3btol(rw->addr) &
870 			    (SRW_TOPADDR << 16 | 0xffff);
871 			blkcnt = rw->length ? rw->length : 0x100;
872 		} else {
873 			rw10 = (void *)&xs->cmd;
874 			blkno = _4btol(rw10->addr);
875 			blkcnt = _2btol(rw10->length);
876 		}
877 
878 		if (blkno >= letoh32(drive->seccnt) || blkno + blkcnt >
879 		    letoh32(drive->seccnt)) {
880 			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: invalid params "
881 			    "blkno %u, blkcnt %u\n", sc->sc_dev.dv_xname,
882 			    blkno, blkcnt));
883 			xs->error = XS_DRIVER_STUFFUP;
884 			break;
885 		}
886 
887 		if (xs->flags & SCSI_DATA_IN)
888 			code = IPS_CMD_READ;
889 		else
890 			code = IPS_CMD_WRITE;
891 
892 		ccb = xs->io;
893 
894 		cmd = ccb->c_cmdbva;
895 		cmd->code = code;
896 		cmd->drive = target;
897 		cmd->lba = htole32(blkno);
898 		cmd->seccnt = htole16(blkcnt);
899 
900 		if (ips_load_xs(sc, ccb, xs)) {
901 			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: ips_load_xs "
902 			    "failed\n", sc->sc_dev.dv_xname));
903 			xs->error = XS_DRIVER_STUFFUP;
904 			scsi_done(xs);
905 			return;
906 		}
907 
908 		if (cmd->sgcnt > 0)
909 			cmd->code |= IPS_CMD_SG;
910 
911 		ccb->c_done = ips_done_xs;
912 		ips_start_xs(sc, ccb, xs);
913 		return;
914 	case INQUIRY:
915 		bzero(&inq, sizeof(inq));
916 		inq.device = T_DIRECT;
917 		inq.version = SCSI_REV_2;
918 		inq.response_format = SID_SCSI2_RESPONSE;
919 		inq.additional_length = SID_SCSI2_ALEN;
920 		inq.flags |= SID_CmdQue;
921 		strlcpy(inq.vendor, "IBM", sizeof(inq.vendor));
922 		snprintf(inq.product, sizeof(inq.product),
923 		    "LD%d RAID%d", target, drive->raid);
924 		strlcpy(inq.revision, "1.0", sizeof(inq.revision));
925 		scsi_copy_internal_data(xs, &inq, sizeof(inq));
926 		break;
927 	case READ_CAPACITY:
928 		bzero(&rcd, sizeof(rcd));
929 		_lto4b(letoh32(drive->seccnt) - 1, rcd.addr);
930 		_lto4b(IPS_SECSZ, rcd.length);
931 		scsi_copy_internal_data(xs, &rcd, sizeof(rcd));
932 		break;
933 	case REQUEST_SENSE:
934 		bzero(&sd, sizeof(sd));
935 		sd.error_code = SSD_ERRCODE_CURRENT;
936 		sd.flags = SKEY_NO_SENSE;
937 		scsi_copy_internal_data(xs, &sd, sizeof(sd));
938 		break;
939 	case SYNCHRONIZE_CACHE:
940 		cmd = ccb->c_cmdbva;
941 		cmd->code = IPS_CMD_FLUSH;
942 
943 		ccb->c_done = ips_done_xs;
944 		ips_start_xs(sc, ccb, xs);
945 		return;
946 	case PREVENT_ALLOW:
947 	case START_STOP:
948 	case TEST_UNIT_READY:
949 		break;
950 	default:
951 		DPRINTF(IPS_D_INFO, ("%s: unsupported scsi command 0x%02x\n",
952 		    sc->sc_dev.dv_xname, xs->cmd.opcode));
953 		xs->error = XS_DRIVER_STUFFUP;
954 	}
955 
956 	scsi_done(xs);
957 }
958 
/*
 * SCSI command entry point for the pass-through buses: wrap the caller's
 * CDB in a DCDB frame and send it to the physical device at
 * (channel, target).  The data SG list built by ips_load_xs() is moved
 * into the DCDB and the command frame is repointed at the DCDB itself.
 */
void
ips_scsi_pt_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct ips_pt *pt = link->bus->sb_adapter_softc;
	struct ips_softc *sc = pt->pt_sc;
	struct device *dev = link->device_softc;
	struct ips_ccb *ccb = xs->io;
	struct ips_cmdb *cmdb;
	struct ips_cmd *cmd;
	struct ips_dcdb *dcdb;
	int chan = pt->pt_chan, target = link->target;

	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_pt_cmd: xs %p, chan %d, target %d, "
	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, chan,
	    target, xs->cmd.opcode, xs->flags));

	/* Record the enclosure/processor device name on first contact */
	if (pt->pt_procdev[0] == '\0' && target == pt->pt_proctgt && dev)
		strlcpy(pt->pt_procdev, dev->dv_xname, sizeof(pt->pt_procdev));

	/* CDBs longer than the DCDB frame can carry are rejected as illegal */
	if (xs->cmdlen > IPS_MAXCDB) {
		DPRINTF(IPS_D_ERR, ("%s: cmdlen %d too big\n",
		    sc->sc_dev.dv_xname, xs->cmdlen));

		bzero(&xs->sense, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20; /* illcmd, 0x24 illfield */
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	xs->error = XS_NOERROR;

	cmdb = ccb->c_cmdbva;
	cmd = &cmdb->cmd;
	dcdb = &cmdb->dcdb;

	cmd->code = IPS_CMD_DCDB;

	dcdb->device = (chan << 4) | target;
	if (xs->flags & SCSI_DATA_IN)
		dcdb->attr |= IPS_DCDB_DATAIN;
	if (xs->flags & SCSI_DATA_OUT)
		dcdb->attr |= IPS_DCDB_DATAOUT;

	/*
	 * Adjust timeout value to what controller supports. Make sure our
	 * timeout will be fired after controller gives up.
	 */
	if (xs->timeout <= 10000) {
		dcdb->attr |= IPS_DCDB_TIMO10;
		xs->timeout = 11000;
	} else if (xs->timeout <= 60000) {
		dcdb->attr |= IPS_DCDB_TIMO60;
		xs->timeout = 61000;
	} else {
		dcdb->attr |= IPS_DCDB_TIMO20M;
		xs->timeout = 20 * 60000 + 1000;
	}

	dcdb->attr |= IPS_DCDB_DISCON;
	dcdb->datalen = htole16(xs->datalen);
	dcdb->cdblen = xs->cmdlen;
	dcdb->senselen = MIN(sizeof(xs->sense), sizeof(dcdb->sense));
	memcpy(dcdb->cdb, &xs->cmd, xs->cmdlen);

	if (ips_load_xs(sc, ccb, xs)) {
		DPRINTF(IPS_D_ERR, ("%s: ips_scsi_pt_cmd: ips_load_xs "
		    "failed\n", sc->sc_dev.dv_xname));
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}
	if (cmd->sgcnt > 0)
		cmd->code |= IPS_CMD_SG;
	/* Hand the data SG list to the DCDB, then point the command frame
	 * at the DCDB inside this command block instead. */
	dcdb->sgaddr = cmd->sgaddr;
	dcdb->sgcnt = cmd->sgcnt;
	cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb, dcdb));
	cmd->sgcnt = 0;

	ccb->c_done = ips_done_pt;
	ips_start_xs(sc, ccb, xs);
}
1044 
/*
 * scsi_link ioctl hook: forward to the bio(4) handler when BIO support
 * is compiled in, otherwise report the ioctl as unsupported.
 */
int
ips_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
{
#if NBIO > 0
	return (ips_ioctl(link->bus->sb_adapter_softc, cmd, addr));
#else
	return (ENOTTY);
#endif
}
1054 
1055 #if NBIO > 0
/*
 * bio(4) ioctl dispatcher: hand each supported BIOC* request to its
 * handler.  `dev' is this driver's softc (presumably registered with
 * bio elsewhere in the file; the registration is not visible here).
 */
int
ips_ioctl(struct device *dev, u_long cmd, caddr_t addr)
{
	struct ips_softc *sc = (struct ips_softc *)dev;

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl: cmd %lu\n",
	    sc->sc_dev.dv_xname, cmd));

	switch (cmd) {
	case BIOCINQ:
		return (ips_ioctl_inq(sc, (struct bioc_inq *)addr));
	case BIOCVOL:
		return (ips_ioctl_vol(sc, (struct bioc_vol *)addr));
	case BIOCDISK:
		return (ips_ioctl_disk(sc, (struct bioc_disk *)addr));
	case BIOCSETSTATE:
		return (ips_ioctl_setstate(sc, (struct bioc_setstate *)addr));
	default:
		return (ENOTTY);
	}
}
1077 
1078 int
1079 ips_ioctl_inq(struct ips_softc *sc, struct bioc_inq *bi)
1080 {
1081 	struct ips_conf *conf = &sc->sc_info->conf;
1082 	int i;
1083 
1084 	strlcpy(bi->bi_dev, sc->sc_dev.dv_xname, sizeof(bi->bi_dev));
1085 	bi->bi_novol = sc->sc_nunits;
1086 	for (i = 0, bi->bi_nodisk = 0; i < sc->sc_nunits; i++)
1087 		bi->bi_nodisk += conf->ld[i].chunkcnt;
1088 
1089 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_inq: novol %d, nodisk %d\n",
1090 	    bi->bi_dev, bi->bi_novol, bi->bi_nodisk));
1091 
1092 	return (0);
1093 }
1094 
/*
 * BIOCVOL: report status, size, RAID level and member disk count of
 * one logical volume.  The controller configuration is re-read first
 * so the reported state is current.
 */
int
ips_ioctl_vol(struct ips_softc *sc, struct bioc_vol *bv)
{
	struct ips_driveinfo *di = &sc->sc_info->drive;
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_rblstat *rblstat = &sc->sc_info->rblstat;
	struct ips_ld *ld;
	int vid = bv->bv_volid;
	struct device *dv;
	int error, rebuild = 0;
	u_int32_t total = 0, done = 0;

	if (vid >= sc->sc_nunits)
		return (EINVAL);
	if ((error = ips_getconf(sc, 0)))
		return (error);
	ld = &conf->ld[vid];

	/* Map the controller's logical-drive state to a bio(4) status */
	switch (ld->state) {
	case IPS_DS_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case IPS_DS_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		rebuild++;	/* check rebuild progress below */
		break;
	case IPS_DS_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	default:
		bv->bv_status = BIOC_SVINVALID;
	}

	/* For degraded volumes report rebuild progress, if any */
	if (rebuild && ips_getrblstat(sc, 0) == 0) {
		total = letoh32(rblstat->ld[vid].total);
		done = total - letoh32(rblstat->ld[vid].remain);
		if (total && total > done) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = 100 * done / total;
		}
	}

	bv->bv_size = (uint64_t)letoh32(ld->size) * IPS_SECSZ;
	bv->bv_level = di->drive[vid].raid;
	bv->bv_nodisk = ld->chunkcnt;

	/* Associate all unused and spare drives with first volume */
	if (vid == 0) {
		struct ips_dev *dev;
		int chan, target;

		for (chan = 0; chan < IPS_MAXCHANS; chan++)
			for (target = 0; target < IPS_MAXTARGETS; target++) {
				/*
				 * Count direct-access devices that are
				 * present but not members of any array;
				 * ips_ioctl_disk() walks them in the
				 * same order.
				 */
				dev = &conf->dev[chan][target];
				if (dev->state && !(dev->state &
				    IPS_DVS_MEMBER) &&
				    (dev->params & SID_TYPE) == T_DIRECT)
					bv->bv_nodisk++;
			}
	}

	/* Name of the SCSI device attached to this unit on our bus */
	dv = scsi_get_link(sc->sc_scsibus, vid, 0)->device_softc;
	strlcpy(bv->bv_dev, dv->dv_xname, sizeof(bv->bv_dev));
	strlcpy(bv->bv_vendor, "IBM", sizeof(bv->bv_vendor));

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_vol: vid %d, state 0x%02x, "
	    "total %u, done %u, size %llu, level %d, nodisk %d, dev %s\n",
	    sc->sc_dev.dv_xname, vid, ld->state, total, done, bv->bv_size,
	    bv->bv_level, bv->bv_nodisk, bv->bv_dev));

	return (0);
}
1167 
/*
 * BIOCDISK: report one member disk of a volume.  Disk ids beyond the
 * volume's chunk count refer (for volume 0 only) to unused and spare
 * drives, mirroring the counting done in ips_ioctl_vol().
 */
int
ips_ioctl_disk(struct ips_softc *sc, struct bioc_disk *bd)
{
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_ld *ld;
	struct ips_chunk *chunk;
	struct ips_dev *dev;
	int vid = bd->bd_volid, did = bd->bd_diskid;
	int chan, target, error, i;

	if (vid >= sc->sc_nunits)
		return (EINVAL);
	if ((error = ips_getconf(sc, 0)))
		return (error);
	ld = &conf->ld[vid];

	if (did >= ld->chunkcnt) {
		/* Probably unused or spare drives */
		if (vid != 0)
			return (EINVAL);

		/*
		 * Walk non-member direct-access devices in the same
		 * order ips_ioctl_vol() counted them.  If `did' is not
		 * found the loops leave chan == IPS_MAXCHANS, which the
		 * range check at `out' turns into EINVAL.
		 */
		i = ld->chunkcnt;
		for (chan = 0; chan < IPS_MAXCHANS; chan++)
			for (target = 0; target < IPS_MAXTARGETS; target++) {
				dev = &conf->dev[chan][target];
				if (dev->state && !(dev->state &
				    IPS_DVS_MEMBER) &&
				    (dev->params & SID_TYPE) == T_DIRECT)
					if (i++ == did)
						goto out;
			}
	} else {
		/* Regular member disk: location comes from the chunk */
		chunk = &ld->chunk[did];
		chan = chunk->channel;
		target = chunk->target;
	}

out:
	if (chan >= IPS_MAXCHANS || target >= IPS_MAXTARGETS)
		return (EINVAL);
	dev = &conf->dev[chan][target];

	bd->bd_channel = chan;
	bd->bd_target = target;
	bd->bd_lun = 0;
	bd->bd_size = (uint64_t)letoh32(dev->seccnt) * IPS_SECSZ;

	bzero(bd->bd_vendor, sizeof(bd->bd_vendor));
	memcpy(bd->bd_vendor, dev->devid, MIN(sizeof(bd->bd_vendor),
	    sizeof(dev->devid)));
	strlcpy(bd->bd_procdev, sc->sc_pt[chan].pt_procdev,
	    sizeof(bd->bd_procdev));

	/*
	 * Collapse the device state bits into a single bio(4) status;
	 * later checks override earlier ones (rebuild wins over spare,
	 * spare over online, etc.).
	 */
	if (dev->state & IPS_DVS_READY) {
		bd->bd_status = BIOC_SDUNUSED;
		if (dev->state & IPS_DVS_MEMBER)
			bd->bd_status = BIOC_SDONLINE;
		if (dev->state & IPS_DVS_SPARE)
			bd->bd_status = BIOC_SDHOTSPARE;
		if (dev->state & IPS_DVS_REBUILD)
			bd->bd_status = BIOC_SDREBUILD;
	} else {
		bd->bd_status = BIOC_SDOFFLINE;
	}

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_disk: vid %d, did %d, channel %d, "
	    "target %d, size %llu, state 0x%02x\n", sc->sc_dev.dv_xname,
	    vid, did, bd->bd_channel, bd->bd_target, bd->bd_size, dev->state));

	return (0);
}
1239 
1240 int
1241 ips_ioctl_setstate(struct ips_softc *sc, struct bioc_setstate *bs)
1242 {
1243 	struct ips_conf *conf = &sc->sc_info->conf;
1244 	struct ips_dev *dev;
1245 	int state, error;
1246 
1247 	if (bs->bs_channel >= IPS_MAXCHANS || bs->bs_target >= IPS_MAXTARGETS)
1248 		return (EINVAL);
1249 	if ((error = ips_getconf(sc, 0)))
1250 		return (error);
1251 	dev = &conf->dev[bs->bs_channel][bs->bs_target];
1252 	state = dev->state;
1253 
1254 	switch (bs->bs_status) {
1255 	case BIOC_SSONLINE:
1256 		state |= IPS_DVS_READY;
1257 		break;
1258 	case BIOC_SSOFFLINE:
1259 		state &= ~IPS_DVS_READY;
1260 		break;
1261 	case BIOC_SSHOTSPARE:
1262 		state |= IPS_DVS_SPARE;
1263 		break;
1264 	case BIOC_SSREBUILD:
1265 		return (ips_rebuild(sc, bs->bs_channel, bs->bs_target,
1266 		    bs->bs_channel, bs->bs_target, 0));
1267 	default:
1268 		return (EINVAL);
1269 	}
1270 
1271 	return (ips_setstate(sc, bs->bs_channel, bs->bs_target, state, 0));
1272 }
1273 #endif	/* NBIO > 0 */
1274 
1275 #ifndef SMALL_KERNEL
1276 void
1277 ips_sensors(void *arg)
1278 {
1279 	struct ips_softc *sc = arg;
1280 	struct ips_conf *conf = &sc->sc_info->conf;
1281 	struct ips_ld *ld;
1282 	int i;
1283 
1284 	/* ips_sensors() runs from work queue thus allowed to sleep */
1285 	if (ips_getconf(sc, 0)) {
1286 		DPRINTF(IPS_D_ERR, ("%s: ips_sensors: ips_getconf failed\n",
1287 		    sc->sc_dev.dv_xname));
1288 
1289 		for (i = 0; i < sc->sc_nunits; i++) {
1290 			sc->sc_sensors[i].value = 0;
1291 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1292 		}
1293 		return;
1294 	}
1295 
1296 	DPRINTF(IPS_D_INFO, ("%s: ips_sensors:", sc->sc_dev.dv_xname));
1297 	for (i = 0; i < sc->sc_nunits; i++) {
1298 		ld = &conf->ld[i];
1299 		DPRINTF(IPS_D_INFO, (" ld%d.state 0x%02x", i, ld->state));
1300 		switch (ld->state) {
1301 		case IPS_DS_ONLINE:
1302 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
1303 			sc->sc_sensors[i].status = SENSOR_S_OK;
1304 			break;
1305 		case IPS_DS_DEGRADED:
1306 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
1307 			sc->sc_sensors[i].status = SENSOR_S_WARN;
1308 			break;
1309 		case IPS_DS_OFFLINE:
1310 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
1311 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
1312 			break;
1313 		default:
1314 			sc->sc_sensors[i].value = 0;
1315 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1316 		}
1317 	}
1318 	DPRINTF(IPS_D_INFO, ("\n"));
1319 }
1320 #endif	/* !SMALL_KERNEL */
1321 
1322 int
1323 ips_load_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
1324 {
1325 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1326 	struct ips_cmd *cmd = &cmdb->cmd;
1327 	struct ips_sg *sg = cmdb->sg;
1328 	int nsegs, i;
1329 
1330 	if (xs->datalen == 0)
1331 		return (0);
1332 
1333 	/* Map data buffer into DMA segments */
1334 	if (bus_dmamap_load(sc->sc_dmat, ccb->c_dmam, xs->data, xs->datalen,
1335 	    NULL, (xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : 0)))
1336 		return (1);
1337 	bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,ccb->c_dmam->dm_mapsize,
1338 	    xs->flags & SCSI_DATA_IN ? BUS_DMASYNC_PREREAD :
1339 	    BUS_DMASYNC_PREWRITE);
1340 
1341 	if ((nsegs = ccb->c_dmam->dm_nsegs) > IPS_MAXSGS)
1342 		return (1);
1343 
1344 	if (nsegs > 1) {
1345 		cmd->sgcnt = nsegs;
1346 		cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb,
1347 		    sg));
1348 
1349 		/* Fill in scatter-gather array */
1350 		for (i = 0; i < nsegs; i++) {
1351 			sg[i].addr = htole32(ccb->c_dmam->dm_segs[i].ds_addr);
1352 			sg[i].size = htole32(ccb->c_dmam->dm_segs[i].ds_len);
1353 		}
1354 	} else {
1355 		cmd->sgcnt = 0;
1356 		cmd->sgaddr = htole32(ccb->c_dmam->dm_segs[0].ds_addr);
1357 	}
1358 
1359 	return (0);
1360 }
1361 
1362 void
1363 ips_start_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
1364 {
1365 	ccb->c_flags = xs->flags;
1366 	ccb->c_xfer = xs;
1367 	int ispoll = xs->flags & SCSI_POLL;
1368 
1369 	if (!ispoll) {
1370 		timeout_set(&xs->stimeout, ips_timeout, ccb);
1371 		timeout_add_msec(&xs->stimeout, xs->timeout);
1372 	}
1373 
1374 	/*
1375 	 * Return value not used here because ips_cmd() must complete
1376 	 * scsi_xfer on any failure and SCSI layer will handle possible
1377 	 * errors.
1378 	 */
1379 	ips_cmd(sc, ccb);
1380 }
1381 
/*
 * Post a command to the controller.  SCSI_POLL commands are awaited
 * synchronously via ips_poll(); others complete from the interrupt
 * handler through ips_done().  Returns the command's error for
 * polled commands, 0 otherwise.
 */
int
ips_cmd(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmd *cmd = ccb->c_cmdbva;
	int s, error = 0;

	DPRINTF(IPS_D_XFER, ("%s: ips_cmd: id 0x%02x, flags 0x%x, xs %p, "
	    "code 0x%02x, drive %d, sgcnt %d, lba %d, sgaddr 0x%08x, "
	    "seccnt %d\n", sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags,
	    ccb->c_xfer, cmd->code, cmd->drive, cmd->sgcnt, letoh32(cmd->lba),
	    letoh32(cmd->sgaddr), letoh16(cmd->seccnt)));

	cmd->id = ccb->c_id;

	/* Post command to controller and optionally wait for completion */
	s = splbio();
	ips_exec(sc, ccb);
	ccb->c_state = IPS_CCB_QUEUED;
	if (ccb->c_flags & SCSI_POLL)
		error = ips_poll(sc, ccb);
	splx(s);

	return (error);
}
1406 
/*
 * Wait for a queued command to finish.  SCSI_NOSLEEP commands
 * busy-wait (up to ~1s) driving ips_intr() by hand; others tsleep
 * until ips_intr() wakes them or the transfer timeout elapses.
 * If the command never completed, a timeout status is faked before
 * completing it.  Must be called at splbio; returns the command's
 * error code.
 */
int
ips_poll(struct ips_softc *sc, struct ips_ccb *ccb)
{
	int error, msecs, usecs;

	splassert(IPL_BIO);

	if (ccb->c_flags & SCSI_NOSLEEP) {
		/* busy-wait */
		DPRINTF(IPS_D_XFER, ("%s: ips_poll: busy-wait\n",
		    sc->sc_dev.dv_xname));

		for (usecs = 1000000; usecs > 0; usecs -= 100) {
			delay(100);
			ips_intr(sc);
			if (ccb->c_state == IPS_CCB_DONE)
				break;
		}
	} else {
		/* sleep */
		msecs = ccb->c_xfer ? ccb->c_xfer->timeout : IPS_TIMEOUT;

		DPRINTF(IPS_D_XFER, ("%s: ips_poll: sleep %d ms\n",
		    sc->sc_dev.dv_xname, msecs));
		tsleep_nsec(ccb, PRIBIO + 1, "ipscmd", MSEC_TO_NSEC(msecs));
	}
	DPRINTF(IPS_D_XFER, ("%s: ips_poll: state %d\n", sc->sc_dev.dv_xname,
	    ccb->c_state));

	if (ccb->c_state != IPS_CCB_DONE)
		/*
		 * Command never completed. Fake hardware status byte
		 * to indicate timeout.
		 */
		ccb->c_stat = IPS_STAT_TIMO;

	ips_done(sc, ccb);
	error = ccb->c_error;

	return (error);
}
1448 
/*
 * Common completion path: derive the errno from the hardware status
 * and invoke the per-command completion callback.  Must be called at
 * splbio.
 */
void
ips_done(struct ips_softc *sc, struct ips_ccb *ccb)
{
	splassert(IPL_BIO);

	DPRINTF(IPS_D_XFER, ("%s: ips_done: id 0x%02x, flags 0x%x, xs %p\n",
	    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags, ccb->c_xfer));

	ccb->c_error = ips_error(sc, ccb);
	ccb->c_done(sc, ccb);
}
1460 
1461 void
1462 ips_done_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1463 {
1464 	struct scsi_xfer *xs = ccb->c_xfer;
1465 
1466 	if (!(xs->flags & SCSI_POLL))
1467 		timeout_del(&xs->stimeout);
1468 
1469 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1470 		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
1471 		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
1472 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1473 		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
1474 	}
1475 
1476 	xs->resid = 0;
1477 	xs->error = ips_error_xs(sc, ccb);
1478 	scsi_done(xs);
1479 }
1480 
/*
 * Completion callback for pass-through (DCDB) commands: compute the
 * residual from the DCDB's reported data length, propagate sense
 * data, and hide physical disks from the SCSI layer by failing
 * INQUIRY responses for direct-access devices.
 */
void
ips_done_pt(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->c_xfer;
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	int done = letoh16(dcdb->datalen);

	if (!(xs->flags & SCSI_POLL))
		timeout_del(&xs->stimeout);

	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
	}

	/* The DCDB reports how much data was actually transferred */
	if (done && done < xs->datalen)
		xs->resid = xs->datalen - done;
	else
		xs->resid = 0;
	xs->error = ips_error_xs(sc, ccb);
	xs->status = dcdb->status;

	if (xs->error == XS_SENSE)
		memcpy(&xs->sense, dcdb->sense, MIN(sizeof(xs->sense),
		    sizeof(dcdb->sense)));

	if (xs->cmd.opcode == INQUIRY && xs->error == XS_NOERROR) {
		int type = ((struct scsi_inquiry_data *)xs->data)->device &
		    SID_TYPE;

		if (type == T_DIRECT)
			/* mask physical drives */
			xs->error = XS_DRIVER_STUFFUP;
	}

	scsi_done(xs);
}
1521 
1522 void
1523 ips_done_mgmt(struct ips_softc *sc, struct ips_ccb *ccb)
1524 {
1525 	if (ccb->c_flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
1526 		bus_dmamap_sync(sc->sc_dmat, sc->sc_infom.dm_map, 0,
1527 		    sc->sc_infom.dm_map->dm_mapsize,
1528 		    ccb->c_flags & SCSI_DATA_IN ? BUS_DMASYNC_POSTREAD :
1529 		    BUS_DMASYNC_POSTWRITE);
1530 	scsi_io_put(&sc->sc_iopool, ccb);
1531 }
1532 
/*
 * Translate the hardware status of a completed command into an errno
 * value.  Also dumps command and DCDB details when IPS_DEBUG error
 * tracing is enabled.  Returns 0 for success and recovered errors.
 */
int
ips_error(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_cmd *cmd = &cmdb->cmd;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	struct scsi_xfer *xs = ccb->c_xfer;
	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);

	if (gsc == IPS_STAT_OK)
		return (0);

	DPRINTF(IPS_D_ERR, ("%s: ips_error: stat 0x%02x, estat 0x%02x, "
	    "cmd code 0x%02x, drive %d, sgcnt %d, lba %u, seccnt %d",
	    sc->sc_dev.dv_xname, ccb->c_stat, ccb->c_estat, cmd->code,
	    cmd->drive, cmd->sgcnt, letoh32(cmd->lba), letoh16(cmd->seccnt)));
	if (cmd->code == IPS_CMD_DCDB || cmd->code == IPS_CMD_DCDB_SG) {
		int i;

		DPRINTF(IPS_D_ERR, (", dcdb device 0x%02x, attr 0x%02x, "
		    "datalen %d, sgcnt %d, status 0x%02x",
		    dcdb->device, dcdb->attr, letoh16(dcdb->datalen),
		    dcdb->sgcnt, dcdb->status));

		DPRINTF(IPS_D_ERR, (", cdb"));
		for (i = 0; i < dcdb->cdblen; i++)
			DPRINTF(IPS_D_ERR, (" %x", dcdb->cdb[i]));
		if (ccb->c_estat == IPS_ESTAT_CKCOND) {
			DPRINTF(IPS_D_ERR, (", sense"));
			for (i = 0; i < dcdb->senselen; i++)
				DPRINTF(IPS_D_ERR, (" %x", dcdb->sense[i]));
		}
	}
	DPRINTF(IPS_D_ERR, ("\n"));

	switch (gsc) {
	case IPS_STAT_RECOV:
		return (0);
	case IPS_STAT_INVOP:
	case IPS_STAT_INVCMD:
	case IPS_STAT_INVPARM:
		return (EINVAL);
	case IPS_STAT_BUSY:
		return (EBUSY);
	case IPS_STAT_TIMO:
		return (ETIMEDOUT);
	case IPS_STAT_PDRVERR:
		/* Physical drive error: decode the extended status */
		switch (ccb->c_estat) {
		case IPS_ESTAT_SELTIMO:
			return (ENODEV);
		case IPS_ESTAT_OURUN:
			if (xs && letoh16(dcdb->datalen) < xs->datalen)
				/* underrun */
				return (0);
			break;
		case IPS_ESTAT_RECOV:
			return (0);
		}
		break;
	}

	return (EIO);
}
1596 
/*
 * Translate the hardware status of a completed command into a SCSI
 * layer XS_* error code for the attached scsi_xfer.
 */
int
ips_error_xs(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	struct scsi_xfer *xs = ccb->c_xfer;
	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);

	/* Map hardware error codes to SCSI ones */
	switch (gsc) {
	case IPS_STAT_OK:
	case IPS_STAT_RECOV:
		return (XS_NOERROR);
	case IPS_STAT_BUSY:
		return (XS_BUSY);
	case IPS_STAT_TIMO:
		return (XS_TIMEOUT);
	case IPS_STAT_PDRVERR:
		/* Physical drive error: decode the extended status */
		switch (ccb->c_estat) {
		case IPS_ESTAT_SELTIMO:
			return (XS_SELTIMEOUT);
		case IPS_ESTAT_OURUN:
			if (xs && letoh16(dcdb->datalen) < xs->datalen)
				/* underrun */
				return (XS_NOERROR);
			break;
		case IPS_ESTAT_HOSTRST:
		case IPS_ESTAT_DEVRST:
			return (XS_RESET);
		case IPS_ESTAT_RECOV:
			return (XS_NOERROR);
		case IPS_ESTAT_CKCOND:
			return (XS_SENSE);
		}
		break;
	}

	return (XS_DRIVER_STUFFUP);
}
1636 
/*
 * Interrupt handler: drain the controller's status queue and finish
 * each referenced command.  Polled (SCSI_POLL) commands are handed
 * back to the sleeping ips_poll() via wakeup(); all others complete
 * through ips_done().  Returns 1 if the interrupt was ours, 0 if not.
 */
int
ips_intr(void *arg)
{
	struct ips_softc *sc = arg;
	struct ips_ccb *ccb;
	u_int32_t status;
	int id;

	DPRINTF(IPS_D_XFER, ("%s: ips_intr", sc->sc_dev.dv_xname));
	if (!ips_isintr(sc)) {
		DPRINTF(IPS_D_XFER, (": not ours\n"));
		return (0);
	}
	DPRINTF(IPS_D_XFER, ("\n"));

	/* Process completed commands; 0xffffffff means queue empty */
	while ((status = ips_status(sc)) != 0xffffffff) {
		DPRINTF(IPS_D_XFER, ("%s: ips_intr: status 0x%08x\n",
		    sc->sc_dev.dv_xname, status));

		id = IPS_STAT_ID(status);
		if (id >= sc->sc_nccbs) {
			DPRINTF(IPS_D_ERR, ("%s: ips_intr: invalid id %d\n",
			    sc->sc_dev.dv_xname, id));
			continue;
		}

		ccb = &sc->sc_ccb[id];
		if (ccb->c_state != IPS_CCB_QUEUED) {
			DPRINTF(IPS_D_ERR, ("%s: ips_intr: cmd 0x%02x not "
			    "queued, state %d, status 0x%08x\n",
			    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_state,
			    status));
			continue;
		}

		ccb->c_state = IPS_CCB_DONE;
		ccb->c_stat = IPS_STAT_BASIC(status);
		ccb->c_estat = IPS_STAT_EXT(status);

		/* Polled commands are completed by ips_poll() itself */
		if (ccb->c_flags & SCSI_POLL) {
			wakeup(ccb);
		} else {
			ips_done(sc, ccb);
		}
	}

	return (1);
}
1686 
1687 void
1688 ips_timeout(void *arg)
1689 {
1690 	struct ips_ccb *ccb = arg;
1691 	struct ips_softc *sc = ccb->c_sc;
1692 	struct scsi_xfer *xs = ccb->c_xfer;
1693 	int s;
1694 
1695 	s = splbio();
1696 	if (xs)
1697 		sc_print_addr(xs->sc_link);
1698 	else
1699 		printf("%s: ", sc->sc_dev.dv_xname);
1700 	printf("timeout\n");
1701 
1702 	/*
1703 	 * Command never completed. Fake hardware status byte
1704 	 * to indicate timeout.
1705 	 * XXX: need to remove command from controller.
1706 	 */
1707 	ccb->c_stat = IPS_STAT_TIMO;
1708 	ips_done(sc, ccb);
1709 	splx(s);
1710 }
1711 
/*
 * Issue a polled GETADAPTERINFO command; the controller DMAs the
 * result into the `adapter' member of the shared info buffer.
 * Returns non-zero on failure.
 */
int
ips_getadapterinfo(struct ips_softc *sc, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_GETADAPTERINFO;
	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
	    adapter));

	return (ips_cmd(sc, ccb));
}
1732 
/*
 * Issue a polled GETDRIVEINFO command; the controller DMAs the
 * result into the `drive' member of the shared info buffer.
 * Returns non-zero on failure.
 */
int
ips_getdriveinfo(struct ips_softc *sc, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_GETDRIVEINFO;
	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
	    drive));

	return (ips_cmd(sc, ccb));
}
1753 
/*
 * Issue a polled READCONF command; the controller DMAs the current
 * configuration into the `conf' member of the shared info buffer.
 * Returns non-zero on failure.
 */
int
ips_getconf(struct ips_softc *sc, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_READCONF;
	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
	    conf));

	return (ips_cmd(sc, ccb));
}
1774 
/*
 * Issue a polled RWNVRAM command reading NVRAM page 5 into the `pg5'
 * member of the shared info buffer.  The `drive' field selects the
 * page number here.  Returns non-zero on failure.
 */
int
ips_getpg5(struct ips_softc *sc, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_RWNVRAM;
	cmd->drive = 5;		/* page number, not a drive */
	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
	    pg5));

	return (ips_cmd(sc, ccb));
}
1796 
1797 #if NBIO > 0
/*
 * Issue a polled REBUILDSTATUS command; the controller DMAs rebuild
 * progress data into the `rblstat' member of the shared info buffer.
 * Returns non-zero on failure.
 */
int
ips_getrblstat(struct ips_softc *sc, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_REBUILDSTATUS;
	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
	    rblstat));

	return (ips_cmd(sc, ccb));
}
1818 
/*
 * Set the state of the physical device at chan/target.  The generic
 * command fields are overloaded to carry the parameters: `drive' is
 * the channel, `sgcnt' the target and `seg4g' the new state —
 * apparently the controller's SETSTATE command format (TODO: confirm
 * against the firmware documentation).  Returns non-zero on failure.
 */
int
ips_setstate(struct ips_softc *sc, int chan, int target, int state, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = SCSI_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_SETSTATE;
	cmd->drive = chan;	/* field reused: channel */
	cmd->sgcnt = target;	/* field reused: target */
	cmd->seg4g = state;	/* field reused: new state */

	return (ips_cmd(sc, ccb));
}
1840 
/*
 * Start rebuilding the device at chan/target onto nchan/ntarget.
 * Command fields are overloaded: `drive' = source channel, `sgcnt' =
 * source target, `seccnt' = (new target << 8) | new channel.
 * Returns non-zero on failure.
 */
int
ips_rebuild(struct ips_softc *sc, int chan, int target, int nchan,
    int ntarget, int flags)
{
	struct ips_ccb *ccb;
	struct ips_cmd *cmd;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return (1);

	ccb->c_flags = SCSI_POLL | flags;
	ccb->c_done = ips_done_mgmt;

	cmd = ccb->c_cmdbva;
	cmd->code = IPS_CMD_REBUILD;
	cmd->drive = chan;	/* field reused: source channel */
	cmd->sgcnt = target;	/* field reused: source target */
	cmd->seccnt = htole16(ntarget << 8 | nchan);

	return (ips_cmd(sc, ccb));
}
1863 #endif	/* NBIO > 0 */
1864 
/*
 * Post a command on copperhead-class adapters: busy-wait (up to 100
 * polls of 100us each) for the semaphore bit to clear, then write
 * the command's physical address followed by the start bit.  If the
 * semaphore never clears the command is silently dropped (only a
 * console message); the caller's timeout will catch it.
 */
void
ips_copperhead_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	u_int32_t reg;
	int timeout;

	/* Note: post-decrement leaves timeout == -1 on exhaustion */
	for (timeout = 100; timeout-- > 0; delay(100)) {
		reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC);
		if ((reg & IPS_REG_CCC_SEM) == 0)
			break;
	}
	if (timeout < 0) {
		printf("%s: semaphore timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCSA, ccb->c_cmdbpa);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC,
	    IPS_REG_CCC_START);
}
1885 
/*
 * Enable interrupts on copperhead-class adapters by writing the
 * enable bit to the host interrupt status register.
 */
void
ips_copperhead_intren(struct ips_softc *sc)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, IPS_REG_HIS_EN);
}
1891 
1892 int
1893 ips_copperhead_isintr(struct ips_softc *sc)
1894 {
1895 	u_int8_t reg;
1896 
1897 	reg = bus_space_read_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS);
1898 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, reg);
1899 	if (reg != 0xff && (reg & IPS_REG_HIS_SCE))
1900 		return (1);
1901 
1902 	return (0);
1903 }
1904 
/*
 * Fetch the next completion status from the copperhead status queue.
 * Advances the software tail pointer (wrapping at IPS_SQSZ), reads
 * the status word from the queue buffer and mirrors the new tail to
 * the controller.  Returns 0xffffffff when the queue is empty
 * (advanced tail catches up with the hardware head).
 */
u_int32_t
ips_copperhead_status(struct ips_softc *sc)
{
	u_int32_t sqhead, sqtail, status;

	sqhead = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH);
	DPRINTF(IPS_D_XFER, ("%s: sqhead 0x%08x, sqtail 0x%08lx\n",
	    sc->sc_dev.dv_xname, sqhead, sc->sc_sqtail));

	/* Advance tail by one status word, wrapping at end of queue */
	sqtail = sc->sc_sqtail + sizeof(u_int32_t);
	if (sqtail == sc->sc_sqm.dm_paddr + IPS_SQSZ)
		sqtail = sc->sc_sqm.dm_paddr;
	if (sqtail == sqhead)
		return (0xffffffff);

	sc->sc_sqtail = sqtail;
	if (++sc->sc_sqidx == IPS_MAXCMDS)
		sc->sc_sqidx = 0;
	status = letoh32(sc->sc_sqbuf[sc->sc_sqidx]);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT, sqtail);

	return (status);
}
1928 
/*
 * Post a command on morpheus-class adapters by writing its physical
 * address to the inbound queue port.
 */
void
ips_morpheus_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_IQP, ccb->c_cmdbpa);
}
1934 
1935 void
1936 ips_morpheus_intren(struct ips_softc *sc)
1937 {
1938 	u_int32_t reg;
1939 
1940 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM);
1941 	reg &= ~IPS_REG_OIM_DS;
1942 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM, reg);
1943 }
1944 
/*
 * Check whether a morpheus-class adapter has a pending interrupt by
 * testing the outbound interrupt status register.
 */
int
ips_morpheus_isintr(struct ips_softc *sc)
{
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIS) &
	    IPS_REG_OIS_PEND);
}
1951 
/*
 * Read the next completion status from the morpheus outbound queue
 * port.  The caller (ips_intr()) treats 0xffffffff as "queue empty",
 * so presumably the register reads as all-ones in that case.
 */
u_int32_t
ips_morpheus_status(struct ips_softc *sc)
{
	u_int32_t reg;

	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OQP);
	DPRINTF(IPS_D_XFER, ("%s: status 0x%08x\n", sc->sc_dev.dv_xname, reg));

	return (reg);
}
1962 
/*
 * Allocate an array of n command control blocks, assign each a slice
 * of the preallocated command buffer DMA area and create its data
 * transfer DMA map.  Returns NULL on failure with everything created
 * so far torn down.
 */
struct ips_ccb *
ips_ccb_alloc(struct ips_softc *sc, int n)
{
	struct ips_ccb *ccb;
	int i;

	if ((ccb = mallocarray(n, sizeof(*ccb), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (NULL);

	for (i = 0; i < n; i++) {
		ccb[i].c_sc = sc;
		ccb[i].c_id = i;
		/* Each ccb owns one ips_cmdb slot in the shared buffer */
		ccb[i].c_cmdbva = (char *)sc->sc_cmdbm.dm_vaddr +
		    i * sizeof(struct ips_cmdb);
		ccb[i].c_cmdbpa = sc->sc_cmdbm.dm_paddr +
		    i * sizeof(struct ips_cmdb);
		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, IPS_MAXSGS,
		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb[i].c_dmam))
			goto fail;
	}

	return (ccb);
fail:
	/* Destroy the maps created so far (indices 0 .. i-1) */
	for (; i > 0; i--)
		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
	free(ccb, M_DEVBUF, n * sizeof(*ccb));
	return (NULL);
}
1993 
1994 void
1995 ips_ccb_free(struct ips_softc *sc, struct ips_ccb *ccb, int n)
1996 {
1997 	int i;
1998 
1999 	for (i = 0; i < n; i++)
2000 		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
2001 	free(ccb, M_DEVBUF, n * sizeof(*ccb));
2002 }
2003 
/*
 * scsi_iopool get hook: take a ccb off the free list under the ccb
 * mutex and reset it, including its command buffer.  Returns NULL
 * when the list is empty.
 */
void *
ips_ccb_get(void *xsc)
{
	struct ips_softc *sc = xsc;
	struct ips_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	if ((ccb = SLIST_FIRST(&sc->sc_ccbq_free)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->sc_ccbq_free, c_link);
		ccb->c_flags = 0;
		ccb->c_xfer = NULL;
		bzero(ccb->c_cmdbva, sizeof(struct ips_cmdb));
	}
	mtx_leave(&sc->sc_ccb_mtx);

	return (ccb);
}
2021 
/*
 * scsi_iopool put hook: mark the ccb free and return it to the free
 * list under the ccb mutex.
 */
void
ips_ccb_put(void *xsc, void *xccb)
{
	struct ips_softc *sc = xsc;
	struct ips_ccb *ccb = xccb;

	ccb->c_state = IPS_CCB_FREE;
	mtx_enter(&sc->sc_ccb_mtx);
	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, ccb, c_link);
	mtx_leave(&sc->sc_ccb_mtx);
}
2033 
/*
 * Allocate, map and load a single-segment DMA-safe memory area of
 * the given size.  Returns 0 on success, 1 on failure (with all
 * partially acquired resources released in reverse order).
 */
int
ips_dmamem_alloc(struct dmamem *dm, bus_dma_tag_t tag, bus_size_t size)
{
	int nsegs;

	dm->dm_tag = tag;
	dm->dm_size = size;

	if (bus_dmamap_create(tag, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map))
		return (1);
	if (bus_dmamem_alloc(tag, size, 0, 0, &dm->dm_seg, 1, &nsegs,
	    BUS_DMA_NOWAIT))
		goto fail1;
	if (bus_dmamem_map(tag, &dm->dm_seg, 1, size, (caddr_t *)&dm->dm_vaddr,
	    BUS_DMA_NOWAIT))
		goto fail2;
	if (bus_dmamap_load(tag, dm->dm_map, dm->dm_vaddr, size, NULL,
	    BUS_DMA_NOWAIT))
		goto fail3;

	return (0);

	/* Unwind in reverse order of acquisition */
fail3:
	bus_dmamem_unmap(tag, dm->dm_vaddr, size);
fail2:
	bus_dmamem_free(tag, &dm->dm_seg, 1);
fail1:
	bus_dmamap_destroy(tag, dm->dm_map);
	return (1);
}
2065 
/*
 * Release a DMA memory area acquired with ips_dmamem_alloc(), in
 * reverse order of acquisition: unload, unmap, free, destroy map.
 */
void
ips_dmamem_free(struct dmamem *dm)
{
	bus_dmamap_unload(dm->dm_tag, dm->dm_map);
	bus_dmamem_unmap(dm->dm_tag, dm->dm_vaddr, dm->dm_size);
	bus_dmamem_free(dm->dm_tag, &dm->dm_seg, 1);
	bus_dmamap_destroy(dm->dm_tag, dm->dm_map);
}
2074