xref: /openbsd-src/sys/dev/pci/ips.c (revision 91f110e064cd7c194e59e019b83bb7496c1c84d4)
1 /*	$OpenBSD: ips.c,v 1.108 2011/07/17 22:46:48 matthew Exp $	*/
2 
3 /*
4  * Copyright (c) 2006, 2007, 2009 Alexander Yurchenko <grange@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * IBM (Adaptec) ServeRAID controllers driver.
21  */
22 
23 #include "bio.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/buf.h>
28 #include <sys/device.h>
29 #include <sys/ioctl.h>
30 #include <sys/kernel.h>
31 #include <sys/malloc.h>
32 #include <sys/sensors.h>
33 #include <sys/timeout.h>
34 #include <sys/queue.h>
35 
36 #include <machine/bus.h>
37 
38 #include <scsi/scsi_all.h>
39 #include <scsi/scsi_disk.h>
40 #include <scsi/scsiconf.h>
41 
42 #include <dev/biovar.h>
43 
44 #include <dev/pci/pcidevs.h>
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 
48 /* Debug levels */
49 #define IPS_D_ERR	0x0001	/* errors */
50 #define IPS_D_INFO	0x0002	/* information */
51 #define IPS_D_XFER	0x0004	/* transfers */
52 
53 #ifdef IPS_DEBUG
54 #define DPRINTF(a, b)	do { if (ips_debug & (a)) printf b; } while (0)
55 int ips_debug = IPS_D_ERR;
56 #else
57 #define DPRINTF(a, b)
58 #endif
59 
60 #define IPS_MAXDRIVES		8
61 #define IPS_MAXCHANS		4
62 #define IPS_MAXTARGETS		16
63 #define IPS_MAXCHUNKS		16
64 #define IPS_MAXCMDS		128
65 
66 #define IPS_MAXFER		(64 * 1024)
67 #define IPS_MAXSGS		16
68 #define IPS_MAXCDB		12
69 
70 #define IPS_SECSZ		512
71 #define IPS_NVRAMPGSZ		128
72 #define IPS_SQSZ		(IPS_MAXCMDS * sizeof(u_int32_t))
73 
74 #define	IPS_TIMEOUT		60000	/* ms */
75 
76 /* Command codes */
77 #define IPS_CMD_READ		0x02
78 #define IPS_CMD_WRITE		0x03
79 #define IPS_CMD_DCDB		0x04
80 #define IPS_CMD_GETADAPTERINFO	0x05
81 #define IPS_CMD_FLUSH		0x0a
82 #define IPS_CMD_REBUILDSTATUS	0x0c
83 #define IPS_CMD_SETSTATE	0x10
84 #define IPS_CMD_REBUILD		0x16
85 #define IPS_CMD_ERRORTABLE	0x17
86 #define IPS_CMD_GETDRIVEINFO	0x19
87 #define IPS_CMD_RESETCHAN	0x1a
88 #define IPS_CMD_DOWNLOAD	0x20
89 #define IPS_CMD_RWBIOSFW	0x22
90 #define IPS_CMD_READCONF	0x38
91 #define IPS_CMD_GETSUBSYS	0x40
92 #define IPS_CMD_CONFIGSYNC	0x58
93 #define IPS_CMD_READ_SG		0x82
94 #define IPS_CMD_WRITE_SG	0x83
95 #define IPS_CMD_DCDB_SG		0x84
96 #define IPS_CMD_EDCDB		0x95
97 #define IPS_CMD_EDCDB_SG	0x96
98 #define IPS_CMD_RWNVRAMPAGE	0xbc
99 #define IPS_CMD_GETVERINFO	0xc6
100 #define IPS_CMD_FFDC		0xd7
101 #define IPS_CMD_SG		0x80
102 #define IPS_CMD_RWNVRAM		0xbc
103 
104 /* DCDB attributes */
105 #define IPS_DCDB_DATAIN		0x01	/* data input */
106 #define IPS_DCDB_DATAOUT	0x02	/* data output */
107 #define IPS_DCDB_XFER64K	0x08	/* 64K transfer */
108 #define IPS_DCDB_TIMO10		0x10	/* 10 secs timeout */
109 #define IPS_DCDB_TIMO60		0x20	/* 60 secs timeout */
110 #define IPS_DCDB_TIMO20M	0x30	/* 20 mins timeout */
111 #define IPS_DCDB_NOAUTOREQSEN	0x40	/* no auto request sense */
112 #define IPS_DCDB_DISCON		0x80	/* disconnect allowed */
113 
114 /* Register definitions */
115 #define IPS_REG_HIS		0x08	/* host interrupt status */
116 #define IPS_REG_HIS_SCE			0x01	/* status channel enqueue */
117 #define IPS_REG_HIS_EN			0x80	/* enable interrupts */
118 #define IPS_REG_CCSA		0x10	/* command channel system address */
119 #define IPS_REG_CCC		0x14	/* command channel control */
120 #define IPS_REG_CCC_SEM			0x0008	/* semaphore */
121 #define IPS_REG_CCC_START		0x101a	/* start command */
122 #define IPS_REG_SQH		0x20	/* status queue head */
123 #define IPS_REG_SQT		0x24	/* status queue tail */
124 #define IPS_REG_SQE		0x28	/* status queue end */
125 #define IPS_REG_SQS		0x2c	/* status queue start */
126 
127 #define IPS_REG_OIS		0x30	/* outbound interrupt status */
128 #define IPS_REG_OIS_PEND		0x0008	/* interrupt is pending */
129 #define IPS_REG_OIM		0x34	/* outbound interrupt mask */
130 #define IPS_REG_OIM_DS			0x0008	/* disable interrupts */
131 #define IPS_REG_IQP		0x40	/* inbound queue port */
132 #define IPS_REG_OQP		0x44	/* outbound queue port */
133 
134 /* Status word fields */
135 #define IPS_STAT_ID(x)		(((x) >> 8) & 0xff)	/* command id */
136 #define IPS_STAT_BASIC(x)	(((x) >> 16) & 0xff)	/* basic status */
137 #define IPS_STAT_EXT(x)		(((x) >> 24) & 0xff)	/* ext status */
138 #define IPS_STAT_GSC(x)		((x) & 0x0f)
139 
140 /* Basic status codes */
141 #define IPS_STAT_OK		0x00	/* success */
142 #define IPS_STAT_RECOV		0x01	/* recovered error */
143 #define IPS_STAT_INVOP		0x03	/* invalid opcode */
144 #define IPS_STAT_INVCMD		0x04	/* invalid command block */
145 #define IPS_STAT_INVPARM	0x05	/* invalid parameters block */
146 #define IPS_STAT_BUSY		0x08	/* busy */
147 #define IPS_STAT_CMPLERR	0x0c	/* completed with error */
148 #define IPS_STAT_LDERR		0x0d	/* logical drive error */
149 #define IPS_STAT_TIMO		0x0e	/* timeout */
150 #define IPS_STAT_PDRVERR	0x0f	/* physical drive error */
151 
152 /* Extended status codes */
153 #define IPS_ESTAT_SELTIMO	0xf0	/* select timeout */
154 #define IPS_ESTAT_OURUN		0xf2	/* over/underrun */
155 #define IPS_ESTAT_HOSTRST	0xf7	/* host reset */
156 #define IPS_ESTAT_DEVRST	0xf8	/* device reset */
157 #define IPS_ESTAT_RECOV		0xfc	/* recovered error */
158 #define IPS_ESTAT_CKCOND	0xff	/* check condition */
159 
160 #define IPS_IOSIZE		128	/* max space size to map */
161 
162 /* Command frame */
163 struct ips_cmd {
164 	u_int8_t	code;	/* opcode (IPS_CMD_*); IPS_CMD_SG bit or'ed in when sgcnt > 0 */
165 	u_int8_t	id;	/* command id echoed back via IPS_STAT_ID() -- presumably set by CCB code outside this view */
166 	u_int8_t	drive;	/* logical drive number */
167 	u_int8_t	sgcnt;	/* scatter-gather element count (0 = direct transfer) */
168 	u_int32_t	lba;	/* start sector, little-endian (htole32 in ips_scsi_cmd) */
169 	u_int32_t	sgaddr;	/* data buffer or S/G list physical address, little-endian */
170 	u_int16_t	seccnt;	/* sector count, little-endian (htole16 in ips_scsi_cmd) */
171 	u_int8_t	seg4g;	/* not referenced in this chunk */
172 	u_int8_t	esg;	/* not referenced in this chunk */
173 	u_int32_t	ccsar;	/* presumably mirrors IPS_REG_CCSA -- usage not visible here */
174 	u_int32_t	cccr;	/* presumably mirrors IPS_REG_CCC -- usage not visible here */
175 };
176 
177 /* Direct CDB (SCSI pass-through) frame */
178 struct ips_dcdb {
179 	u_int8_t	device;	/* (channel << 4) | target, see ips_scsi_pt_cmd */
180 	u_int8_t	attr;	/* IPS_DCDB_* attribute flags */
181 	u_int16_t	datalen;	/* transfer length, little-endian */
182 	u_int32_t	sgaddr;	/* data buffer or S/G list physical address */
183 	u_int8_t	cdblen;	/* valid bytes in cdb[], <= IPS_MAXCDB */
184 	u_int8_t	senselen;	/* sense buffer space offered to the controller */
185 	u_int8_t	sgcnt;	/* S/G element count for this DCDB */
186 	u_int8_t	__reserved1;
187 	u_int8_t	cdb[IPS_MAXCDB];	/* raw SCSI CDB copied from the xfer */
188 	u_int8_t	sense[64];	/* auto request-sense data returned by firmware */
189 	u_int8_t	status;	/* presumably SCSI status byte -- consumed outside this view */
190 	u_int8_t	__reserved2[3];
191 };
192 
193 /* Scatter-gather array element */
194 struct ips_sg {
195 	u_int32_t	addr;	/* segment physical address -- presumably little-endian like other frame fields */
196 	u_int32_t	size;	/* segment length in bytes */
197 };
198 
199 /* Command block */
/*
 * Per-CCB DMA-visible command block: command frame, optional DCDB
 * (addressed via offsetof(struct ips_cmdb, dcdb) in ips_scsi_pt_cmd),
 * and the scatter-gather list.
 */
200 struct ips_cmdb {
201 	struct ips_cmd	cmd;
202 	struct ips_dcdb	dcdb;
203 	struct ips_sg	sg[IPS_MAXSGS];
204 };
205 
206 /* Data frames */
/* Reply to IPS_CMD_GETADAPTERINFO; filled into sc_info->adapter. */
207 struct ips_adapterinfo {
208 	u_int8_t	drivecnt;
209 	u_int8_t	miscflag;
210 	u_int8_t	sltflag;
211 	u_int8_t	bstflag;
212 	u_int8_t	pwrchgcnt;
213 	u_int8_t	wrongaddrcnt;
214 	u_int8_t	unidentcnt;
215 	u_int8_t	nvramdevchgcnt;
216 	u_int8_t	firmware[8];	/* first 7 chars printed in ips_attach */
217 	u_int8_t	bios[8];	/* first 7 chars printed in ips_attach */
218 	u_int32_t	drivesize[IPS_MAXDRIVES];
219 	u_int8_t	cmdcnt;	/* max outstanding commands; sizes the CCB queue in ips_attach */
220 	u_int8_t	maxphysdevs;
221 	u_int16_t	flashrepgmcnt;
222 	u_int8_t	defunctdiskcnt;
223 	u_int8_t	rebuildflag;
224 	u_int8_t	offdrivecnt;
225 	u_int8_t	critdrivecnt;
226 	u_int16_t	confupdcnt;
227 	u_int8_t	blkflag;
228 	u_int8_t	__reserved;
229 	u_int16_t	deaddisk[IPS_MAXCHANS][IPS_MAXTARGETS];
230 };
231 
/* Reply to IPS_CMD_GETDRIVEINFO; drivecnt becomes sc_nunits. */
232 struct ips_driveinfo {
233 	u_int8_t	drivecnt;
234 	u_int8_t	__reserved[3];
235 	struct ips_drive {
236 		u_int8_t	id;
237 		u_int8_t	__reserved;
238 		u_int8_t	raid;	/* RAID level, reported in the faked INQUIRY data */
239 		u_int8_t	state;	/* IPS_DS_* below */
240 #define IPS_DS_FREE	0x00
241 #define IPS_DS_OFFLINE	0x02
242 #define IPS_DS_ONLINE	0x03
243 #define IPS_DS_DEGRADED	0x04
244 #define IPS_DS_SYS	0x06
245 #define IPS_DS_CRS	0x24
246 
247 		u_int32_t	seccnt;	/* drive size in sectors, little-endian (letoh32 at use) */
248 	}		drive[IPS_MAXDRIVES];
249 };
250 
/* Reply to IPS_CMD_READCONF; filled into sc_info->conf. */
251 struct ips_conf {
252 	u_int8_t	ldcnt;	/* logical drive count */
253 	u_int8_t	day;
254 	u_int8_t	month;
255 	u_int8_t	year;
256 	u_int8_t	initid[4];
257 	u_int8_t	hostid[12];
258 	u_int8_t	time[8];
259 	u_int32_t	useropt;
260 	u_int16_t	userfield;
261 	u_int8_t	rebuildrate;
262 	u_int8_t	__reserved1;
263 
264 	struct ips_hw {
265 		u_int8_t	board[8];
266 		u_int8_t	cpu[8];
267 		u_int8_t	nchantype;
268 		u_int8_t	nhostinttype;
269 		u_int8_t	compression;
270 		u_int8_t	nvramtype;
271 		u_int32_t	nvramsize;
272 	}		hw;
273 
274 	struct ips_ld {
275 		u_int16_t	userfield;
276 		u_int8_t	state;	/* compared against IPS_DS_* in the bio ioctls */
277 		u_int8_t	raidcacheparam;
278 		u_int8_t	chunkcnt;	/* physical chunks in this LD; summed in ips_ioctl_inq */
279 		u_int8_t	stripesize;
280 		u_int8_t	params;
281 		u_int8_t	__reserved;
282 		u_int32_t	size;
283 
284 		struct ips_chunk {
285 			u_int8_t	channel;
286 			u_int8_t	target;
287 			u_int16_t	__reserved;
288 			u_int32_t	startsec;
289 			u_int32_t	seccnt;
290 		}		chunk[IPS_MAXCHUNKS];
291 	}		ld[IPS_MAXDRIVES];
292 
293 	struct ips_dev {
294 		u_int8_t	initiator;
295 		u_int8_t	params;	/* low bits hold SCSI device type (masked with SID_TYPE in ips_attach) */
296 		u_int8_t	miscflag;
297 		u_int8_t	state;	/* IPS_DVS_* below; nonzero means device present */
298 #define IPS_DVS_STANDBY	0x01
299 #define IPS_DVS_REBUILD	0x02
300 #define IPS_DVS_SPARE	0x04
301 #define IPS_DVS_MEMBER	0x08
302 #define IPS_DVS_ONLINE	0x80
303 #define IPS_DVS_READY	(IPS_DVS_STANDBY | IPS_DVS_ONLINE)
304 
305 		u_int32_t	seccnt;
306 		u_int8_t	devid[28];
307 	}		dev[IPS_MAXCHANS][IPS_MAXTARGETS];
308 
309 	u_int8_t	reserved[512];
310 };
311 
/* Reply to IPS_CMD_REBUILDSTATUS: per-LD rebuild progress (total/remaining). */
312 struct ips_rblstat {
313 	u_int8_t	__unknown[20];
314 	struct {
315 		u_int8_t	__unknown[4];
316 		u_int32_t	total;	/* sectors to rebuild -- presumably little-endian */
317 		u_int32_t	remain;	/* sectors left */
318 	}		ld[IPS_MAXDRIVES];
319 };
320 
/* NVRAM page 5 contents (IPS_CMD_RWNVRAMPAGE), read in ips_attach. */
321 struct ips_pg5 {
322 	u_int32_t	signature;
323 	u_int8_t	__reserved1;
324 	u_int8_t	slot;
325 	u_int16_t	type;	/* adapter model, little-endian; indexes ips_names[] */
326 	u_int8_t	bioshi[4];
327 	u_int8_t	bioslo[4];
328 	u_int16_t	__reserved2;
329 	u_int8_t	__reserved3;
330 	u_int8_t	os;
331 	u_int8_t	driverhi[4];
332 	u_int8_t	driverlo[4];
333 	u_int8_t	__reserved4[100];
334 };
335 
/*
 * Aggregate of all controller query replies, kept in one DMA-able
 * buffer (sc_infom) so management commands can target sc_info directly.
 */
336 struct ips_info {
337 	struct ips_adapterinfo	adapter;
338 	struct ips_driveinfo	drive;
339 	struct ips_conf		conf;
340 	struct ips_rblstat	rblstat;
341 	struct ips_pg5		pg5;
342 };
343 
344 /* Command control block */
345 struct ips_softc;
/*
 * One in-flight command context.  CCBs live in sc_ccb[], are handed out
 * through sc_iopool / sc_ccbq_free, and each owns a slice of the shared
 * command-block DMA buffer (c_cmdbva/c_cmdbpa).
 */
346 struct ips_ccb {
347 	struct ips_softc *	c_sc;		/* driver softc */
348 	int			c_id;		/* command id */
349 	int			c_flags;	/* SCSI_* flags */
350 	enum {
351 		IPS_CCB_FREE,
352 		IPS_CCB_QUEUED,
353 		IPS_CCB_DONE
354 	}			c_state;	/* command state */
355 
356 	void *			c_cmdbva;	/* command block virt addr */
357 	paddr_t			c_cmdbpa;	/* command block phys addr */
358 	bus_dmamap_t		c_dmam;		/* data buffer DMA map */
359 
360 	struct scsi_xfer *	c_xfer;		/* corresponding SCSI xfer */
361 
362 	u_int8_t		c_stat;		/* status byte copy */
363 	u_int8_t		c_estat;	/* ext status byte copy */
364 	int			c_error;	/* completion error */
365 
366 	void			(*c_done)(struct ips_softc *,	/* cmd done */
367 				    struct ips_ccb *);		/* callback */
368 
369 	SLIST_ENTRY(ips_ccb)	c_link;		/* queue link */
370 };
371 
372 /* CCB queue */
373 SLIST_HEAD(ips_ccbq, ips_ccb);
374 
375 /* DMA-able chunk of memory */
/* DMA-able chunk of memory: one map, one contiguous segment. */
376 struct dmamem {
377 	bus_dma_tag_t		dm_tag;
378 	bus_dmamap_t		dm_map;
379 	bus_dma_segment_t	dm_seg;
380 	bus_size_t		dm_size;
381 	void *			dm_vaddr;	/* kernel mapping of the segment */
382 #define dm_paddr dm_seg.ds_addr
383 };
384 
/* Per-controller driver state. */
385 struct ips_softc {
386 	struct device		sc_dev;
387 
388 	struct scsi_link	sc_scsi_link;	/* logical-drive bus */
389 	struct scsibus_softc *	sc_scsibus;
390 
391 	struct ips_pt {		/* per-channel SCSI pass-through bus */
392 		struct ips_softc *	pt_sc;
393 		int			pt_chan;	/* channel number */
394 
395 		struct scsi_link	pt_link;
396 
397 		int			pt_proctgt;	/* processor/enclosure target, -1 if none */
398 		char			pt_procdev[16];	/* its device name, once probed */
399 	}			sc_pt[IPS_MAXCHANS];
400 
401 	struct ksensordev	sc_sensordev;
402 	struct ksensor *	sc_sensors;	/* one SENSOR_DRIVE per logical drive */
403 
404 	bus_space_tag_t		sc_iot;
405 	bus_space_handle_t	sc_ioh;
406 	bus_dma_tag_t		sc_dmat;
407 
408 	const struct ips_chipset *sc_chip;	/* Copperhead or Morpheus ops */
409 
410 	struct ips_info *	sc_info;	/* controller query results */
411 	struct dmamem		sc_infom;	/* DMA memory backing sc_info */
412 
413 	int			sc_nunits;	/* logical drive count */
414 
415 	struct dmamem		sc_cmdbm;	/* command blocks, one per CCB */
416 
417 	struct ips_ccb *	sc_ccb;
418 	int			sc_nccbs;
419 	struct ips_ccbq		sc_ccbq_free;
420 	struct mutex		sc_ccb_mtx;
421 	struct scsi_iopool	sc_iopool;
422 
423 	struct dmamem		sc_sqm;		/* Copperhead status queue */
424 	paddr_t			sc_sqtail;
425 	u_int32_t *		sc_sqbuf;
426 	int			sc_sqidx;
427 };
428 
429 int	ips_match(struct device *, void *, void *);
430 void	ips_attach(struct device *, struct device *, void *);
431 
432 void	ips_scsi_cmd(struct scsi_xfer *);
433 void	ips_scsi_pt_cmd(struct scsi_xfer *);
434 int	ips_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
435 
436 #if NBIO > 0
437 int	ips_ioctl(struct device *, u_long, caddr_t);
438 int	ips_ioctl_inq(struct ips_softc *, struct bioc_inq *);
439 int	ips_ioctl_vol(struct ips_softc *, struct bioc_vol *);
440 int	ips_ioctl_disk(struct ips_softc *, struct bioc_disk *);
441 int	ips_ioctl_setstate(struct ips_softc *, struct bioc_setstate *);
442 #endif
443 
444 #ifndef SMALL_KERNEL
445 void	ips_sensors(void *);
446 #endif
447 
448 int	ips_load_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
449 void	ips_start_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
450 
451 int	ips_cmd(struct ips_softc *, struct ips_ccb *);
452 int	ips_poll(struct ips_softc *, struct ips_ccb *);
453 void	ips_done(struct ips_softc *, struct ips_ccb *);
454 void	ips_done_xs(struct ips_softc *, struct ips_ccb *);
455 void	ips_done_pt(struct ips_softc *, struct ips_ccb *);
456 void	ips_done_mgmt(struct ips_softc *, struct ips_ccb *);
457 int	ips_error(struct ips_softc *, struct ips_ccb *);
458 int	ips_error_xs(struct ips_softc *, struct ips_ccb *);
459 int	ips_intr(void *);
460 void	ips_timeout(void *);
461 
462 int	ips_getadapterinfo(struct ips_softc *, int);
463 int	ips_getdriveinfo(struct ips_softc *, int);
464 int	ips_getconf(struct ips_softc *, int);
465 int	ips_getpg5(struct ips_softc *, int);
466 
467 #if NBIO > 0
468 int	ips_getrblstat(struct ips_softc *, int);
469 int	ips_setstate(struct ips_softc *, int, int, int, int);
470 int	ips_rebuild(struct ips_softc *, int, int, int, int, int);
471 #endif
472 
473 void	ips_copperhead_exec(struct ips_softc *, struct ips_ccb *);
474 void	ips_copperhead_intren(struct ips_softc *);
475 int	ips_copperhead_isintr(struct ips_softc *);
476 u_int32_t ips_copperhead_status(struct ips_softc *);
477 
478 void	ips_morpheus_exec(struct ips_softc *, struct ips_ccb *);
479 void	ips_morpheus_intren(struct ips_softc *);
480 int	ips_morpheus_isintr(struct ips_softc *);
481 u_int32_t ips_morpheus_status(struct ips_softc *);
482 
483 struct ips_ccb *ips_ccb_alloc(struct ips_softc *, int);
484 void	ips_ccb_free(struct ips_softc *, struct ips_ccb *, int);
485 void	*ips_ccb_get(void *);
486 void	ips_ccb_put(void *, void *);
487 
488 int	ips_dmamem_alloc(struct dmamem *, bus_dma_tag_t, bus_size_t);
489 void	ips_dmamem_free(struct dmamem *);
490 
/* Autoconf glue. */
491 struct cfattach ips_ca = {
492 	sizeof(struct ips_softc),
493 	ips_match,
494 	ips_attach
495 };
496 
497 struct cfdriver ips_cd = {
498 	NULL, "ips", DV_DULL
499 };
500 
501 /* Adapter entry points for the logical-drive bus. */
502 static struct scsi_adapter ips_scsi_adapter = {
503 	ips_scsi_cmd,
504 	scsi_minphys,
505 	NULL,
506 	NULL,
507 	ips_scsi_ioctl
508 };
509 
510 /* Adapter entry points for the per-channel pass-through buses (no ioctl). */
511 static struct scsi_adapter ips_scsi_pt_adapter = {
512 	ips_scsi_pt_cmd,
513 	scsi_minphys,
514 	NULL,
515 	NULL,
516 	NULL
517 };
518 
519 /* PCI IDs handled by this driver. */
520 static const struct pci_matchid ips_ids[] = {
521 	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID },
522 	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID2 },
523 	{ PCI_VENDOR_ADP2,	PCI_PRODUCT_ADP2_SERVERAID }
524 };
522 
/*
 * Chipset operations table.  Array order must match the IPS_CHIP_* enum:
 * ips_attach indexes ips_chips[] with those constants.
 */
523 static const struct ips_chipset {
524 	enum {
525 		IPS_CHIP_COPPERHEAD = 0,
526 		IPS_CHIP_MORPHEUS
527 	}		ic_id;
528 
529 	int		ic_bar;		/* PCI BAR holding the register window */
530 
531 	void		(*ic_exec)(struct ips_softc *, struct ips_ccb *);
532 	void		(*ic_intren)(struct ips_softc *);
533 	int		(*ic_isintr)(struct ips_softc *);
534 	u_int32_t	(*ic_status)(struct ips_softc *);
535 } ips_chips[] = {
536 	{
537 		IPS_CHIP_COPPERHEAD,
538 		0x14,
539 		ips_copperhead_exec,
540 		ips_copperhead_intren,
541 		ips_copperhead_isintr,
542 		ips_copperhead_status
543 	},
544 	{
545 		IPS_CHIP_MORPHEUS,
546 		0x10,
547 		ips_morpheus_exec,
548 		ips_morpheus_intren,
549 		ips_morpheus_isintr,
550 		ips_morpheus_status
551 	}
552 };
553 
554 #define ips_exec(s, c)	(s)->sc_chip->ic_exec((s), (c))
555 #define ips_intren(s)	(s)->sc_chip->ic_intren((s))
556 #define ips_isintr(s)	(s)->sc_chip->ic_isintr((s))
557 #define ips_status(s)	(s)->sc_chip->ic_status((s))
558 
/*
 * Marketing model names, indexed by the little-endian "type" field of
 * NVRAM page 5.  NULL entries print nothing; duplicated strings
 * ("onboard", "5i") presumably cover distinct type codes sharing a name.
 */
559 static const char *ips_names[] = {
560 	NULL,
561 	NULL,
562 	"II",
563 	"onboard",
564 	"onboard",
565 	"3H",
566 	"3L",
567 	"4H",
568 	"4M",
569 	"4L",
570 	"4Mx",
571 	"4Lx",
572 	"5i",
573 	"5i",
574 	"6M",
575 	"6i",
576 	"7t",
577 	"7k",
578 	"7M"
579 };
580 
581 int
582 ips_match(struct device *parent, void *match, void *aux)
583 {
584 	return (pci_matchbyid(aux, ips_ids,
585 	    sizeof(ips_ids) / sizeof(ips_ids[0])));
586 }
587 
/*
 * Autoconf attach: map the register window, allocate the command and
 * info DMA buffers, query the controller with a stack-allocated
 * bootstrap CCB, then size the real CCB queue from the adapter's
 * reported command count and attach one SCSI bus for the logical
 * drives plus one pass-through bus per channel that has non-disk
 * devices.  Errors unwind through the fail* labels in reverse order
 * of acquisition.
 */
588 void
589 ips_attach(struct device *parent, struct device *self, void *aux)
590 {
591 	struct ips_softc *sc = (struct ips_softc *)self;
592 	struct pci_attach_args *pa = aux;
593 	struct ips_ccb ccb0;
594 	struct scsibus_attach_args saa;
595 	struct ips_adapterinfo *ai;
596 	struct ips_driveinfo *di;
597 	struct ips_pg5 *pg5;
598 	pcireg_t maptype;
599 	bus_size_t iosize;
600 	pci_intr_handle_t ih;
601 	const char *intrstr;
602 	int type, i;
603 
604 	sc->sc_dmat = pa->pa_dmat;
605 
606 	/* Identify chipset: only the original ServeRAID is Copperhead */
607 	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_IBM_SERVERAID)
608 		sc->sc_chip = &ips_chips[IPS_CHIP_COPPERHEAD];
609 	else
610 		sc->sc_chip = &ips_chips[IPS_CHIP_MORPHEUS];
611 
612 	/* Map registers */
613 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_chip->ic_bar);
614 	if (pci_mapreg_map(pa, sc->sc_chip->ic_bar, maptype, 0, &sc->sc_iot,
615 	    &sc->sc_ioh, NULL, &iosize, IPS_IOSIZE)) {
616 		printf(": can't map regs\n");
617 		return;
618 	}
619 
620 	/* Allocate command buffer: one ips_cmdb slot per possible CCB */
621 	if (ips_dmamem_alloc(&sc->sc_cmdbm, sc->sc_dmat,
622 	    IPS_MAXCMDS * sizeof(struct ips_cmdb))) {
623 		printf(": can't alloc cmd buffer\n");
624 		goto fail1;
625 	}
626 
627 	/* Allocate info buffer */
628 	if (ips_dmamem_alloc(&sc->sc_infom, sc->sc_dmat,
629 	    sizeof(struct ips_info))) {
630 		printf(": can't alloc info buffer\n");
631 		goto fail2;
632 	}
633 	sc->sc_info = sc->sc_infom.dm_vaddr;
634 	ai = &sc->sc_info->adapter;
635 	di = &sc->sc_info->drive;
636 	pg5 = &sc->sc_info->pg5;
637 
638 	/* Allocate status queue for the Copperhead chipset */
639 	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD) {
640 		if (ips_dmamem_alloc(&sc->sc_sqm, sc->sc_dmat, IPS_SQSZ)) {
641 			printf(": can't alloc status queue\n");
642 			goto fail3;
643 		}
644 		sc->sc_sqtail = sc->sc_sqm.dm_paddr;
645 		sc->sc_sqbuf = sc->sc_sqm.dm_vaddr;
646 		sc->sc_sqidx = 0;
		/* Program queue bounds; head starts one word past start */
647 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQS,
648 		    sc->sc_sqm.dm_paddr);
649 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQE,
650 		    sc->sc_sqm.dm_paddr + IPS_SQSZ);
651 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH,
652 		    sc->sc_sqm.dm_paddr + sizeof(u_int32_t));
653 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT,
654 		    sc->sc_sqm.dm_paddr);
655 	}
656 
657 	/* Bootstrap CCB queue: a single stack CCB lets the query commands
	 * below run before the adapter has told us how many CCBs to allocate */
658 	sc->sc_nccbs = 1;
659 	sc->sc_ccb = &ccb0;
660 	bzero(&ccb0, sizeof(ccb0));
661 	ccb0.c_cmdbva = sc->sc_cmdbm.dm_vaddr;
662 	ccb0.c_cmdbpa = sc->sc_cmdbm.dm_paddr;
663 	SLIST_INIT(&sc->sc_ccbq_free);
664 	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, &ccb0, c_link);
665 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
666 	scsi_iopool_init(&sc->sc_iopool, sc, ips_ccb_get, ips_ccb_put);
667 
668 	/* Get adapter info */
669 	if (ips_getadapterinfo(sc, SCSI_NOSLEEP)) {
670 		printf(": can't get adapter info\n");
671 		goto fail4;
672 	}
673 
674 	/* Get logical drives info */
675 	if (ips_getdriveinfo(sc, SCSI_NOSLEEP)) {
676 		printf(": can't get ld info\n");
677 		goto fail4;
678 	}
679 	sc->sc_nunits = di->drivecnt;
680 
681 	/* Get configuration */
682 	if (ips_getconf(sc, SCSI_NOSLEEP)) {
683 		printf(": can't get config\n");
684 		goto fail4;
685 	}
686 
687 	/* Read NVRAM page 5 for additional info; failure is non-fatal */
688 	(void)ips_getpg5(sc, SCSI_NOSLEEP);
689 
690 	/* Initialize CCB queue, now sized from the adapter's command count */
691 	sc->sc_nccbs = ai->cmdcnt;
692 	if ((sc->sc_ccb = ips_ccb_alloc(sc, sc->sc_nccbs)) == NULL) {
693 		printf(": can't alloc ccb queue\n");
694 		goto fail4;
695 	}
696 	SLIST_INIT(&sc->sc_ccbq_free);
697 	for (i = 0; i < sc->sc_nccbs; i++)
698 		SLIST_INSERT_HEAD(&sc->sc_ccbq_free,
699 		    &sc->sc_ccb[i], c_link);
700 
701 	/* Install interrupt handler */
702 	if (pci_intr_map(pa, &ih)) {
703 		printf(": can't map interrupt\n");
704 		goto fail5;
705 	}
706 	intrstr = pci_intr_string(pa->pa_pc, ih);
707 	if (pci_intr_establish(pa->pa_pc, ih, IPL_BIO, ips_intr, sc,
708 	    sc->sc_dev.dv_xname) == NULL) {
709 		printf(": can't establish interrupt");
710 		if (intrstr != NULL)
711 			printf(" at %s", intrstr);
712 		printf("\n");
713 		goto fail5;
714 	}
715 	printf(": %s\n", intrstr);
716 
717 	/* Display adapter info */
718 	printf("%s: ServeRAID", sc->sc_dev.dv_xname);
719 	type = letoh16(pg5->type);
720 	if (type < sizeof(ips_names) / sizeof(ips_names[0]) && ips_names[type])
721 		printf(" %s", ips_names[type]);
722 	printf(", FW %c%c%c%c%c%c%c", ai->firmware[0], ai->firmware[1],
723 	    ai->firmware[2], ai->firmware[3], ai->firmware[4], ai->firmware[5],
724 	    ai->firmware[6]);
725 	printf(", BIOS %c%c%c%c%c%c%c", ai->bios[0], ai->bios[1], ai->bios[2],
726 	    ai->bios[3], ai->bios[4], ai->bios[5], ai->bios[6]);
727 	printf(", %d cmds, %d LD%s", sc->sc_nccbs, sc->sc_nunits,
728 	    (sc->sc_nunits == 1 ? "" : "s"));
729 	printf("\n");
730 
731 	/* Attach SCSI bus; split the CCBs evenly among logical drives */
732 	if (sc->sc_nunits > 0)
733 		sc->sc_scsi_link.openings = sc->sc_nccbs / sc->sc_nunits;
734 	sc->sc_scsi_link.adapter_target = sc->sc_nunits;
735 	sc->sc_scsi_link.adapter_buswidth = sc->sc_nunits;
736 	sc->sc_scsi_link.adapter = &ips_scsi_adapter;
737 	sc->sc_scsi_link.adapter_softc = sc;
738 	sc->sc_scsi_link.pool = &sc->sc_iopool;
739 
740 	bzero(&saa, sizeof(saa));
741 	saa.saa_sc_link = &sc->sc_scsi_link;
742 	sc->sc_scsibus = (struct scsibus_softc *)config_found(self, &saa,
743 	    scsiprint);
744 
745 	/* For each channel attach SCSI pass-through bus */
746 	bzero(&saa, sizeof(saa));
747 	for (i = 0; i < IPS_MAXCHANS; i++) {
748 		struct ips_pt *pt;
749 		struct scsi_link *link;
750 		int target, lastarget;
751 
752 		pt = &sc->sc_pt[i];
753 		pt->pt_sc = sc;
754 		pt->pt_chan = i;
755 		pt->pt_proctgt = -1;
756 
757 		/* Check if channel has any devices besides disks */
758 		for (target = 0, lastarget = -1; target < IPS_MAXTARGETS;
759 		    target++) {
760 			struct ips_dev *idev;
761 			int type;
762 
763 			idev = &sc->sc_info->conf.dev[i][target];
764 			type = idev->params & SID_TYPE;
765 			if (idev->state && type != T_DIRECT) {
766 				lastarget = target;
767 				if (type == T_PROCESSOR ||
768 				    type == T_ENCLOSURE)
769 					/* remember enclosure address */
770 					pt->pt_proctgt = target;
771 			}
772 		}
773 		if (lastarget == -1)
774 			continue;
775 
776 		link = &pt->pt_link;
777 		link->openings = 1;
778 		link->adapter_target = IPS_MAXTARGETS;
779 		link->adapter_buswidth = lastarget + 1;
780 		link->adapter = &ips_scsi_pt_adapter;
781 		link->adapter_softc = pt;
782 		link->pool = &sc->sc_iopool;
783 
784 		saa.saa_sc_link = link;
785 		config_found(self, &saa, scsiprint);
786 	}
787 
788 	/* Enable interrupts */
789 	ips_intren(sc);
790 
791 #if NBIO > 0
792 	/* Install ioctl handler */
793 	if (bio_register(&sc->sc_dev, ips_ioctl))
794 		printf("%s: no ioctl support\n", sc->sc_dev.dv_xname);
795 #endif
796 
797 #ifndef SMALL_KERNEL
798 	/* Add sensors; failures here leave the device attached and working.
	 * NOTE(review): the ": " prefix in these printfs looks like a
	 * leftover from the pre-newline attach phase -- the attach line was
	 * already terminated above, so the colon prints on its own line. */
799 	if ((sc->sc_sensors = malloc(sizeof(struct ksensor) * sc->sc_nunits,
800 	    M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
801 		printf(": can't alloc sensors\n");
802 		return;
803 	}
804 	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
805 	    sizeof(sc->sc_sensordev.xname));
806 	for (i = 0; i < sc->sc_nunits; i++) {
807 		struct device *dev;
808 
809 		sc->sc_sensors[i].type = SENSOR_DRIVE;
810 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
811 		dev = scsi_get_link(sc->sc_scsibus, i, 0)->device_softc;
812 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
813 		    sizeof(sc->sc_sensors[i].desc));
814 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
815 	}
816 	if (sensor_task_register(sc, ips_sensors, 10) == NULL) {
817 		printf(": no sensors support\n");
818 		free(sc->sc_sensors, M_DEVBUF);
819 		return;
820 	}
821 	sensordev_install(&sc->sc_sensordev);
822 #endif	/* !SMALL_KERNEL */
823 
824 	return;
/* Unwind in reverse order of acquisition */
825 fail5:
826 	ips_ccb_free(sc, sc->sc_ccb, sc->sc_nccbs);
827 fail4:
828 	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD)
829 		ips_dmamem_free(&sc->sc_sqm);
830 fail3:
831 	ips_dmamem_free(&sc->sc_infom);
832 fail2:
833 	ips_dmamem_free(&sc->sc_cmdbm);
834 fail1:
835 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
836 }
837 
/*
 * SCSI command entry point for the logical-drive bus.  READ/WRITE are
 * translated into IPS read/write frames; INQUIRY, READ CAPACITY and
 * REQUEST SENSE are emulated in software from cached controller data;
 * SYNCHRONIZE CACHE becomes an IPS flush.  Everything else is rejected.
 */
838 void
839 ips_scsi_cmd(struct scsi_xfer *xs)
840 {
841 	struct scsi_link *link = xs->sc_link;
842 	struct ips_softc *sc = link->adapter_softc;
843 	struct ips_driveinfo *di = &sc->sc_info->drive;
844 	struct ips_drive *drive;
845 	struct scsi_inquiry_data inq;
846 	struct scsi_read_cap_data rcd;
847 	struct scsi_sense_data sd;
848 	struct scsi_rw *rw;
849 	struct scsi_rw_big *rwb;
850 	struct ips_ccb *ccb = xs->io;
851 	struct ips_cmd *cmd;
852 	int target = link->target;
853 	u_int32_t blkno, blkcnt;
854 	int code;
855 
856 	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_cmd: xs %p, target %d, "
857 	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, target,
858 	    xs->cmd->opcode, xs->flags));
859 
	/* Only single-LUN targets within the known logical drives exist */
860 	if (target >= sc->sc_nunits || link->lun != 0) {
861 		DPRINTF(IPS_D_INFO, ("%s: ips_scsi_cmd: invalid params "
862 		    "target %d, lun %d\n", sc->sc_dev.dv_xname,
863 		    target, link->lun));
864 		xs->error = XS_DRIVER_STUFFUP;
865 		scsi_done(xs);
866 		return;
867 	}
868 
869 	drive = &di->drive[target];
870 	xs->error = XS_NOERROR;
871 
872 	/* Fake SCSI commands */
873 	switch (xs->cmd->opcode) {
874 	case READ_BIG:
875 	case READ_COMMAND:
876 	case WRITE_BIG:
877 	case WRITE_COMMAND:
		/* Decode either the 6-byte or the 10-byte CDB form */
878 		if (xs->cmdlen == sizeof(struct scsi_rw)) {
879 			rw = (void *)xs->cmd;
880 			blkno = _3btol(rw->addr) &
881 			    (SRW_TOPADDR << 16 | 0xffff);
882 			blkcnt = rw->length ? rw->length : 0x100;
883 		} else {
884 			rwb = (void *)xs->cmd;
885 			blkno = _4btol(rwb->addr);
886 			blkcnt = _2btol(rwb->length);
887 		}
888 
		/* Reject transfers extending past the end of the drive */
889 		if (blkno >= letoh32(drive->seccnt) || blkno + blkcnt >
890 		    letoh32(drive->seccnt)) {
891 			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: invalid params "
892 			    "blkno %u, blkcnt %u\n", sc->sc_dev.dv_xname,
893 			    blkno, blkcnt));
894 			xs->error = XS_DRIVER_STUFFUP;
895 			break;
896 		}
897 
898 		if (xs->flags & SCSI_DATA_IN)
899 			code = IPS_CMD_READ;
900 		else
901 			code = IPS_CMD_WRITE;
902 
		/* NOTE(review): redundant -- ccb was already set to xs->io
		 * at its declaration above; harmless. */
903 		ccb = xs->io;
904 
905 		cmd = ccb->c_cmdbva;
906 		cmd->code = code;
907 		cmd->drive = target;
908 		cmd->lba = htole32(blkno);
909 		cmd->seccnt = htole16(blkcnt);
910 
911 		if (ips_load_xs(sc, ccb, xs)) {
912 			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: ips_load_xs "
913 			    "failed\n", sc->sc_dev.dv_xname));
914 			xs->error = XS_DRIVER_STUFFUP;
915 			scsi_done(xs);
916 			return;
917 		}
918 
		/* Switch to the scatter-gather opcode variant if needed */
919 		if (cmd->sgcnt > 0)
920 			cmd->code |= IPS_CMD_SG;
921 
922 		ccb->c_done = ips_done_xs;
923 		ips_start_xs(sc, ccb, xs);
924 		return;
925 	case INQUIRY:
		/* Emulated from cached drive info; never sent to hardware */
926 		bzero(&inq, sizeof(inq));
927 		inq.device = T_DIRECT;
928 		inq.version = 2;
929 		inq.response_format = 2;
930 		inq.additional_length = 32;
931 		inq.flags |= SID_CmdQue;
932 		strlcpy(inq.vendor, "IBM", sizeof(inq.vendor));
933 		snprintf(inq.product, sizeof(inq.product),
934 		    "LD%d RAID%d", target, drive->raid);
935 		strlcpy(inq.revision, "1.0", sizeof(inq.revision));
936 		memcpy(xs->data, &inq, MIN(xs->datalen, sizeof(inq)));
937 		break;
938 	case READ_CAPACITY:
939 		bzero(&rcd, sizeof(rcd));
940 		_lto4b(letoh32(drive->seccnt) - 1, rcd.addr);
941 		_lto4b(IPS_SECSZ, rcd.length);
942 		memcpy(xs->data, &rcd, MIN(xs->datalen, sizeof(rcd)));
943 		break;
944 	case REQUEST_SENSE:
		/* Always report "no sense" -- errors are handled per-command */
945 		bzero(&sd, sizeof(sd));
946 		sd.error_code = SSD_ERRCODE_CURRENT;
947 		sd.flags = SKEY_NO_SENSE;
948 		memcpy(xs->data, &sd, MIN(xs->datalen, sizeof(sd)));
949 		break;
950 	case SYNCHRONIZE_CACHE:
951 		cmd = ccb->c_cmdbva;
952 		cmd->code = IPS_CMD_FLUSH;
953 
954 		ccb->c_done = ips_done_xs;
955 		ips_start_xs(sc, ccb, xs);
956 		return;
957 	case PREVENT_ALLOW:
958 	case START_STOP:
959 	case TEST_UNIT_READY:
		/* Accepted as no-ops */
960 		break;
961 	default:
962 		DPRINTF(IPS_D_INFO, ("%s: unsupported scsi command 0x%02x\n",
963 		    sc->sc_dev.dv_xname, xs->cmd->opcode));
964 		xs->error = XS_DRIVER_STUFFUP;
965 	}
966 
967 	scsi_done(xs);
968 }
969 
/*
 * SCSI command entry point for the pass-through buses: wrap the raw CDB
 * in an IPS DCDB frame addressed to (channel, target) and hand it to
 * the controller.  The S/G list built by ips_load_xs is moved from the
 * command frame into the DCDB, and the command frame is repointed at
 * the DCDB itself.
 */
970 void
971 ips_scsi_pt_cmd(struct scsi_xfer *xs)
972 {
973 	struct scsi_link *link = xs->sc_link;
974 	struct ips_pt *pt = link->adapter_softc;
975 	struct ips_softc *sc = pt->pt_sc;
976 	struct device *dev = link->device_softc;
977 	struct ips_ccb *ccb = xs->io;
978 	struct ips_cmdb *cmdb;
979 	struct ips_cmd *cmd;
980 	struct ips_dcdb *dcdb;
981 	int chan = pt->pt_chan, target = link->target;
982 
983 	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_pt_cmd: xs %p, chan %d, target %d, "
984 	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, chan,
985 	    target, xs->cmd->opcode, xs->flags));
986 
	/* First command to the processor/enclosure target: record its name */
987 	if (pt->pt_procdev[0] == '\0' && target == pt->pt_proctgt && dev)
988 		strlcpy(pt->pt_procdev, dev->dv_xname, sizeof(pt->pt_procdev));
989 
	/* CDB too long for the DCDB frame: fail with ILLEGAL REQUEST sense */
990 	if (xs->cmdlen > IPS_MAXCDB) {
991 		DPRINTF(IPS_D_ERR, ("%s: cmdlen %d too big\n",
992 		    sc->sc_dev.dv_xname, xs->cmdlen));
993 
994 		bzero(&xs->sense, sizeof(xs->sense));
995 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
996 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
997 		xs->sense.add_sense_code = 0x20; /* illcmd, 0x24 illfield */
998 		xs->error = XS_SENSE;
999 		scsi_done(xs);
1000 		return;
1001 	}
1002 
1003 	xs->error = XS_NOERROR;
1004 
1005 	cmdb = ccb->c_cmdbva;
1006 	cmd = &cmdb->cmd;
1007 	dcdb = &cmdb->dcdb;
1008 
1009 	cmd->code = IPS_CMD_DCDB;
1010 
1011 	dcdb->device = (chan << 4) | target;
1012 	if (xs->flags & SCSI_DATA_IN)
1013 		dcdb->attr |= IPS_DCDB_DATAIN;
1014 	if (xs->flags & SCSI_DATA_OUT)
1015 		dcdb->attr |= IPS_DCDB_DATAOUT;
1016 
1017 	/*
1018 	 * Adjust timeout value to what controller supports. Make sure our
1019 	 * timeout will be fired after controller gives up.
1020 	 */
1021 	if (xs->timeout <= 10000) {
1022 		dcdb->attr |= IPS_DCDB_TIMO10;
1023 		xs->timeout = 11000;
1024 	} else if (xs->timeout <= 60000) {
1025 		dcdb->attr |= IPS_DCDB_TIMO60;
1026 		xs->timeout = 61000;
1027 	} else {
1028 		dcdb->attr |= IPS_DCDB_TIMO20M;
1029 		xs->timeout = 20 * 60000 + 1000;
1030 	}
1031 
1032 	dcdb->attr |= IPS_DCDB_DISCON;
1033 	dcdb->datalen = htole16(xs->datalen);
1034 	dcdb->cdblen = xs->cmdlen;
1035 	dcdb->senselen = MIN(sizeof(xs->sense), sizeof(dcdb->sense));
1036 	memcpy(dcdb->cdb, xs->cmd, xs->cmdlen);
1037 
1038 	if (ips_load_xs(sc, ccb, xs)) {
1039 		DPRINTF(IPS_D_ERR, ("%s: ips_scsi_pt_cmd: ips_load_xs "
1040 		    "failed\n", sc->sc_dev.dv_xname));
1041 		xs->error = XS_DRIVER_STUFFUP;
1042 		scsi_done(xs);
1043 		return;
1044 	}
1045 	if (cmd->sgcnt > 0)
1046 		cmd->code |= IPS_CMD_SG;
	/* Move the S/G description into the DCDB and point the command
	 * frame at the DCDB inside this CCB's command block */
1047 	dcdb->sgaddr = cmd->sgaddr;
1048 	dcdb->sgcnt = cmd->sgcnt;
1049 	cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb, dcdb));
1050 	cmd->sgcnt = 0;
1051 
1052 	ccb->c_done = ips_done_pt;
1053 	ips_start_xs(sc, ccb, xs);
1054 }
1055 
/*
 * SCSI-layer ioctl hook: forward to the bio ioctl handler when the
 * kernel is built with bio(4) support, otherwise reject.
 */
1056 int
1057 ips_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1058 {
1059 #if NBIO > 0
1060 	return (ips_ioctl(link->adapter_softc, cmd, addr));
1061 #else
1062 	return (ENOTTY);
1063 #endif
1064 }
1065 
1066 #if NBIO > 0
1067 int
1068 ips_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1069 {
1070 	struct ips_softc *sc = (struct ips_softc *)dev;
1071 
1072 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl: cmd %lu\n",
1073 	    sc->sc_dev.dv_xname, cmd));
1074 
1075 	switch (cmd) {
1076 	case BIOCINQ:
1077 		return (ips_ioctl_inq(sc, (struct bioc_inq *)addr));
1078 	case BIOCVOL:
1079 		return (ips_ioctl_vol(sc, (struct bioc_vol *)addr));
1080 	case BIOCDISK:
1081 		return (ips_ioctl_disk(sc, (struct bioc_disk *)addr));
1082 	case BIOCSETSTATE:
1083 		return (ips_ioctl_setstate(sc, (struct bioc_setstate *)addr));
1084 	default:
1085 		return (ENOTTY);
1086 	}
1087 }
1088 
1089 int
1090 ips_ioctl_inq(struct ips_softc *sc, struct bioc_inq *bi)
1091 {
1092 	struct ips_conf *conf = &sc->sc_info->conf;
1093 	int i;
1094 
1095 	strlcpy(bi->bi_dev, sc->sc_dev.dv_xname, sizeof(bi->bi_dev));
1096 	bi->bi_novol = sc->sc_nunits;
1097 	for (i = 0, bi->bi_nodisk = 0; i < sc->sc_nunits; i++)
1098 		bi->bi_nodisk += conf->ld[i].chunkcnt;
1099 
1100 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_inq: novol %d, nodisk %d\n",
1101 	    bi->bi_dev, bi->bi_novol, bi->bi_nodisk));
1102 
1103 	return (0);
1104 }
1105 
/*
 * BIOCVOL: report state, rebuild progress, size, RAID level and disk
 * count of the logical volume bv->bv_volid.  Unused and spare physical
 * drives are counted against volume 0 so bioctl shows them somewhere.
 */
int
ips_ioctl_vol(struct ips_softc *sc, struct bioc_vol *bv)
{
	struct ips_driveinfo *di = &sc->sc_info->drive;
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_rblstat *rblstat = &sc->sc_info->rblstat;
	struct ips_ld *ld;
	int vid = bv->bv_volid;
	struct device *dv;
	int error, rebuild = 0;
	u_int32_t total = 0, done = 0;

	if (vid >= sc->sc_nunits)
		return (EINVAL);
	/* Refresh the cached configuration before reporting */
	if ((error = ips_getconf(sc, 0)))
		return (error);
	ld = &conf->ld[vid];

	/* Translate controller logical drive state to bio(4) status */
	switch (ld->state) {
	case IPS_DS_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case IPS_DS_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		rebuild++;
		break;
	case IPS_DS_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	default:
		bv->bv_status = BIOC_SVINVALID;
	}

	/* For a degraded volume, check whether a rebuild is in progress */
	if (rebuild && ips_getrblstat(sc, 0) == 0) {
		total = letoh32(rblstat->ld[vid].total);
		done = total - letoh32(rblstat->ld[vid].remain);
		if (total && total > done) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = 100 * done / total;
		}
	}

	bv->bv_size = (u_quad_t)letoh32(ld->size) * IPS_SECSZ;
	bv->bv_level = di->drive[vid].raid;
	bv->bv_nodisk = ld->chunkcnt;

	/* Associate all unused and spare drives with first volume */
	if (vid == 0) {
		struct ips_dev *dev;
		int chan, target;

		for (chan = 0; chan < IPS_MAXCHANS; chan++)
			for (target = 0; target < IPS_MAXTARGETS; target++) {
				dev = &conf->dev[chan][target];
				if (dev->state && !(dev->state &
				    IPS_DVS_MEMBER) &&
				    (dev->params & SID_TYPE) == T_DIRECT)
					bv->bv_nodisk++;
			}
	}

	/* Report the sd(4) device attached to this unit */
	dv = scsi_get_link(sc->sc_scsibus, vid, 0)->device_softc;
	strlcpy(bv->bv_dev, dv->dv_xname, sizeof(bv->bv_dev));
	strlcpy(bv->bv_vendor, "IBM", sizeof(bv->bv_vendor));

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_vol: vid %d, state 0x%02x, "
	    "total %u, done %u, size %llu, level %d, nodisk %d, dev %s\n",
	    sc->sc_dev.dv_xname, vid, ld->state, total, done, bv->bv_size,
	    bv->bv_level, bv->bv_nodisk, bv->bv_dev));

	return (0);
}
1178 
/*
 * BIOCDISK: report channel, target, size and state of one physical
 * disk.  Disk ids beyond the volume's chunk count refer to unused or
 * spare drives, all of which are attributed to volume 0.
 */
int
ips_ioctl_disk(struct ips_softc *sc, struct bioc_disk *bd)
{
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_ld *ld;
	struct ips_chunk *chunk;
	struct ips_dev *dev;
	int vid = bd->bd_volid, did = bd->bd_diskid;
	int chan, target, error, i;

	if (vid >= sc->sc_nunits)
		return (EINVAL);
	/* Refresh the cached configuration before reporting */
	if ((error = ips_getconf(sc, 0)))
		return (error);
	ld = &conf->ld[vid];

	if (did >= ld->chunkcnt) {
		/* Probably unused or spare drives */
		if (vid != 0)
			return (EINVAL);

		/*
		 * Scan the whole bus, counting non-member direct-access
		 * drives from chunkcnt upward, until the running index
		 * matches did.  If the scan finishes without a match,
		 * chan/target end at their loop bounds and the range
		 * check after the label rejects them.
		 */
		i = ld->chunkcnt;
		for (chan = 0; chan < IPS_MAXCHANS; chan++)
			for (target = 0; target < IPS_MAXTARGETS; target++) {
				dev = &conf->dev[chan][target];
				if (dev->state && !(dev->state &
				    IPS_DVS_MEMBER) &&
				    (dev->params & SID_TYPE) == T_DIRECT)
					if (i++ == did)
						goto out;
			}
	} else {
		/* Regular member disk: location comes from the chunk table */
		chunk = &ld->chunk[did];
		chan = chunk->channel;
		target = chunk->target;
	}

out:
	if (chan >= IPS_MAXCHANS || target >= IPS_MAXTARGETS)
		return (EINVAL);
	dev = &conf->dev[chan][target];

	bd->bd_channel = chan;
	bd->bd_target = target;
	bd->bd_lun = 0;
	bd->bd_size = (u_quad_t)letoh32(dev->seccnt) * IPS_SECSZ;

	bzero(bd->bd_vendor, sizeof(bd->bd_vendor));
	memcpy(bd->bd_vendor, dev->devid, MIN(sizeof(bd->bd_vendor),
	    sizeof(dev->devid)));
	strlcpy(bd->bd_procdev, sc->sc_pt[chan].pt_procdev,
	    sizeof(bd->bd_procdev));

	/*
	 * Translate the state bits to a single bio(4) status; later
	 * checks override earlier ones, so REBUILD wins over MEMBER.
	 */
	if (dev->state & IPS_DVS_READY) {
		bd->bd_status = BIOC_SDUNUSED;
		if (dev->state & IPS_DVS_MEMBER)
			bd->bd_status = BIOC_SDONLINE;
		if (dev->state & IPS_DVS_SPARE)
			bd->bd_status = BIOC_SDHOTSPARE;
		if (dev->state & IPS_DVS_REBUILD)
			bd->bd_status = BIOC_SDREBUILD;
	} else {
		bd->bd_status = BIOC_SDOFFLINE;
	}

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_disk: vid %d, did %d, channel %d, "
	    "target %d, size %llu, state 0x%02x\n", sc->sc_dev.dv_xname,
	    vid, did, bd->bd_channel, bd->bd_target, bd->bd_size, dev->state));

	return (0);
}
1250 
/*
 * BIOCSETSTATE: change a physical drive's state (online, offline,
 * hot-spare) or kick off a rebuild in place.
 */
int
ips_ioctl_setstate(struct ips_softc *sc, struct bioc_setstate *bs)
{
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_dev *dev;
	int state, error;

	if (bs->bs_channel >= IPS_MAXCHANS || bs->bs_target >= IPS_MAXTARGETS)
		return (EINVAL);
	/* Refresh the cached configuration to get the current state */
	if ((error = ips_getconf(sc, 0)))
		return (error);
	dev = &conf->dev[bs->bs_channel][bs->bs_target];
	state = dev->state;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		state |= IPS_DVS_READY;
		break;
	case BIOC_SSOFFLINE:
		state &= ~IPS_DVS_READY;
		break;
	case BIOC_SSHOTSPARE:
		state |= IPS_DVS_SPARE;
		break;
	case BIOC_SSREBUILD:
		/* Rebuild in place: source and destination are the same */
		return (ips_rebuild(sc, bs->bs_channel, bs->bs_target,
		    bs->bs_channel, bs->bs_target, 0));
	default:
		return (EINVAL);
	}

	return (ips_setstate(sc, bs->bs_channel, bs->bs_target, state, 0));
}
1284 #endif	/* NBIO > 0 */
1285 
1286 #ifndef SMALL_KERNEL
/*
 * Periodic sensor refresh: map each logical drive's state onto its
 * drive sensor.  If the configuration cannot be read, all sensors are
 * marked unknown.
 */
void
ips_sensors(void *arg)
{
	struct ips_softc *sc = arg;
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_ld *ld;
	int i;

	/* ips_sensors() runs from work queue thus allowed to sleep */
	if (ips_getconf(sc, 0)) {
		DPRINTF(IPS_D_ERR, ("%s: ips_sensors: ips_getconf failed\n",
		    sc->sc_dev.dv_xname));

		/* Configuration unavailable: invalidate every sensor */
		for (i = 0; i < sc->sc_nunits; i++) {
			sc->sc_sensors[i].value = 0;
			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
		}
		return;
	}

	DPRINTF(IPS_D_INFO, ("%s: ips_sensors:", sc->sc_dev.dv_xname));
	for (i = 0; i < sc->sc_nunits; i++) {
		ld = &conf->ld[i];
		DPRINTF(IPS_D_INFO, (" ld%d.state 0x%02x", i, ld->state));
		switch (ld->state) {
		case IPS_DS_ONLINE:
			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
			sc->sc_sensors[i].status = SENSOR_S_OK;
			break;
		case IPS_DS_DEGRADED:
			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
			sc->sc_sensors[i].status = SENSOR_S_WARN;
			break;
		case IPS_DS_OFFLINE:
			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
			sc->sc_sensors[i].status = SENSOR_S_CRIT;
			break;
		default:
			sc->sc_sensors[i].value = 0;
			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
		}
	}
	DPRINTF(IPS_D_INFO, ("\n"));
}
1331 #endif	/* !SMALL_KERNEL */
1332 
/*
 * Map the scsi_xfer data buffer for DMA and fill in the command's
 * scatter-gather fields.  Returns 0 on success, 1 on failure; the
 * caller is responsible for completing the scsi_xfer on failure.
 */
int
ips_load_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
{
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_cmd *cmd = &cmdb->cmd;
	struct ips_sg *sg = cmdb->sg;
	int nsegs, i;

	/* Nothing to map for data-less commands */
	if (xs->datalen == 0)
		return (0);

	/* Map data buffer into DMA segments */
	if (bus_dmamap_load(sc->sc_dmat, ccb->c_dmam, xs->data, xs->datalen,
	    NULL, (xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : 0)))
		return (1);
	bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,ccb->c_dmam->dm_mapsize,
	    xs->flags & SCSI_DATA_IN ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	/*
	 * The map is created with at most IPS_MAXSGS segments (see
	 * ips_ccb_alloc()), so this is a defensive check only.
	 */
	if ((nsegs = ccb->c_dmam->dm_nsegs) > IPS_MAXSGS)
		return (1);

	if (nsegs > 1) {
		/* Multiple segments: point the command at the S/G list */
		cmd->sgcnt = nsegs;
		cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb,
		    sg));

		/* Fill in scatter-gather array */
		for (i = 0; i < nsegs; i++) {
			sg[i].addr = htole32(ccb->c_dmam->dm_segs[i].ds_addr);
			sg[i].size = htole32(ccb->c_dmam->dm_segs[i].ds_len);
		}
	} else {
		/* Single segment: transfer directly, no S/G list needed */
		cmd->sgcnt = 0;
		cmd->sgaddr = htole32(ccb->c_dmam->dm_segs[0].ds_addr);
	}

	return (0);
}
1372 
1373 void
1374 ips_start_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
1375 {
1376 	ccb->c_flags = xs->flags;
1377 	ccb->c_xfer = xs;
1378 	int ispoll = xs->flags & SCSI_POLL;
1379 
1380 	if (!ispoll) {
1381 		timeout_set(&xs->stimeout, ips_timeout, ccb);
1382 		timeout_add_msec(&xs->stimeout, xs->timeout);
1383 	}
1384 
1385 	/*
1386 	 * Return value not used here because ips_cmd() must complete
1387 	 * scsi_xfer on any failure and SCSI layer will handle possible
1388 	 * errors.
1389 	 */
1390 	ips_cmd(sc, ccb);
1391 }
1392 
/*
 * Assign the command id and post the ccb to the controller.  With
 * SCSI_POLL set, also wait for completion.  Returns the command error
 * (always 0 for asynchronous submissions).
 */
int
ips_cmd(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmd *cmd = ccb->c_cmdbva;
	int s, error = 0;

	DPRINTF(IPS_D_XFER, ("%s: ips_cmd: id 0x%02x, flags 0x%x, xs %p, "
	    "code 0x%02x, drive %d, sgcnt %d, lba %d, sgaddr 0x%08x, "
	    "seccnt %d\n", sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags,
	    ccb->c_xfer, cmd->code, cmd->drive, cmd->sgcnt, letoh32(cmd->lba),
	    letoh32(cmd->sgaddr), letoh16(cmd->seccnt)));

	cmd->id = ccb->c_id;

	/* Post command to controller and optionally wait for completion */
	s = splbio();
	ips_exec(sc, ccb);
	/* splbio blocks the interrupt path until the state is set */
	ccb->c_state = IPS_CCB_QUEUED;
	if (ccb->c_flags & SCSI_POLL)
		error = ips_poll(sc, ccb);
	splx(s);

	return (error);
}
1417 
/*
 * Wait for a posted ccb to complete.  With SCSI_NOSLEEP the hardware
 * is polled directly; otherwise sleep until the interrupt handler
 * wakes us or the transfer timeout expires.  A ccb that never
 * completed is faked as a hardware timeout before ips_done().
 */
int
ips_poll(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct timeval tv;
	int error, timo;

	splassert(IPL_BIO);

	if (ccb->c_flags & SCSI_NOSLEEP) {
		/* busy-wait */
		DPRINTF(IPS_D_XFER, ("%s: ips_poll: busy-wait\n",
		    sc->sc_dev.dv_xname));

		/* Call the interrupt handler every 100us, up to 1s total */
		for (timo = 10000; timo > 0; timo--) {
			delay(100);
			ips_intr(sc);
			if (ccb->c_state == IPS_CCB_DONE)
				break;
		}
	} else {
		/* sleep */
		timo = ccb->c_xfer ? ccb->c_xfer->timeout : IPS_TIMEOUT;
		/* Convert the millisecond timeout to clock ticks */
		tv.tv_sec = timo / 1000;
		tv.tv_usec = (timo % 1000) * 1000;
		timo = tvtohz(&tv);

		DPRINTF(IPS_D_XFER, ("%s: ips_poll: sleep %d hz\n",
		    sc->sc_dev.dv_xname, timo));
		tsleep(ccb, PRIBIO + 1, "ipscmd", timo);
	}
	DPRINTF(IPS_D_XFER, ("%s: ips_poll: state %d\n", sc->sc_dev.dv_xname,
	    ccb->c_state));

	if (ccb->c_state != IPS_CCB_DONE)
		/*
		 * Command never completed. Fake hardware status byte
		 * to indicate timeout.
		 */
		ccb->c_stat = IPS_STAT_TIMO;

	ips_done(sc, ccb);
	error = ccb->c_error;

	return (error);
}
1463 
/*
 * Common completion: compute the command error from the hardware
 * status and invoke the ccb's type-specific completion callback.
 */
void
ips_done(struct ips_softc *sc, struct ips_ccb *ccb)
{
	splassert(IPL_BIO);

	DPRINTF(IPS_D_XFER, ("%s: ips_done: id 0x%02x, flags 0x%x, xs %p\n",
	    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags, ccb->c_xfer));

	ccb->c_error = ips_error(sc, ccb);
	ccb->c_done(sc, ccb);
}
1475 
/*
 * Completion callback for logical drive I/O: disarm the watchdog,
 * tear down DMA state, translate the error and hand the scsi_xfer
 * back to the midlayer.
 */
void
ips_done_xs(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->c_xfer;

	/* Asynchronous commands had a watchdog armed in ips_start_xs() */
	if (!(xs->flags & SCSI_POLL))
		timeout_del(&xs->stimeout);

	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
	}

	xs->resid = 0;
	xs->error = ips_error_xs(sc, ccb);
	scsi_done(xs);
}
1495 
/*
 * Completion callback for DCDB passthrough commands: tear down DMA
 * state, compute the residual from the controller-reported length,
 * copy out sense data and hide physical disks from the midlayer.
 */
void
ips_done_pt(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->c_xfer;
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	/* NOTE(review): dcdb->datalen appears to be updated by the
	 * controller to the actual transfer count — confirm with firmware
	 * documentation. */
	int done = letoh16(dcdb->datalen);

	if (!(xs->flags & SCSI_POLL))
		timeout_del(&xs->stimeout);

	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
	}

	/* Report a residual only on a short transfer */
	if (done && done < xs->datalen)
		xs->resid = xs->datalen - done;
	else
		xs->resid = 0;
	xs->error = ips_error_xs(sc, ccb);
	xs->status = dcdb->status;

	if (xs->error == XS_SENSE)
		memcpy(&xs->sense, dcdb->sense, MIN(sizeof(xs->sense),
		    sizeof(dcdb->sense)));

	/*
	 * Fail INQUIRY for direct-access devices so physical drives are
	 * only reachable through their logical volumes.
	 */
	if (xs->cmd->opcode == INQUIRY && xs->error == XS_NOERROR) {
		int type = ((struct scsi_inquiry_data *)xs->data)->device &
		    SID_TYPE;

		if (type == T_DIRECT)
			/* mask physical drives */
			xs->error = XS_DRIVER_STUFFUP;
	}

	scsi_done(xs);
}
1536 
/*
 * Completion callback for management commands: sync the shared info
 * buffer and return the ccb to the pool.  No scsi_xfer is attached.
 */
void
ips_done_mgmt(struct ips_softc *sc, struct ips_ccb *ccb)
{
	if (ccb->c_flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		bus_dmamap_sync(sc->sc_dmat, sc->sc_infom.dm_map, 0,
		    sc->sc_infom.dm_map->dm_mapsize,
		    ccb->c_flags & SCSI_DATA_IN ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
	scsi_io_put(&sc->sc_iopool, ccb);
}
1547 
/*
 * Convert a completed ccb's hardware status into an errno.  Recovered
 * errors and passthrough data underruns are treated as success.
 */
int
ips_error(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_cmd *cmd = &cmdb->cmd;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	struct scsi_xfer *xs = ccb->c_xfer;
	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);

	if (gsc == IPS_STAT_OK)
		return (0);

	/* Dump the failed command for debugging */
	DPRINTF(IPS_D_ERR, ("%s: ips_error: stat 0x%02x, estat 0x%02x, "
	    "cmd code 0x%02x, drive %d, sgcnt %d, lba %u, seccnt %d",
	    sc->sc_dev.dv_xname, ccb->c_stat, ccb->c_estat, cmd->code,
	    cmd->drive, cmd->sgcnt, letoh32(cmd->lba), letoh16(cmd->seccnt)));
	if (cmd->code == IPS_CMD_DCDB || cmd->code == IPS_CMD_DCDB_SG) {
		int i;

		DPRINTF(IPS_D_ERR, (", dcdb device 0x%02x, attr 0x%02x, "
		    "datalen %d, sgcnt %d, status 0x%02x",
		    dcdb->device, dcdb->attr, letoh16(dcdb->datalen),
		    dcdb->sgcnt, dcdb->status));

		DPRINTF(IPS_D_ERR, (", cdb"));
		for (i = 0; i < dcdb->cdblen; i++)
			DPRINTF(IPS_D_ERR, (" %x", dcdb->cdb[i]));
		if (ccb->c_estat == IPS_ESTAT_CKCOND) {
			DPRINTF(IPS_D_ERR, (", sense"));
			for (i = 0; i < dcdb->senselen; i++)
				DPRINTF(IPS_D_ERR, (" %x", dcdb->sense[i]));
		}
	}
	DPRINTF(IPS_D_ERR, ("\n"));

	switch (gsc) {
	case IPS_STAT_RECOV:
		return (0);
	case IPS_STAT_INVOP:
	case IPS_STAT_INVCMD:
	case IPS_STAT_INVPARM:
		return (EINVAL);
	case IPS_STAT_BUSY:
		return (EBUSY);
	case IPS_STAT_TIMO:
		return (ETIMEDOUT);
	case IPS_STAT_PDRVERR:
		/* Physical drive error: refine using the extended status */
		switch (ccb->c_estat) {
		case IPS_ESTAT_SELTIMO:
			return (ENODEV);
		case IPS_ESTAT_OURUN:
			if (xs && letoh16(dcdb->datalen) < xs->datalen)
				/* underrun */
				return (0);
			break;
		case IPS_ESTAT_RECOV:
			return (0);
		}
		break;
	}

	return (EIO);
}
1611 
1612 int
1613 ips_error_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1614 {
1615 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1616 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1617 	struct scsi_xfer *xs = ccb->c_xfer;
1618 	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);
1619 
1620 	/* Map hardware error codes to SCSI ones */
1621 	switch (gsc) {
1622 	case IPS_STAT_OK:
1623 	case IPS_STAT_RECOV:
1624 		return (XS_NOERROR);
1625 	case IPS_STAT_BUSY:
1626 		return (XS_BUSY);
1627 	case IPS_STAT_TIMO:
1628 		return (XS_TIMEOUT);
1629 	case IPS_STAT_PDRVERR:
1630 		switch (ccb->c_estat) {
1631 		case IPS_ESTAT_SELTIMO:
1632 			return (XS_SELTIMEOUT);
1633 		case IPS_ESTAT_OURUN:
1634 			if (xs && letoh16(dcdb->datalen) < xs->datalen)
1635 				/* underrun */
1636 				return (XS_NOERROR);
1637 			break;
1638 		case IPS_ESTAT_HOSTRST:
1639 		case IPS_ESTAT_DEVRST:
1640 			return (XS_RESET);
1641 		case IPS_ESTAT_RECOV:
1642 			return (XS_NOERROR);
1643 		case IPS_ESTAT_CKCOND:
1644 			return (XS_SENSE);
1645 		}
1646 		break;
1647 	}
1648 
1649 	return (XS_DRIVER_STUFFUP);
1650 }
1651 
/*
 * Interrupt handler: drain the controller status queue and complete
 * every finished ccb.  Polled (SCSI_POLL) waiters are only woken up;
 * they complete the ccb themselves in ips_poll().
 */
int
ips_intr(void *arg)
{
	struct ips_softc *sc = arg;
	struct ips_ccb *ccb;
	u_int32_t status;
	int id;

	DPRINTF(IPS_D_XFER, ("%s: ips_intr", sc->sc_dev.dv_xname));
	if (!ips_isintr(sc)) {
		DPRINTF(IPS_D_XFER, (": not ours\n"));
		return (0);
	}
	DPRINTF(IPS_D_XFER, ("\n"));

	/* Process completed commands */
	while ((status = ips_status(sc)) != 0xffffffff) {
		DPRINTF(IPS_D_XFER, ("%s: ips_intr: status 0x%08x\n",
		    sc->sc_dev.dv_xname, status));

		/* Ignore status words that don't map to a known ccb */
		id = IPS_STAT_ID(status);
		if (id >= sc->sc_nccbs) {
			DPRINTF(IPS_D_ERR, ("%s: ips_intr: invalid id %d\n",
			    sc->sc_dev.dv_xname, id));
			continue;
		}

		ccb = &sc->sc_ccb[id];
		if (ccb->c_state != IPS_CCB_QUEUED) {
			DPRINTF(IPS_D_ERR, ("%s: ips_intr: cmd 0x%02x not "
			    "queued, state %d, status 0x%08x\n",
			    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_state,
			    status));
			continue;
		}

		/* Record the completion status before signalling anyone */
		ccb->c_state = IPS_CCB_DONE;
		ccb->c_stat = IPS_STAT_BASIC(status);
		ccb->c_estat = IPS_STAT_EXT(status);

		if (ccb->c_flags & SCSI_POLL) {
			wakeup(ccb);
		} else {
			ips_done(sc, ccb);
		}
	}

	return (1);
}
1701 
/*
 * Watchdog for asynchronous commands that never completed: fake a
 * hardware timeout status and complete the ccb.
 */
void
ips_timeout(void *arg)
{
	struct ips_ccb *ccb = arg;
	struct ips_softc *sc = ccb->c_sc;
	struct scsi_xfer *xs = ccb->c_xfer;
	int s;

	s = splbio();
	if (xs)
		sc_print_addr(xs->sc_link);
	else
		printf("%s: ", sc->sc_dev.dv_xname);
	printf("timeout\n");

	/*
	 * Command never completed. Fake hardware status byte
	 * to indicate timeout.
	 * XXX: need to remove command from controller.
	 */
	ccb->c_stat = IPS_STAT_TIMO;
	ips_done(sc, ccb);
	splx(s);
}
1726 
1727 int
1728 ips_getadapterinfo(struct ips_softc *sc, int flags)
1729 {
1730 	struct ips_ccb *ccb;
1731 	struct ips_cmd *cmd;
1732 
1733 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1734 	if (ccb == NULL)
1735 		return (1);
1736 
1737 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1738 	ccb->c_done = ips_done_mgmt;
1739 
1740 	cmd = ccb->c_cmdbva;
1741 	cmd->code = IPS_CMD_GETADAPTERINFO;
1742 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1743 	    adapter));
1744 
1745 	return (ips_cmd(sc, ccb));
1746 }
1747 
1748 int
1749 ips_getdriveinfo(struct ips_softc *sc, int flags)
1750 {
1751 	struct ips_ccb *ccb;
1752 	struct ips_cmd *cmd;
1753 
1754 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1755 	if (ccb == NULL)
1756 		return (1);
1757 
1758 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1759 	ccb->c_done = ips_done_mgmt;
1760 
1761 	cmd = ccb->c_cmdbva;
1762 	cmd->code = IPS_CMD_GETDRIVEINFO;
1763 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1764 	    drive));
1765 
1766 	return (ips_cmd(sc, ccb));
1767 }
1768 
1769 int
1770 ips_getconf(struct ips_softc *sc, int flags)
1771 {
1772 	struct ips_ccb *ccb;
1773 	struct ips_cmd *cmd;
1774 
1775 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1776 	if (ccb == NULL)
1777 		return (1);
1778 
1779 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1780 	ccb->c_done = ips_done_mgmt;
1781 
1782 	cmd = ccb->c_cmdbva;
1783 	cmd->code = IPS_CMD_READCONF;
1784 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1785 	    conf));
1786 
1787 	return (ips_cmd(sc, ccb));
1788 }
1789 
1790 int
1791 ips_getpg5(struct ips_softc *sc, int flags)
1792 {
1793 	struct ips_ccb *ccb;
1794 	struct ips_cmd *cmd;
1795 
1796 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1797 	if (ccb == NULL)
1798 		return (1);
1799 
1800 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1801 	ccb->c_done = ips_done_mgmt;
1802 
1803 	cmd = ccb->c_cmdbva;
1804 	cmd->code = IPS_CMD_RWNVRAM;
1805 	cmd->drive = 5;
1806 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1807 	    pg5));
1808 
1809 	return (ips_cmd(sc, ccb));
1810 }
1811 
1812 #if NBIO > 0
1813 int
1814 ips_getrblstat(struct ips_softc *sc, int flags)
1815 {
1816 	struct ips_ccb *ccb;
1817 	struct ips_cmd *cmd;
1818 
1819 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1820 	if (ccb == NULL)
1821 		return (1);
1822 
1823 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1824 	ccb->c_done = ips_done_mgmt;
1825 
1826 	cmd = ccb->c_cmdbva;
1827 	cmd->code = IPS_CMD_REBUILDSTATUS;
1828 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1829 	    rblstat));
1830 
1831 	return (ips_cmd(sc, ccb));
1832 }
1833 
1834 int
1835 ips_setstate(struct ips_softc *sc, int chan, int target, int state, int flags)
1836 {
1837 	struct ips_ccb *ccb;
1838 	struct ips_cmd *cmd;
1839 
1840 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1841 	if (ccb == NULL)
1842 		return (1);
1843 
1844 	ccb->c_flags = SCSI_POLL | flags;
1845 	ccb->c_done = ips_done_mgmt;
1846 
1847 	cmd = ccb->c_cmdbva;
1848 	cmd->code = IPS_CMD_SETSTATE;
1849 	cmd->drive = chan;
1850 	cmd->sgcnt = target;
1851 	cmd->seg4g = state;
1852 
1853 	return (ips_cmd(sc, ccb));
1854 }
1855 
1856 int
1857 ips_rebuild(struct ips_softc *sc, int chan, int target, int nchan,
1858     int ntarget, int flags)
1859 {
1860 	struct ips_ccb *ccb;
1861 	struct ips_cmd *cmd;
1862 
1863 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1864 	if (ccb == NULL)
1865 		return (1);
1866 
1867 	ccb->c_flags = SCSI_POLL | flags;
1868 	ccb->c_done = ips_done_mgmt;
1869 
1870 	cmd = ccb->c_cmdbva;
1871 	cmd->code = IPS_CMD_REBUILD;
1872 	cmd->drive = chan;
1873 	cmd->sgcnt = target;
1874 	cmd->seccnt = htole16(ntarget << 8 | nchan);
1875 
1876 	return (ips_cmd(sc, ccb));
1877 }
1878 #endif	/* NBIO > 0 */
1879 
/*
 * Post a command on Copperhead controllers: wait for the command
 * channel semaphore to clear, then write the command buffer physical
 * address and the start bit.
 */
void
ips_copperhead_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	u_int32_t reg;
	int timeout;

	/* Wait up to 10ms (100 iterations of 100us) for the semaphore */
	for (timeout = 100; timeout-- > 0; delay(100)) {
		reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC);
		if ((reg & IPS_REG_CCC_SEM) == 0)
			break;
	}
	/* timeout reaches -1 only when the loop ran to exhaustion */
	if (timeout < 0) {
		printf("%s: semaphore timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCSA, ccb->c_cmdbpa);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC,
	    IPS_REG_CCC_START);
}
1900 
/*
 * Enable Copperhead interrupts via the host interrupt status register.
 */
void
ips_copperhead_intren(struct ips_softc *sc)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, IPS_REG_HIS_EN);
}
1906 
1907 int
1908 ips_copperhead_isintr(struct ips_softc *sc)
1909 {
1910 	u_int8_t reg;
1911 
1912 	reg = bus_space_read_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS);
1913 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, reg);
1914 	if (reg != 0xff && (reg & IPS_REG_HIS_SCE))
1915 		return (1);
1916 
1917 	return (0);
1918 }
1919 
/*
 * Fetch the next completion status from the Copperhead status queue.
 * Returns 0xffffffff when the queue is empty; otherwise advances the
 * software tail/index and writes the new tail back to the hardware.
 */
u_int32_t
ips_copperhead_status(struct ips_softc *sc)
{
	u_int32_t sqhead, sqtail, status;

	sqhead = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH);
	DPRINTF(IPS_D_XFER, ("%s: sqhead 0x%08x, sqtail 0x%08x\n",
	    sc->sc_dev.dv_xname, sqhead, sc->sc_sqtail));

	/* Advance the tail, wrapping at the end of the queue buffer */
	sqtail = sc->sc_sqtail + sizeof(u_int32_t);
	if (sqtail == sc->sc_sqm.dm_paddr + IPS_SQSZ)
		sqtail = sc->sc_sqm.dm_paddr;
	/* Queue empty: the advanced tail catches up with the head */
	if (sqtail == sqhead)
		return (0xffffffff);

	sc->sc_sqtail = sqtail;
	if (++sc->sc_sqidx == IPS_MAXCMDS)
		sc->sc_sqidx = 0;
	status = letoh32(sc->sc_sqbuf[sc->sc_sqidx]);
	/* Acknowledge consumption of the entry to the controller */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT, sqtail);

	return (status);
}
1943 
/*
 * Post a command on Morpheus controllers: write the command buffer
 * physical address to the inbound queue port.
 */
void
ips_morpheus_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_IQP, ccb->c_cmdbpa);
}
1949 
1950 void
1951 ips_morpheus_intren(struct ips_softc *sc)
1952 {
1953 	u_int32_t reg;
1954 
1955 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM);
1956 	reg &= ~IPS_REG_OIM_DS;
1957 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM, reg);
1958 }
1959 
1960 int
1961 ips_morpheus_isintr(struct ips_softc *sc)
1962 {
1963 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIS) &
1964 	    IPS_REG_OIS_PEND);
1965 }
1966 
1967 u_int32_t
1968 ips_morpheus_status(struct ips_softc *sc)
1969 {
1970 	u_int32_t reg;
1971 
1972 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OQP);
1973 	DPRINTF(IPS_D_XFER, ("%s: status 0x%08x\n", sc->sc_dev.dv_xname, reg));
1974 
1975 	return (reg);
1976 }
1977 
/*
 * Allocate and initialize an array of n ccbs.  Each ccb gets a slice
 * of the shared command buffer (virtual and physical address) and its
 * own DMA map for data transfers.  Returns NULL on failure, with any
 * partially created DMA maps destroyed.
 */
struct ips_ccb *
ips_ccb_alloc(struct ips_softc *sc, int n)
{
	struct ips_ccb *ccb;
	int i;

	if ((ccb = malloc(n * sizeof(*ccb), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (NULL);

	for (i = 0; i < n; i++) {
		ccb[i].c_sc = sc;
		ccb[i].c_id = i;
		ccb[i].c_cmdbva = (char *)sc->sc_cmdbm.dm_vaddr +
		    i * sizeof(struct ips_cmdb);
		ccb[i].c_cmdbpa = sc->sc_cmdbm.dm_paddr +
		    i * sizeof(struct ips_cmdb);
		if (bus_dmamap_create(sc->sc_dmat, IPS_MAXFER, IPS_MAXSGS,
		    IPS_MAXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb[i].c_dmam))
			goto fail;
	}

	return (ccb);
fail:
	/* Unwind: destroy the maps of ccbs 0 .. i-1 (creation of i failed) */
	for (; i > 0; i--)
		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
	free(ccb, M_DEVBUF);
	return (NULL);
}
2008 
2009 void
2010 ips_ccb_free(struct ips_softc *sc, struct ips_ccb *ccb, int n)
2011 {
2012 	int i;
2013 
2014 	for (i = 0; i < n; i++)
2015 		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
2016 	free(ccb, M_DEVBUF);
2017 }
2018 
/*
 * iopool backend: take a ccb from the free list.  Returns NULL when
 * the list is empty.
 */
void *
ips_ccb_get(void *xsc)
{
	struct ips_softc *sc = xsc;
	struct ips_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	if ((ccb = SLIST_FIRST(&sc->sc_ccbq_free)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->sc_ccbq_free, c_link);
		ccb->c_flags = 0;
		ccb->c_xfer = NULL;
		/* Hand out a clean command buffer */
		bzero(ccb->c_cmdbva, sizeof(struct ips_cmdb));
	}
	mtx_leave(&sc->sc_ccb_mtx);

	return (ccb);
}
2036 
/*
 * iopool backend: mark a ccb free and return it to the free list.
 */
void
ips_ccb_put(void *xsc, void *xccb)
{
	struct ips_softc *sc = xsc;
	struct ips_ccb *ccb = xccb;

	ccb->c_state = IPS_CCB_FREE;
	mtx_enter(&sc->sc_ccb_mtx);
	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, ccb, c_link);
	mtx_leave(&sc->sc_ccb_mtx);
}
2048 
/*
 * Allocate, map and load a single-segment DMA buffer of the given
 * size, recording everything in *dm.  Returns 0 on success, 1 on
 * failure with all intermediate resources released.
 */
int
ips_dmamem_alloc(struct dmamem *dm, bus_dma_tag_t tag, bus_size_t size)
{
	int nsegs;

	dm->dm_tag = tag;
	dm->dm_size = size;

	if (bus_dmamap_create(tag, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map))
		return (1);
	if (bus_dmamem_alloc(tag, size, 0, 0, &dm->dm_seg, 1, &nsegs,
	    BUS_DMA_NOWAIT))
		goto fail1;
	if (bus_dmamem_map(tag, &dm->dm_seg, 1, size, (caddr_t *)&dm->dm_vaddr,
	    BUS_DMA_NOWAIT))
		goto fail2;
	if (bus_dmamap_load(tag, dm->dm_map, dm->dm_vaddr, size, NULL,
	    BUS_DMA_NOWAIT))
		goto fail3;

	return (0);

	/* Unwind in reverse order of setup */
fail3:
	bus_dmamem_unmap(tag, dm->dm_vaddr, size);
fail2:
	bus_dmamem_free(tag, &dm->dm_seg, 1);
fail1:
	bus_dmamap_destroy(tag, dm->dm_map);
	return (1);
}
2080 
/*
 * Release a DMA buffer set up by ips_dmamem_alloc(), in reverse order
 * of its construction.
 */
void
ips_dmamem_free(struct dmamem *dm)
{
	bus_dmamap_unload(dm->dm_tag, dm->dm_map);
	bus_dmamem_unmap(dm->dm_tag, dm->dm_vaddr, dm->dm_size);
	bus_dmamem_free(dm->dm_tag, &dm->dm_seg, 1);
	bus_dmamap_destroy(dm->dm_tag, dm->dm_map);
}
2089