1 /*	$OpenBSD: ips.c,v 1.112 2015/09/10 18:10:34 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2006, 2007, 2009 Alexander Yurchenko <grange@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for IBM (Adaptec) ServeRAID controllers.
21  */
22 
23 #include "bio.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/device.h>
28 #include <sys/ioctl.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/sensors.h>
32 #include <sys/timeout.h>
33 #include <sys/queue.h>
34 
35 #include <machine/bus.h>
36 
37 #include <scsi/scsi_all.h>
38 #include <scsi/scsi_disk.h>
39 #include <scsi/scsiconf.h>
40 
41 #include <dev/biovar.h>
42 
43 #include <dev/pci/pcidevs.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 
47 /* Debug levels */
48 #define IPS_D_ERR	0x0001	/* errors */
49 #define IPS_D_INFO	0x0002	/* information */
50 #define IPS_D_XFER	0x0004	/* transfers */
51 
52 #ifdef IPS_DEBUG
53 #define DPRINTF(a, b)	do { if (ips_debug & (a)) printf b; } while (0)
54 int ips_debug = IPS_D_ERR;
55 #else
56 #define DPRINTF(a, b)
57 #endif
58 
59 #define IPS_MAXDRIVES		8
60 #define IPS_MAXCHANS		4
61 #define IPS_MAXTARGETS		16
62 #define IPS_MAXCHUNKS		16
63 #define IPS_MAXCMDS		128
64 
65 #define IPS_MAXFER		(64 * 1024)
66 #define IPS_MAXSGS		16
67 #define IPS_MAXCDB		12
68 
69 #define IPS_SECSZ		512
70 #define IPS_NVRAMPGSZ		128
71 #define IPS_SQSZ		(IPS_MAXCMDS * sizeof(u_int32_t))
72 
73 #define	IPS_TIMEOUT		60000	/* ms */
74 
75 /* Command codes */
76 #define IPS_CMD_READ		0x02
77 #define IPS_CMD_WRITE		0x03
78 #define IPS_CMD_DCDB		0x04
79 #define IPS_CMD_GETADAPTERINFO	0x05
80 #define IPS_CMD_FLUSH		0x0a
81 #define IPS_CMD_REBUILDSTATUS	0x0c
82 #define IPS_CMD_SETSTATE	0x10
83 #define IPS_CMD_REBUILD		0x16
84 #define IPS_CMD_ERRORTABLE	0x17
85 #define IPS_CMD_GETDRIVEINFO	0x19
86 #define IPS_CMD_RESETCHAN	0x1a
87 #define IPS_CMD_DOWNLOAD	0x20
88 #define IPS_CMD_RWBIOSFW	0x22
89 #define IPS_CMD_READCONF	0x38
90 #define IPS_CMD_GETSUBSYS	0x40
91 #define IPS_CMD_CONFIGSYNC	0x58
92 #define IPS_CMD_READ_SG		0x82
93 #define IPS_CMD_WRITE_SG	0x83
94 #define IPS_CMD_DCDB_SG		0x84
95 #define IPS_CMD_EDCDB		0x95
96 #define IPS_CMD_EDCDB_SG	0x96
97 #define IPS_CMD_RWNVRAMPAGE	0xbc
98 #define IPS_CMD_GETVERINFO	0xc6
99 #define IPS_CMD_FFDC		0xd7
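/*
 * IPS_CMD_SG is not a command on its own: the callers of ips_load_xs()
 * OR it into a read, write or DCDB opcode when a scatter-gather list is
 * attached.  IPS_CMD_RWNVRAM is the same opcode as IPS_CMD_RWNVRAMPAGE.
 */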
100 #define IPS_CMD_SG		0x80
101 #define IPS_CMD_RWNVRAM		0xbc
102 
103 /* DCDB attributes */
104 #define IPS_DCDB_DATAIN		0x01	/* data input */
105 #define IPS_DCDB_DATAOUT	0x02	/* data output */
106 #define IPS_DCDB_XFER64K	0x08	/* 64K transfer */
107 #define IPS_DCDB_TIMO10		0x10	/* 10 secs timeout */
108 #define IPS_DCDB_TIMO60		0x20	/* 60 secs timeout */
109 #define IPS_DCDB_TIMO20M	0x30	/* 20 mins timeout */
110 #define IPS_DCDB_NOAUTOREQSEN	0x40	/* no auto request sense */
111 #define IPS_DCDB_DISCON		0x80	/* disconnect allowed */
112 
113 /* Register definitions */
114 #define IPS_REG_HIS		0x08	/* host interrupt status */
115 #define IPS_REG_HIS_SCE			0x01	/* status channel enqueue */
116 #define IPS_REG_HIS_EN			0x80	/* enable interrupts */
117 #define IPS_REG_CCSA		0x10	/* command channel system address */
118 #define IPS_REG_CCC		0x14	/* command channel control */
119 #define IPS_REG_CCC_SEM			0x0008	/* semaphore */
120 #define IPS_REG_CCC_START		0x101a	/* start command */
121 #define IPS_REG_SQH		0x20	/* status queue head */
122 #define IPS_REG_SQT		0x24	/* status queue tail */
123 #define IPS_REG_SQE		0x28	/* status queue end */
124 #define IPS_REG_SQS		0x2c	/* status queue start */
125 
126 #define IPS_REG_OIS		0x30	/* outbound interrupt status */
127 #define IPS_REG_OIS_PEND		0x0008	/* interrupt is pending */
128 #define IPS_REG_OIM		0x34	/* outbound interrupt mask */
129 #define IPS_REG_OIM_DS			0x0008	/* disable interrupts */
130 #define IPS_REG_IQP		0x40	/* inbound queue port */
131 #define IPS_REG_OQP		0x44	/* outbound queue port */
132 
133 /* Status word fields */
134 #define IPS_STAT_ID(x)		(((x) >> 8) & 0xff)	/* command id */
135 #define IPS_STAT_BASIC(x)	(((x) >> 16) & 0xff)	/* basic status */
136 #define IPS_STAT_EXT(x)		(((x) >> 24) & 0xff)	/* ext status */
137 #define IPS_STAT_GSC(x)		((x) & 0x0f)
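/*
 * ips_intr() splits each 32-bit status word with the macros above; the
 * generic status code (GSC) is the low nibble of the basic status byte
 * and is what the IPS_STAT_* codes below are matched against.
 */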
138 
139 /* Basic status codes */
140 #define IPS_STAT_OK		0x00	/* success */
141 #define IPS_STAT_RECOV		0x01	/* recovered error */
142 #define IPS_STAT_INVOP		0x03	/* invalid opcode */
143 #define IPS_STAT_INVCMD		0x04	/* invalid command block */
144 #define IPS_STAT_INVPARM	0x05	/* invalid parameters block */
145 #define IPS_STAT_BUSY		0x08	/* busy */
146 #define IPS_STAT_CMPLERR	0x0c	/* completed with error */
147 #define IPS_STAT_LDERR		0x0d	/* logical drive error */
148 #define IPS_STAT_TIMO		0x0e	/* timeout */
149 #define IPS_STAT_PDRVERR	0x0f	/* physical drive error */
150 
151 /* Extended status codes */
152 #define IPS_ESTAT_SELTIMO	0xf0	/* select timeout */
153 #define IPS_ESTAT_OURUN		0xf2	/* over/underrun */
154 #define IPS_ESTAT_HOSTRST	0xf7	/* host reset */
155 #define IPS_ESTAT_DEVRST	0xf8	/* device reset */
156 #define IPS_ESTAT_RECOV		0xfc	/* recovered error */
157 #define IPS_ESTAT_CKCOND	0xff	/* check condition */
158 
159 #define IPS_IOSIZE		128	/* max space size to map */
160 
161 /* Command frame */
162 struct ips_cmd {
163 	u_int8_t	code;
164 	u_int8_t	id;
165 	u_int8_t	drive;
166 	u_int8_t	sgcnt;
167 	u_int32_t	lba;
168 	u_int32_t	sgaddr;
169 	u_int16_t	seccnt;
170 	u_int8_t	seg4g;
171 	u_int8_t	esg;
172 	u_int32_t	ccsar;
173 	u_int32_t	cccr;
174 };
175 
176 /* Direct CDB (SCSI pass-through) frame */
177 struct ips_dcdb {
178 	u_int8_t	device;
179 	u_int8_t	attr;
180 	u_int16_t	datalen;
181 	u_int32_t	sgaddr;
182 	u_int8_t	cdblen;
183 	u_int8_t	senselen;
184 	u_int8_t	sgcnt;
185 	u_int8_t	__reserved1;
186 	u_int8_t	cdb[IPS_MAXCDB];
187 	u_int8_t	sense[64];
188 	u_int8_t	status;
189 	u_int8_t	__reserved2[3];
190 };
191 
192 /* Scatter-gather array element */
193 struct ips_sg {
194 	u_int32_t	addr;
195 	u_int32_t	size;
196 };
197 
198 /* Command block */
199 struct ips_cmdb {
200 	struct ips_cmd	cmd;
201 	struct ips_dcdb	dcdb;
202 	struct ips_sg	sg[IPS_MAXSGS];
203 };
204 
205 /* Data frames */
206 struct ips_adapterinfo {
207 	u_int8_t	drivecnt;
208 	u_int8_t	miscflag;
209 	u_int8_t	sltflag;
210 	u_int8_t	bstflag;
211 	u_int8_t	pwrchgcnt;
212 	u_int8_t	wrongaddrcnt;
213 	u_int8_t	unidentcnt;
214 	u_int8_t	nvramdevchgcnt;
215 	u_int8_t	firmware[8];
216 	u_int8_t	bios[8];
217 	u_int32_t	drivesize[IPS_MAXDRIVES];
218 	u_int8_t	cmdcnt;
219 	u_int8_t	maxphysdevs;
220 	u_int16_t	flashrepgmcnt;
221 	u_int8_t	defunctdiskcnt;
222 	u_int8_t	rebuildflag;
223 	u_int8_t	offdrivecnt;
224 	u_int8_t	critdrivecnt;
225 	u_int16_t	confupdcnt;
226 	u_int8_t	blkflag;
227 	u_int8_t	__reserved;
228 	u_int16_t	deaddisk[IPS_MAXCHANS][IPS_MAXTARGETS];
229 };
230 
231 struct ips_driveinfo {
232 	u_int8_t	drivecnt;
233 	u_int8_t	__reserved[3];
234 	struct ips_drive {
235 		u_int8_t	id;
236 		u_int8_t	__reserved;
237 		u_int8_t	raid;
238 		u_int8_t	state;
239 #define IPS_DS_FREE	0x00
240 #define IPS_DS_OFFLINE	0x02
241 #define IPS_DS_ONLINE	0x03
242 #define IPS_DS_DEGRADED	0x04
243 #define IPS_DS_SYS	0x06
244 #define IPS_DS_CRS	0x24
245 
246 		u_int32_t	seccnt;
247 	}		drive[IPS_MAXDRIVES];
248 };
249 
250 struct ips_conf {
251 	u_int8_t	ldcnt;
252 	u_int8_t	day;
253 	u_int8_t	month;
254 	u_int8_t	year;
255 	u_int8_t	initid[4];
256 	u_int8_t	hostid[12];
257 	u_int8_t	time[8];
258 	u_int32_t	useropt;
259 	u_int16_t	userfield;
260 	u_int8_t	rebuildrate;
261 	u_int8_t	__reserved1;
262 
263 	struct ips_hw {
264 		u_int8_t	board[8];
265 		u_int8_t	cpu[8];
266 		u_int8_t	nchantype;
267 		u_int8_t	nhostinttype;
268 		u_int8_t	compression;
269 		u_int8_t	nvramtype;
270 		u_int32_t	nvramsize;
271 	}		hw;
272 
273 	struct ips_ld {
274 		u_int16_t	userfield;
275 		u_int8_t	state;
276 		u_int8_t	raidcacheparam;
277 		u_int8_t	chunkcnt;
278 		u_int8_t	stripesize;
279 		u_int8_t	params;
280 		u_int8_t	__reserved;
281 		u_int32_t	size;
282 
283 		struct ips_chunk {
284 			u_int8_t	channel;
285 			u_int8_t	target;
286 			u_int16_t	__reserved;
287 			u_int32_t	startsec;
288 			u_int32_t	seccnt;
289 		}		chunk[IPS_MAXCHUNKS];
290 	}		ld[IPS_MAXDRIVES];
291 
292 	struct ips_dev {
293 		u_int8_t	initiator;
294 		u_int8_t	params;
295 		u_int8_t	miscflag;
296 		u_int8_t	state;
297 #define IPS_DVS_STANDBY	0x01
298 #define IPS_DVS_REBUILD	0x02
299 #define IPS_DVS_SPARE	0x04
300 #define IPS_DVS_MEMBER	0x08
301 #define IPS_DVS_ONLINE	0x80
302 #define IPS_DVS_READY	(IPS_DVS_STANDBY | IPS_DVS_ONLINE)
303 
304 		u_int32_t	seccnt;
305 		u_int8_t	devid[28];
306 	}		dev[IPS_MAXCHANS][IPS_MAXTARGETS];
307 
308 	u_int8_t	reserved[512];
309 };
310 
311 struct ips_rblstat {
312 	u_int8_t	__unknown[20];
313 	struct {
314 		u_int8_t	__unknown[4];
315 		u_int32_t	total;
316 		u_int32_t	remain;
317 	}		ld[IPS_MAXDRIVES];
318 };
319 
320 struct ips_pg5 {
321 	u_int32_t	signature;
322 	u_int8_t	__reserved1;
323 	u_int8_t	slot;
324 	u_int16_t	type;
325 	u_int8_t	bioshi[4];
326 	u_int8_t	bioslo[4];
327 	u_int16_t	__reserved2;
328 	u_int8_t	__reserved3;
329 	u_int8_t	os;
330 	u_int8_t	driverhi[4];
331 	u_int8_t	driverlo[4];
332 	u_int8_t	__reserved4[100];
333 };
334 
335 struct ips_info {
336 	struct ips_adapterinfo	adapter;
337 	struct ips_driveinfo	drive;
338 	struct ips_conf		conf;
339 	struct ips_rblstat	rblstat;
340 	struct ips_pg5		pg5;
341 };
342 
343 /* Command control block */
344 struct ips_softc;
345 struct ips_ccb {
346 	struct ips_softc *	c_sc;		/* driver softc */
347 	int			c_id;		/* command id */
348 	int			c_flags;	/* SCSI_* flags */
349 	enum {
350 		IPS_CCB_FREE,
351 		IPS_CCB_QUEUED,
352 		IPS_CCB_DONE
353 	}			c_state;	/* command state */
354 
355 	void *			c_cmdbva;	/* command block virt addr */
356 	paddr_t			c_cmdbpa;	/* command block phys addr */
357 	bus_dmamap_t		c_dmam;		/* data buffer DMA map */
358 
359 	struct scsi_xfer *	c_xfer;		/* corresponding SCSI xfer */
360 
361 	u_int8_t		c_stat;		/* status byte copy */
362 	u_int8_t		c_estat;	/* ext status byte copy */
363 	int			c_error;	/* completion error */
364 
365 	void			(*c_done)(struct ips_softc *,	/* cmd done */
366 				    struct ips_ccb *);		/* callback */
367 
368 	SLIST_ENTRY(ips_ccb)	c_link;		/* queue link */
369 };
370 
371 /* CCB queue */
372 SLIST_HEAD(ips_ccbq, ips_ccb);
373 
374 /* DMA-able chunk of memory */
375 struct dmamem {
376 	bus_dma_tag_t		dm_tag;
377 	bus_dmamap_t		dm_map;
378 	bus_dma_segment_t	dm_seg;
379 	bus_size_t		dm_size;
380 	void *			dm_vaddr;
381 #define dm_paddr dm_seg.ds_addr
382 };
383 
384 struct ips_softc {
385 	struct device		sc_dev;
386 
387 	struct scsi_link	sc_scsi_link;
388 	struct scsibus_softc *	sc_scsibus;
389 
390 	struct ips_pt {
391 		struct ips_softc *	pt_sc;
392 		int			pt_chan;
393 
394 		struct scsi_link	pt_link;
395 
396 		int			pt_proctgt;
397 		char			pt_procdev[16];
398 	}			sc_pt[IPS_MAXCHANS];
399 
400 	struct ksensordev	sc_sensordev;
401 	struct ksensor *	sc_sensors;
402 
403 	bus_space_tag_t		sc_iot;
404 	bus_space_handle_t	sc_ioh;
405 	bus_dma_tag_t		sc_dmat;
406 
407 	const struct ips_chipset *sc_chip;
408 
409 	struct ips_info *	sc_info;
410 	struct dmamem		sc_infom;
411 
412 	int			sc_nunits;
413 
414 	struct dmamem		sc_cmdbm;
415 
416 	struct ips_ccb *	sc_ccb;
417 	int			sc_nccbs;
418 	struct ips_ccbq		sc_ccbq_free;
419 	struct mutex		sc_ccb_mtx;
420 	struct scsi_iopool	sc_iopool;
421 
422 	struct dmamem		sc_sqm;
423 	paddr_t			sc_sqtail;
424 	u_int32_t *		sc_sqbuf;
425 	int			sc_sqidx;
426 };
427 
428 int	ips_match(struct device *, void *, void *);
429 void	ips_attach(struct device *, struct device *, void *);
430 
431 void	ips_scsi_cmd(struct scsi_xfer *);
432 void	ips_scsi_pt_cmd(struct scsi_xfer *);
433 int	ips_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
434 
435 #if NBIO > 0
436 int	ips_ioctl(struct device *, u_long, caddr_t);
437 int	ips_ioctl_inq(struct ips_softc *, struct bioc_inq *);
438 int	ips_ioctl_vol(struct ips_softc *, struct bioc_vol *);
439 int	ips_ioctl_disk(struct ips_softc *, struct bioc_disk *);
440 int	ips_ioctl_setstate(struct ips_softc *, struct bioc_setstate *);
441 #endif
442 
443 #ifndef SMALL_KERNEL
444 void	ips_sensors(void *);
445 #endif
446 
447 int	ips_load_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
448 void	ips_start_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
449 
450 int	ips_cmd(struct ips_softc *, struct ips_ccb *);
451 int	ips_poll(struct ips_softc *, struct ips_ccb *);
452 void	ips_done(struct ips_softc *, struct ips_ccb *);
453 void	ips_done_xs(struct ips_softc *, struct ips_ccb *);
454 void	ips_done_pt(struct ips_softc *, struct ips_ccb *);
455 void	ips_done_mgmt(struct ips_softc *, struct ips_ccb *);
456 int	ips_error(struct ips_softc *, struct ips_ccb *);
457 int	ips_error_xs(struct ips_softc *, struct ips_ccb *);
458 int	ips_intr(void *);
459 void	ips_timeout(void *);
460 
461 int	ips_getadapterinfo(struct ips_softc *, int);
462 int	ips_getdriveinfo(struct ips_softc *, int);
463 int	ips_getconf(struct ips_softc *, int);
464 int	ips_getpg5(struct ips_softc *, int);
465 
466 #if NBIO > 0
467 int	ips_getrblstat(struct ips_softc *, int);
468 int	ips_setstate(struct ips_softc *, int, int, int, int);
469 int	ips_rebuild(struct ips_softc *, int, int, int, int, int);
470 #endif
471 
472 void	ips_copperhead_exec(struct ips_softc *, struct ips_ccb *);
473 void	ips_copperhead_intren(struct ips_softc *);
474 int	ips_copperhead_isintr(struct ips_softc *);
475 u_int32_t ips_copperhead_status(struct ips_softc *);
476 
477 void	ips_morpheus_exec(struct ips_softc *, struct ips_ccb *);
478 void	ips_morpheus_intren(struct ips_softc *);
479 int	ips_morpheus_isintr(struct ips_softc *);
480 u_int32_t ips_morpheus_status(struct ips_softc *);
481 
482 struct ips_ccb *ips_ccb_alloc(struct ips_softc *, int);
483 void	ips_ccb_free(struct ips_softc *, struct ips_ccb *, int);
484 void	*ips_ccb_get(void *);
485 void	ips_ccb_put(void *, void *);
486 
487 int	ips_dmamem_alloc(struct dmamem *, bus_dma_tag_t, bus_size_t);
488 void	ips_dmamem_free(struct dmamem *);
489 
490 struct cfattach ips_ca = {
491 	sizeof(struct ips_softc),
492 	ips_match,
493 	ips_attach
494 };
495 
496 struct cfdriver ips_cd = {
497 	NULL, "ips", DV_DULL
498 };
499 
500 static struct scsi_adapter ips_scsi_adapter = {
501 	ips_scsi_cmd,
502 	scsi_minphys,
503 	NULL,
504 	NULL,
505 	ips_scsi_ioctl
506 };
507 
508 static struct scsi_adapter ips_scsi_pt_adapter = {
509 	ips_scsi_pt_cmd,
510 	scsi_minphys,
511 	NULL,
512 	NULL,
513 	NULL
514 };
515 
516 static const struct pci_matchid ips_ids[] = {
517 	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID },
518 	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID2 },
519 	{ PCI_VENDOR_ADP2,	PCI_PRODUCT_ADP2_SERVERAID }
520 };
521 
522 static const struct ips_chipset {
523 	enum {
524 		IPS_CHIP_COPPERHEAD = 0,
525 		IPS_CHIP_MORPHEUS
526 	}		ic_id;
527 
528 	int		ic_bar;
529 
530 	void		(*ic_exec)(struct ips_softc *, struct ips_ccb *);
531 	void		(*ic_intren)(struct ips_softc *);
532 	int		(*ic_isintr)(struct ips_softc *);
533 	u_int32_t	(*ic_status)(struct ips_softc *);
534 } ips_chips[] = {
535 	{
536 		IPS_CHIP_COPPERHEAD,
537 		0x14,
538 		ips_copperhead_exec,
539 		ips_copperhead_intren,
540 		ips_copperhead_isintr,
541 		ips_copperhead_status
542 	},
543 	{
544 		IPS_CHIP_MORPHEUS,
545 		0x10,
546 		ips_morpheus_exec,
547 		ips_morpheus_intren,
548 		ips_morpheus_isintr,
549 		ips_morpheus_status
550 	}
551 };
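/*
 * Copperhead is used for the original PCI_PRODUCT_IBM_SERVERAID boards;
 * every other supported ID gets Morpheus (see the chipset selection in
 * ips_attach()).
 */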
552 
553 #define ips_exec(s, c)	(s)->sc_chip->ic_exec((s), (c))
554 #define ips_intren(s)	(s)->sc_chip->ic_intren((s))
555 #define ips_isintr(s)	(s)->sc_chip->ic_isintr((s))
556 #define ips_status(s)	(s)->sc_chip->ic_status((s))
557 
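/* Adapter model names, indexed by the type field from NVRAM page 5 */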
558 static const char *ips_names[] = {
559 	NULL,
560 	NULL,
561 	"II",
562 	"onboard",
563 	"onboard",
564 	"3H",
565 	"3L",
566 	"4H",
567 	"4M",
568 	"4L",
569 	"4Mx",
570 	"4Lx",
571 	"5i",
572 	"5i",
573 	"6M",
574 	"6i",
575 	"7t",
576 	"7k",
577 	"7M"
578 };
579 
580 int
581 ips_match(struct device *parent, void *match, void *aux)
582 {
583 	return (pci_matchbyid(aux, ips_ids,
584 	    sizeof(ips_ids) / sizeof(ips_ids[0])));
585 }
586 
587 void
588 ips_attach(struct device *parent, struct device *self, void *aux)
589 {
590 	struct ips_softc *sc = (struct ips_softc *)self;
591 	struct pci_attach_args *pa = aux;
592 	struct ips_ccb ccb0;
593 	struct scsibus_attach_args saa;
594 	struct ips_adapterinfo *ai;
595 	struct ips_driveinfo *di;
596 	struct ips_pg5 *pg5;
597 	pcireg_t maptype;
598 	bus_size_t iosize;
599 	pci_intr_handle_t ih;
600 	const char *intrstr;
601 	int type, i;
602 
603 	sc->sc_dmat = pa->pa_dmat;
604 
605 	/* Identify chipset */
606 	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_IBM_SERVERAID)
607 		sc->sc_chip = &ips_chips[IPS_CHIP_COPPERHEAD];
608 	else
609 		sc->sc_chip = &ips_chips[IPS_CHIP_MORPHEUS];
610 
611 	/* Map registers */
612 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_chip->ic_bar);
613 	if (pci_mapreg_map(pa, sc->sc_chip->ic_bar, maptype, 0, &sc->sc_iot,
614 	    &sc->sc_ioh, NULL, &iosize, IPS_IOSIZE)) {
615 		printf(": can't map regs\n");
616 		return;
617 	}
618 
619 	/* Allocate command buffer */
620 	if (ips_dmamem_alloc(&sc->sc_cmdbm, sc->sc_dmat,
621 	    IPS_MAXCMDS * sizeof(struct ips_cmdb))) {
622 		printf(": can't alloc cmd buffer\n");
623 		goto fail1;
624 	}
625 
626 	/* Allocate info buffer */
627 	if (ips_dmamem_alloc(&sc->sc_infom, sc->sc_dmat,
628 	    sizeof(struct ips_info))) {
629 		printf(": can't alloc info buffer\n");
630 		goto fail2;
631 	}
632 	sc->sc_info = sc->sc_infom.dm_vaddr;
633 	ai = &sc->sc_info->adapter;
634 	di = &sc->sc_info->drive;
635 	pg5 = &sc->sc_info->pg5;
636 
637 	/* Allocate status queue for the Copperhead chipset */
638 	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD) {
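	/*
	 * The status queue is a ring of 32-bit completion words shared with
	 * the adapter: SQS/SQE delimit the ring, the adapter presumably
	 * advances the head (SQH) as it posts completions, and the driver
	 * advances the tail (SQT) as it consumes them in
	 * ips_copperhead_status().
	 */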
639 		if (ips_dmamem_alloc(&sc->sc_sqm, sc->sc_dmat, IPS_SQSZ)) {
640 			printf(": can't alloc status queue\n");
641 			goto fail3;
642 		}
643 		sc->sc_sqtail = sc->sc_sqm.dm_paddr;
644 		sc->sc_sqbuf = sc->sc_sqm.dm_vaddr;
645 		sc->sc_sqidx = 0;
646 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQS,
647 		    sc->sc_sqm.dm_paddr);
648 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQE,
649 		    sc->sc_sqm.dm_paddr + IPS_SQSZ);
650 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH,
651 		    sc->sc_sqm.dm_paddr + sizeof(u_int32_t));
652 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT,
653 		    sc->sc_sqm.dm_paddr);
654 	}
655 
656 	/* Bootstrap CCB queue */
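	/*
	 * A single CCB on the stack is enough to issue the management
	 * commands below; the real CCB array is allocated once the adapter
	 * has reported how many concurrent commands it supports (ai->cmdcnt).
	 */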
657 	sc->sc_nccbs = 1;
658 	sc->sc_ccb = &ccb0;
659 	bzero(&ccb0, sizeof(ccb0));
660 	ccb0.c_cmdbva = sc->sc_cmdbm.dm_vaddr;
661 	ccb0.c_cmdbpa = sc->sc_cmdbm.dm_paddr;
662 	SLIST_INIT(&sc->sc_ccbq_free);
663 	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, &ccb0, c_link);
664 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
665 	scsi_iopool_init(&sc->sc_iopool, sc, ips_ccb_get, ips_ccb_put);
666 
667 	/* Get adapter info */
668 	if (ips_getadapterinfo(sc, SCSI_NOSLEEP)) {
669 		printf(": can't get adapter info\n");
670 		goto fail4;
671 	}
672 
673 	/* Get logical drives info */
674 	if (ips_getdriveinfo(sc, SCSI_NOSLEEP)) {
675 		printf(": can't get ld info\n");
676 		goto fail4;
677 	}
678 	sc->sc_nunits = di->drivecnt;
679 
680 	/* Get configuration */
681 	if (ips_getconf(sc, SCSI_NOSLEEP)) {
682 		printf(": can't get config\n");
683 		goto fail4;
684 	}
685 
686 	/* Read NVRAM page 5 for additional info */
687 	(void)ips_getpg5(sc, SCSI_NOSLEEP);
688 
689 	/* Initialize CCB queue */
690 	sc->sc_nccbs = ai->cmdcnt;
691 	if ((sc->sc_ccb = ips_ccb_alloc(sc, sc->sc_nccbs)) == NULL) {
692 		printf(": can't alloc ccb queue\n");
693 		goto fail4;
694 	}
695 	SLIST_INIT(&sc->sc_ccbq_free);
696 	for (i = 0; i < sc->sc_nccbs; i++)
697 		SLIST_INSERT_HEAD(&sc->sc_ccbq_free,
698 		    &sc->sc_ccb[i], c_link);
699 
700 	/* Install interrupt handler */
701 	if (pci_intr_map(pa, &ih)) {
702 		printf(": can't map interrupt\n");
703 		goto fail5;
704 	}
705 	intrstr = pci_intr_string(pa->pa_pc, ih);
706 	if (pci_intr_establish(pa->pa_pc, ih, IPL_BIO, ips_intr, sc,
707 	    sc->sc_dev.dv_xname) == NULL) {
708 		printf(": can't establish interrupt");
709 		if (intrstr != NULL)
710 			printf(" at %s", intrstr);
711 		printf("\n");
712 		goto fail5;
713 	}
714 	printf(": %s\n", intrstr);
715 
716 	/* Display adapter info */
717 	printf("%s: ServeRAID", sc->sc_dev.dv_xname);
718 	type = letoh16(pg5->type);
719 	if (type < sizeof(ips_names) / sizeof(ips_names[0]) && ips_names[type])
720 		printf(" %s", ips_names[type]);
721 	printf(", FW %c%c%c%c%c%c%c", ai->firmware[0], ai->firmware[1],
722 	    ai->firmware[2], ai->firmware[3], ai->firmware[4], ai->firmware[5],
723 	    ai->firmware[6]);
724 	printf(", BIOS %c%c%c%c%c%c%c", ai->bios[0], ai->bios[1], ai->bios[2],
725 	    ai->bios[3], ai->bios[4], ai->bios[5], ai->bios[6]);
726 	printf(", %d cmds, %d LD%s", sc->sc_nccbs, sc->sc_nunits,
727 	    (sc->sc_nunits == 1 ? "" : "s"));
728 	printf("\n");
729 
730 	/* Attach SCSI bus */
731 	if (sc->sc_nunits > 0)
732 		sc->sc_scsi_link.openings = sc->sc_nccbs / sc->sc_nunits;
733 	sc->sc_scsi_link.adapter_target = sc->sc_nunits;
734 	sc->sc_scsi_link.adapter_buswidth = sc->sc_nunits;
735 	sc->sc_scsi_link.adapter = &ips_scsi_adapter;
736 	sc->sc_scsi_link.adapter_softc = sc;
737 	sc->sc_scsi_link.pool = &sc->sc_iopool;
738 
739 	bzero(&saa, sizeof(saa));
740 	saa.saa_sc_link = &sc->sc_scsi_link;
741 	sc->sc_scsibus = (struct scsibus_softc *)config_found(self, &saa,
742 	    scsiprint);
743 
744 	/* For each channel attach SCSI pass-through bus */
745 	bzero(&saa, sizeof(saa));
746 	for (i = 0; i < IPS_MAXCHANS; i++) {
747 		struct ips_pt *pt;
748 		struct scsi_link *link;
749 		int target, lastarget;
750 
751 		pt = &sc->sc_pt[i];
752 		pt->pt_sc = sc;
753 		pt->pt_chan = i;
754 		pt->pt_proctgt = -1;
755 
756 		/* Check if channel has any devices besides disks */
757 		for (target = 0, lastarget = -1; target < IPS_MAXTARGETS;
758 		    target++) {
759 			struct ips_dev *idev;
760 			int type;
761 
762 			idev = &sc->sc_info->conf.dev[i][target];
763 			type = idev->params & SID_TYPE;
764 			if (idev->state && type != T_DIRECT) {
765 				lastarget = target;
766 				if (type == T_PROCESSOR ||
767 				    type == T_ENCLOSURE)
768 					/* remember processor/enclosure address */
769 					pt->pt_proctgt = target;
770 			}
771 		}
772 		if (lastarget == -1)
773 			continue;
774 
775 		link = &pt->pt_link;
776 		link->openings = 1;
777 		link->adapter_target = IPS_MAXTARGETS;
778 		link->adapter_buswidth = lastarget + 1;
779 		link->adapter = &ips_scsi_pt_adapter;
780 		link->adapter_softc = pt;
781 		link->pool = &sc->sc_iopool;
782 
783 		saa.saa_sc_link = link;
784 		config_found(self, &saa, scsiprint);
785 	}
786 
787 	/* Enable interrupts */
788 	ips_intren(sc);
789 
790 #if NBIO > 0
791 	/* Install ioctl handler */
792 	if (bio_register(&sc->sc_dev, ips_ioctl))
793 		printf("%s: no ioctl support\n", sc->sc_dev.dv_xname);
794 #endif
795 
796 #ifndef SMALL_KERNEL
797 	/* Add sensors */
798 	if ((sc->sc_sensors = mallocarray(sc->sc_nunits, sizeof(struct ksensor),
799 	    M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
800 		printf(": can't alloc sensors\n");
801 		return;
802 	}
803 	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
804 	    sizeof(sc->sc_sensordev.xname));
805 	for (i = 0; i < sc->sc_nunits; i++) {
806 		struct device *dev;
807 
808 		sc->sc_sensors[i].type = SENSOR_DRIVE;
809 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
810 		dev = scsi_get_link(sc->sc_scsibus, i, 0)->device_softc;
811 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
812 		    sizeof(sc->sc_sensors[i].desc));
813 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
814 	}
815 	if (sensor_task_register(sc, ips_sensors, 10) == NULL) {
816 		printf(": no sensors support\n");
817 		free(sc->sc_sensors, M_DEVBUF,
818 		    sc->sc_nunits * sizeof(struct ksensor));
819 		return;
820 	}
821 	sensordev_install(&sc->sc_sensordev);
822 #endif	/* !SMALL_KERNEL */
823 
824 	return;
825 fail5:
826 	ips_ccb_free(sc, sc->sc_ccb, sc->sc_nccbs);
827 fail4:
828 	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD)
829 		ips_dmamem_free(&sc->sc_sqm);
830 fail3:
831 	ips_dmamem_free(&sc->sc_infom);
832 fail2:
833 	ips_dmamem_free(&sc->sc_cmdbm);
834 fail1:
835 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
836 }
837 
838 void
839 ips_scsi_cmd(struct scsi_xfer *xs)
840 {
841 	struct scsi_link *link = xs->sc_link;
842 	struct ips_softc *sc = link->adapter_softc;
843 	struct ips_driveinfo *di = &sc->sc_info->drive;
844 	struct ips_drive *drive;
845 	struct scsi_inquiry_data inq;
846 	struct scsi_read_cap_data rcd;
847 	struct scsi_sense_data sd;
848 	struct scsi_rw *rw;
849 	struct scsi_rw_big *rwb;
850 	struct ips_ccb *ccb = xs->io;
851 	struct ips_cmd *cmd;
852 	int target = link->target;
853 	u_int32_t blkno, blkcnt;
854 	int code;
855 
856 	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_cmd: xs %p, target %d, "
857 	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, target,
858 	    xs->cmd->opcode, xs->flags));
859 
860 	if (target >= sc->sc_nunits || link->lun != 0) {
861 		DPRINTF(IPS_D_INFO, ("%s: ips_scsi_cmd: invalid params "
862 		    "target %d, lun %d\n", sc->sc_dev.dv_xname,
863 		    target, link->lun));
864 		xs->error = XS_DRIVER_STUFFUP;
865 		scsi_done(xs);
866 		return;
867 	}
868 
869 	drive = &di->drive[target];
870 	xs->error = XS_NOERROR;
871 
872 	/* Fake SCSI commands */
873 	switch (xs->cmd->opcode) {
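	/*
	 * Logical drives are not real SCSI targets, so INQUIRY, READ
	 * CAPACITY and REQUEST SENSE are answered right here, while reads
	 * and writes are translated into native IPS_CMD_READ/IPS_CMD_WRITE
	 * commands.
	 */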
874 	case READ_BIG:
875 	case READ_COMMAND:
876 	case WRITE_BIG:
877 	case WRITE_COMMAND:
878 		if (xs->cmdlen == sizeof(struct scsi_rw)) {
879 			rw = (void *)xs->cmd;
880 			blkno = _3btol(rw->addr) &
881 			    (SRW_TOPADDR << 16 | 0xffff);
882 			blkcnt = rw->length ? rw->length : 0x100;
883 		} else {
884 			rwb = (void *)xs->cmd;
885 			blkno = _4btol(rwb->addr);
886 			blkcnt = _2btol(rwb->length);
887 		}
888 
889 		if (blkno >= letoh32(drive->seccnt) || blkno + blkcnt >
890 		    letoh32(drive->seccnt)) {
891 			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: invalid params "
892 			    "blkno %u, blkcnt %u\n", sc->sc_dev.dv_xname,
893 			    blkno, blkcnt));
894 			xs->error = XS_DRIVER_STUFFUP;
895 			break;
896 		}
897 
898 		if (xs->flags & SCSI_DATA_IN)
899 			code = IPS_CMD_READ;
900 		else
901 			code = IPS_CMD_WRITE;
902 
903 		ccb = xs->io;
904 
905 		cmd = ccb->c_cmdbva;
906 		cmd->code = code;
907 		cmd->drive = target;
908 		cmd->lba = htole32(blkno);
909 		cmd->seccnt = htole16(blkcnt);
910 
911 		if (ips_load_xs(sc, ccb, xs)) {
912 			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: ips_load_xs "
913 			    "failed\n", sc->sc_dev.dv_xname));
914 			xs->error = XS_DRIVER_STUFFUP;
915 			scsi_done(xs);
916 			return;
917 		}
918 
919 		if (cmd->sgcnt > 0)
920 			cmd->code |= IPS_CMD_SG;
921 
922 		ccb->c_done = ips_done_xs;
923 		ips_start_xs(sc, ccb, xs);
924 		return;
925 	case INQUIRY:
926 		bzero(&inq, sizeof(inq));
927 		inq.device = T_DIRECT;
928 		inq.version = 2;
929 		inq.response_format = 2;
930 		inq.additional_length = 32;
931 		inq.flags |= SID_CmdQue;
932 		strlcpy(inq.vendor, "IBM", sizeof(inq.vendor));
933 		snprintf(inq.product, sizeof(inq.product),
934 		    "LD%d RAID%d", target, drive->raid);
935 		strlcpy(inq.revision, "1.0", sizeof(inq.revision));
936 		memcpy(xs->data, &inq, MIN(xs->datalen, sizeof(inq)));
937 		break;
938 	case READ_CAPACITY:
939 		bzero(&rcd, sizeof(rcd));
940 		_lto4b(letoh32(drive->seccnt) - 1, rcd.addr);
941 		_lto4b(IPS_SECSZ, rcd.length);
942 		memcpy(xs->data, &rcd, MIN(xs->datalen, sizeof(rcd)));
943 		break;
944 	case REQUEST_SENSE:
945 		bzero(&sd, sizeof(sd));
946 		sd.error_code = SSD_ERRCODE_CURRENT;
947 		sd.flags = SKEY_NO_SENSE;
948 		memcpy(xs->data, &sd, MIN(xs->datalen, sizeof(sd)));
949 		break;
950 	case SYNCHRONIZE_CACHE:
951 		cmd = ccb->c_cmdbva;
952 		cmd->code = IPS_CMD_FLUSH;
953 
954 		ccb->c_done = ips_done_xs;
955 		ips_start_xs(sc, ccb, xs);
956 		return;
957 	case PREVENT_ALLOW:
958 	case START_STOP:
959 	case TEST_UNIT_READY:
960 		break;
961 	default:
962 		DPRINTF(IPS_D_INFO, ("%s: unsupported scsi command 0x%02x\n",
963 		    sc->sc_dev.dv_xname, xs->cmd->opcode));
964 		xs->error = XS_DRIVER_STUFFUP;
965 	}
966 
967 	scsi_done(xs);
968 }
969 
970 void
971 ips_scsi_pt_cmd(struct scsi_xfer *xs)
972 {
973 	struct scsi_link *link = xs->sc_link;
974 	struct ips_pt *pt = link->adapter_softc;
975 	struct ips_softc *sc = pt->pt_sc;
976 	struct device *dev = link->device_softc;
977 	struct ips_ccb *ccb = xs->io;
978 	struct ips_cmdb *cmdb;
979 	struct ips_cmd *cmd;
980 	struct ips_dcdb *dcdb;
981 	int chan = pt->pt_chan, target = link->target;
982 
983 	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_pt_cmd: xs %p, chan %d, target %d, "
984 	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, chan,
985 	    target, xs->cmd->opcode, xs->flags));
986 
987 	if (pt->pt_procdev[0] == '\0' && target == pt->pt_proctgt && dev)
988 		strlcpy(pt->pt_procdev, dev->dv_xname, sizeof(pt->pt_procdev));
989 
990 	if (xs->cmdlen > IPS_MAXCDB) {
991 		DPRINTF(IPS_D_ERR, ("%s: cmdlen %d too big\n",
992 		    sc->sc_dev.dv_xname, xs->cmdlen));
993 
994 		bzero(&xs->sense, sizeof(xs->sense));
995 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
996 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
997 		xs->sense.add_sense_code = 0x20; /* illegal command (0x24: illegal field) */
998 		xs->error = XS_SENSE;
999 		scsi_done(xs);
1000 		return;
1001 	}
1002 
1003 	xs->error = XS_NOERROR;
1004 
1005 	cmdb = ccb->c_cmdbva;
1006 	cmd = &cmdb->cmd;
1007 	dcdb = &cmdb->dcdb;
1008 
1009 	cmd->code = IPS_CMD_DCDB;
1010 
1011 	dcdb->device = (chan << 4) | target;
1012 	if (xs->flags & SCSI_DATA_IN)
1013 		dcdb->attr |= IPS_DCDB_DATAIN;
1014 	if (xs->flags & SCSI_DATA_OUT)
1015 		dcdb->attr |= IPS_DCDB_DATAOUT;
1016 
1017 	/*
1018 	 * Adjust the timeout value to one the controller supports. Make
1019 	 * sure our own timeout fires after the controller gives up.
1020 	 */
1021 	if (xs->timeout <= 10000) {
1022 		dcdb->attr |= IPS_DCDB_TIMO10;
1023 		xs->timeout = 11000;
1024 	} else if (xs->timeout <= 60000) {
1025 		dcdb->attr |= IPS_DCDB_TIMO60;
1026 		xs->timeout = 61000;
1027 	} else {
1028 		dcdb->attr |= IPS_DCDB_TIMO20M;
1029 		xs->timeout = 20 * 60000 + 1000;
1030 	}
1031 
1032 	dcdb->attr |= IPS_DCDB_DISCON;
1033 	dcdb->datalen = htole16(xs->datalen);
1034 	dcdb->cdblen = xs->cmdlen;
1035 	dcdb->senselen = MIN(sizeof(xs->sense), sizeof(dcdb->sense));
1036 	memcpy(dcdb->cdb, xs->cmd, xs->cmdlen);
1037 
1038 	if (ips_load_xs(sc, ccb, xs)) {
1039 		DPRINTF(IPS_D_ERR, ("%s: ips_scsi_pt_cmd: ips_load_xs "
1040 		    "failed\n", sc->sc_dev.dv_xname));
1041 		xs->error = XS_DRIVER_STUFFUP;
1042 		scsi_done(xs);
1043 		return;
1044 	}
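	/*
	 * ips_load_xs() filled the data address and SG count into the
	 * command frame; for a DCDB they apparently belong in the DCDB
	 * frame instead, and the command itself must point at the DCDB.
	 */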
1045 	if (cmd->sgcnt > 0)
1046 		cmd->code |= IPS_CMD_SG;
1047 	dcdb->sgaddr = cmd->sgaddr;
1048 	dcdb->sgcnt = cmd->sgcnt;
1049 	cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb, dcdb));
1050 	cmd->sgcnt = 0;
1051 
1052 	ccb->c_done = ips_done_pt;
1053 	ips_start_xs(sc, ccb, xs);
1054 }
1055 
1056 int
1057 ips_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1058 {
1059 #if NBIO > 0
1060 	return (ips_ioctl(link->adapter_softc, cmd, addr));
1061 #else
1062 	return (ENOTTY);
1063 #endif
1064 }
1065 
1066 #if NBIO > 0
1067 int
1068 ips_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1069 {
1070 	struct ips_softc *sc = (struct ips_softc *)dev;
1071 
1072 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl: cmd %lu\n",
1073 	    sc->sc_dev.dv_xname, cmd));
1074 
1075 	switch (cmd) {
1076 	case BIOCINQ:
1077 		return (ips_ioctl_inq(sc, (struct bioc_inq *)addr));
1078 	case BIOCVOL:
1079 		return (ips_ioctl_vol(sc, (struct bioc_vol *)addr));
1080 	case BIOCDISK:
1081 		return (ips_ioctl_disk(sc, (struct bioc_disk *)addr));
1082 	case BIOCSETSTATE:
1083 		return (ips_ioctl_setstate(sc, (struct bioc_setstate *)addr));
1084 	default:
1085 		return (ENOTTY);
1086 	}
1087 }
1088 
1089 int
1090 ips_ioctl_inq(struct ips_softc *sc, struct bioc_inq *bi)
1091 {
1092 	struct ips_conf *conf = &sc->sc_info->conf;
1093 	int i;
1094 
1095 	strlcpy(bi->bi_dev, sc->sc_dev.dv_xname, sizeof(bi->bi_dev));
1096 	bi->bi_novol = sc->sc_nunits;
1097 	for (i = 0, bi->bi_nodisk = 0; i < sc->sc_nunits; i++)
1098 		bi->bi_nodisk += conf->ld[i].chunkcnt;
1099 
1100 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_inq: novol %d, nodisk %d\n",
1101 	    bi->bi_dev, bi->bi_novol, bi->bi_nodisk));
1102 
1103 	return (0);
1104 }
1105 
1106 int
1107 ips_ioctl_vol(struct ips_softc *sc, struct bioc_vol *bv)
1108 {
1109 	struct ips_driveinfo *di = &sc->sc_info->drive;
1110 	struct ips_conf *conf = &sc->sc_info->conf;
1111 	struct ips_rblstat *rblstat = &sc->sc_info->rblstat;
1112 	struct ips_ld *ld;
1113 	int vid = bv->bv_volid;
1114 	struct device *dv;
1115 	int error, rebuild = 0;
1116 	u_int32_t total = 0, done = 0;
1117 
1118 	if (vid >= sc->sc_nunits)
1119 		return (EINVAL);
1120 	if ((error = ips_getconf(sc, 0)))
1121 		return (error);
1122 	ld = &conf->ld[vid];
1123 
1124 	switch (ld->state) {
1125 	case IPS_DS_ONLINE:
1126 		bv->bv_status = BIOC_SVONLINE;
1127 		break;
1128 	case IPS_DS_DEGRADED:
1129 		bv->bv_status = BIOC_SVDEGRADED;
1130 		rebuild++;
1131 		break;
1132 	case IPS_DS_OFFLINE:
1133 		bv->bv_status = BIOC_SVOFFLINE;
1134 		break;
1135 	default:
1136 		bv->bv_status = BIOC_SVINVALID;
1137 	}
1138 
1139 	if (rebuild && ips_getrblstat(sc, 0) == 0) {
1140 		total = letoh32(rblstat->ld[vid].total);
1141 		done = total - letoh32(rblstat->ld[vid].remain);
1142 		if (total && total > done) {
1143 			bv->bv_status = BIOC_SVREBUILD;
1144 			bv->bv_percent = 100 * done / total;
1145 		}
1146 	}
1147 
1148 	bv->bv_size = (u_quad_t)letoh32(ld->size) * IPS_SECSZ;
1149 	bv->bv_level = di->drive[vid].raid;
1150 	bv->bv_nodisk = ld->chunkcnt;
1151 
1152 	/* Associate all unused and spare drives with first volume */
1153 	if (vid == 0) {
1154 		struct ips_dev *dev;
1155 		int chan, target;
1156 
1157 		for (chan = 0; chan < IPS_MAXCHANS; chan++)
1158 			for (target = 0; target < IPS_MAXTARGETS; target++) {
1159 				dev = &conf->dev[chan][target];
1160 				if (dev->state && !(dev->state &
1161 				    IPS_DVS_MEMBER) &&
1162 				    (dev->params & SID_TYPE) == T_DIRECT)
1163 					bv->bv_nodisk++;
1164 			}
1165 	}
1166 
1167 	dv = scsi_get_link(sc->sc_scsibus, vid, 0)->device_softc;
1168 	strlcpy(bv->bv_dev, dv->dv_xname, sizeof(bv->bv_dev));
1169 	strlcpy(bv->bv_vendor, "IBM", sizeof(bv->bv_vendor));
1170 
1171 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_vol: vid %d, state 0x%02x, "
1172 	    "total %u, done %u, size %llu, level %d, nodisk %d, dev %s\n",
1173 	    sc->sc_dev.dv_xname, vid, ld->state, total, done, bv->bv_size,
1174 	    bv->bv_level, bv->bv_nodisk, bv->bv_dev));
1175 
1176 	return (0);
1177 }
1178 
1179 int
1180 ips_ioctl_disk(struct ips_softc *sc, struct bioc_disk *bd)
1181 {
1182 	struct ips_conf *conf = &sc->sc_info->conf;
1183 	struct ips_ld *ld;
1184 	struct ips_chunk *chunk;
1185 	struct ips_dev *dev;
1186 	int vid = bd->bd_volid, did = bd->bd_diskid;
1187 	int chan, target, error, i;
1188 
1189 	if (vid >= sc->sc_nunits)
1190 		return (EINVAL);
1191 	if ((error = ips_getconf(sc, 0)))
1192 		return (error);
1193 	ld = &conf->ld[vid];
1194 
1195 	if (did >= ld->chunkcnt) {
1196 		/* Probably unused or spare drives */
1197 		if (vid != 0)
1198 			return (EINVAL);
1199 
1200 		i = ld->chunkcnt;
1201 		for (chan = 0; chan < IPS_MAXCHANS; chan++)
1202 			for (target = 0; target < IPS_MAXTARGETS; target++) {
1203 				dev = &conf->dev[chan][target];
1204 				if (dev->state && !(dev->state &
1205 				    IPS_DVS_MEMBER) &&
1206 				    (dev->params & SID_TYPE) == T_DIRECT)
1207 					if (i++ == did)
1208 						goto out;
1209 			}
1210 	} else {
1211 		chunk = &ld->chunk[did];
1212 		chan = chunk->channel;
1213 		target = chunk->target;
1214 	}
1215 
1216 out:
1217 	if (chan >= IPS_MAXCHANS || target >= IPS_MAXTARGETS)
1218 		return (EINVAL);
1219 	dev = &conf->dev[chan][target];
1220 
1221 	bd->bd_channel = chan;
1222 	bd->bd_target = target;
1223 	bd->bd_lun = 0;
1224 	bd->bd_size = (u_quad_t)letoh32(dev->seccnt) * IPS_SECSZ;
1225 
1226 	bzero(bd->bd_vendor, sizeof(bd->bd_vendor));
1227 	memcpy(bd->bd_vendor, dev->devid, MIN(sizeof(bd->bd_vendor),
1228 	    sizeof(dev->devid)));
1229 	strlcpy(bd->bd_procdev, sc->sc_pt[chan].pt_procdev,
1230 	    sizeof(bd->bd_procdev));
1231 
1232 	if (dev->state & IPS_DVS_READY) {
1233 		bd->bd_status = BIOC_SDUNUSED;
1234 		if (dev->state & IPS_DVS_MEMBER)
1235 			bd->bd_status = BIOC_SDONLINE;
1236 		if (dev->state & IPS_DVS_SPARE)
1237 			bd->bd_status = BIOC_SDHOTSPARE;
1238 		if (dev->state & IPS_DVS_REBUILD)
1239 			bd->bd_status = BIOC_SDREBUILD;
1240 	} else {
1241 		bd->bd_status = BIOC_SDOFFLINE;
1242 	}
1243 
1244 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_disk: vid %d, did %d, channel %d, "
1245 	    "target %d, size %llu, state 0x%02x\n", sc->sc_dev.dv_xname,
1246 	    vid, did, bd->bd_channel, bd->bd_target, bd->bd_size, dev->state));
1247 
1248 	return (0);
1249 }
1250 
1251 int
1252 ips_ioctl_setstate(struct ips_softc *sc, struct bioc_setstate *bs)
1253 {
1254 	struct ips_conf *conf = &sc->sc_info->conf;
1255 	struct ips_dev *dev;
1256 	int state, error;
1257 
1258 	if (bs->bs_channel >= IPS_MAXCHANS || bs->bs_target >= IPS_MAXTARGETS)
1259 		return (EINVAL);
1260 	if ((error = ips_getconf(sc, 0)))
1261 		return (error);
1262 	dev = &conf->dev[bs->bs_channel][bs->bs_target];
1263 	state = dev->state;
1264 
1265 	switch (bs->bs_status) {
1266 	case BIOC_SSONLINE:
1267 		state |= IPS_DVS_READY;
1268 		break;
1269 	case BIOC_SSOFFLINE:
1270 		state &= ~IPS_DVS_READY;
1271 		break;
1272 	case BIOC_SSHOTSPARE:
1273 		state |= IPS_DVS_SPARE;
1274 		break;
1275 	case BIOC_SSREBUILD:
1276 		return (ips_rebuild(sc, bs->bs_channel, bs->bs_target,
1277 		    bs->bs_channel, bs->bs_target, 0));
1278 	default:
1279 		return (EINVAL);
1280 	}
1281 
1282 	return (ips_setstate(sc, bs->bs_channel, bs->bs_target, state, 0));
1283 }
1284 #endif	/* NBIO > 0 */
1285 
1286 #ifndef SMALL_KERNEL
1287 void
1288 ips_sensors(void *arg)
1289 {
1290 	struct ips_softc *sc = arg;
1291 	struct ips_conf *conf = &sc->sc_info->conf;
1292 	struct ips_ld *ld;
1293 	int i;
1294 
1295 	/* ips_sensors() runs from a sensor task and is thus allowed to sleep */
1296 	if (ips_getconf(sc, 0)) {
1297 		DPRINTF(IPS_D_ERR, ("%s: ips_sensors: ips_getconf failed\n",
1298 		    sc->sc_dev.dv_xname));
1299 
1300 		for (i = 0; i < sc->sc_nunits; i++) {
1301 			sc->sc_sensors[i].value = 0;
1302 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1303 		}
1304 		return;
1305 	}
1306 
1307 	DPRINTF(IPS_D_INFO, ("%s: ips_sensors:", sc->sc_dev.dv_xname));
1308 	for (i = 0; i < sc->sc_nunits; i++) {
1309 		ld = &conf->ld[i];
1310 		DPRINTF(IPS_D_INFO, (" ld%d.state 0x%02x", i, ld->state));
1311 		switch (ld->state) {
1312 		case IPS_DS_ONLINE:
1313 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
1314 			sc->sc_sensors[i].status = SENSOR_S_OK;
1315 			break;
1316 		case IPS_DS_DEGRADED:
1317 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
1318 			sc->sc_sensors[i].status = SENSOR_S_WARN;
1319 			break;
1320 		case IPS_DS_OFFLINE:
1321 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
1322 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
1323 			break;
1324 		default:
1325 			sc->sc_sensors[i].value = 0;
1326 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1327 		}
1328 	}
1329 	DPRINTF(IPS_D_INFO, ("\n"));
1330 }
1331 #endif	/* !SMALL_KERNEL */
1332 
1333 int
1334 ips_load_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
1335 {
1336 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1337 	struct ips_cmd *cmd = &cmdb->cmd;
1338 	struct ips_sg *sg = cmdb->sg;
1339 	int nsegs, i;
1340 
1341 	if (xs->datalen == 0)
1342 		return (0);
1343 
1344 	/* Map data buffer into DMA segments */
1345 	if (bus_dmamap_load(sc->sc_dmat, ccb->c_dmam, xs->data, xs->datalen,
1346 	    NULL, (xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : 0)))
1347 		return (1);
1348 	bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0, ccb->c_dmam->dm_mapsize,
1349 	    xs->flags & SCSI_DATA_IN ? BUS_DMASYNC_PREREAD :
1350 	    BUS_DMASYNC_PREWRITE);
1351 
1352 	if ((nsegs = ccb->c_dmam->dm_nsegs) > IPS_MAXSGS)
1353 		return (1);
1354 
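	/*
	 * With more than one DMA segment a scatter-gather array is built;
	 * with a single segment the buffer address goes directly into
	 * sgaddr and sgcnt stays 0, so the caller will not set IPS_CMD_SG.
	 */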
1355 	if (nsegs > 1) {
1356 		cmd->sgcnt = nsegs;
1357 		cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb,
1358 		    sg));
1359 
1360 		/* Fill in scatter-gather array */
1361 		for (i = 0; i < nsegs; i++) {
1362 			sg[i].addr = htole32(ccb->c_dmam->dm_segs[i].ds_addr);
1363 			sg[i].size = htole32(ccb->c_dmam->dm_segs[i].ds_len);
1364 		}
1365 	} else {
1366 		cmd->sgcnt = 0;
1367 		cmd->sgaddr = htole32(ccb->c_dmam->dm_segs[0].ds_addr);
1368 	}
1369 
1370 	return (0);
1371 }
1372 
1373 void
1374 ips_start_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
1375 {
1376 	int ispoll = xs->flags & SCSI_POLL;
1377 	ccb->c_flags = xs->flags;
1378 	ccb->c_xfer = xs;
1379 
1380 	if (!ispoll) {
1381 		timeout_set(&xs->stimeout, ips_timeout, ccb);
1382 		timeout_add_msec(&xs->stimeout, xs->timeout);
1383 	}
1384 
1385 	/*
1386 	 * The return value is not used here: ips_cmd() completes the
1387 	 * scsi_xfer itself on any failure, and the SCSI layer handles the
1388 	 * resulting errors.
1389 	 */
1390 	ips_cmd(sc, ccb);
1391 }
1392 
1393 int
1394 ips_cmd(struct ips_softc *sc, struct ips_ccb *ccb)
1395 {
1396 	struct ips_cmd *cmd = ccb->c_cmdbva;
1397 	int s, error = 0;
1398 
1399 	DPRINTF(IPS_D_XFER, ("%s: ips_cmd: id 0x%02x, flags 0x%x, xs %p, "
1400 	    "code 0x%02x, drive %d, sgcnt %d, lba %d, sgaddr 0x%08x, "
1401 	    "seccnt %d\n", sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags,
1402 	    ccb->c_xfer, cmd->code, cmd->drive, cmd->sgcnt, letoh32(cmd->lba),
1403 	    letoh32(cmd->sgaddr), letoh16(cmd->seccnt)));
1404 
1405 	cmd->id = ccb->c_id;
1406 
1407 	/* Post command to controller and optionally wait for completion */
1408 	s = splbio();
1409 	ips_exec(sc, ccb);
1410 	ccb->c_state = IPS_CCB_QUEUED;
1411 	if (ccb->c_flags & SCSI_POLL)
1412 		error = ips_poll(sc, ccb);
1413 	splx(s);
1414 
1415 	return (error);
1416 }
1417 
1418 int
1419 ips_poll(struct ips_softc *sc, struct ips_ccb *ccb)
1420 {
1421 	struct timeval tv;
1422 	int error, timo;
1423 
1424 	splassert(IPL_BIO);
1425 
1426 	if (ccb->c_flags & SCSI_NOSLEEP) {
1427 		/* busy-wait */
1428 		DPRINTF(IPS_D_XFER, ("%s: ips_poll: busy-wait\n",
1429 		    sc->sc_dev.dv_xname));
1430 
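		/* spin for up to 10000 * 100us = ~1 second */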
1431 		for (timo = 10000; timo > 0; timo--) {
1432 			delay(100);
1433 			ips_intr(sc);
1434 			if (ccb->c_state == IPS_CCB_DONE)
1435 				break;
1436 		}
1437 	} else {
1438 		/* sleep */
1439 		timo = ccb->c_xfer ? ccb->c_xfer->timeout : IPS_TIMEOUT;
1440 		tv.tv_sec = timo / 1000;
1441 		tv.tv_usec = (timo % 1000) * 1000;
1442 		timo = tvtohz(&tv);
1443 
1444 		DPRINTF(IPS_D_XFER, ("%s: ips_poll: sleep %d hz\n",
1445 		    sc->sc_dev.dv_xname, timo));
1446 		tsleep(ccb, PRIBIO + 1, "ipscmd", timo);
1447 	}
1448 	DPRINTF(IPS_D_XFER, ("%s: ips_poll: state %d\n", sc->sc_dev.dv_xname,
1449 	    ccb->c_state));
1450 
1451 	if (ccb->c_state != IPS_CCB_DONE)
1452 		/*
1453 		 * Command never completed. Fake hardware status byte
1454 		 * to indicate timeout.
1455 		 */
1456 		ccb->c_stat = IPS_STAT_TIMO;
1457 
1458 	ips_done(sc, ccb);
1459 	error = ccb->c_error;
1460 
1461 	return (error);
1462 }
1463 
1464 void
1465 ips_done(struct ips_softc *sc, struct ips_ccb *ccb)
1466 {
1467 	splassert(IPL_BIO);
1468 
1469 	DPRINTF(IPS_D_XFER, ("%s: ips_done: id 0x%02x, flags 0x%x, xs %p\n",
1470 	    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags, ccb->c_xfer));
1471 
1472 	ccb->c_error = ips_error(sc, ccb);
1473 	ccb->c_done(sc, ccb);
1474 }
1475 
1476 void
1477 ips_done_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1478 {
1479 	struct scsi_xfer *xs = ccb->c_xfer;
1480 
1481 	if (!(xs->flags & SCSI_POLL))
1482 		timeout_del(&xs->stimeout);
1483 
1484 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1485 		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
1486 		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
1487 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1488 		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
1489 	}
1490 
1491 	xs->resid = 0;
1492 	xs->error = ips_error_xs(sc, ccb);
1493 	scsi_done(xs);
1494 }
1495 
1496 void
1497 ips_done_pt(struct ips_softc *sc, struct ips_ccb *ccb)
1498 {
1499 	struct scsi_xfer *xs = ccb->c_xfer;
1500 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1501 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1502 	int done = letoh16(dcdb->datalen);
1503 
1504 	if (!(xs->flags & SCSI_POLL))
1505 		timeout_del(&xs->stimeout);
1506 
1507 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1508 		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
1509 		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
1510 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1511 		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
1512 	}
1513 
1514 	if (done && done < xs->datalen)
1515 		xs->resid = xs->datalen - done;
1516 	else
1517 		xs->resid = 0;
1518 	xs->error = ips_error_xs(sc, ccb);
1519 	xs->status = dcdb->status;
1520 
1521 	if (xs->error == XS_SENSE)
1522 		memcpy(&xs->sense, dcdb->sense, MIN(sizeof(xs->sense),
1523 		    sizeof(dcdb->sense)));
1524 
1525 	if (xs->cmd->opcode == INQUIRY && xs->error == XS_NOERROR) {
1526 		int type = ((struct scsi_inquiry_data *)xs->data)->device &
1527 		    SID_TYPE;
1528 
1529 		if (type == T_DIRECT)
1530 			/* mask physical drives */
1531 			xs->error = XS_DRIVER_STUFFUP;
1532 	}
1533 
1534 	scsi_done(xs);
1535 }
1536 
1537 void
1538 ips_done_mgmt(struct ips_softc *sc, struct ips_ccb *ccb)
1539 {
1540 	if (ccb->c_flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
1541 		bus_dmamap_sync(sc->sc_dmat, sc->sc_infom.dm_map, 0,
1542 		    sc->sc_infom.dm_map->dm_mapsize,
1543 		    ccb->c_flags & SCSI_DATA_IN ? BUS_DMASYNC_POSTREAD :
1544 		    BUS_DMASYNC_POSTWRITE);
1545 	scsi_io_put(&sc->sc_iopool, ccb);
1546 }
1547 
1548 int
1549 ips_error(struct ips_softc *sc, struct ips_ccb *ccb)
1550 {
1551 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1552 	struct ips_cmd *cmd = &cmdb->cmd;
1553 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1554 	struct scsi_xfer *xs = ccb->c_xfer;
1555 	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);
1556 
1557 	if (gsc == IPS_STAT_OK)
1558 		return (0);
1559 
1560 	DPRINTF(IPS_D_ERR, ("%s: ips_error: stat 0x%02x, estat 0x%02x, "
1561 	    "cmd code 0x%02x, drive %d, sgcnt %d, lba %u, seccnt %d",
1562 	    sc->sc_dev.dv_xname, ccb->c_stat, ccb->c_estat, cmd->code,
1563 	    cmd->drive, cmd->sgcnt, letoh32(cmd->lba), letoh16(cmd->seccnt)));
1564 	if (cmd->code == IPS_CMD_DCDB || cmd->code == IPS_CMD_DCDB_SG) {
1565 		int i;
1566 
1567 		DPRINTF(IPS_D_ERR, (", dcdb device 0x%02x, attr 0x%02x, "
1568 		    "datalen %d, sgcnt %d, status 0x%02x",
1569 		    dcdb->device, dcdb->attr, letoh16(dcdb->datalen),
1570 		    dcdb->sgcnt, dcdb->status));
1571 
1572 		DPRINTF(IPS_D_ERR, (", cdb"));
1573 		for (i = 0; i < dcdb->cdblen; i++)
1574 			DPRINTF(IPS_D_ERR, (" %x", dcdb->cdb[i]));
1575 		if (ccb->c_estat == IPS_ESTAT_CKCOND) {
1576 			DPRINTF(IPS_D_ERR, (", sense"));
1577 			for (i = 0; i < dcdb->senselen; i++)
1578 				DPRINTF(IPS_D_ERR, (" %x", dcdb->sense[i]));
1579 		}
1580 	}
1581 	DPRINTF(IPS_D_ERR, ("\n"));
1582 
1583 	switch (gsc) {
1584 	case IPS_STAT_RECOV:
1585 		return (0);
1586 	case IPS_STAT_INVOP:
1587 	case IPS_STAT_INVCMD:
1588 	case IPS_STAT_INVPARM:
1589 		return (EINVAL);
1590 	case IPS_STAT_BUSY:
1591 		return (EBUSY);
1592 	case IPS_STAT_TIMO:
1593 		return (ETIMEDOUT);
1594 	case IPS_STAT_PDRVERR:
1595 		switch (ccb->c_estat) {
1596 		case IPS_ESTAT_SELTIMO:
1597 			return (ENODEV);
1598 		case IPS_ESTAT_OURUN:
1599 			if (xs && letoh16(dcdb->datalen) < xs->datalen)
1600 				/* underrun */
1601 				return (0);
1602 			break;
1603 		case IPS_ESTAT_RECOV:
1604 			return (0);
1605 		}
1606 		break;
1607 	}
1608 
1609 	return (EIO);
1610 }
1611 
1612 int
1613 ips_error_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1614 {
1615 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1616 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1617 	struct scsi_xfer *xs = ccb->c_xfer;
1618 	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);
1619 
1620 	/* Map hardware error codes to SCSI ones */
1621 	switch (gsc) {
1622 	case IPS_STAT_OK:
1623 	case IPS_STAT_RECOV:
1624 		return (XS_NOERROR);
1625 	case IPS_STAT_BUSY:
1626 		return (XS_BUSY);
1627 	case IPS_STAT_TIMO:
1628 		return (XS_TIMEOUT);
1629 	case IPS_STAT_PDRVERR:
1630 		switch (ccb->c_estat) {
1631 		case IPS_ESTAT_SELTIMO:
1632 			return (XS_SELTIMEOUT);
1633 		case IPS_ESTAT_OURUN:
1634 			if (xs && letoh16(dcdb->datalen) < xs->datalen)
1635 				/* underrun */
1636 				return (XS_NOERROR);
1637 			break;
1638 		case IPS_ESTAT_HOSTRST:
1639 		case IPS_ESTAT_DEVRST:
1640 			return (XS_RESET);
1641 		case IPS_ESTAT_RECOV:
1642 			return (XS_NOERROR);
1643 		case IPS_ESTAT_CKCOND:
1644 			return (XS_SENSE);
1645 		}
1646 		break;
1647 	}
1648 
1649 	return (XS_DRIVER_STUFFUP);
1650 }
1651 
1652 int
1653 ips_intr(void *arg)
1654 {
1655 	struct ips_softc *sc = arg;
1656 	struct ips_ccb *ccb;
1657 	u_int32_t status;
1658 	int id;
1659 
1660 	DPRINTF(IPS_D_XFER, ("%s: ips_intr", sc->sc_dev.dv_xname));
1661 	if (!ips_isintr(sc)) {
1662 		DPRINTF(IPS_D_XFER, (": not ours\n"));
1663 		return (0);
1664 	}
1665 	DPRINTF(IPS_D_XFER, ("\n"));
1666 
1667 	/* Process completed commands */
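	/* ips_status() yields 0xffffffff once the completion queue is drained */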
1668 	while ((status = ips_status(sc)) != 0xffffffff) {
1669 		DPRINTF(IPS_D_XFER, ("%s: ips_intr: status 0x%08x\n",
1670 		    sc->sc_dev.dv_xname, status));
1671 
1672 		id = IPS_STAT_ID(status);
1673 		if (id >= sc->sc_nccbs) {
1674 			DPRINTF(IPS_D_ERR, ("%s: ips_intr: invalid id %d\n",
1675 			    sc->sc_dev.dv_xname, id));
1676 			continue;
1677 		}
1678 
1679 		ccb = &sc->sc_ccb[id];
1680 		if (ccb->c_state != IPS_CCB_QUEUED) {
1681 			DPRINTF(IPS_D_ERR, ("%s: ips_intr: cmd 0x%02x not "
1682 			    "queued, state %d, status 0x%08x\n",
1683 			    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_state,
1684 			    status));
1685 			continue;
1686 		}
1687 
1688 		ccb->c_state = IPS_CCB_DONE;
1689 		ccb->c_stat = IPS_STAT_BASIC(status);
1690 		ccb->c_estat = IPS_STAT_EXT(status);
1691 
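		/*
		 * Polled commands are completed by ips_poll() once it wakes
		 * up; everything else is completed here.
		 */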
1692 		if (ccb->c_flags & SCSI_POLL) {
1693 			wakeup(ccb);
1694 		} else {
1695 			ips_done(sc, ccb);
1696 		}
1697 	}
1698 
1699 	return (1);
1700 }
1701 
1702 void
1703 ips_timeout(void *arg)
1704 {
1705 	struct ips_ccb *ccb = arg;
1706 	struct ips_softc *sc = ccb->c_sc;
1707 	struct scsi_xfer *xs = ccb->c_xfer;
1708 	int s;
1709 
1710 	s = splbio();
1711 	if (xs)
1712 		sc_print_addr(xs->sc_link);
1713 	else
1714 		printf("%s: ", sc->sc_dev.dv_xname);
1715 	printf("timeout\n");
1716 
1717 	/*
1718 	 * Command never completed. Fake hardware status byte
1719 	 * to indicate timeout.
1720 	 * XXX: need to remove command from controller.
1721 	 */
1722 	ccb->c_stat = IPS_STAT_TIMO;
1723 	ips_done(sc, ccb);
1724 	splx(s);
1725 }
1726 
1727 int
1728 ips_getadapterinfo(struct ips_softc *sc, int flags)
1729 {
1730 	struct ips_ccb *ccb;
1731 	struct ips_cmd *cmd;
1732 
1733 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1734 	if (ccb == NULL)
1735 		return (1);
1736 
1737 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1738 	ccb->c_done = ips_done_mgmt;
1739 
1740 	cmd = ccb->c_cmdbva;
1741 	cmd->code = IPS_CMD_GETADAPTERINFO;
1742 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1743 	    adapter));
1744 
1745 	return (ips_cmd(sc, ccb));
1746 }
1747 
1748 int
1749 ips_getdriveinfo(struct ips_softc *sc, int flags)
1750 {
1751 	struct ips_ccb *ccb;
1752 	struct ips_cmd *cmd;
1753 
1754 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1755 	if (ccb == NULL)
1756 		return (1);
1757 
1758 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1759 	ccb->c_done = ips_done_mgmt;
1760 
1761 	cmd = ccb->c_cmdbva;
1762 	cmd->code = IPS_CMD_GETDRIVEINFO;
1763 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1764 	    drive));
1765 
1766 	return (ips_cmd(sc, ccb));
1767 }
1768 
1769 int
1770 ips_getconf(struct ips_softc *sc, int flags)
1771 {
1772 	struct ips_ccb *ccb;
1773 	struct ips_cmd *cmd;
1774 
1775 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1776 	if (ccb == NULL)
1777 		return (1);
1778 
1779 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1780 	ccb->c_done = ips_done_mgmt;
1781 
1782 	cmd = ccb->c_cmdbva;
1783 	cmd->code = IPS_CMD_READCONF;
1784 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1785 	    conf));
1786 
1787 	return (ips_cmd(sc, ccb));
1788 }
1789 
1790 int
1791 ips_getpg5(struct ips_softc *sc, int flags)
1792 {
1793 	struct ips_ccb *ccb;
1794 	struct ips_cmd *cmd;
1795 
1796 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1797 	if (ccb == NULL)
1798 		return (1);
1799 
1800 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1801 	ccb->c_done = ips_done_mgmt;
1802 
1803 	cmd = ccb->c_cmdbva;
1804 	cmd->code = IPS_CMD_RWNVRAM;
1805 	cmd->drive = 5;
1806 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1807 	    pg5));
1808 
1809 	return (ips_cmd(sc, ccb));
1810 }
1811 
1812 #if NBIO > 0
1813 int
1814 ips_getrblstat(struct ips_softc *sc, int flags)
1815 {
1816 	struct ips_ccb *ccb;
1817 	struct ips_cmd *cmd;
1818 
1819 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1820 	if (ccb == NULL)
1821 		return (1);
1822 
1823 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1824 	ccb->c_done = ips_done_mgmt;
1825 
1826 	cmd = ccb->c_cmdbva;
1827 	cmd->code = IPS_CMD_REBUILDSTATUS;
1828 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1829 	    rblstat));
1830 
1831 	return (ips_cmd(sc, ccb));
1832 }
1833 
1834 int
1835 ips_setstate(struct ips_softc *sc, int chan, int target, int state, int flags)
1836 {
1837 	struct ips_ccb *ccb;
1838 	struct ips_cmd *cmd;
1839 
1840 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1841 	if (ccb == NULL)
1842 		return (1);
1843 
1844 	ccb->c_flags = SCSI_POLL | flags;
1845 	ccb->c_done = ips_done_mgmt;
1846 
1847 	cmd = ccb->c_cmdbva;
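	/*
	 * The set-state command reuses the generic command fields: drive
	 * carries the channel, sgcnt the target and seg4g the new state
	 * (ips_rebuild() below reuses fields the same way).
	 */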
1848 	cmd->code = IPS_CMD_SETSTATE;
1849 	cmd->drive = chan;
1850 	cmd->sgcnt = target;
1851 	cmd->seg4g = state;
1852 
1853 	return (ips_cmd(sc, ccb));
1854 }
1855 
1856 int
1857 ips_rebuild(struct ips_softc *sc, int chan, int target, int nchan,
1858     int ntarget, int flags)
1859 {
1860 	struct ips_ccb *ccb;
1861 	struct ips_cmd *cmd;
1862 
1863 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1864 	if (ccb == NULL)
1865 		return (1);
1866 
1867 	ccb->c_flags = SCSI_POLL | flags;
1868 	ccb->c_done = ips_done_mgmt;
1869 
1870 	cmd = ccb->c_cmdbva;
1871 	cmd->code = IPS_CMD_REBUILD;
1872 	cmd->drive = chan;
1873 	cmd->sgcnt = target;
1874 	cmd->seccnt = htole16(ntarget << 8 | nchan);
1875 
1876 	return (ips_cmd(sc, ccb));
1877 }
1878 #endif	/* NBIO > 0 */
1879 
1880 void
1881 ips_copperhead_exec(struct ips_softc *sc, struct ips_ccb *ccb)
1882 {
1883 	u_int32_t reg;
1884 	int timeout;
1885 
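	/* wait up to 100 * 100us = 10ms for the command channel semaphore */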
1886 	for (timeout = 100; timeout-- > 0; delay(100)) {
1887 		reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC);
1888 		if ((reg & IPS_REG_CCC_SEM) == 0)
1889 			break;
1890 	}
1891 	if (timeout < 0) {
1892 		printf("%s: semaphore timeout\n", sc->sc_dev.dv_xname);
1893 		return;
1894 	}
1895 
1896 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCSA, ccb->c_cmdbpa);
1897 	bus_space_write_2(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC,
1898 	    IPS_REG_CCC_START);
1899 }
1900 
1901 void
1902 ips_copperhead_intren(struct ips_softc *sc)
1903 {
1904 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, IPS_REG_HIS_EN);
1905 }
1906 
1907 int
1908 ips_copperhead_isintr(struct ips_softc *sc)
1909 {
1910 	u_int8_t reg;
1911 
1912 	reg = bus_space_read_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS);
1913 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, reg);
1914 	if (reg != 0xff && (reg & IPS_REG_HIS_SCE))
1915 		return (1);
1916 
1917 	return (0);
1918 }
1919 
1920 u_int32_t
1921 ips_copperhead_status(struct ips_softc *sc)
1922 {
1923 	u_int32_t sqhead, sqtail, status;
1924 
1925 	sqhead = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH);
1926 	DPRINTF(IPS_D_XFER, ("%s: sqhead 0x%08x, sqtail 0x%08x\n",
1927 	    sc->sc_dev.dv_xname, sqhead, sc->sc_sqtail));
1928 
1929 	sqtail = sc->sc_sqtail + sizeof(u_int32_t);
1930 	if (sqtail == sc->sc_sqm.dm_paddr + IPS_SQSZ)
1931 		sqtail = sc->sc_sqm.dm_paddr;
1932 	if (sqtail == sqhead)
1933 		return (0xffffffff);
1934 
1935 	sc->sc_sqtail = sqtail;
1936 	if (++sc->sc_sqidx == IPS_MAXCMDS)
1937 		sc->sc_sqidx = 0;
1938 	status = letoh32(sc->sc_sqbuf[sc->sc_sqidx]);
1939 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT, sqtail);
1940 
1941 	return (status);
1942 }
1943 
1944 void
1945 ips_morpheus_exec(struct ips_softc *sc, struct ips_ccb *ccb)
1946 {
1947 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_IQP, ccb->c_cmdbpa);
1948 }
1949 
1950 void
1951 ips_morpheus_intren(struct ips_softc *sc)
1952 {
1953 	u_int32_t reg;
1954 
1955 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM);
1956 	reg &= ~IPS_REG_OIM_DS;
1957 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM, reg);
1958 }
1959 
1960 int
1961 ips_morpheus_isintr(struct ips_softc *sc)
1962 {
1963 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIS) &
1964 	    IPS_REG_OIS_PEND);
1965 }
1966 
1967 u_int32_t
1968 ips_morpheus_status(struct ips_softc *sc)
1969 {
1970 	u_int32_t reg;
1971 
1972 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OQP);
1973 	DPRINTF(IPS_D_XFER, ("%s: status 0x%08x\n", sc->sc_dev.dv_xname, reg));
1974 
1975 	return (reg);
1976 }
1977 
1978 struct ips_ccb *
1979 ips_ccb_alloc(struct ips_softc *sc, int n)
1980 {
1981 	struct ips_ccb *ccb;
1982 	int i;
1983 
1984 	if ((ccb = mallocarray(n, sizeof(*ccb), M_DEVBUF,
1985 	    M_NOWAIT | M_ZERO)) == NULL)
1986 		return (NULL);
1987 
1988 	for (i = 0; i < n; i++) {
1989 		ccb[i].c_sc = sc;
1990 		ccb[i].c_id = i;
1991 		ccb[i].c_cmdbva = (char *)sc->sc_cmdbm.dm_vaddr +
1992 		    i * sizeof(struct ips_cmdb);
1993 		ccb[i].c_cmdbpa = sc->sc_cmdbm.dm_paddr +
1994 		    i * sizeof(struct ips_cmdb);
1995 		if (bus_dmamap_create(sc->sc_dmat, IPS_MAXFER, IPS_MAXSGS,
1996 		    IPS_MAXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1997 		    &ccb[i].c_dmam))
1998 			goto fail;
1999 	}
2000 
2001 	return (ccb);
2002 fail:
2003 	for (; i > 0; i--)
2004 		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
2005 	free(ccb, M_DEVBUF, n * sizeof(*ccb));
2006 	return (NULL);
2007 }
2008 
2009 void
2010 ips_ccb_free(struct ips_softc *sc, struct ips_ccb *ccb, int n)
2011 {
2012 	int i;
2013 
2014 	for (i = 0; i < n; i++)
2015 		bus_dmamap_destroy(sc->sc_dmat, ccb[i].c_dmam);
2016 	free(ccb, M_DEVBUF, n * sizeof(*ccb));
2017 }
2018 
2019 void *
2020 ips_ccb_get(void *xsc)
2021 {
2022 	struct ips_softc *sc = xsc;
2023 	struct ips_ccb *ccb;
2024 
2025 	mtx_enter(&sc->sc_ccb_mtx);
2026 	if ((ccb = SLIST_FIRST(&sc->sc_ccbq_free)) != NULL) {
2027 		SLIST_REMOVE_HEAD(&sc->sc_ccbq_free, c_link);
2028 		ccb->c_flags = 0;
2029 		ccb->c_xfer = NULL;
2030 		bzero(ccb->c_cmdbva, sizeof(struct ips_cmdb));
2031 	}
2032 	mtx_leave(&sc->sc_ccb_mtx);
2033 
2034 	return (ccb);
2035 }
2036 
2037 void
2038 ips_ccb_put(void *xsc, void *xccb)
2039 {
2040 	struct ips_softc *sc = xsc;
2041 	struct ips_ccb *ccb = xccb;
2042 
2043 	ccb->c_state = IPS_CCB_FREE;
2044 	mtx_enter(&sc->sc_ccb_mtx);
2045 	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, ccb, c_link);
2046 	mtx_leave(&sc->sc_ccb_mtx);
2047 }
2048 
2049 int
2050 ips_dmamem_alloc(struct dmamem *dm, bus_dma_tag_t tag, bus_size_t size)
2051 {
2052 	int nsegs;
2053 
2054 	dm->dm_tag = tag;
2055 	dm->dm_size = size;
2056 
2057 	if (bus_dmamap_create(tag, size, 1, size, 0,
2058 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map))
2059 		return (1);
2060 	if (bus_dmamem_alloc(tag, size, 0, 0, &dm->dm_seg, 1, &nsegs,
2061 	    BUS_DMA_NOWAIT))
2062 		goto fail1;
2063 	if (bus_dmamem_map(tag, &dm->dm_seg, 1, size, (caddr_t *)&dm->dm_vaddr,
2064 	    BUS_DMA_NOWAIT))
2065 		goto fail2;
2066 	if (bus_dmamap_load(tag, dm->dm_map, dm->dm_vaddr, size, NULL,
2067 	    BUS_DMA_NOWAIT))
2068 		goto fail3;
2069 
2070 	return (0);
2071 
2072 fail3:
2073 	bus_dmamem_unmap(tag, dm->dm_vaddr, size);
2074 fail2:
2075 	bus_dmamem_free(tag, &dm->dm_seg, 1);
2076 fail1:
2077 	bus_dmamap_destroy(tag, dm->dm_map);
2078 	return (1);
2079 }
2080 
2081 void
2082 ips_dmamem_free(struct dmamem *dm)
2083 {
2084 	bus_dmamap_unload(dm->dm_tag, dm->dm_map);
2085 	bus_dmamem_unmap(dm->dm_tag, dm->dm_vaddr, dm->dm_size);
2086 	bus_dmamem_free(dm->dm_tag, &dm->dm_seg, 1);
2087 	bus_dmamap_destroy(dm->dm_tag, dm->dm_map);
2088 }
2089