1 /*	$OpenBSD: ips.c,v 1.125 2020/07/20 14:41:13 krw Exp $	*/
2 
3 /*
4  * Copyright (c) 2006, 2007, 2009 Alexander Yurchenko <grange@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * IBM (Adaptec) ServeRAID controller driver.
21  */
22 
23 #include "bio.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/device.h>
28 #include <sys/ioctl.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/sensors.h>
32 #include <sys/timeout.h>
33 #include <sys/queue.h>
34 
35 #include <machine/bus.h>
36 
37 #include <scsi/scsi_all.h>
38 #include <scsi/scsi_disk.h>
39 #include <scsi/scsiconf.h>
40 
41 #include <dev/biovar.h>
42 
43 #include <dev/pci/pcidevs.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 
47 /* Debug levels */
48 #define IPS_D_ERR	0x0001	/* errors */
49 #define IPS_D_INFO	0x0002	/* information */
50 #define IPS_D_XFER	0x0004	/* transfers */
51 
52 #ifdef IPS_DEBUG
53 #define DPRINTF(a, b)	do { if (ips_debug & (a)) printf b; } while (0)
54 int ips_debug = IPS_D_ERR;
55 #else
56 #define DPRINTF(a, b)
57 #endif
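
/*
 * DPRINTF usage: the format string and its arguments are passed as one
 * parenthesized group, e.g. DPRINTF(IPS_D_ERR, ("%s: error\n", name)),
 * so the whole call compiles away when IPS_DEBUG is not defined.
 */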
58 
59 #define IPS_MAXDRIVES		8
60 #define IPS_MAXCHANS		4
61 #define IPS_MAXTARGETS		16
62 #define IPS_MAXCHUNKS		16
63 #define IPS_MAXCMDS		128
64 
65 #define IPS_MAXSGS		16
66 #define IPS_MAXCDB		12
67 
68 #define IPS_SECSZ		512
69 #define IPS_NVRAMPGSZ		128
70 #define IPS_SQSZ		(IPS_MAXCMDS * sizeof(u_int32_t))
71 
72 #define	IPS_TIMEOUT		60000	/* ms */
73 
74 /* Command codes */
75 #define IPS_CMD_READ		0x02
76 #define IPS_CMD_WRITE		0x03
77 #define IPS_CMD_DCDB		0x04
78 #define IPS_CMD_GETADAPTERINFO	0x05
79 #define IPS_CMD_FLUSH		0x0a
80 #define IPS_CMD_REBUILDSTATUS	0x0c
81 #define IPS_CMD_SETSTATE	0x10
82 #define IPS_CMD_REBUILD		0x16
83 #define IPS_CMD_ERRORTABLE	0x17
84 #define IPS_CMD_GETDRIVEINFO	0x19
85 #define IPS_CMD_RESETCHAN	0x1a
86 #define IPS_CMD_DOWNLOAD	0x20
87 #define IPS_CMD_RWBIOSFW	0x22
88 #define IPS_CMD_READCONF	0x38
89 #define IPS_CMD_GETSUBSYS	0x40
90 #define IPS_CMD_CONFIGSYNC	0x58
91 #define IPS_CMD_READ_SG		0x82
92 #define IPS_CMD_WRITE_SG	0x83
93 #define IPS_CMD_DCDB_SG		0x84
94 #define IPS_CMD_EDCDB		0x95
95 #define IPS_CMD_EDCDB_SG	0x96
96 #define IPS_CMD_RWNVRAMPAGE	0xbc
97 #define IPS_CMD_GETVERINFO	0xc6
98 #define IPS_CMD_FFDC		0xd7
99 #define IPS_CMD_SG		0x80
100 #define IPS_CMD_RWNVRAM		0xbc
101 
102 /* DCDB attributes */
103 #define IPS_DCDB_DATAIN		0x01	/* data input */
104 #define IPS_DCDB_DATAOUT	0x02	/* data output */
105 #define IPS_DCDB_XFER64K	0x08	/* 64K transfer */
106 #define IPS_DCDB_TIMO10		0x10	/* 10 secs timeout */
107 #define IPS_DCDB_TIMO60		0x20	/* 60 secs timeout */
108 #define IPS_DCDB_TIMO20M	0x30	/* 20 mins timeout */
109 #define IPS_DCDB_NOAUTOREQSEN	0x40	/* no auto request sense */
110 #define IPS_DCDB_DISCON		0x80	/* disconnect allowed */
111 
112 /* Register definitions */
113 #define IPS_REG_HIS		0x08	/* host interrupt status */
114 #define IPS_REG_HIS_SCE			0x01	/* status channel enqueue */
115 #define IPS_REG_HIS_EN			0x80	/* enable interrupts */
116 #define IPS_REG_CCSA		0x10	/* command channel system address */
117 #define IPS_REG_CCC		0x14	/* command channel control */
118 #define IPS_REG_CCC_SEM			0x0008	/* semaphore */
119 #define IPS_REG_CCC_START		0x101a	/* start command */
120 #define IPS_REG_SQH		0x20	/* status queue head */
121 #define IPS_REG_SQT		0x24	/* status queue tail */
122 #define IPS_REG_SQE		0x28	/* status queue end */
123 #define IPS_REG_SQS		0x2c	/* status queue start */
124 
125 #define IPS_REG_OIS		0x30	/* outbound interrupt status */
126 #define IPS_REG_OIS_PEND		0x0008	/* interrupt is pending */
127 #define IPS_REG_OIM		0x34	/* outbound interrupt mask */
128 #define IPS_REG_OIM_DS			0x0008	/* disable interrupts */
129 #define IPS_REG_IQP		0x40	/* inbound queue port */
130 #define IPS_REG_OQP		0x44	/* outbound queue port */
131 
132 /* Status word fields */
133 #define IPS_STAT_ID(x)		(((x) >> 8) & 0xff)	/* command id */
134 #define IPS_STAT_BASIC(x)	(((x) >> 16) & 0xff)	/* basic status */
135 #define IPS_STAT_EXT(x)		(((x) >> 24) & 0xff)	/* ext status */
136 #define IPS_STAT_GSC(x)		((x) & 0x0f)
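/*
 * Example (illustrative): a status word of 0xf00c0500 carries extended
 * status 0xf0, basic status 0x0c and command id 0x05.  IPS_STAT_GSC()
 * is applied to the basic status byte (0x0c here, IPS_STAT_CMPLERR)
 * to obtain the general status code used for error mapping.
 */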
137 
138 /* Basic status codes */
139 #define IPS_STAT_OK		0x00	/* success */
140 #define IPS_STAT_RECOV		0x01	/* recovered error */
141 #define IPS_STAT_INVOP		0x03	/* invalid opcode */
142 #define IPS_STAT_INVCMD		0x04	/* invalid command block */
143 #define IPS_STAT_INVPARM	0x05	/* invalid parameters block */
144 #define IPS_STAT_BUSY		0x08	/* busy */
145 #define IPS_STAT_CMPLERR	0x0c	/* completed with error */
146 #define IPS_STAT_LDERR		0x0d	/* logical drive error */
147 #define IPS_STAT_TIMO		0x0e	/* timeout */
148 #define IPS_STAT_PDRVERR	0x0f	/* physical drive error */
149 
150 /* Extended status codes */
151 #define IPS_ESTAT_SELTIMO	0xf0	/* select timeout */
152 #define IPS_ESTAT_OURUN		0xf2	/* over/underrun */
153 #define IPS_ESTAT_HOSTRST	0xf7	/* host reset */
154 #define IPS_ESTAT_DEVRST	0xf8	/* device reset */
155 #define IPS_ESTAT_RECOV		0xfc	/* recovered error */
156 #define IPS_ESTAT_CKCOND	0xff	/* check condition */
157 
158 #define IPS_IOSIZE		128	/* max space size to map */
159 
160 /* Command frame */
161 struct ips_cmd {
162 	u_int8_t	code;
163 	u_int8_t	id;
164 	u_int8_t	drive;
165 	u_int8_t	sgcnt;
166 	u_int32_t	lba;
167 	u_int32_t	sgaddr;
168 	u_int16_t	seccnt;
169 	u_int8_t	seg4g;
170 	u_int8_t	esg;
171 	u_int32_t	ccsar;
172 	u_int32_t	cccr;
173 };
174 
175 /* Direct CDB (SCSI pass-through) frame */
176 struct ips_dcdb {
177 	u_int8_t	device;
178 	u_int8_t	attr;
179 	u_int16_t	datalen;
180 	u_int32_t	sgaddr;
181 	u_int8_t	cdblen;
182 	u_int8_t	senselen;
183 	u_int8_t	sgcnt;
184 	u_int8_t	__reserved1;
185 	u_int8_t	cdb[IPS_MAXCDB];
186 	u_int8_t	sense[64];
187 	u_int8_t	status;
188 	u_int8_t	__reserved2[3];
189 };
190 
191 /* Scatter-gather array element */
192 struct ips_sg {
193 	u_int32_t	addr;
194 	u_int32_t	size;
195 };
196 
197 /* Command block */
198 struct ips_cmdb {
199 	struct ips_cmd	cmd;
200 	struct ips_dcdb	dcdb;
201 	struct ips_sg	sg[IPS_MAXSGS];
202 };
203 
204 /* Data frames */
205 struct ips_adapterinfo {
206 	u_int8_t	drivecnt;
207 	u_int8_t	miscflag;
208 	u_int8_t	sltflag;
209 	u_int8_t	bstflag;
210 	u_int8_t	pwrchgcnt;
211 	u_int8_t	wrongaddrcnt;
212 	u_int8_t	unidentcnt;
213 	u_int8_t	nvramdevchgcnt;
214 	u_int8_t	firmware[8];
215 	u_int8_t	bios[8];
216 	u_int32_t	drivesize[IPS_MAXDRIVES];
217 	u_int8_t	cmdcnt;
218 	u_int8_t	maxphysdevs;
219 	u_int16_t	flashrepgmcnt;
220 	u_int8_t	defunctdiskcnt;
221 	u_int8_t	rebuildflag;
222 	u_int8_t	offdrivecnt;
223 	u_int8_t	critdrivecnt;
224 	u_int16_t	confupdcnt;
225 	u_int8_t	blkflag;
226 	u_int8_t	__reserved;
227 	u_int16_t	deaddisk[IPS_MAXCHANS][IPS_MAXTARGETS];
228 };
229 
230 struct ips_driveinfo {
231 	u_int8_t	drivecnt;
232 	u_int8_t	__reserved[3];
233 	struct ips_drive {
234 		u_int8_t	id;
235 		u_int8_t	__reserved;
236 		u_int8_t	raid;
237 		u_int8_t	state;
238 #define IPS_DS_FREE	0x00
239 #define IPS_DS_OFFLINE	0x02
240 #define IPS_DS_ONLINE	0x03
241 #define IPS_DS_DEGRADED	0x04
242 #define IPS_DS_SYS	0x06
243 #define IPS_DS_CRS	0x24
244 
245 		u_int32_t	seccnt;
246 	}		drive[IPS_MAXDRIVES];
247 };
248 
249 struct ips_conf {
250 	u_int8_t	ldcnt;
251 	u_int8_t	day;
252 	u_int8_t	month;
253 	u_int8_t	year;
254 	u_int8_t	initid[4];
255 	u_int8_t	hostid[12];
256 	u_int8_t	time[8];
257 	u_int32_t	useropt;
258 	u_int16_t	userfield;
259 	u_int8_t	rebuildrate;
260 	u_int8_t	__reserved1;
261 
262 	struct ips_hw {
263 		u_int8_t	board[8];
264 		u_int8_t	cpu[8];
265 		u_int8_t	nchantype;
266 		u_int8_t	nhostinttype;
267 		u_int8_t	compression;
268 		u_int8_t	nvramtype;
269 		u_int32_t	nvramsize;
270 	}		hw;
271 
272 	struct ips_ld {
273 		u_int16_t	userfield;
274 		u_int8_t	state;
275 		u_int8_t	raidcacheparam;
276 		u_int8_t	chunkcnt;
277 		u_int8_t	stripesize;
278 		u_int8_t	params;
279 		u_int8_t	__reserved;
280 		u_int32_t	size;
281 
282 		struct ips_chunk {
283 			u_int8_t	channel;
284 			u_int8_t	target;
285 			u_int16_t	__reserved;
286 			u_int32_t	startsec;
287 			u_int32_t	seccnt;
288 		}		chunk[IPS_MAXCHUNKS];
289 	}		ld[IPS_MAXDRIVES];
290 
291 	struct ips_dev {
292 		u_int8_t	initiator;
293 		u_int8_t	params;
294 		u_int8_t	miscflag;
295 		u_int8_t	state;
296 #define IPS_DVS_STANDBY	0x01
297 #define IPS_DVS_REBUILD	0x02
298 #define IPS_DVS_SPARE	0x04
299 #define IPS_DVS_MEMBER	0x08
300 #define IPS_DVS_ONLINE	0x80
301 #define IPS_DVS_READY	(IPS_DVS_STANDBY | IPS_DVS_ONLINE)
302 
303 		u_int32_t	seccnt;
304 		u_int8_t	devid[28];
305 	}		dev[IPS_MAXCHANS][IPS_MAXTARGETS];
306 
307 	u_int8_t	reserved[512];
308 };
309 
310 struct ips_rblstat {
311 	u_int8_t	__unknown[20];
312 	struct {
313 		u_int8_t	__unknown[4];
314 		u_int32_t	total;
315 		u_int32_t	remain;
316 	}		ld[IPS_MAXDRIVES];
317 };
318 
319 struct ips_pg5 {
320 	u_int32_t	signature;
321 	u_int8_t	__reserved1;
322 	u_int8_t	slot;
323 	u_int16_t	type;
324 	u_int8_t	bioshi[4];
325 	u_int8_t	bioslo[4];
326 	u_int16_t	__reserved2;
327 	u_int8_t	__reserved3;
328 	u_int8_t	os;
329 	u_int8_t	driverhi[4];
330 	u_int8_t	driverlo[4];
331 	u_int8_t	__reserved4[100];
332 };
333 
334 struct ips_info {
335 	struct ips_adapterinfo	adapter;
336 	struct ips_driveinfo	drive;
337 	struct ips_conf		conf;
338 	struct ips_rblstat	rblstat;
339 	struct ips_pg5		pg5;
340 };
341 
342 /* Command control block */
343 struct ips_softc;
344 struct ips_ccb {
345 	struct ips_softc *	c_sc;		/* driver softc */
346 	int			c_id;		/* command id */
347 	int			c_flags;	/* SCSI_* flags */
348 	enum {
349 		IPS_CCB_FREE,
350 		IPS_CCB_QUEUED,
351 		IPS_CCB_DONE
352 	}			c_state;	/* command state */
353 
354 	void *			c_cmdbva;	/* command block virt addr */
355 	paddr_t			c_cmdbpa;	/* command block phys addr */
356 	bus_dmamap_t		c_dmam;		/* data buffer DMA map */
357 
358 	struct scsi_xfer *	c_xfer;		/* corresponding SCSI xfer */
359 
360 	u_int8_t		c_stat;		/* status byte copy */
361 	u_int8_t		c_estat;	/* ext status byte copy */
362 	int			c_error;	/* completion error */
363 
364 	void			(*c_done)(struct ips_softc *,	/* cmd done */
365 				    struct ips_ccb *);		/* callback */
366 
367 	SLIST_ENTRY(ips_ccb)	c_link;		/* queue link */
368 };
369 
370 /* CCB queue */
371 SLIST_HEAD(ips_ccbq, ips_ccb);
372 
373 /* DMA-able chunk of memory */
374 struct dmamem {
375 	bus_dma_tag_t		dm_tag;
376 	bus_dmamap_t		dm_map;
377 	bus_dma_segment_t	dm_seg;
378 	bus_size_t		dm_size;
379 	void *			dm_vaddr;
380 #define dm_paddr dm_seg.ds_addr
381 };
382 
383 struct ips_softc {
384 	struct device		sc_dev;
385 
386 	struct scsi_link	sc_scsi_link;
387 	struct scsibus_softc *	sc_scsibus;
388 
389 	struct ips_pt {
390 		struct ips_softc *	pt_sc;
391 		int			pt_chan;
392 
393 		struct scsi_link	pt_link;
394 
395 		int			pt_proctgt;
396 		char			pt_procdev[16];
397 	}			sc_pt[IPS_MAXCHANS];
398 
399 	struct ksensordev	sc_sensordev;
400 	struct ksensor *	sc_sensors;
401 
402 	bus_space_tag_t		sc_iot;
403 	bus_space_handle_t	sc_ioh;
404 	bus_dma_tag_t		sc_dmat;
405 
406 	const struct ips_chipset *sc_chip;
407 
408 	struct ips_info *	sc_info;
409 	struct dmamem		sc_infom;
410 
411 	int			sc_nunits;
412 
413 	struct dmamem		sc_cmdbm;
414 
415 	struct ips_ccb *	sc_ccb;
416 	int			sc_nccbs;
417 	struct ips_ccbq		sc_ccbq_free;
418 	struct mutex		sc_ccb_mtx;
419 	struct scsi_iopool	sc_iopool;
420 
421 	struct dmamem		sc_sqm;
422 	paddr_t			sc_sqtail;
423 	u_int32_t *		sc_sqbuf;
424 	int			sc_sqidx;
425 };
426 
427 int	ips_match(struct device *, void *, void *);
428 void	ips_attach(struct device *, struct device *, void *);
429 
430 void	ips_scsi_cmd(struct scsi_xfer *);
431 void	ips_scsi_pt_cmd(struct scsi_xfer *);
432 int	ips_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
433 
434 #if NBIO > 0
435 int	ips_ioctl(struct device *, u_long, caddr_t);
436 int	ips_ioctl_inq(struct ips_softc *, struct bioc_inq *);
437 int	ips_ioctl_vol(struct ips_softc *, struct bioc_vol *);
438 int	ips_ioctl_disk(struct ips_softc *, struct bioc_disk *);
439 int	ips_ioctl_setstate(struct ips_softc *, struct bioc_setstate *);
440 #endif
441 
442 #ifndef SMALL_KERNEL
443 void	ips_sensors(void *);
444 #endif
445 
446 int	ips_load_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
447 void	ips_start_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
448 
449 int	ips_cmd(struct ips_softc *, struct ips_ccb *);
450 int	ips_poll(struct ips_softc *, struct ips_ccb *);
451 void	ips_done(struct ips_softc *, struct ips_ccb *);
452 void	ips_done_xs(struct ips_softc *, struct ips_ccb *);
453 void	ips_done_pt(struct ips_softc *, struct ips_ccb *);
454 void	ips_done_mgmt(struct ips_softc *, struct ips_ccb *);
455 int	ips_error(struct ips_softc *, struct ips_ccb *);
456 int	ips_error_xs(struct ips_softc *, struct ips_ccb *);
457 int	ips_intr(void *);
458 void	ips_timeout(void *);
459 
460 int	ips_getadapterinfo(struct ips_softc *, int);
461 int	ips_getdriveinfo(struct ips_softc *, int);
462 int	ips_getconf(struct ips_softc *, int);
463 int	ips_getpg5(struct ips_softc *, int);
464 
465 #if NBIO > 0
466 int	ips_getrblstat(struct ips_softc *, int);
467 int	ips_setstate(struct ips_softc *, int, int, int, int);
468 int	ips_rebuild(struct ips_softc *, int, int, int, int, int);
469 #endif
470 
471 void	ips_copperhead_exec(struct ips_softc *, struct ips_ccb *);
472 void	ips_copperhead_intren(struct ips_softc *);
473 int	ips_copperhead_isintr(struct ips_softc *);
474 u_int32_t ips_copperhead_status(struct ips_softc *);
475 
476 void	ips_morpheus_exec(struct ips_softc *, struct ips_ccb *);
477 void	ips_morpheus_intren(struct ips_softc *);
478 int	ips_morpheus_isintr(struct ips_softc *);
479 u_int32_t ips_morpheus_status(struct ips_softc *);
480 
481 struct ips_ccb *ips_ccb_alloc(struct ips_softc *, int);
482 void	ips_ccb_free(struct ips_softc *, struct ips_ccb *, int);
483 void	*ips_ccb_get(void *);
484 void	ips_ccb_put(void *, void *);
485 
486 int	ips_dmamem_alloc(struct dmamem *, bus_dma_tag_t, bus_size_t);
487 void	ips_dmamem_free(struct dmamem *);
488 
489 struct cfattach ips_ca = {
490 	sizeof(struct ips_softc),
491 	ips_match,
492 	ips_attach
493 };
494 
495 struct cfdriver ips_cd = {
496 	NULL, "ips", DV_DULL
497 };
498 
499 static struct scsi_adapter ips_switch = {
500 	ips_scsi_cmd, NULL, NULL, NULL, ips_scsi_ioctl
501 };
502 
503 static struct scsi_adapter ips_pt_switch = {
504 	ips_scsi_pt_cmd, NULL, NULL, NULL, NULL
505 };
506 
507 static const struct pci_matchid ips_ids[] = {
508 	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID },
509 	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID2 },
510 	{ PCI_VENDOR_ADP2,	PCI_PRODUCT_ADP2_SERVERAID }
511 };
512 
513 static const struct ips_chipset {
514 	enum {
515 		IPS_CHIP_COPPERHEAD = 0,
516 		IPS_CHIP_MORPHEUS
517 	}		ic_id;
518 
519 	int		ic_bar;
520 
521 	void		(*ic_exec)(struct ips_softc *, struct ips_ccb *);
522 	void		(*ic_intren)(struct ips_softc *);
523 	int		(*ic_isintr)(struct ips_softc *);
524 	u_int32_t	(*ic_status)(struct ips_softc *);
525 } ips_chips[] = {
526 	{
527 		IPS_CHIP_COPPERHEAD,
528 		0x14,
529 		ips_copperhead_exec,
530 		ips_copperhead_intren,
531 		ips_copperhead_isintr,
532 		ips_copperhead_status
533 	},
534 	{
535 		IPS_CHIP_MORPHEUS,
536 		0x10,
537 		ips_morpheus_exec,
538 		ips_morpheus_intren,
539 		ips_morpheus_isintr,
540 		ips_morpheus_status
541 	}
542 };
543 
544 #define ips_exec(s, c)	(s)->sc_chip->ic_exec((s), (c))
545 #define ips_intren(s)	(s)->sc_chip->ic_intren((s))
546 #define ips_isintr(s)	(s)->sc_chip->ic_isintr((s))
547 #define ips_status(s)	(s)->sc_chip->ic_status((s))
548 
549 static const char *ips_names[] = {
550 	NULL,
551 	NULL,
552 	"II",
553 	"onboard",
554 	"onboard",
555 	"3H",
556 	"3L",
557 	"4H",
558 	"4M",
559 	"4L",
560 	"4Mx",
561 	"4Lx",
562 	"5i",
563 	"5i",
564 	"6M",
565 	"6i",
566 	"7t",
567 	"7k",
568 	"7M"
569 };
570 
571 int
572 ips_match(struct device *parent, void *match, void *aux)
573 {
574 	return (pci_matchbyid(aux, ips_ids,
575 	    sizeof(ips_ids) / sizeof(ips_ids[0])));
576 }
577 
578 void
579 ips_attach(struct device *parent, struct device *self, void *aux)
580 {
581 	struct ips_softc *sc = (struct ips_softc *)self;
582 	struct pci_attach_args *pa = aux;
583 	struct ips_ccb ccb0;
584 	struct scsibus_attach_args saa;
585 	struct ips_adapterinfo *ai;
586 	struct ips_driveinfo *di;
587 	struct ips_pg5 *pg5;
588 	pcireg_t maptype;
589 	bus_size_t iosize;
590 	pci_intr_handle_t ih;
591 	const char *intrstr;
592 	int type, i;
593 
594 	sc->sc_dmat = pa->pa_dmat;
595 
596 	/* Identify chipset */
597 	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_IBM_SERVERAID)
598 		sc->sc_chip = &ips_chips[IPS_CHIP_COPPERHEAD];
599 	else
600 		sc->sc_chip = &ips_chips[IPS_CHIP_MORPHEUS];
601 
602 	/* Map registers */
603 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_chip->ic_bar);
604 	if (pci_mapreg_map(pa, sc->sc_chip->ic_bar, maptype, 0, &sc->sc_iot,
605 	    &sc->sc_ioh, NULL, &iosize, IPS_IOSIZE)) {
606 		printf(": can't map regs\n");
607 		return;
608 	}
609 
610 	/* Allocate command buffer */
611 	if (ips_dmamem_alloc(&sc->sc_cmdbm, sc->sc_dmat,
612 	    IPS_MAXCMDS * sizeof(struct ips_cmdb))) {
613 		printf(": can't alloc cmd buffer\n");
614 		goto fail1;
615 	}
616 
617 	/* Allocate info buffer */
618 	if (ips_dmamem_alloc(&sc->sc_infom, sc->sc_dmat,
619 	    sizeof(struct ips_info))) {
620 		printf(": can't alloc info buffer\n");
621 		goto fail2;
622 	}
623 	sc->sc_info = sc->sc_infom.dm_vaddr;
624 	ai = &sc->sc_info->adapter;
625 	di = &sc->sc_info->drive;
626 	pg5 = &sc->sc_info->pg5;
627 
628 	/* Allocate status queue for the Copperhead chipset */
629 	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD) {
630 		if (ips_dmamem_alloc(&sc->sc_sqm, sc->sc_dmat, IPS_SQSZ)) {
631 			printf(": can't alloc status queue\n");
632 			goto fail3;
633 		}
634 		sc->sc_sqtail = sc->sc_sqm.dm_paddr;
635 		sc->sc_sqbuf = sc->sc_sqm.dm_vaddr;
636 		sc->sc_sqidx = 0;
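		/*
		 * Program the status queue ring registers: SQS/SQE bound the
		 * ring, the head is set one entry past the start and the
		 * tail to the start, so the ring starts out empty.
		 * ips_copperhead_status() advances the tail as status words
		 * are consumed.
		 */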
637 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQS,
638 		    sc->sc_sqm.dm_paddr);
639 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQE,
640 		    sc->sc_sqm.dm_paddr + IPS_SQSZ);
641 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH,
642 		    sc->sc_sqm.dm_paddr + sizeof(u_int32_t));
643 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT,
644 		    sc->sc_sqm.dm_paddr);
645 	}
646 
647 	/* Bootstrap CCB queue */
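	/*
	 * Management commands issued during attach need a CCB before the
	 * adapter has reported how many commands it supports, so a single
	 * stack-allocated CCB is used until the real array is set up below.
	 */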
648 	sc->sc_nccbs = 1;
649 	sc->sc_ccb = &ccb0;
650 	bzero(&ccb0, sizeof(ccb0));
651 	ccb0.c_cmdbva = sc->sc_cmdbm.dm_vaddr;
652 	ccb0.c_cmdbpa = sc->sc_cmdbm.dm_paddr;
653 	SLIST_INIT(&sc->sc_ccbq_free);
654 	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, &ccb0, c_link);
655 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
656 	scsi_iopool_init(&sc->sc_iopool, sc, ips_ccb_get, ips_ccb_put);
657 
658 	/* Get adapter info */
659 	if (ips_getadapterinfo(sc, SCSI_NOSLEEP)) {
660 		printf(": can't get adapter info\n");
661 		goto fail4;
662 	}
663 
664 	/* Get logical drives info */
665 	if (ips_getdriveinfo(sc, SCSI_NOSLEEP)) {
666 		printf(": can't get ld info\n");
667 		goto fail4;
668 	}
669 	sc->sc_nunits = di->drivecnt;
670 
671 	/* Get configuration */
672 	if (ips_getconf(sc, SCSI_NOSLEEP)) {
673 		printf(": can't get config\n");
674 		goto fail4;
675 	}
676 
677 	/* Read NVRAM page 5 for additional info */
678 	(void)ips_getpg5(sc, SCSI_NOSLEEP);
679 
680 	/* Initialize CCB queue */
681 	sc->sc_nccbs = ai->cmdcnt;
682 	if ((sc->sc_ccb = ips_ccb_alloc(sc, sc->sc_nccbs)) == NULL) {
683 		printf(": can't alloc ccb queue\n");
684 		goto fail4;
685 	}
686 	SLIST_INIT(&sc->sc_ccbq_free);
687 	for (i = 0; i < sc->sc_nccbs; i++)
688 		SLIST_INSERT_HEAD(&sc->sc_ccbq_free,
689 		    &sc->sc_ccb[i], c_link);
690 
691 	/* Install interrupt handler */
692 	if (pci_intr_map(pa, &ih)) {
693 		printf(": can't map interrupt\n");
694 		goto fail5;
695 	}
696 	intrstr = pci_intr_string(pa->pa_pc, ih);
697 	if (pci_intr_establish(pa->pa_pc, ih, IPL_BIO, ips_intr, sc,
698 	    sc->sc_dev.dv_xname) == NULL) {
699 		printf(": can't establish interrupt");
700 		if (intrstr != NULL)
701 			printf(" at %s", intrstr);
702 		printf("\n");
703 		goto fail5;
704 	}
705 	printf(": %s\n", intrstr);
706 
707 	/* Display adapter info */
708 	printf("%s: ServeRAID", sc->sc_dev.dv_xname);
709 	type = letoh16(pg5->type);
710 	if (type < sizeof(ips_names) / sizeof(ips_names[0]) && ips_names[type])
711 		printf(" %s", ips_names[type]);
712 	printf(", FW %c%c%c%c%c%c%c", ai->firmware[0], ai->firmware[1],
713 	    ai->firmware[2], ai->firmware[3], ai->firmware[4], ai->firmware[5],
714 	    ai->firmware[6]);
715 	printf(", BIOS %c%c%c%c%c%c%c", ai->bios[0], ai->bios[1], ai->bios[2],
716 	    ai->bios[3], ai->bios[4], ai->bios[5], ai->bios[6]);
717 	printf(", %d cmds, %d LD%s", sc->sc_nccbs, sc->sc_nunits,
718 	    (sc->sc_nunits == 1 ? "" : "s"));
719 	printf("\n");
720 
721 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
722 	saa.saa_adapter_buswidth = sc->sc_nunits;
723 	saa.saa_adapter = &ips_switch;
724 	saa.saa_adapter_softc = sc;
725 	saa.saa_luns = 8;
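	/* Give each logical drive an even share of the command slots. */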
726 	if (sc->sc_nunits > 0)
727 		saa.saa_openings = sc->sc_nccbs / sc->sc_nunits;
728 	else
729 		saa.saa_openings = 0;
730 	saa.saa_pool = &sc->sc_iopool;
731 	saa.saa_quirks = saa.saa_flags = 0;
732 	saa.saa_wwpn = saa.saa_wwnn = 0;
733 
734 	sc->sc_scsibus = (struct scsibus_softc *)config_found(self, &saa,
735 	    scsiprint);
736 
737 	/* For each channel attach SCSI pass-through bus */
738 	for (i = 0; i < IPS_MAXCHANS; i++) {
739 		struct ips_pt *pt;
740 		int target, lastarget;
741 
742 		pt = &sc->sc_pt[i];
743 		pt->pt_sc = sc;
744 		pt->pt_chan = i;
745 		pt->pt_proctgt = -1;
746 
747 		/* Check if channel has any devices besides disks */
748 		for (target = 0, lastarget = -1; target < IPS_MAXTARGETS;
749 		    target++) {
750 			struct ips_dev *idev;
751 			int type;
752 
753 			idev = &sc->sc_info->conf.dev[i][target];
754 			type = idev->params & SID_TYPE;
755 			if (idev->state && type != T_DIRECT) {
756 				lastarget = target;
757 				if (type == T_PROCESSOR ||
758 				    type == T_ENCLOSURE)
759 					/* remember enclosure address */
760 					pt->pt_proctgt = target;
761 			}
762 		}
763 		if (lastarget == -1)
764 			continue;
765 
766 		saa.saa_adapter = &ips_pt_switch;
767 		saa.saa_adapter_softc = pt;
768 		saa.saa_adapter_buswidth = lastarget + 1;
769 		saa.saa_adapter_target = IPS_MAXTARGETS;
770 		saa.saa_luns = 8;
771 		saa.saa_openings = 1;
772 		saa.saa_pool = &sc->sc_iopool;
773 		saa.saa_quirks = saa.saa_flags = 0;
774 		saa.saa_wwpn = saa.saa_wwnn = 0;
775 
776 		config_found(self, &saa, scsiprint);
777 	}
778 
779 	/* Enable interrupts */
780 	ips_intren(sc);
781 
782 #if NBIO > 0
783 	/* Install ioctl handler */
784 	if (bio_register(&sc->sc_dev, ips_ioctl))
785 		printf("%s: no ioctl support\n", sc->sc_dev.dv_xname);
786 #endif
787 
788 #ifndef SMALL_KERNEL
789 	/* Add sensors */
790 	if ((sc->sc_sensors = mallocarray(sc->sc_nunits, sizeof(struct ksensor),
791 	    M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
792 		printf(": can't alloc sensors\n");
793 		return;
794 	}
795 	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
796 	    sizeof(sc->sc_sensordev.xname));
797 	for (i = 0; i < sc->sc_nunits; i++) {
798 		struct device *dev;
799 
800 		sc->sc_sensors[i].type = SENSOR_DRIVE;
801 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
802 		dev = scsi_get_link(sc->sc_scsibus, i, 0)->device_softc;
803 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
804 		    sizeof(sc->sc_sensors[i].desc));
805 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
806 	}
807 	if (sensor_task_register(sc, ips_sensors, 10) == NULL) {
808 		printf(": no sensors support\n");
809 		free(sc->sc_sensors, M_DEVBUF,
810 		    sc->sc_nunits * sizeof(struct ksensor));
811 		return;
812 	}
813 	sensordev_install(&sc->sc_sensordev);
814 #endif	/* !SMALL_KERNEL */
815 
816 	return;
817 fail5:
818 	ips_ccb_free(sc, sc->sc_ccb, sc->sc_nccbs);
819 fail4:
820 	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD)
821 		ips_dmamem_free(&sc->sc_sqm);
822 fail3:
823 	ips_dmamem_free(&sc->sc_infom);
824 fail2:
825 	ips_dmamem_free(&sc->sc_cmdbm);
826 fail1:
827 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
828 }
829 
830 void
831 ips_scsi_cmd(struct scsi_xfer *xs)
832 {
833 	struct scsi_link *link = xs->sc_link;
834 	struct ips_softc *sc = link->bus->sb_adapter_softc;
835 	struct ips_driveinfo *di = &sc->sc_info->drive;
836 	struct ips_drive *drive;
837 	struct scsi_inquiry_data inq;
838 	struct scsi_read_cap_data rcd;
839 	struct scsi_sense_data sd;
840 	struct scsi_rw *rw;
841 	struct scsi_rw_big *rwb;
842 	struct ips_ccb *ccb = xs->io;
843 	struct ips_cmd *cmd;
844 	int target = link->target;
845 	u_int32_t blkno, blkcnt;
846 	int code;
847 
848 	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_cmd: xs %p, target %d, "
849 	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, target,
850 	    xs->cmd->opcode, xs->flags));
851 
852 	if (target >= sc->sc_nunits || link->lun != 0) {
853 		DPRINTF(IPS_D_INFO, ("%s: ips_scsi_cmd: invalid params "
854 		    "target %d, lun %d\n", sc->sc_dev.dv_xname,
855 		    target, link->lun));
856 		xs->error = XS_DRIVER_STUFFUP;
857 		scsi_done(xs);
858 		return;
859 	}
860 
861 	drive = &di->drive[target];
862 	xs->error = XS_NOERROR;
863 
864 	/* Fake SCSI commands */
865 	switch (xs->cmd->opcode) {
866 	case READ_BIG:
867 	case READ_COMMAND:
868 	case WRITE_BIG:
869 	case WRITE_COMMAND:
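		/*
		 * Decode the CDB: the 6-byte READ/WRITE form carries a 21-bit
		 * LBA and an 8-bit length where 0 means 256 blocks; the
		 * 10-byte form carries a 32-bit LBA and a 16-bit length.
		 */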
870 		if (xs->cmdlen == sizeof(struct scsi_rw)) {
871 			rw = (void *)xs->cmd;
872 			blkno = _3btol(rw->addr) &
873 			    (SRW_TOPADDR << 16 | 0xffff);
874 			blkcnt = rw->length ? rw->length : 0x100;
875 		} else {
876 			rwb = (void *)xs->cmd;
877 			blkno = _4btol(rwb->addr);
878 			blkcnt = _2btol(rwb->length);
879 		}
880 
881 		if (blkno >= letoh32(drive->seccnt) || blkno + blkcnt >
882 		    letoh32(drive->seccnt)) {
883 			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: invalid params "
884 			    "blkno %u, blkcnt %u\n", sc->sc_dev.dv_xname,
885 			    blkno, blkcnt));
886 			xs->error = XS_DRIVER_STUFFUP;
887 			break;
888 		}
889 
890 		if (xs->flags & SCSI_DATA_IN)
891 			code = IPS_CMD_READ;
892 		else
893 			code = IPS_CMD_WRITE;
894 
895 		ccb = xs->io;
896 
897 		cmd = ccb->c_cmdbva;
898 		cmd->code = code;
899 		cmd->drive = target;
900 		cmd->lba = htole32(blkno);
901 		cmd->seccnt = htole16(blkcnt);
902 
903 		if (ips_load_xs(sc, ccb, xs)) {
904 			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: ips_load_xs "
905 			    "failed\n", sc->sc_dev.dv_xname));
906 			xs->error = XS_DRIVER_STUFFUP;
907 			scsi_done(xs);
908 			return;
909 		}
910 
911 		if (cmd->sgcnt > 0)
912 			cmd->code |= IPS_CMD_SG;
913 
914 		ccb->c_done = ips_done_xs;
915 		ips_start_xs(sc, ccb, xs);
916 		return;
917 	case INQUIRY:
918 		bzero(&inq, sizeof(inq));
919 		inq.device = T_DIRECT;
920 		inq.version = 2;
921 		inq.response_format = 2;
922 		inq.additional_length = 32;
923 		inq.flags |= SID_CmdQue;
924 		strlcpy(inq.vendor, "IBM", sizeof(inq.vendor));
925 		snprintf(inq.product, sizeof(inq.product),
926 		    "LD%d RAID%d", target, drive->raid);
927 		strlcpy(inq.revision, "1.0", sizeof(inq.revision));
928 		memcpy(xs->data, &inq, MIN(xs->datalen, sizeof(inq)));
929 		break;
930 	case READ_CAPACITY:
931 		bzero(&rcd, sizeof(rcd));
932 		_lto4b(letoh32(drive->seccnt) - 1, rcd.addr);
933 		_lto4b(IPS_SECSZ, rcd.length);
934 		memcpy(xs->data, &rcd, MIN(xs->datalen, sizeof(rcd)));
935 		break;
936 	case REQUEST_SENSE:
937 		bzero(&sd, sizeof(sd));
938 		sd.error_code = SSD_ERRCODE_CURRENT;
939 		sd.flags = SKEY_NO_SENSE;
940 		memcpy(xs->data, &sd, MIN(xs->datalen, sizeof(sd)));
941 		break;
942 	case SYNCHRONIZE_CACHE:
943 		cmd = ccb->c_cmdbva;
944 		cmd->code = IPS_CMD_FLUSH;
945 
946 		ccb->c_done = ips_done_xs;
947 		ips_start_xs(sc, ccb, xs);
948 		return;
949 	case PREVENT_ALLOW:
950 	case START_STOP:
951 	case TEST_UNIT_READY:
952 		break;
953 	default:
954 		DPRINTF(IPS_D_INFO, ("%s: unsupported scsi command 0x%02x\n",
955 		    sc->sc_dev.dv_xname, xs->cmd->opcode));
956 		xs->error = XS_DRIVER_STUFFUP;
957 	}
958 
959 	scsi_done(xs);
960 }
961 
962 void
963 ips_scsi_pt_cmd(struct scsi_xfer *xs)
964 {
965 	struct scsi_link *link = xs->sc_link;
966 	struct ips_pt *pt = link->bus->sb_adapter_softc;
967 	struct ips_softc *sc = pt->pt_sc;
968 	struct device *dev = link->device_softc;
969 	struct ips_ccb *ccb = xs->io;
970 	struct ips_cmdb *cmdb;
971 	struct ips_cmd *cmd;
972 	struct ips_dcdb *dcdb;
973 	int chan = pt->pt_chan, target = link->target;
974 
975 	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_pt_cmd: xs %p, chan %d, target %d, "
976 	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, chan,
977 	    target, xs->cmd->opcode, xs->flags));
978 
979 	if (pt->pt_procdev[0] == '\0' && target == pt->pt_proctgt && dev)
980 		strlcpy(pt->pt_procdev, dev->dv_xname, sizeof(pt->pt_procdev));
981 
982 	if (xs->cmdlen > IPS_MAXCDB) {
983 		DPRINTF(IPS_D_ERR, ("%s: cmdlen %d too big\n",
984 		    sc->sc_dev.dv_xname, xs->cmdlen));
985 
986 		bzero(&xs->sense, sizeof(xs->sense));
987 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
988 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
989 		xs->sense.add_sense_code = 0x20; /* illcmd, 0x24 illfield */
990 		xs->error = XS_SENSE;
991 		scsi_done(xs);
992 		return;
993 	}
994 
995 	xs->error = XS_NOERROR;
996 
997 	cmdb = ccb->c_cmdbva;
998 	cmd = &cmdb->cmd;
999 	dcdb = &cmdb->dcdb;
1000 
1001 	cmd->code = IPS_CMD_DCDB;
1002 
1003 	dcdb->device = (chan << 4) | target;
1004 	if (xs->flags & SCSI_DATA_IN)
1005 		dcdb->attr |= IPS_DCDB_DATAIN;
1006 	if (xs->flags & SCSI_DATA_OUT)
1007 		dcdb->attr |= IPS_DCDB_DATAOUT;
1008 
1009 	/*
1010 	 * Adjust the timeout value to what the controller supports, and
1011 	 * make sure our own timeout fires only after the controller gives up.
1012 	 */
1013 	if (xs->timeout <= 10000) {
1014 		dcdb->attr |= IPS_DCDB_TIMO10;
1015 		xs->timeout = 11000;
1016 	} else if (xs->timeout <= 60000) {
1017 		dcdb->attr |= IPS_DCDB_TIMO60;
1018 		xs->timeout = 61000;
1019 	} else {
1020 		dcdb->attr |= IPS_DCDB_TIMO20M;
1021 		xs->timeout = 20 * 60000 + 1000;
1022 	}
1023 
1024 	dcdb->attr |= IPS_DCDB_DISCON;
1025 	dcdb->datalen = htole16(xs->datalen);
1026 	dcdb->cdblen = xs->cmdlen;
1027 	dcdb->senselen = MIN(sizeof(xs->sense), sizeof(dcdb->sense));
1028 	memcpy(dcdb->cdb, xs->cmd, xs->cmdlen);
1029 
1030 	if (ips_load_xs(sc, ccb, xs)) {
1031 		DPRINTF(IPS_D_ERR, ("%s: ips_scsi_pt_cmd: ips_load_xs "
1032 		    "failed\n", sc->sc_dev.dv_xname));
1033 		xs->error = XS_DRIVER_STUFFUP;
1034 		scsi_done(xs);
1035 		return;
1036 	}
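	/*
	 * ips_load_xs() set up the data buffer address and S/G count in the
	 * command frame; for a pass-through command they belong in the DCDB,
	 * so move them there and point the command at the DCDB itself.
	 */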
1037 	if (cmd->sgcnt > 0)
1038 		cmd->code |= IPS_CMD_SG;
1039 	dcdb->sgaddr = cmd->sgaddr;
1040 	dcdb->sgcnt = cmd->sgcnt;
1041 	cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb, dcdb));
1042 	cmd->sgcnt = 0;
1043 
1044 	ccb->c_done = ips_done_pt;
1045 	ips_start_xs(sc, ccb, xs);
1046 }
1047 
1048 int
1049 ips_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1050 {
1051 #if NBIO > 0
1052 	return (ips_ioctl(link->bus->sb_adapter_softc, cmd, addr));
1053 #else
1054 	return (ENOTTY);
1055 #endif
1056 }
1057 
1058 #if NBIO > 0
1059 int
1060 ips_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1061 {
1062 	struct ips_softc *sc = (struct ips_softc *)dev;
1063 
1064 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl: cmd %lu\n",
1065 	    sc->sc_dev.dv_xname, cmd));
1066 
1067 	switch (cmd) {
1068 	case BIOCINQ:
1069 		return (ips_ioctl_inq(sc, (struct bioc_inq *)addr));
1070 	case BIOCVOL:
1071 		return (ips_ioctl_vol(sc, (struct bioc_vol *)addr));
1072 	case BIOCDISK:
1073 		return (ips_ioctl_disk(sc, (struct bioc_disk *)addr));
1074 	case BIOCSETSTATE:
1075 		return (ips_ioctl_setstate(sc, (struct bioc_setstate *)addr));
1076 	default:
1077 		return (ENOTTY);
1078 	}
1079 }
1080 
1081 int
1082 ips_ioctl_inq(struct ips_softc *sc, struct bioc_inq *bi)
1083 {
1084 	struct ips_conf *conf = &sc->sc_info->conf;
1085 	int i;
1086 
1087 	strlcpy(bi->bi_dev, sc->sc_dev.dv_xname, sizeof(bi->bi_dev));
1088 	bi->bi_novol = sc->sc_nunits;
1089 	for (i = 0, bi->bi_nodisk = 0; i < sc->sc_nunits; i++)
1090 		bi->bi_nodisk += conf->ld[i].chunkcnt;
1091 
1092 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_inq: novol %d, nodisk %d\n",
1093 	    bi->bi_dev, bi->bi_novol, bi->bi_nodisk));
1094 
1095 	return (0);
1096 }
1097 
1098 int
1099 ips_ioctl_vol(struct ips_softc *sc, struct bioc_vol *bv)
1100 {
1101 	struct ips_driveinfo *di = &sc->sc_info->drive;
1102 	struct ips_conf *conf = &sc->sc_info->conf;
1103 	struct ips_rblstat *rblstat = &sc->sc_info->rblstat;
1104 	struct ips_ld *ld;
1105 	int vid = bv->bv_volid;
1106 	struct device *dv;
1107 	int error, rebuild = 0;
1108 	u_int32_t total = 0, done = 0;
1109 
1110 	if (vid >= sc->sc_nunits)
1111 		return (EINVAL);
1112 	if ((error = ips_getconf(sc, 0)))
1113 		return (error);
1114 	ld = &conf->ld[vid];
1115 
1116 	switch (ld->state) {
1117 	case IPS_DS_ONLINE:
1118 		bv->bv_status = BIOC_SVONLINE;
1119 		break;
1120 	case IPS_DS_DEGRADED:
1121 		bv->bv_status = BIOC_SVDEGRADED;
1122 		rebuild++;
1123 		break;
1124 	case IPS_DS_OFFLINE:
1125 		bv->bv_status = BIOC_SVOFFLINE;
1126 		break;
1127 	default:
1128 		bv->bv_status = BIOC_SVINVALID;
1129 	}
1130 
1131 	if (rebuild && ips_getrblstat(sc, 0) == 0) {
1132 		total = letoh32(rblstat->ld[vid].total);
1133 		done = total - letoh32(rblstat->ld[vid].remain);
1134 		if (total && total > done) {
1135 			bv->bv_status = BIOC_SVREBUILD;
1136 			bv->bv_percent = 100 * done / total;
1137 		}
1138 	}
1139 
1140 	bv->bv_size = (uint64_t)letoh32(ld->size) * IPS_SECSZ;
1141 	bv->bv_level = di->drive[vid].raid;
1142 	bv->bv_nodisk = ld->chunkcnt;
1143 
1144 	/* Associate all unused and spare drives with first volume */
1145 	if (vid == 0) {
1146 		struct ips_dev *dev;
1147 		int chan, target;
1148 
1149 		for (chan = 0; chan < IPS_MAXCHANS; chan++)
1150 			for (target = 0; target < IPS_MAXTARGETS; target++) {
1151 				dev = &conf->dev[chan][target];
1152 				if (dev->state && !(dev->state &
1153 				    IPS_DVS_MEMBER) &&
1154 				    (dev->params & SID_TYPE) == T_DIRECT)
1155 					bv->bv_nodisk++;
1156 			}
1157 	}
1158 
1159 	dv = scsi_get_link(sc->sc_scsibus, vid, 0)->device_softc;
1160 	strlcpy(bv->bv_dev, dv->dv_xname, sizeof(bv->bv_dev));
1161 	strlcpy(bv->bv_vendor, "IBM", sizeof(bv->bv_vendor));
1162 
1163 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_vol: vid %d, state 0x%02x, "
1164 	    "total %u, done %u, size %llu, level %d, nodisk %d, dev %s\n",
1165 	    sc->sc_dev.dv_xname, vid, ld->state, total, done, bv->bv_size,
1166 	    bv->bv_level, bv->bv_nodisk, bv->bv_dev));
1167 
1168 	return (0);
1169 }
1170 
1171 int
1172 ips_ioctl_disk(struct ips_softc *sc, struct bioc_disk *bd)
1173 {
1174 	struct ips_conf *conf = &sc->sc_info->conf;
1175 	struct ips_ld *ld;
1176 	struct ips_chunk *chunk;
1177 	struct ips_dev *dev;
1178 	int vid = bd->bd_volid, did = bd->bd_diskid;
1179 	int chan, target, error, i;
1180 
1181 	if (vid >= sc->sc_nunits)
1182 		return (EINVAL);
1183 	if ((error = ips_getconf(sc, 0)))
1184 		return (error);
1185 	ld = &conf->ld[vid];
1186 
1187 	if (did >= ld->chunkcnt) {
1188 		/* Probably unused or spare drives */
1189 		if (vid != 0)
1190 			return (EINVAL);
1191 
1192 		i = ld->chunkcnt;
1193 		for (chan = 0; chan < IPS_MAXCHANS; chan++)
1194 			for (target = 0; target < IPS_MAXTARGETS; target++) {
1195 				dev = &conf->dev[chan][target];
1196 				if (dev->state && !(dev->state &
1197 				    IPS_DVS_MEMBER) &&
1198 				    (dev->params & SID_TYPE) == T_DIRECT)
1199 					if (i++ == did)
1200 						goto out;
1201 			}
1202 	} else {
1203 		chunk = &ld->chunk[did];
1204 		chan = chunk->channel;
1205 		target = chunk->target;
1206 	}
1207 
1208 out:
1209 	if (chan >= IPS_MAXCHANS || target >= IPS_MAXTARGETS)
1210 		return (EINVAL);
1211 	dev = &conf->dev[chan][target];
1212 
1213 	bd->bd_channel = chan;
1214 	bd->bd_target = target;
1215 	bd->bd_lun = 0;
1216 	bd->bd_size = (uint64_t)letoh32(dev->seccnt) * IPS_SECSZ;
1217 
1218 	bzero(bd->bd_vendor, sizeof(bd->bd_vendor));
1219 	memcpy(bd->bd_vendor, dev->devid, MIN(sizeof(bd->bd_vendor),
1220 	    sizeof(dev->devid)));
1221 	strlcpy(bd->bd_procdev, sc->sc_pt[chan].pt_procdev,
1222 	    sizeof(bd->bd_procdev));
1223 
1224 	if (dev->state & IPS_DVS_READY) {
1225 		bd->bd_status = BIOC_SDUNUSED;
1226 		if (dev->state & IPS_DVS_MEMBER)
1227 			bd->bd_status = BIOC_SDONLINE;
1228 		if (dev->state & IPS_DVS_SPARE)
1229 			bd->bd_status = BIOC_SDHOTSPARE;
1230 		if (dev->state & IPS_DVS_REBUILD)
1231 			bd->bd_status = BIOC_SDREBUILD;
1232 	} else {
1233 		bd->bd_status = BIOC_SDOFFLINE;
1234 	}
1235 
1236 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_disk: vid %d, did %d, channel %d, "
1237 	    "target %d, size %llu, state 0x%02x\n", sc->sc_dev.dv_xname,
1238 	    vid, did, bd->bd_channel, bd->bd_target, bd->bd_size, dev->state));
1239 
1240 	return (0);
1241 }
1242 
1243 int
1244 ips_ioctl_setstate(struct ips_softc *sc, struct bioc_setstate *bs)
1245 {
1246 	struct ips_conf *conf = &sc->sc_info->conf;
1247 	struct ips_dev *dev;
1248 	int state, error;
1249 
1250 	if (bs->bs_channel >= IPS_MAXCHANS || bs->bs_target >= IPS_MAXTARGETS)
1251 		return (EINVAL);
1252 	if ((error = ips_getconf(sc, 0)))
1253 		return (error);
1254 	dev = &conf->dev[bs->bs_channel][bs->bs_target];
1255 	state = dev->state;
1256 
1257 	switch (bs->bs_status) {
1258 	case BIOC_SSONLINE:
1259 		state |= IPS_DVS_READY;
1260 		break;
1261 	case BIOC_SSOFFLINE:
1262 		state &= ~IPS_DVS_READY;
1263 		break;
1264 	case BIOC_SSHOTSPARE:
1265 		state |= IPS_DVS_SPARE;
1266 		break;
1267 	case BIOC_SSREBUILD:
1268 		return (ips_rebuild(sc, bs->bs_channel, bs->bs_target,
1269 		    bs->bs_channel, bs->bs_target, 0));
1270 	default:
1271 		return (EINVAL);
1272 	}
1273 
1274 	return (ips_setstate(sc, bs->bs_channel, bs->bs_target, state, 0));
1275 }
1276 #endif	/* NBIO > 0 */
1277 
1278 #ifndef SMALL_KERNEL
1279 void
1280 ips_sensors(void *arg)
1281 {
1282 	struct ips_softc *sc = arg;
1283 	struct ips_conf *conf = &sc->sc_info->conf;
1284 	struct ips_ld *ld;
1285 	int i;
1286 
1287 	/* ips_sensors() runs from a work queue and is thus allowed to sleep */
1288 	if (ips_getconf(sc, 0)) {
1289 		DPRINTF(IPS_D_ERR, ("%s: ips_sensors: ips_getconf failed\n",
1290 		    sc->sc_dev.dv_xname));
1291 
1292 		for (i = 0; i < sc->sc_nunits; i++) {
1293 			sc->sc_sensors[i].value = 0;
1294 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1295 		}
1296 		return;
1297 	}
1298 
1299 	DPRINTF(IPS_D_INFO, ("%s: ips_sensors:", sc->sc_dev.dv_xname));
1300 	for (i = 0; i < sc->sc_nunits; i++) {
1301 		ld = &conf->ld[i];
1302 		DPRINTF(IPS_D_INFO, (" ld%d.state 0x%02x", i, ld->state));
1303 		switch (ld->state) {
1304 		case IPS_DS_ONLINE:
1305 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
1306 			sc->sc_sensors[i].status = SENSOR_S_OK;
1307 			break;
1308 		case IPS_DS_DEGRADED:
1309 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
1310 			sc->sc_sensors[i].status = SENSOR_S_WARN;
1311 			break;
1312 		case IPS_DS_OFFLINE:
1313 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
1314 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
1315 			break;
1316 		default:
1317 			sc->sc_sensors[i].value = 0;
1318 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1319 		}
1320 	}
1321 	DPRINTF(IPS_D_INFO, ("\n"));
1322 }
1323 #endif	/* !SMALL_KERNEL */
1324 
1325 int
1326 ips_load_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
1327 {
1328 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1329 	struct ips_cmd *cmd = &cmdb->cmd;
1330 	struct ips_sg *sg = cmdb->sg;
1331 	int nsegs, i;
1332 
1333 	if (xs->datalen == 0)
1334 		return (0);
1335 
1336 	/* Map data buffer into DMA segments */
1337 	if (bus_dmamap_load(sc->sc_dmat, ccb->c_dmam, xs->data, xs->datalen,
1338 	    NULL, (xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : 0)))
1339 		return (1);
1340 	bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0, ccb->c_dmam->dm_mapsize,
1341 	    xs->flags & SCSI_DATA_IN ? BUS_DMASYNC_PREREAD :
1342 	    BUS_DMASYNC_PREWRITE);
1343 
1344 	if ((nsegs = ccb->c_dmam->dm_nsegs) > IPS_MAXSGS)
1345 		return (1);
1346 
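	/*
	 * A single segment is addressed directly (sgcnt == 0); multiple
	 * segments go through the S/G array appended to the command block.
	 */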
1347 	if (nsegs > 1) {
1348 		cmd->sgcnt = nsegs;
1349 		cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb,
1350 		    sg));
1351 
1352 		/* Fill in scatter-gather array */
1353 		for (i = 0; i < nsegs; i++) {
1354 			sg[i].addr = htole32(ccb->c_dmam->dm_segs[i].ds_addr);
1355 			sg[i].size = htole32(ccb->c_dmam->dm_segs[i].ds_len);
1356 		}
1357 	} else {
1358 		cmd->sgcnt = 0;
1359 		cmd->sgaddr = htole32(ccb->c_dmam->dm_segs[0].ds_addr);
1360 	}
1361 
1362 	return (0);
1363 }
1364 
1365 void
1366 ips_start_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
1367 {
1368 	int ispoll = xs->flags & SCSI_POLL;

1369 	ccb->c_flags = xs->flags;
1370 	ccb->c_xfer = xs;
1371 
1372 	if (!ispoll) {
1373 		timeout_set(&xs->stimeout, ips_timeout, ccb);
1374 		timeout_add_msec(&xs->stimeout, xs->timeout);
1375 	}
1376 
1377 	/*
1378 	 * Return value not used here because ips_cmd() must complete
1379 	 * scsi_xfer on any failure and SCSI layer will handle possible
1380 	 * errors.
1381 	 */
1382 	ips_cmd(sc, ccb);
1383 }
1384 
1385 int
1386 ips_cmd(struct ips_softc *sc, struct ips_ccb *ccb)
1387 {
1388 	struct ips_cmd *cmd = ccb->c_cmdbva;
1389 	int s, error = 0;
1390 
1391 	DPRINTF(IPS_D_XFER, ("%s: ips_cmd: id 0x%02x, flags 0x%x, xs %p, "
1392 	    "code 0x%02x, drive %d, sgcnt %d, lba %d, sgaddr 0x%08x, "
1393 	    "seccnt %d\n", sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags,
1394 	    ccb->c_xfer, cmd->code, cmd->drive, cmd->sgcnt, letoh32(cmd->lba),
1395 	    letoh32(cmd->sgaddr), letoh16(cmd->seccnt)));
1396 
1397 	cmd->id = ccb->c_id;
1398 
1399 	/* Post command to controller and optionally wait for completion */
1400 	s = splbio();
1401 	ips_exec(sc, ccb);
1402 	ccb->c_state = IPS_CCB_QUEUED;
1403 	if (ccb->c_flags & SCSI_POLL)
1404 		error = ips_poll(sc, ccb);
1405 	splx(s);
1406 
1407 	return (error);
1408 }
1409 
1410 int
1411 ips_poll(struct ips_softc *sc, struct ips_ccb *ccb)
1412 {
1413 	int error, msecs, usecs;
1414 
1415 	splassert(IPL_BIO);
1416 
1417 	if (ccb->c_flags & SCSI_NOSLEEP) {
1418 		/* busy-wait */
1419 		DPRINTF(IPS_D_XFER, ("%s: ips_poll: busy-wait\n",
1420 		    sc->sc_dev.dv_xname));
1421 
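		/*
		 * Spin for up to one second, checking for completion every
		 * 100 microseconds; ips_intr() harvests the status words.
		 */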
1422 		for (usecs = 1000000; usecs > 0; usecs -= 100) {
1423 			delay(100);
1424 			ips_intr(sc);
1425 			if (ccb->c_state == IPS_CCB_DONE)
1426 				break;
1427 		}
1428 	} else {
1429 		/* sleep */
1430 		msecs = ccb->c_xfer ? ccb->c_xfer->timeout : IPS_TIMEOUT;
1431 
1432 		DPRINTF(IPS_D_XFER, ("%s: ips_poll: sleep %d ms\n",
1433 		    sc->sc_dev.dv_xname, msecs));
1434 		tsleep_nsec(ccb, PRIBIO + 1, "ipscmd", MSEC_TO_NSEC(msecs));
1435 	}
1436 	DPRINTF(IPS_D_XFER, ("%s: ips_poll: state %d\n", sc->sc_dev.dv_xname,
1437 	    ccb->c_state));
1438 
1439 	if (ccb->c_state != IPS_CCB_DONE)
1440 		/*
1441 		 * Command never completed. Fake hardware status byte
1442 		 * to indicate timeout.
1443 		 */
1444 		ccb->c_stat = IPS_STAT_TIMO;
1445 
1446 	ips_done(sc, ccb);
1447 	error = ccb->c_error;
1448 
1449 	return (error);
1450 }
1451 
1452 void
1453 ips_done(struct ips_softc *sc, struct ips_ccb *ccb)
1454 {
1455 	splassert(IPL_BIO);
1456 
1457 	DPRINTF(IPS_D_XFER, ("%s: ips_done: id 0x%02x, flags 0x%x, xs %p\n",
1458 	    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags, ccb->c_xfer));
1459 
1460 	ccb->c_error = ips_error(sc, ccb);
1461 	ccb->c_done(sc, ccb);
1462 }
1463 
1464 void
1465 ips_done_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1466 {
1467 	struct scsi_xfer *xs = ccb->c_xfer;
1468 
1469 	if (!(xs->flags & SCSI_POLL))
1470 		timeout_del(&xs->stimeout);
1471 
1472 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1473 		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
1474 		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
1475 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1476 		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
1477 	}
1478 
1479 	xs->resid = 0;
1480 	xs->error = ips_error_xs(sc, ccb);
1481 	scsi_done(xs);
1482 }
1483 
1484 void
1485 ips_done_pt(struct ips_softc *sc, struct ips_ccb *ccb)
1486 {
1487 	struct scsi_xfer *xs = ccb->c_xfer;
1488 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1489 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1490 	int done = letoh16(dcdb->datalen);
1491 
1492 	if (!(xs->flags & SCSI_POLL))
1493 		timeout_del(&xs->stimeout);
1494 
1495 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1496 		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
1497 		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
1498 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1499 		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
1500 	}
1501 
1502 	if (done && done < xs->datalen)
1503 		xs->resid = xs->datalen - done;
1504 	else
1505 		xs->resid = 0;
1506 	xs->error = ips_error_xs(sc, ccb);
1507 	xs->status = dcdb->status;
1508 
1509 	if (xs->error == XS_SENSE)
1510 		memcpy(&xs->sense, dcdb->sense, MIN(sizeof(xs->sense),
1511 		    sizeof(dcdb->sense)));
1512 
1513 	if (xs->cmd->opcode == INQUIRY && xs->error == XS_NOERROR) {
1514 		int type = ((struct scsi_inquiry_data *)xs->data)->device &
1515 		    SID_TYPE;
1516 
1517 		if (type == T_DIRECT)
1518 			/* mask physical drives */
1519 			xs->error = XS_DRIVER_STUFFUP;
1520 	}
1521 
1522 	scsi_done(xs);
1523 }
1524 
1525 void
1526 ips_done_mgmt(struct ips_softc *sc, struct ips_ccb *ccb)
1527 {
1528 	if (ccb->c_flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
1529 		bus_dmamap_sync(sc->sc_dmat, sc->sc_infom.dm_map, 0,
1530 		    sc->sc_infom.dm_map->dm_mapsize,
1531 		    ccb->c_flags & SCSI_DATA_IN ? BUS_DMASYNC_POSTREAD :
1532 		    BUS_DMASYNC_POSTWRITE);
1533 	scsi_io_put(&sc->sc_iopool, ccb);
1534 }
1535 
1536 int
1537 ips_error(struct ips_softc *sc, struct ips_ccb *ccb)
1538 {
1539 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1540 	struct ips_cmd *cmd = &cmdb->cmd;
1541 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1542 	struct scsi_xfer *xs = ccb->c_xfer;
1543 	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);
1544 
1545 	if (gsc == IPS_STAT_OK)
1546 		return (0);
1547 
1548 	DPRINTF(IPS_D_ERR, ("%s: ips_error: stat 0x%02x, estat 0x%02x, "
1549 	    "cmd code 0x%02x, drive %d, sgcnt %d, lba %u, seccnt %d",
1550 	    sc->sc_dev.dv_xname, ccb->c_stat, ccb->c_estat, cmd->code,
1551 	    cmd->drive, cmd->sgcnt, letoh32(cmd->lba), letoh16(cmd->seccnt)));
1552 	if (cmd->code == IPS_CMD_DCDB || cmd->code == IPS_CMD_DCDB_SG) {
1553 		int i;
1554 
1555 		DPRINTF(IPS_D_ERR, (", dcdb device 0x%02x, attr 0x%02x, "
1556 		    "datalen %d, sgcnt %d, status 0x%02x",
1557 		    dcdb->device, dcdb->attr, letoh16(dcdb->datalen),
1558 		    dcdb->sgcnt, dcdb->status));
1559 
1560 		DPRINTF(IPS_D_ERR, (", cdb"));
1561 		for (i = 0; i < dcdb->cdblen; i++)
1562 			DPRINTF(IPS_D_ERR, (" %x", dcdb->cdb[i]));
1563 		if (ccb->c_estat == IPS_ESTAT_CKCOND) {
1564 			DPRINTF(IPS_D_ERR, (", sense"));
1565 			for (i = 0; i < dcdb->senselen; i++)
1566 				DPRINTF(IPS_D_ERR, (" %x", dcdb->sense[i]));
1567 		}
1568 	}
1569 	DPRINTF(IPS_D_ERR, ("\n"));
1570 
1571 	switch (gsc) {
1572 	case IPS_STAT_RECOV:
1573 		return (0);
1574 	case IPS_STAT_INVOP:
1575 	case IPS_STAT_INVCMD:
1576 	case IPS_STAT_INVPARM:
1577 		return (EINVAL);
1578 	case IPS_STAT_BUSY:
1579 		return (EBUSY);
1580 	case IPS_STAT_TIMO:
1581 		return (ETIMEDOUT);
1582 	case IPS_STAT_PDRVERR:
1583 		switch (ccb->c_estat) {
1584 		case IPS_ESTAT_SELTIMO:
1585 			return (ENODEV);
1586 		case IPS_ESTAT_OURUN:
1587 			if (xs && letoh16(dcdb->datalen) < xs->datalen)
1588 				/* underrun */
1589 				return (0);
1590 			break;
1591 		case IPS_ESTAT_RECOV:
1592 			return (0);
1593 		}
1594 		break;
1595 	}
1596 
1597 	return (EIO);
1598 }
1599 
1600 int
1601 ips_error_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1602 {
1603 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1604 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1605 	struct scsi_xfer *xs = ccb->c_xfer;
1606 	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);
1607 
1608 	/* Map hardware error codes to SCSI ones */
1609 	switch (gsc) {
1610 	case IPS_STAT_OK:
1611 	case IPS_STAT_RECOV:
1612 		return (XS_NOERROR);
1613 	case IPS_STAT_BUSY:
1614 		return (XS_BUSY);
1615 	case IPS_STAT_TIMO:
1616 		return (XS_TIMEOUT);
1617 	case IPS_STAT_PDRVERR:
1618 		switch (ccb->c_estat) {
1619 		case IPS_ESTAT_SELTIMO:
1620 			return (XS_SELTIMEOUT);
1621 		case IPS_ESTAT_OURUN:
1622 			if (xs && letoh16(dcdb->datalen) < xs->datalen)
1623 				/* underrun */
1624 				return (XS_NOERROR);
1625 			break;
1626 		case IPS_ESTAT_HOSTRST:
1627 		case IPS_ESTAT_DEVRST:
1628 			return (XS_RESET);
1629 		case IPS_ESTAT_RECOV:
1630 			return (XS_NOERROR);
1631 		case IPS_ESTAT_CKCOND:
1632 			return (XS_SENSE);
1633 		}
1634 		break;
1635 	}
1636 
1637 	return (XS_DRIVER_STUFFUP);
1638 }
1639 
1640 int
1641 ips_intr(void *arg)
1642 {
1643 	struct ips_softc *sc = arg;
1644 	struct ips_ccb *ccb;
1645 	u_int32_t status;
1646 	int id;
1647 
1648 	DPRINTF(IPS_D_XFER, ("%s: ips_intr", sc->sc_dev.dv_xname));
1649 	if (!ips_isintr(sc)) {
1650 		DPRINTF(IPS_D_XFER, (": not ours\n"));
1651 		return (0);
1652 	}
1653 	DPRINTF(IPS_D_XFER, ("\n"));
1654 
1655 	/* Process completed commands */
1656 	while ((status = ips_status(sc)) != 0xffffffff) {
1657 		DPRINTF(IPS_D_XFER, ("%s: ips_intr: status 0x%08x\n",
1658 		    sc->sc_dev.dv_xname, status));
1659 
1660 		id = IPS_STAT_ID(status);
1661 		if (id >= sc->sc_nccbs) {
1662 			DPRINTF(IPS_D_ERR, ("%s: ips_intr: invalid id %d\n",
1663 			    sc->sc_dev.dv_xname, id));
1664 			continue;
1665 		}
1666 
1667 		ccb = &sc->sc_ccb[id];
1668 		if (ccb->c_state != IPS_CCB_QUEUED) {
1669 			DPRINTF(IPS_D_ERR, ("%s: ips_intr: cmd 0x%02x not "
1670 			    "queued, state %d, status 0x%08x\n",
1671 			    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_state,
1672 			    status));
1673 			continue;
1674 		}
1675 
1676 		ccb->c_state = IPS_CCB_DONE;
1677 		ccb->c_stat = IPS_STAT_BASIC(status);
1678 		ccb->c_estat = IPS_STAT_EXT(status);
1679 
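		/*
		 * Polled commands are finished by ips_poll(), which is just
		 * woken up here; all other commands are completed directly.
		 */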
1680 		if (ccb->c_flags & SCSI_POLL) {
1681 			wakeup(ccb);
1682 		} else {
1683 			ips_done(sc, ccb);
1684 		}
1685 	}
1686 
1687 	return (1);
1688 }
1689 
1690 void
1691 ips_timeout(void *arg)
1692 {
1693 	struct ips_ccb *ccb = arg;
1694 	struct ips_softc *sc = ccb->c_sc;
1695 	struct scsi_xfer *xs = ccb->c_xfer;
1696 	int s;
1697 
1698 	s = splbio();
1699 	if (xs)
1700 		sc_print_addr(xs->sc_link);
1701 	else
1702 		printf("%s: ", sc->sc_dev.dv_xname);
1703 	printf("timeout\n");
1704 
1705 	/*
1706 	 * Command never completed. Fake hardware status byte
1707 	 * to indicate timeout.
1708 	 * XXX: need to remove command from controller.
1709 	 */
1710 	ccb->c_stat = IPS_STAT_TIMO;
1711 	ips_done(sc, ccb);
1712 	splx(s);
1713 }
1714 
1715 int
1716 ips_getadapterinfo(struct ips_softc *sc, int flags)
1717 {
1718 	struct ips_ccb *ccb;
1719 	struct ips_cmd *cmd;
1720 
1721 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1722 	if (ccb == NULL)
1723 		return (1);
1724 
1725 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1726 	ccb->c_done = ips_done_mgmt;
1727 
1728 	cmd = ccb->c_cmdbva;
1729 	cmd->code = IPS_CMD_GETADAPTERINFO;
1730 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1731 	    adapter));
1732 
1733 	return (ips_cmd(sc, ccb));
1734 }
1735 
1736 int
1737 ips_getdriveinfo(struct ips_softc *sc, int flags)
1738 {
1739 	struct ips_ccb *ccb;
1740 	struct ips_cmd *cmd;
1741 
1742 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1743 	if (ccb == NULL)
1744 		return (1);
1745 
1746 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1747 	ccb->c_done = ips_done_mgmt;
1748 
1749 	cmd = ccb->c_cmdbva;
1750 	cmd->code = IPS_CMD_GETDRIVEINFO;
1751 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1752 	    drive));
1753 
1754 	return (ips_cmd(sc, ccb));
1755 }
1756 
1757 int
1758 ips_getconf(struct ips_softc *sc, int flags)
1759 {
1760 	struct ips_ccb *ccb;
1761 	struct ips_cmd *cmd;
1762 
1763 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1764 	if (ccb == NULL)
1765 		return (1);
1766 
1767 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1768 	ccb->c_done = ips_done_mgmt;
1769 
1770 	cmd = ccb->c_cmdbva;
1771 	cmd->code = IPS_CMD_READCONF;
1772 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1773 	    conf));
1774 
1775 	return (ips_cmd(sc, ccb));
1776 }
1777 
1778 int
1779 ips_getpg5(struct ips_softc *sc, int flags)
1780 {
1781 	struct ips_ccb *ccb;
1782 	struct ips_cmd *cmd;
1783 
1784 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1785 	if (ccb == NULL)
1786 		return (1);
1787 
1788 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1789 	ccb->c_done = ips_done_mgmt;
1790 
1791 	cmd = ccb->c_cmdbva;
1792 	cmd->code = IPS_CMD_RWNVRAM;
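	/* The drive field carries the NVRAM page number (5) here. */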
1793 	cmd->drive = 5;
1794 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1795 	    pg5));
1796 
1797 	return (ips_cmd(sc, ccb));
1798 }
1799 
1800 #if NBIO > 0
1801 int
1802 ips_getrblstat(struct ips_softc *sc, int flags)
1803 {
1804 	struct ips_ccb *ccb;
1805 	struct ips_cmd *cmd;
1806 
1807 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1808 	if (ccb == NULL)
1809 		return (1);
1810 
1811 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1812 	ccb->c_done = ips_done_mgmt;
1813 
1814 	cmd = ccb->c_cmdbva;
1815 	cmd->code = IPS_CMD_REBUILDSTATUS;
1816 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1817 	    rblstat));
1818 
1819 	return (ips_cmd(sc, ccb));
1820 }
1821 
1822 int
1823 ips_setstate(struct ips_softc *sc, int chan, int target, int state, int flags)
1824 {
1825 	struct ips_ccb *ccb;
1826 	struct ips_cmd *cmd;
1827 
1828 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1829 	if (ccb == NULL)
1830 		return (1);
1831 
1832 	ccb->c_flags = SCSI_POLL | flags;
1833 	ccb->c_done = ips_done_mgmt;
1834 
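	/*
	 * SETSTATE reuses generic command frame fields: the channel goes in
	 * "drive", the target in "sgcnt" and the new state in "seg4g".
	 */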
1835 	cmd = ccb->c_cmdbva;
1836 	cmd->code = IPS_CMD_SETSTATE;
1837 	cmd->drive = chan;
1838 	cmd->sgcnt = target;
1839 	cmd->seg4g = state;
1840 
1841 	return (ips_cmd(sc, ccb));
1842 }
1843 
1844 int
1845 ips_rebuild(struct ips_softc *sc, int chan, int target, int nchan,
1846     int ntarget, int flags)
1847 {
1848 	struct ips_ccb *ccb;
1849 	struct ips_cmd *cmd;
1850 
1851 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1852 	if (ccb == NULL)
1853 		return (1);
1854 
1855 	ccb->c_flags = SCSI_POLL | flags;
1856 	ccb->c_done = ips_done_mgmt;
1857 
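	/*
	 * REBUILD likewise packs its arguments into generic fields: the
	 * first channel/target pair in "drive"/"sgcnt" and the second pair
	 * in "seccnt".
	 */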
1858 	cmd = ccb->c_cmdbva;
1859 	cmd->code = IPS_CMD_REBUILD;
1860 	cmd->drive = chan;
1861 	cmd->sgcnt = target;
1862 	cmd->seccnt = htole16(ntarget << 8 | nchan);
1863 
1864 	return (ips_cmd(sc, ccb));
1865 }
1866 #endif	/* NBIO > 0 */
1867 
1868 void
1869 ips_copperhead_exec(struct ips_softc *sc, struct ips_ccb *ccb)
1870 {
1871 	u_int32_t reg;
1872 	int timeout;
1873 
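	/*
	 * Wait up to ~10ms for the command channel semaphore to clear
	 * before posting the command block address and the start bit.
	 */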
1874 	for (timeout = 100; timeout-- > 0; delay(100)) {
1875 		reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC);
1876 		if ((reg & IPS_REG_CCC_SEM) == 0)
1877 			break;
1878 	}
1879 	if (timeout < 0) {
1880 		printf("%s: semaphore timeout\n", sc->sc_dev.dv_xname);
1881 		return;
1882 	}
1883 
1884 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCSA, ccb->c_cmdbpa);
1885 	bus_space_write_2(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC,
1886 	    IPS_REG_CCC_START);
1887 }
1888 
1889 void
1890 ips_copperhead_intren(struct ips_softc *sc)
1891 {
1892 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, IPS_REG_HIS_EN);
1893 }
1894 
1895 int
1896 ips_copperhead_isintr(struct ips_softc *sc)
1897 {
1898 	u_int8_t reg;
1899 
1900 	reg = bus_space_read_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS);
1901 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, reg);
1902 	if (reg != 0xff && (reg & IPS_REG_HIS_SCE))
1903 		return (1);
1904 
1905 	return (0);
1906 }
1907 
1908 u_int32_t
1909 ips_copperhead_status(struct ips_softc *sc)
1910 {
1911 	u_int32_t sqhead, sqtail, status;
1912 
1913 	sqhead = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH);
1914 	DPRINTF(IPS_D_XFER, ("%s: sqhead 0x%08x, sqtail 0x%08x\n",
1915 	    sc->sc_dev.dv_xname, sqhead, sc->sc_sqtail));
1916 
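	/*
	 * Advance the tail by one entry, wrapping at the end of the ring.
	 * If it catches up with the head the queue is empty and 0xffffffff
	 * is returned, which ips_intr() treats as "no more status words".
	 */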
1917 	sqtail = sc->sc_sqtail + sizeof(u_int32_t);
1918 	if (sqtail == sc->sc_sqm.dm_paddr + IPS_SQSZ)
1919 		sqtail = sc->sc_sqm.dm_paddr;
1920 	if (sqtail == sqhead)
1921 		return (0xffffffff);
1922 
1923 	sc->sc_sqtail = sqtail;
1924 	if (++sc->sc_sqidx == IPS_MAXCMDS)
1925 		sc->sc_sqidx = 0;
1926 	status = letoh32(sc->sc_sqbuf[sc->sc_sqidx]);
1927 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT, sqtail);
1928 
1929 	return (status);
1930 }
1931 
1932 void
1933 ips_morpheus_exec(struct ips_softc *sc, struct ips_ccb *ccb)
1934 {
1935 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_IQP, ccb->c_cmdbpa);
1936 }
1937 
1938 void
1939 ips_morpheus_intren(struct ips_softc *sc)
1940 {
1941 	u_int32_t reg;
1942 
1943 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM);
1944 	reg &= ~IPS_REG_OIM_DS;
1945 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM, reg);
1946 }
1947 
1948 int
1949 ips_morpheus_isintr(struct ips_softc *sc)
1950 {
1951 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIS) &
1952 	    IPS_REG_OIS_PEND);
1953 }
1954 
1955 u_int32_t
1956 ips_morpheus_status(struct ips_softc *sc)
1957 {
1958 	u_int32_t reg;
1959 
1960 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OQP);
1961 	DPRINTF(IPS_D_XFER, ("%s: status 0x%08x\n", sc->sc_dev.dv_xname, reg));
1962 
1963 	return (reg);
1964 }
1965 
1966 struct ips_ccb *
1967 ips_ccb_alloc(struct ips_softc *sc, int n)
1968 {
1969 	struct ips_ccb *ccb;
1970 	int i;
1971 
1972 	if ((ccb = mallocarray(n, sizeof(*ccb), M_DEVBUF,
1973 	    M_NOWAIT | M_ZERO)) == NULL)
1974 		return (NULL);
1975 
1976 	for (i = 0; i < n; i++) {
1977 		ccb[i].c_sc = sc;
1978 		ccb[i].c_id = i;
1979 		ccb[i].c_cmdbva = (char *)sc->sc_cmdbm.dm_vaddr +
1980 		    i * sizeof(struct ips_cmdb);
1981 		ccb[i].c_cmdbpa = sc->sc_cmdbm.dm_paddr +
1982 		    i * sizeof(struct ips_cmdb);
1983 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, IPS_MAXSGS,
1984 		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1985 		    &ccb[i].c_dmam))
1986 			goto fail;
1987 	}
1988 
1989 	return (ccb);
1990 fail:
1991 	for (; i > 0; i--)
1992 		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
1993 	free(ccb, M_DEVBUF, n * sizeof(*ccb));
1994 	return (NULL);
1995 }
1996 
1997 void
1998 ips_ccb_free(struct ips_softc *sc, struct ips_ccb *ccb, int n)
1999 {
2000 	int i;
2001 
2002 	for (i = 0; i < n; i++)
2003 		bus_dmamap_destroy(sc->sc_dmat, ccb[i].c_dmam);
2004 	free(ccb, M_DEVBUF, n * sizeof(*ccb));
2005 }
2006 
2007 void *
2008 ips_ccb_get(void *xsc)
2009 {
2010 	struct ips_softc *sc = xsc;
2011 	struct ips_ccb *ccb;
2012 
2013 	mtx_enter(&sc->sc_ccb_mtx);
2014 	if ((ccb = SLIST_FIRST(&sc->sc_ccbq_free)) != NULL) {
2015 		SLIST_REMOVE_HEAD(&sc->sc_ccbq_free, c_link);
2016 		ccb->c_flags = 0;
2017 		ccb->c_xfer = NULL;
2018 		bzero(ccb->c_cmdbva, sizeof(struct ips_cmdb));
2019 	}
2020 	mtx_leave(&sc->sc_ccb_mtx);
2021 
2022 	return (ccb);
2023 }
2024 
2025 void
2026 ips_ccb_put(void *xsc, void *xccb)
2027 {
2028 	struct ips_softc *sc = xsc;
2029 	struct ips_ccb *ccb = xccb;
2030 
2031 	ccb->c_state = IPS_CCB_FREE;
2032 	mtx_enter(&sc->sc_ccb_mtx);
2033 	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, ccb, c_link);
2034 	mtx_leave(&sc->sc_ccb_mtx);
2035 }
2036 
2037 int
2038 ips_dmamem_alloc(struct dmamem *dm, bus_dma_tag_t tag, bus_size_t size)
2039 {
2040 	int nsegs;
2041 
2042 	dm->dm_tag = tag;
2043 	dm->dm_size = size;
2044 
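	/*
	 * Standard bus_dma(9) sequence: create a map, allocate one
	 * physically contiguous segment, map it into kernel virtual memory
	 * and load the map so both virtual and bus addresses are known.
	 */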
2045 	if (bus_dmamap_create(tag, size, 1, size, 0,
2046 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map))
2047 		return (1);
2048 	if (bus_dmamem_alloc(tag, size, 0, 0, &dm->dm_seg, 1, &nsegs,
2049 	    BUS_DMA_NOWAIT))
2050 		goto fail1;
2051 	if (bus_dmamem_map(tag, &dm->dm_seg, 1, size, (caddr_t *)&dm->dm_vaddr,
2052 	    BUS_DMA_NOWAIT))
2053 		goto fail2;
2054 	if (bus_dmamap_load(tag, dm->dm_map, dm->dm_vaddr, size, NULL,
2055 	    BUS_DMA_NOWAIT))
2056 		goto fail3;
2057 
2058 	return (0);
2059 
2060 fail3:
2061 	bus_dmamem_unmap(tag, dm->dm_vaddr, size);
2062 fail2:
2063 	bus_dmamem_free(tag, &dm->dm_seg, 1);
2064 fail1:
2065 	bus_dmamap_destroy(tag, dm->dm_map);
2066 	return (1);
2067 }
2068 
2069 void
2070 ips_dmamem_free(struct dmamem *dm)
2071 {
2072 	bus_dmamap_unload(dm->dm_tag, dm->dm_map);
2073 	bus_dmamem_unmap(dm->dm_tag, dm->dm_vaddr, dm->dm_size);
2074 	bus_dmamem_free(dm->dm_tag, &dm->dm_seg, 1);
2075 	bus_dmamap_destroy(dm->dm_tag, dm->dm_map);
2076 }
2077