xref: /netbsd-src/sys/dev/pci/ips.c (revision 53b02e147d4ed531c0d2a5ca9b3e8026ba3e99b5)
1 /*	$NetBSD: ips.c,v 1.4 2021/08/07 16:19:14 thorpej Exp $	*/
2 /*	$OpenBSD: ips.c,v 1.113 2016/08/14 04:08:03 dlg Exp $	*/
3 
4 /*-
5  * Copyright (c) 2017 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  * POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * Copyright (c) 2006, 2007, 2009 Alexander Yurchenko <grange@openbsd.org>
32  *
33  * Permission to use, copy, modify, and distribute this software for any
34  * purpose with or without fee is hereby granted, provided that the above
35  * copyright notice and this permission notice appear in all copies.
36  *
37  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
38  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
39  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
40  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
41  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
42  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
43  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
44  */
45 
46 /*
47  * IBM (Adaptec) ServeRAID controllers driver.
48  */
49 
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: ips.c,v 1.4 2021/08/07 16:19:14 thorpej Exp $");
52 
53 #include "bio.h"
54 
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/device.h>
58 #include <sys/kernel.h>
59 #include <sys/queue.h>
60 #include <sys/buf.h>
61 #include <sys/endian.h>
62 #include <sys/conf.h>
63 #include <sys/malloc.h>
64 #include <sys/ioctl.h>
65 #include <sys/kthread.h>
66 
67 #include <sys/bus.h>
68 #include <sys/intr.h>
69 
70 #include <dev/scsipi/scsi_all.h>
71 #include <dev/scsipi/scsipi_all.h>
72 #include <dev/scsipi/scsi_disk.h>
73 #include <dev/scsipi/scsipi_disk.h>
74 #include <dev/scsipi/scsiconf.h>
75 
76 #include <dev/biovar.h>
77 #include <dev/sysmon/sysmonvar.h>
78 #include <sys/envsys.h>
79 
80 #include <dev/pci/pcireg.h>
81 #include <dev/pci/pcivar.h>
82 #include <dev/pci/pcidevs.h>
83 
84 /* Debug levels */
85 #define IPS_D_ERR	0x0001	/* errors */
86 #define IPS_D_INFO	0x0002	/* information */
87 #define IPS_D_XFER	0x0004	/* transfers */
88 
89 #ifdef IPS_DEBUG
90 #define DPRINTF(a, b)	do { if (ips_debug & (a)) printf b; } while (0)
91 int ips_debug = IPS_D_ERR;
92 #else
93 #define DPRINTF(a, b)
94 #endif
95 
96 #define IPS_MAXDRIVES		8
97 #define IPS_MAXCHANS		4
98 #define IPS_MAXTARGETS		16
99 #define IPS_MAXCHUNKS		16
100 #define IPS_MAXCMDS		128
101 
102 #define IPS_MAXFER		(64 * 1024)
103 #define IPS_MAXSGS		16
104 #define IPS_MAXCDB		12
105 
106 #define IPS_SECSZ		512
107 #define IPS_NVRAMPGSZ		128
108 #define IPS_SQSZ		(IPS_MAXCMDS * sizeof(u_int32_t))
109 
110 #define	IPS_TIMEOUT		60000	/* ms */
111 
112 /* Command codes */
113 #define IPS_CMD_READ		0x02
114 #define IPS_CMD_WRITE		0x03
115 #define IPS_CMD_DCDB		0x04
116 #define IPS_CMD_GETADAPTERINFO	0x05
117 #define IPS_CMD_FLUSH		0x0a
118 #define IPS_CMD_REBUILDSTATUS	0x0c
119 #define IPS_CMD_SETSTATE	0x10
120 #define IPS_CMD_REBUILD		0x16
121 #define IPS_CMD_ERRORTABLE	0x17
122 #define IPS_CMD_GETDRIVEINFO	0x19
123 #define IPS_CMD_RESETCHAN	0x1a
124 #define IPS_CMD_DOWNLOAD	0x20
125 #define IPS_CMD_RWBIOSFW	0x22
126 #define IPS_CMD_READCONF	0x38
127 #define IPS_CMD_GETSUBSYS	0x40
128 #define IPS_CMD_CONFIGSYNC	0x58
129 #define IPS_CMD_READ_SG		0x82
130 #define IPS_CMD_WRITE_SG	0x83
131 #define IPS_CMD_DCDB_SG		0x84
132 #define IPS_CMD_EDCDB		0x95
133 #define IPS_CMD_EDCDB_SG	0x96
134 #define IPS_CMD_RWNVRAMPAGE	0xbc
135 #define IPS_CMD_GETVERINFO	0xc6
136 #define IPS_CMD_FFDC		0xd7
137 #define IPS_CMD_SG		0x80
138 #define IPS_CMD_RWNVRAM		0xbc
139 
140 /* DCDB attributes */
141 #define IPS_DCDB_DATAIN		0x01	/* data input */
142 #define IPS_DCDB_DATAOUT	0x02	/* data output */
143 #define IPS_DCDB_XFER64K	0x08	/* 64K transfer */
144 #define IPS_DCDB_TIMO10		0x10	/* 10 secs timeout */
145 #define IPS_DCDB_TIMO60		0x20	/* 60 secs timeout */
146 #define IPS_DCDB_TIMO20M	0x30	/* 20 mins timeout */
147 #define IPS_DCDB_NOAUTOREQSEN	0x40	/* no auto request sense */
148 #define IPS_DCDB_DISCON		0x80	/* disconnect allowed */
149 
150 /* Register definitions */
151 #define IPS_REG_HIS		0x08	/* host interrupt status */
152 #define IPS_REG_HIS_SCE			0x01	/* status channel enqueue */
153 #define IPS_REG_HIS_EN			0x80	/* enable interrupts */
154 #define IPS_REG_CCSA		0x10	/* command channel system address */
155 #define IPS_REG_CCC		0x14	/* command channel control */
156 #define IPS_REG_CCC_SEM			0x0008	/* semaphore */
157 #define IPS_REG_CCC_START		0x101a	/* start command */
158 #define IPS_REG_SQH		0x20	/* status queue head */
159 #define IPS_REG_SQT		0x24	/* status queue tail */
160 #define IPS_REG_SQE		0x28	/* status queue end */
161 #define IPS_REG_SQS		0x2c	/* status queue start */
162 
163 #define IPS_REG_OIS		0x30	/* outbound interrupt status */
164 #define IPS_REG_OIS_PEND		0x0008	/* interrupt is pending */
165 #define IPS_REG_OIM		0x34	/* outbound interrupt mask */
166 #define IPS_REG_OIM_DS			0x0008	/* disable interrupts */
167 #define IPS_REG_IQP		0x40	/* inbound queue port */
168 #define IPS_REG_OQP		0x44	/* outbound queue port */
169 
170 /* Status word fields */
171 #define IPS_STAT_ID(x)		(((x) >> 8) & 0xff)	/* command id */
172 #define IPS_STAT_BASIC(x)	(((x) >> 16) & 0xff)	/* basic status */
173 #define IPS_STAT_EXT(x)		(((x) >> 24) & 0xff)	/* ext status */
174 #define IPS_STAT_GSC(x)		((x) & 0x0f)
175 
176 /* Basic status codes */
177 #define IPS_STAT_OK		0x00	/* success */
178 #define IPS_STAT_RECOV		0x01	/* recovered error */
179 #define IPS_STAT_INVOP		0x03	/* invalid opcode */
180 #define IPS_STAT_INVCMD		0x04	/* invalid command block */
181 #define IPS_STAT_INVPARM	0x05	/* invalid parameters block */
182 #define IPS_STAT_BUSY		0x08	/* busy */
183 #define IPS_STAT_CMPLERR	0x0c	/* completed with error */
184 #define IPS_STAT_LDERR		0x0d	/* logical drive error */
185 #define IPS_STAT_TIMO		0x0e	/* timeout */
186 #define IPS_STAT_PDRVERR	0x0f	/* physical drive error */
187 
188 /* Extended status codes */
189 #define IPS_ESTAT_SELTIMO	0xf0	/* select timeout */
190 #define IPS_ESTAT_OURUN		0xf2	/* over/underrun */
191 #define IPS_ESTAT_HOSTRST	0xf7	/* host reset */
192 #define IPS_ESTAT_DEVRST	0xf8	/* device reset */
193 #define IPS_ESTAT_RECOV		0xfc	/* recovered error */
194 #define IPS_ESTAT_CKCOND	0xff	/* check condition */
195 
196 #define IPS_IOSIZE		128	/* max space size to map */
197 
/*
 * Command frame submitted to the adapter.  Multi-byte fields are
 * little-endian; callers byte-swap with htole32()/htole16().
 */
struct ips_cmd {
	u_int8_t	code;	/* IPS_CMD_* opcode (| IPS_CMD_SG for S/G) */
	u_int8_t	id;	/* command id -- presumably CCB c_id; verify */
	u_int8_t	drive;	/* target logical drive number */
	u_int8_t	sgcnt;	/* number of scatter-gather elements */
	u_int32_t	lba;	/* starting sector (LE) */
	u_int32_t	sgaddr;	/* data or S/G list address -- TODO confirm */
	u_int16_t	seccnt;	/* sector count (LE) */
	u_int8_t	seg4g;
	u_int8_t	esg;
	u_int32_t	ccsar;
	u_int32_t	cccr;
};
212 
/* Direct CDB (SCSI pass-through) frame */
struct ips_dcdb {
	u_int8_t	device;		/* target address -- TODO confirm encoding */
	u_int8_t	attr;		/* IPS_DCDB_* attribute flags */
	u_int16_t	datalen;	/* transfer length -- presumably LE; verify */
	u_int32_t	sgaddr;		/* data or S/G list address -- verify */
	u_int8_t	cdblen;		/* valid bytes in cdb[] */
	u_int8_t	senselen;	/* valid bytes in sense[] */
	u_int8_t	sgcnt;		/* number of scatter-gather elements */
	u_int8_t	__reserved1;
	u_int8_t	cdb[IPS_MAXCDB];	/* SCSI command descriptor block */
	u_int8_t	sense[64];	/* auto request-sense data */
	u_int8_t	status;		/* SCSI status byte */
	u_int8_t	__reserved2[3];
};
228 
/* Scatter-gather array element, as consumed by the adapter */
struct ips_sg {
	u_int32_t	addr;	/* segment address (LE) */
	u_int32_t	size;	/* segment length in bytes (LE) */
};
234 
/*
 * Per-command slice of the DMA-able command buffer: the command frame,
 * an optional pass-through DCDB and the scatter-gather list.  sc_cmdbm
 * holds IPS_MAXCMDS of these back to back.
 */
struct ips_cmdb {
	struct ips_cmd	cmd;
	struct ips_dcdb	dcdb;
	struct ips_sg	sg[IPS_MAXSGS];
};
241 
242 /* Data frames */
/* IPS_CMD_GETADAPTERINFO reply */
struct ips_adapterinfo {
	u_int8_t	drivecnt;
	u_int8_t	miscflag;
	u_int8_t	sltflag;
	u_int8_t	bstflag;
	u_int8_t	pwrchgcnt;
	u_int8_t	wrongaddrcnt;
	u_int8_t	unidentcnt;
	u_int8_t	nvramdevchgcnt;
	u_int8_t	firmware[8];	/* firmware revision, printed at attach */
	u_int8_t	bios[8];	/* BIOS revision, printed at attach */
	u_int32_t	drivesize[IPS_MAXDRIVES];
	u_int8_t	cmdcnt;		/* max concurrent commands; sizes CCB pool */
	u_int8_t	maxphysdevs;
	u_int16_t	flashrepgmcnt;
	u_int8_t	defunctdiskcnt;
	u_int8_t	rebuildflag;
	u_int8_t	offdrivecnt;
	u_int8_t	critdrivecnt;
	u_int16_t	confupdcnt;
	u_int8_t	blkflag;
	u_int8_t	__reserved;
	u_int16_t	deaddisk[IPS_MAXCHANS][IPS_MAXTARGETS];
};
267 
/* IPS_CMD_GETDRIVEINFO reply: logical drive summary */
struct ips_driveinfo {
	u_int8_t	drivecnt;	/* number of logical drives (sc_nunits) */
	u_int8_t	__reserved[3];
	struct ips_drive {
		u_int8_t	id;
		u_int8_t	__reserved;
		u_int8_t	raid;	/* RAID level, reported via fake INQUIRY */
		u_int8_t	state;	/* IPS_DS_* drive state */
#define IPS_DS_FREE	0x00
#define IPS_DS_OFFLINE	0x02
#define IPS_DS_ONLINE	0x03
#define IPS_DS_DEGRADED	0x04
#define IPS_DS_SYS	0x06
#define IPS_DS_CRS	0x24

		u_int32_t	seccnt;	/* capacity in sectors (little-endian) */
	}		drive[IPS_MAXDRIVES];
};
286 
/* IPS_CMD_READCONF reply: full adapter configuration */
struct ips_conf {
	u_int8_t	ldcnt;
	u_int8_t	day;
	u_int8_t	month;
	u_int8_t	year;
	u_int8_t	initid[4];
	u_int8_t	hostid[12];
	u_int8_t	time[8];
	u_int32_t	useropt;
	u_int16_t	userfield;
	u_int8_t	rebuildrate;
	u_int8_t	__reserved1;

	struct ips_hw {
		u_int8_t	board[8];
		u_int8_t	cpu[8];
		u_int8_t	nchantype;
		u_int8_t	nhostinttype;
		u_int8_t	compression;
		u_int8_t	nvramtype;
		u_int32_t	nvramsize;
	}		hw;

	struct ips_ld {
		u_int16_t	userfield;
		u_int8_t	state;		/* IPS_DS_* state (see BIOCVOL) */
		u_int8_t	raidcacheparam;
		u_int8_t	chunkcnt;	/* chunks making up this drive */
		u_int8_t	stripesize;
		u_int8_t	params;
		u_int8_t	__reserved;
		u_int32_t	size;

		struct ips_chunk {
			u_int8_t	channel;
			u_int8_t	target;
			u_int16_t	__reserved;
			u_int32_t	startsec;
			u_int32_t	seccnt;
		}		chunk[IPS_MAXCHUNKS];
	}		ld[IPS_MAXDRIVES];

	struct ips_dev {
		u_int8_t	initiator;
		u_int8_t	params;		/* low bits: SCSI device type (SID_TYPE) */
		u_int8_t	miscflag;
		u_int8_t	state;		/* IPS_DVS_* flags; nonzero when present -- verify */
#define IPS_DVS_STANDBY	0x01
#define IPS_DVS_REBUILD	0x02
#define IPS_DVS_SPARE	0x04
#define IPS_DVS_MEMBER	0x08
#define IPS_DVS_ONLINE	0x80
#define IPS_DVS_READY	(IPS_DVS_STANDBY | IPS_DVS_ONLINE)

		u_int32_t	seccnt;
		u_int8_t	devid[28];
	}		dev[IPS_MAXCHANS][IPS_MAXTARGETS];

	u_int8_t	reserved[512];
};
347 
/* IPS_CMD_REBUILDSTATUS reply: per-logical-drive rebuild progress */
struct ips_rblstat {
	u_int8_t	__unknown[20];
	struct {
		u_int8_t	__unknown[4];
		u_int32_t	total;	/* total rebuild work (LE) */
		u_int32_t	remain;	/* work remaining (LE) */
	}		ld[IPS_MAXDRIVES];
};
356 
/* NVRAM page 5: adapter model identification */
struct ips_pg5 {
	u_int32_t	signature;
	u_int8_t	__reserved1;
	u_int8_t	slot;
	u_int16_t	type;		/* adapter model; indexes ips_names[] (LE) */
	u_int8_t	bioshi[4];
	u_int8_t	bioslo[4];
	u_int16_t	__reserved2;
	u_int8_t	__reserved3;
	u_int8_t	os;
	u_int8_t	driverhi[4];
	u_int8_t	driverlo[4];
	u_int8_t	__reserved4[100];
};
371 
/*
 * Layout of the single DMA-able info buffer (sc_infom): every
 * management reply frame has a dedicated slot so results persist
 * between commands.
 */
struct ips_info {
	struct ips_adapterinfo	adapter;
	struct ips_driveinfo	drive;
	struct ips_conf		conf;
	struct ips_rblstat	rblstat;
	struct ips_pg5		pg5;
};
379 
/*
 * Command control block: per-command driver state tracking one
 * adapter command from submission through completion.  Free CCBs
 * live on sc_ccbq_free, protected by sc_ccb_mtx.
 */
struct ips_softc;
struct ips_ccb {
	struct ips_softc *	c_sc;		/* driver softc */
	int			c_id;		/* command id */
	int			c_flags;	/* SCSI_* flags */
	enum {
		IPS_CCB_FREE,
		IPS_CCB_QUEUED,
		IPS_CCB_DONE
	}			c_state;	/* command state */

	void *			c_cmdbva;	/* command block virt addr */
	paddr_t			c_cmdbpa;	/* command block phys addr */
	bus_dmamap_t		c_dmam;		/* data buffer DMA map */

	struct scsipi_xfer *	c_xfer;		/* corresponding SCSI xfer */

	u_int8_t		c_stat;		/* status byte copy */
	u_int8_t		c_estat;	/* ext status byte copy */
	int			c_error;	/* completion error */

	void			(*c_done)(struct ips_softc *,	/* cmd done */
				    struct ips_ccb *);		/* callback */

	SLIST_ENTRY(ips_ccb)	c_link;		/* queue link */
};
407 
408 /* CCB queue */
409 SLIST_HEAD(ips_ccbq, ips_ccb);
410 
/* DMA-able chunk of memory (one segment, kept mapped for its lifetime) */
struct dmamem {
	bus_dma_tag_t		dm_tag;		/* tag used for alloc/free */
	bus_dmamap_t		dm_map;		/* loaded DMA map */
	bus_dma_segment_t	dm_seg;		/* backing segment */
	bus_size_t		dm_size;	/* size in bytes */
	void *			dm_vaddr;	/* kernel virtual address */
#define dm_paddr dm_seg.ds_addr			/* device-visible address */
};
420 
/* Per-adapter driver state */
struct ips_softc {
	struct device		sc_dev;

	/* SCSI mid-layer connection. */
	struct scsipi_adapter   sc_adapt;

	/* Per-channel SCSI pass-through bus state */
	struct ips_pt {
		struct scsipi_channel	pt_chan;
		int			pt_nchan;	/* channel number */
		struct ips_softc *	pt_sc;		/* back pointer */

		int			pt_proctgt;	/* processor/enclosure
							   target, -1 if none */
		char			pt_procdev[16];
	}			sc_pt[IPS_MAXCHANS];

	bus_space_tag_t		sc_iot;		/* register window */
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;

	const struct ips_chipset *sc_chip;	/* chipset ops (ips_chips[]) */

	struct ips_info *	sc_info;	/* management reply buffer */
	struct dmamem		sc_infom;	/* its DMA backing */

	int			sc_nunits;	/* number of logical drives */

	struct dmamem		sc_cmdbm;	/* command frames (ips_cmdb[]) */

	struct ips_ccb *	sc_ccb;		/* CCB array */
	int			sc_nccbs;
	struct ips_ccbq		sc_ccbq_free;	/* free list ... */
	struct kmutex		sc_ccb_mtx;	/* ... and its lock */

	/* Copperhead status queue (unused on Morpheus) */
	struct dmamem		sc_sqm;
	paddr_t			sc_sqtail;
	u_int32_t *		sc_sqbuf;
	int			sc_sqidx;
};
459 
460 int	ips_match(device_t, cfdata_t, void *);
461 void	ips_attach(struct device *, struct device *, void *);
462 
463 void	ips_scsi_cmd(struct ips_ccb *);
464 void	ips_scsi_pt_cmd(struct scsipi_xfer *);
465 static void ips_scsipi_request(struct scsipi_channel *,
466 	    scsipi_adapter_req_t, void *);
467 int	ips_scsi_ioctl(struct scsipi_channel *, u_long, void *,
468 	    int, struct proc *);
469 
470 #if NBIO > 0
471 int	ips_ioctl(device_t, u_long, void *);
472 int	ips_ioctl_inq(struct ips_softc *, struct bioc_inq *);
473 int	ips_ioctl_vol(struct ips_softc *, struct bioc_vol *);
474 int	ips_ioctl_disk(struct ips_softc *, struct bioc_disk *);
475 int	ips_ioctl_setstate(struct ips_softc *, struct bioc_setstate *);
476 #endif
477 
478 int	ips_load_xs(struct ips_softc *, struct ips_ccb *, struct scsipi_xfer *);
479 void	ips_start_xs(struct ips_softc *, struct ips_ccb *, struct scsipi_xfer *);
480 
481 int	ips_cmd(struct ips_softc *, struct ips_ccb *);
482 int	ips_poll(struct ips_softc *, struct ips_ccb *);
483 void	ips_done(struct ips_softc *, struct ips_ccb *);
484 void	ips_done_xs(struct ips_softc *, struct ips_ccb *);
485 void	ips_done_pt(struct ips_softc *, struct ips_ccb *);
486 void	ips_done_mgmt(struct ips_softc *, struct ips_ccb *);
487 int	ips_error(struct ips_softc *, struct ips_ccb *);
488 int	ips_error_xs(struct ips_softc *, struct ips_ccb *);
489 int	ips_intr(void *);
490 void	ips_timeout(void *);
491 
492 int	ips_getadapterinfo(struct ips_softc *, int);
493 int	ips_getdriveinfo(struct ips_softc *, int);
494 int	ips_getconf(struct ips_softc *, int);
495 int	ips_getpg5(struct ips_softc *, int);
496 
497 #if NBIO > 0
498 int	ips_getrblstat(struct ips_softc *, int);
499 int	ips_setstate(struct ips_softc *, int, int, int, int);
500 int	ips_rebuild(struct ips_softc *, int, int, int, int, int);
501 #endif
502 
503 void	ips_copperhead_exec(struct ips_softc *, struct ips_ccb *);
504 void	ips_copperhead_intren(struct ips_softc *);
505 int	ips_copperhead_isintr(struct ips_softc *);
506 u_int32_t ips_copperhead_status(struct ips_softc *);
507 
508 void	ips_morpheus_exec(struct ips_softc *, struct ips_ccb *);
509 void	ips_morpheus_intren(struct ips_softc *);
510 int	ips_morpheus_isintr(struct ips_softc *);
511 u_int32_t ips_morpheus_status(struct ips_softc *);
512 
513 struct ips_ccb *ips_ccb_alloc(struct ips_softc *, int);
514 void	ips_ccb_free(struct ips_softc *, struct ips_ccb *, int);
515 struct ips_ccb *ips_ccb_get(struct ips_softc *);
516 void	ips_ccb_put(struct ips_softc *, struct ips_ccb *);
517 
518 int	ips_dmamem_alloc(struct dmamem *, bus_dma_tag_t, bus_size_t);
519 void	ips_dmamem_free(struct dmamem *);
520 
521 extern struct  cfdriver ips_cd;
522 
523 CFATTACH_DECL_NEW(ips, sizeof(struct ips_softc),
524     ips_match, ips_attach, NULL, NULL);
525 
/* PCI vendor/product pairs of supported adapters (see ips_lookup()) */
static struct ips_ident {
        pci_vendor_id_t vendor;
        pci_product_id_t product;
} const ips_ids[] = {
	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID },
	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID4 },
	{ PCI_VENDOR_ADP2,	PCI_PRODUCT_ADP2_SERVERAID }
};
534 
/*
 * Per-chipset register layout and operations, indexed by IPS_CHIP_*
 * (ips_attach() selects the entry).  Accessed through the ips_exec()/
 * ips_intren()/ips_isintr()/ips_status() wrapper macros below.
 */
static const struct ips_chipset {
	enum {
		IPS_CHIP_COPPERHEAD = 0,
		IPS_CHIP_MORPHEUS
	}		ic_id;

	int		ic_bar;		/* PCI BAR of the register window */

	void		(*ic_exec)(struct ips_softc *, struct ips_ccb *);
	void		(*ic_intren)(struct ips_softc *);
	int		(*ic_isintr)(struct ips_softc *);
	u_int32_t	(*ic_status)(struct ips_softc *);
} ips_chips[] = {
	{
		IPS_CHIP_COPPERHEAD,
		0x14,
		ips_copperhead_exec,
		ips_copperhead_intren,
		ips_copperhead_isintr,
		ips_copperhead_status
	},
	{
		IPS_CHIP_MORPHEUS,
		0x10,
		ips_morpheus_exec,
		ips_morpheus_intren,
		ips_morpheus_isintr,
		ips_morpheus_status
	}
};
565 
566 #define ips_exec(s, c)	(s)->sc_chip->ic_exec((s), (c))
567 #define ips_intren(s)	(s)->sc_chip->ic_intren((s))
568 #define ips_isintr(s)	(s)->sc_chip->ic_isintr((s))
569 #define ips_status(s)	(s)->sc_chip->ic_status((s))
570 
/*
 * Marketing model names, indexed by the NVRAM page 5 "type" field
 * (see the attach banner).  Duplicate entries are presumably several
 * type codes sharing one name -- verify against vendor docs.
 */
static const char *ips_names[] = {
	NULL,
	NULL,
	"II",
	"onboard",
	"onboard",
	"3H",
	"3L",
	"4H",
	"4M",
	"4L",
	"4Mx",
	"4Lx",
	"5i",
	"5i",
	"6M",
	"6i",
	"7t",
	"7k",
	"7M"
};
592 
593 /* Lookup supported device table */
594 static const struct ips_ident *
595 ips_lookup(const struct pci_attach_args *pa)
596 {
597         const struct ips_ident *imp;
598 	int i;
599 
600 	for (i = 0, imp = ips_ids; i < __arraycount(ips_ids); i++, imp++) {
601                 if (PCI_VENDOR(pa->pa_id) == imp->vendor &&
602                     PCI_PRODUCT(pa->pa_id) == imp->product)
603                         return imp;
604         }
605         return NULL;
606 }
607 
608 int
609 ips_match(device_t parent, cfdata_t cfdata, void *aux)
610 {
611 	struct pci_attach_args *pa = aux;
612 
613 	if (ips_lookup(pa) != NULL)
614 		return 1;
615 
616 	return 0;
617 }
618 
619 void
620 ips_attach(struct device *parent, struct device *self, void *aux)
621 {
622 	struct ips_softc *sc = (struct ips_softc *)self;
623 	struct pci_attach_args *pa = aux;
624 	struct ips_ccb ccb0;
625 	struct ips_adapterinfo *ai;
626 	struct ips_driveinfo *di;
627 	struct ips_pg5 *pg5;
628 	pcireg_t maptype;
629 	bus_size_t iosize;
630 	pci_intr_handle_t ih;
631 	const char *intrstr;
632 	int type, i;
633 	struct scsipi_adapter *adapt;
634 	struct scsipi_channel *chan;
635 	char intrbuf[PCI_INTRSTR_LEN];
636 
637 	sc->sc_dmat = pa->pa_dmat;
638 
639 	/* Identify chipset */
640 	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_IBM_SERVERAID)
641 		sc->sc_chip = &ips_chips[IPS_CHIP_COPPERHEAD];
642 	else
643 		sc->sc_chip = &ips_chips[IPS_CHIP_MORPHEUS];
644 
645 	/* Map registers */
646 	// XXX check IPS_IOSIZE as old code used to do?
647 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_chip->ic_bar);
648 	if (pci_mapreg_map(pa, sc->sc_chip->ic_bar, maptype, 0, &sc->sc_iot,
649 	    &sc->sc_ioh, NULL, &iosize)) {
650 		printf(": can't map regs\n");
651 		return;
652 	}
653 
654 	/* Allocate command buffer */
655 	if (ips_dmamem_alloc(&sc->sc_cmdbm, sc->sc_dmat,
656 	    IPS_MAXCMDS * sizeof(struct ips_cmdb))) {
657 		printf(": can't alloc cmd buffer\n");
658 		goto fail1;
659 	}
660 
661 	/* Allocate info buffer */
662 	if (ips_dmamem_alloc(&sc->sc_infom, sc->sc_dmat,
663 	    sizeof(struct ips_info))) {
664 		printf(": can't alloc info buffer\n");
665 		goto fail2;
666 	}
667 	sc->sc_info = sc->sc_infom.dm_vaddr;
668 	ai = &sc->sc_info->adapter;
669 	di = &sc->sc_info->drive;
670 	pg5 = &sc->sc_info->pg5;
671 
672 	/* Allocate status queue for the Copperhead chipset */
673 	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD) {
674 		if (ips_dmamem_alloc(&sc->sc_sqm, sc->sc_dmat, IPS_SQSZ)) {
675 			printf(": can't alloc status queue\n");
676 			goto fail3;
677 		}
678 		sc->sc_sqtail = sc->sc_sqm.dm_paddr;
679 		sc->sc_sqbuf = sc->sc_sqm.dm_vaddr;
680 		sc->sc_sqidx = 0;
681 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQS,
682 		    sc->sc_sqm.dm_paddr);
683 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQE,
684 		    sc->sc_sqm.dm_paddr + IPS_SQSZ);
685 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH,
686 		    sc->sc_sqm.dm_paddr + sizeof(u_int32_t));
687 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT,
688 		    sc->sc_sqm.dm_paddr);
689 	}
690 
691 	/* Bootstrap CCB queue */
692 	sc->sc_nccbs = 1;
693 	sc->sc_ccb = &ccb0;
694 	bzero(&ccb0, sizeof(ccb0));
695 	ccb0.c_cmdbva = sc->sc_cmdbm.dm_vaddr;
696 	ccb0.c_cmdbpa = sc->sc_cmdbm.dm_paddr;
697 	SLIST_INIT(&sc->sc_ccbq_free);
698 	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, &ccb0, c_link);
699 	mutex_init(&sc->sc_ccb_mtx, MUTEX_DEFAULT, IPL_BIO);
700 
701 	/* Get adapter info */
702 	if (ips_getadapterinfo(sc, XS_CTL_NOSLEEP)) {
703 		printf(": can't get adapter info\n");
704 		goto fail4;
705 	}
706 
707 	/* Get logical drives info */
708 	if (ips_getdriveinfo(sc, XS_CTL_NOSLEEP)) {
709 		printf(": can't get ld info\n");
710 		goto fail4;
711 	}
712 	sc->sc_nunits = di->drivecnt;
713 
714 	/* Get configuration */
715 	if (ips_getconf(sc, XS_CTL_NOSLEEP)) {
716 		printf(": can't get config\n");
717 		goto fail4;
718 	}
719 
720 	/* Read NVRAM page 5 for additional info */
721 	(void)ips_getpg5(sc, XS_CTL_NOSLEEP);
722 
723 	/* Initialize CCB queue */
724 	sc->sc_nccbs = ai->cmdcnt;
725 	if ((sc->sc_ccb = ips_ccb_alloc(sc, sc->sc_nccbs)) == NULL) {
726 		printf(": can't alloc ccb queue\n");
727 		goto fail4;
728 	}
729 	SLIST_INIT(&sc->sc_ccbq_free);
730 	for (i = 0; i < sc->sc_nccbs; i++)
731 		SLIST_INSERT_HEAD(&sc->sc_ccbq_free,
732 		    &sc->sc_ccb[i], c_link);
733 
734 	/* Install interrupt handler */
735 	if (pci_intr_map(pa, &ih)) {
736 		printf(": can't map interrupt\n");
737 		goto fail5;
738 	}
739 	intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
740 	if (pci_intr_establish_xname(pa->pa_pc, ih, IPL_BIO, ips_intr, sc,
741 	    sc->sc_dev.dv_xname) == NULL) {
742 		printf(": can't establish interrupt");
743 		if (intrstr != NULL)
744 			printf(" at %s", intrstr);
745 		printf("\n");
746 		goto fail5;
747 	}
748 	printf(": %s\n", intrstr);
749 
750 	/* Display adapter info */
751 	printf("%s: ServeRAID", sc->sc_dev.dv_xname);
752 	type = htole16(pg5->type);
753 	if (type < sizeof(ips_names) / sizeof(ips_names[0]) && ips_names[type])
754 		printf(" %s", ips_names[type]);
755 	printf(", FW %c%c%c%c%c%c%c", ai->firmware[0], ai->firmware[1],
756 	    ai->firmware[2], ai->firmware[3], ai->firmware[4], ai->firmware[5],
757 	    ai->firmware[6]);
758 	printf(", BIOS %c%c%c%c%c%c%c", ai->bios[0], ai->bios[1], ai->bios[2],
759 	    ai->bios[3], ai->bios[4], ai->bios[5], ai->bios[6]);
760 	printf(", %d cmds, %d LD%s", sc->sc_nccbs, sc->sc_nunits,
761 	    (sc->sc_nunits == 1 ? "" : "s"));
762 	printf("\n");
763 
764 	/*
765 	 * Attach to scsipi.
766 	 */
767 	adapt = &sc->sc_adapt;
768 	memset(adapt, 0, sizeof(*adapt));
769 	adapt->adapt_dev = self;
770 	adapt->adapt_nchannels = IPS_MAXCHANS;
771 	if (sc->sc_nunits > 0)
772 		adapt->adapt_openings = sc->sc_nccbs / sc->sc_nunits;
773 	adapt->adapt_max_periph = adapt->adapt_openings;
774 	adapt->adapt_request = ips_scsipi_request;
775 	adapt->adapt_minphys = minphys;
776 	adapt->adapt_ioctl = ips_scsi_ioctl;
777 
778 	/* For each channel attach SCSI pass-through bus */
779 	for (i = 0; i < IPS_MAXCHANS; i++) {
780 		struct ips_pt *pt;
781 		int target, lastarget;
782 
783 		pt = &sc->sc_pt[i];
784 		pt->pt_sc = sc;
785 		pt->pt_nchan = i;
786 		pt->pt_proctgt = -1;
787 
788 		/* Check if channel has any devices besides disks */
789 		for (target = 0, lastarget = -1; target < IPS_MAXTARGETS;
790 		    target++) {
791 			struct ips_dev *idev;
792 			int dev_type;
793 
794 			idev = &sc->sc_info->conf.dev[i][target];
795 			dev_type = idev->params & SID_TYPE;
796 			if (idev->state && dev_type != T_DIRECT) {
797 				lastarget = target;
798 				if (type == T_PROCESSOR ||
799 				    type == T_ENCLOSURE)
800 					/* remember enclosure address */
801 					pt->pt_proctgt = target;
802 			}
803 		}
804 		if (lastarget == -1)
805 			continue;
806 
807 		chan = &pt->pt_chan;
808 		memset(chan, 0, sizeof(*chan));
809 		chan->chan_adapter = adapt;
810 		chan->chan_bustype = &scsi_bustype;
811 		chan->chan_channel = i;
812 		chan->chan_ntargets = IPS_MAXTARGETS;
813 		chan->chan_nluns = lastarget + 1;
814 		chan->chan_id = i;
815 		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
816 		config_found(self, chan, scsiprint, CFARGS_NONE);
817 	}
818 
819 	/* Enable interrupts */
820 	ips_intren(sc);
821 
822 #if NBIO > 0
823 	/* Install ioctl handler */
824 	if (bio_register(&sc->sc_dev, ips_ioctl))
825 		printf("%s: no ioctl support\n", sc->sc_dev.dv_xname);
826 #endif
827 
828 	return;
829 fail5:
830 	ips_ccb_free(sc, sc->sc_ccb, sc->sc_nccbs);
831 fail4:
832 	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD)
833 		ips_dmamem_free(&sc->sc_sqm);
834 fail3:
835 	ips_dmamem_free(&sc->sc_infom);
836 fail2:
837 	ips_dmamem_free(&sc->sc_cmdbm);
838 fail1:
839 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
840 }
841 
/*
 * Execute a SCSI command on a logical drive.  READ/WRITE and
 * SYNCHRONIZE CACHE are translated into adapter commands; INQUIRY,
 * READ CAPACITY, REQUEST SENSE and a few no-op commands are emulated
 * in the driver, since logical drives are not real SCSI devices.
 * The CCB is consumed on every path: either queued via ips_start_xs()
 * or returned with ips_ccb_put() before scsipi_done().
 */
void
ips_scsi_cmd(struct ips_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->c_xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct ips_softc *sc = device_private(chan->chan_adapter->adapt_dev);
	struct ips_driveinfo *di = &sc->sc_info->drive;
	struct ips_drive *drive;
	struct ips_cmd *cmd;
	int target = periph->periph_target;
	u_int32_t blkno, blkcnt;
	int code;

	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_cmd: xs %p, target %d, "
	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, target,
	    xs->cmd->opcode, xs->xs_control));

	/* Logical drives are single-LUN, numbered 0..sc_nunits-1 */
	if (target >= sc->sc_nunits || periph->periph_lun != 0) {
		DPRINTF(IPS_D_INFO, ("%s: ips_scsi_cmd: invalid params "
		    "target %d, lun %d\n", sc->sc_dev.dv_xname,
		    target, periph->periph_lun));
		xs->error = XS_DRIVER_STUFFUP;
		ips_ccb_put(sc, ccb);
		scsipi_done(xs);
		return;
	}

	drive = &di->drive[target];
	xs->error = XS_NOERROR;

	/* Fake SCSI commands */
	switch (xs->cmd->opcode) {
	case READ_10:
	case SCSI_READ_6_COMMAND:
	case WRITE_10:
	case SCSI_WRITE_6_COMMAND: {
		struct scsi_rw_6 *rw;
		struct scsipi_rw_10 *rwb;

		/* The 6- and 10-byte CDB forms encode LBA/length differently */
		if (xs->cmdlen == sizeof(struct scsi_rw_6)) {
			rw = (void *)xs->cmd;
			blkno = _3btol(rw->addr) &
			    (SRW_TOPADDR << 16 | 0xffff);
			/* In the 6-byte form a length of 0 means 256 blocks */
			blkcnt = rw->length ? rw->length : 0x100;
		} else {
			rwb = (void *)xs->cmd;
			blkno = _4btol(rwb->addr);
			blkcnt = _2btol(rwb->length);
		}

		/*
		 * Reject transfers past the end of the drive.  seccnt is
		 * little-endian; NOTE(review): htole32() here is
		 * byte-for-byte identical to le32toh() on all ports, but
		 * the device->host direction would read better.
		 */
		if (blkno >= htole32(drive->seccnt) || blkno + blkcnt >
		    htole32(drive->seccnt)) {
			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: invalid params "
			    "blkno %u, blkcnt %u\n", sc->sc_dev.dv_xname,
			    blkno, blkcnt));
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		if (xs->xs_control & XS_CTL_DATA_IN)
			code = IPS_CMD_READ;
		else
			code = IPS_CMD_WRITE;

		/* Build the adapter command frame (fields little-endian) */
		cmd = ccb->c_cmdbva;
		cmd->code = code;
		cmd->drive = target;
		cmd->lba = htole32(blkno);
		cmd->seccnt = htole16(blkcnt);

		if (ips_load_xs(sc, ccb, xs)) {
			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: ips_load_xs "
			    "failed\n", sc->sc_dev.dv_xname));
			xs->error = XS_DRIVER_STUFFUP;
			ips_ccb_put(sc, ccb);
			scsipi_done(xs);
			return;
		}

		/* Switch to the scatter-gather opcode variant if needed */
		if (cmd->sgcnt > 0)
			cmd->code |= IPS_CMD_SG;

		ccb->c_done = ips_done_xs;
		ips_start_xs(sc, ccb, xs);
		return;
	}
	case INQUIRY: {
		/* Synthesize inquiry data for the logical drive */
		struct scsipi_inquiry_data inq;

		bzero(&inq, sizeof(inq));
		inq.device = T_DIRECT;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		inq.flags3 |= SID_CmdQue;
		strlcpy(inq.vendor, "IBM", sizeof(inq.vendor));
		snprintf(inq.product, sizeof(inq.product),
		    "LD%d RAID%d", target, drive->raid);
		strlcpy(inq.revision, "1.0", sizeof(inq.revision));
		memcpy(xs->data, &inq, MIN(xs->datalen, sizeof(inq)));
		break;
	}
	case READ_CAPACITY_10: {
		/* Report capacity from the cached drive info */
		struct scsipi_read_capacity_10_data rcd;

		bzero(&rcd, sizeof(rcd));
		_lto4b(htole32(drive->seccnt) - 1, rcd.addr);
		_lto4b(IPS_SECSZ, rcd.length);
		memcpy(xs->data, &rcd, MIN(xs->datalen, sizeof(rcd)));
		break;
	}
	case SCSI_REQUEST_SENSE: {
		/* Always report "no sense" -- errors are surfaced per-xfer */
		struct scsi_sense_data sd;

		bzero(&sd, sizeof(sd));
		sd.response_code = SSD_RCODE_CURRENT;
		sd.flags = SKEY_NO_SENSE;
		memcpy(xs->data, &sd, MIN(xs->datalen, sizeof(sd)));
		break;
	}
	case SCSI_SYNCHRONIZE_CACHE_10:
		/* Translate into an adapter flush command */
		cmd = ccb->c_cmdbva;
		cmd->code = IPS_CMD_FLUSH;

		ccb->c_done = ips_done_xs;
		ips_start_xs(sc, ccb, xs);
		return;
	case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL:
	case START_STOP:
	case SCSI_TEST_UNIT_READY:
		/* No-ops for a logical drive: succeed silently */
		break;
	default:
		DPRINTF(IPS_D_INFO, ("%s: unsupported scsi command 0x%02x\n",
		    sc->sc_dev.dv_xname, xs->cmd->opcode));
		xs->error = XS_DRIVER_STUFFUP;
	}

	/* Emulated/failed commands complete immediately */
	ips_ccb_put(sc, ccb);
	scsipi_done(xs);
}
983 
984 /*
985  * Start a SCSI command.
986  */
987 static void
988 ips_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
989 		   void *arg)
990 {
991 	switch (req) {
992 	case ADAPTER_REQ_RUN_XFER: {
993 		struct ips_ccb *ccb;
994 		struct scsipi_xfer *xs;
995 		struct ips_softc *sc;
996 
997 		sc = device_private(chan->chan_adapter->adapt_dev);
998 		xs = (struct scsipi_xfer *)arg;
999 
1000 		if ((ccb = ips_ccb_get(sc)) == NULL) {
1001 			xs->error = XS_RESOURCE_SHORTAGE;
1002 			scsipi_done(xs);
1003 			break;
1004 		}
1005 
1006 		ccb->c_xfer = xs;
1007 		ips_scsi_cmd(ccb);
1008 
1009 		break;
1010 	}
1011 
1012 	case ADAPTER_REQ_SET_XFER_MODE: {
1013 		struct scsipi_xfer_mode *xm = arg;
1014 		xm->xm_mode = PERIPH_CAP_TQING;
1015 		xm->xm_period = 0;
1016 		xm->xm_offset = 0;
1017 		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
1018 		return;
1019 	}
1020 
1021 	case ADAPTER_REQ_GROW_RESOURCES:
1022 		/*
1023 		 * Not supported.
1024 		 */
1025 		break;
1026 	}
1027 }
1028 
1029 int
1030 ips_scsi_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
1031     int flag, struct proc *p)
1032 {
1033 #if NBIO > 0
1034 	return (ips_ioctl(chan->chan_adapter->adapt_dev, cmd, data));
1035 #else
1036 	return (ENOTTY);
1037 #endif
1038 }
1039 
1040 #if NBIO > 0
1041 int
1042 ips_ioctl(device_t dev, u_long cmd, void *data)
1043 {
1044 	struct ips_softc *sc = (struct ips_softc *)dev;
1045 
1046 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl: cmd %lu\n",
1047 	    sc->sc_dev.dv_xname, cmd));
1048 
1049 	switch (cmd) {
1050 	case BIOCINQ:
1051 		return (ips_ioctl_inq(sc, (struct bioc_inq *)data));
1052 	case BIOCVOL:
1053 		return (ips_ioctl_vol(sc, (struct bioc_vol *)data));
1054 	case BIOCDISK:
1055 		return (ips_ioctl_disk(sc, (struct bioc_disk *)data));
1056 	case BIOCSETSTATE:
1057 		return (ips_ioctl_setstate(sc, (struct bioc_setstate *)data));
1058 	default:
1059 		return (ENOTTY);
1060 	}
1061 }
1062 
1063 int
1064 ips_ioctl_inq(struct ips_softc *sc, struct bioc_inq *bi)
1065 {
1066 	struct ips_conf *conf = &sc->sc_info->conf;
1067 	int i;
1068 
1069 	strlcpy(bi->bi_dev, sc->sc_dev.dv_xname, sizeof(bi->bi_dev));
1070 	bi->bi_novol = sc->sc_nunits;
1071 	for (i = 0, bi->bi_nodisk = 0; i < sc->sc_nunits; i++)
1072 		bi->bi_nodisk += conf->ld[i].chunkcnt;
1073 
1074 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_inq: novol %d, nodisk %d\n",
1075 	    bi->bi_dev, bi->bi_novol, bi->bi_nodisk));
1076 
1077 	return (0);
1078 }
1079 
/*
 * BIOCVOL: report status, size, RAID level and disk count for the
 * logical volume selected by bv->bv_volid.
 */
int
ips_ioctl_vol(struct ips_softc *sc, struct bioc_vol *bv)
{
	struct ips_driveinfo *di = &sc->sc_info->drive;
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_rblstat *rblstat = &sc->sc_info->rblstat;
	struct ips_ld *ld;
	int vid = bv->bv_volid;
	struct device *dv;
	int error, rebuild = 0;
	u_int32_t total = 0, done = 0;

	if (vid >= sc->sc_nunits)
		return (EINVAL);
	/* Refresh the cached controller configuration (polled). */
	if ((error = ips_getconf(sc, 0)))
		return (error);
	ld = &conf->ld[vid];

	/* Translate the controller's drive state to bio(4) status. */
	switch (ld->state) {
	case IPS_DS_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case IPS_DS_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		rebuild++;
		break;
	case IPS_DS_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	default:
		bv->bv_status = BIOC_SVINVALID;
	}

	/* For degraded volumes, also report rebuild progress. */
	if (rebuild && ips_getrblstat(sc, 0) == 0) {
		/* NOTE: htole32() doubles as le32toh() here; both are
		 * byte-swaps on big-endian and no-ops on little-endian. */
		total = htole32(rblstat->ld[vid].total);
		done = total - htole32(rblstat->ld[vid].remain);
		if (total && total > done) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = 100 * done / total;
		}
	}

	bv->bv_size = (uint64_t)htole32(ld->size) * IPS_SECSZ;
	bv->bv_level = di->drive[vid].raid;
	bv->bv_nodisk = ld->chunkcnt;

	/* Associate all unused and spare drives with first volume */
	if (vid == 0) {
		struct ips_dev *dev;
		int chan, target;

		for (chan = 0; chan < IPS_MAXCHANS; chan++)
			for (target = 0; target < IPS_MAXTARGETS; target++) {
				/* Count present, non-member direct-access
				 * devices into volume 0's disk count. */
				dev = &conf->dev[chan][target];
				if (dev->state && !(dev->state &
				    IPS_DVS_MEMBER) &&
				    (dev->params & SID_TYPE) == T_DIRECT)
					bv->bv_nodisk++;
			}
	}

	dv = &sc->sc_dev;
	strlcpy(bv->bv_dev, dv->dv_xname, sizeof(bv->bv_dev));
	strlcpy(bv->bv_vendor, "IBM", sizeof(bv->bv_vendor));

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_vol: vid %d, state 0x%02x, "
	    "total %u, done %u, size %llu, level %d, nodisk %d, dev %s\n",
	    sc->sc_dev.dv_xname, vid, ld->state, total, done, bv->bv_size,
	    bv->bv_level, bv->bv_nodisk, bv->bv_dev));

	return (0);
}
1152 
/*
 * BIOCDISK: report location, size and state of one physical disk.
 * Disk ids beyond the volume's chunk count refer to unused or spare
 * drives, which are associated with volume 0 (see ips_ioctl_vol()).
 */
int
ips_ioctl_disk(struct ips_softc *sc, struct bioc_disk *bd)
{
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_ld *ld;
	struct ips_chunk *chunk;
	struct ips_dev *dev;
	int vid = bd->bd_volid, did = bd->bd_diskid;
	int chan, target, error, i;

	if (vid >= sc->sc_nunits)
		return (EINVAL);
	/* Refresh the cached controller configuration (polled). */
	if ((error = ips_getconf(sc, 0)))
		return (error);
	ld = &conf->ld[vid];

	if (did >= ld->chunkcnt) {
		/* Probably unused or spare drives */
		if (vid != 0)
			return (EINVAL);

		/*
		 * Scan all channel/target slots for the did'th present,
		 * non-member direct-access device.  If nothing matches,
		 * the loops terminate with chan == IPS_MAXCHANS and the
		 * range check after "out:" returns EINVAL.
		 */
		i = ld->chunkcnt;
		for (chan = 0; chan < IPS_MAXCHANS; chan++)
			for (target = 0; target < IPS_MAXTARGETS; target++) {
				dev = &conf->dev[chan][target];
				if (dev->state && !(dev->state &
				    IPS_DVS_MEMBER) &&
				    (dev->params & SID_TYPE) == T_DIRECT)
					if (i++ == did)
						goto out;
			}
	} else {
		/* Regular member disk: location comes from the chunk. */
		chunk = &ld->chunk[did];
		chan = chunk->channel;
		target = chunk->target;
	}

out:
	if (chan >= IPS_MAXCHANS || target >= IPS_MAXTARGETS)
		return (EINVAL);
	dev = &conf->dev[chan][target];

	bd->bd_channel = chan;
	bd->bd_target = target;
	bd->bd_lun = 0;
	/* NOTE: htole32() doubles as le32toh() here (swap on BE only). */
	bd->bd_size = (uint64_t)htole32(dev->seccnt) * IPS_SECSZ;

	bzero(bd->bd_vendor, sizeof(bd->bd_vendor));
	memcpy(bd->bd_vendor, dev->devid, MIN(sizeof(bd->bd_vendor),
	    sizeof(dev->devid)));
	strlcpy(bd->bd_procdev, sc->sc_pt[chan].pt_procdev,
	    sizeof(bd->bd_procdev));

	/* Map device state bits to bio(4) disk status; later checks win. */
	if (dev->state & IPS_DVS_READY) {
		bd->bd_status = BIOC_SDUNUSED;
		if (dev->state & IPS_DVS_MEMBER)
			bd->bd_status = BIOC_SDONLINE;
		if (dev->state & IPS_DVS_SPARE)
			bd->bd_status = BIOC_SDHOTSPARE;
		if (dev->state & IPS_DVS_REBUILD)
			bd->bd_status = BIOC_SDREBUILD;
	} else {
		bd->bd_status = BIOC_SDOFFLINE;
	}

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_disk: vid %d, did %d, channel %d, "
	    "target %d, size %llu, state 0x%02x\n", sc->sc_dev.dv_xname,
	    vid, did, bd->bd_channel, bd->bd_target, bd->bd_size, dev->state));

	return (0);
}
1224 
1225 int
1226 ips_ioctl_setstate(struct ips_softc *sc, struct bioc_setstate *bs)
1227 {
1228 	struct ips_conf *conf = &sc->sc_info->conf;
1229 	struct ips_dev *dev;
1230 	int state, error;
1231 
1232 	if (bs->bs_channel >= IPS_MAXCHANS || bs->bs_target >= IPS_MAXTARGETS)
1233 		return (EINVAL);
1234 	if ((error = ips_getconf(sc, 0)))
1235 		return (error);
1236 	dev = &conf->dev[bs->bs_channel][bs->bs_target];
1237 	state = dev->state;
1238 
1239 	switch (bs->bs_status) {
1240 	case BIOC_SSONLINE:
1241 		state |= IPS_DVS_READY;
1242 		break;
1243 	case BIOC_SSOFFLINE:
1244 		state &= ~IPS_DVS_READY;
1245 		break;
1246 	case BIOC_SSHOTSPARE:
1247 		state |= IPS_DVS_SPARE;
1248 		break;
1249 	case BIOC_SSREBUILD:
1250 		return (ips_rebuild(sc, bs->bs_channel, bs->bs_target,
1251 		    bs->bs_channel, bs->bs_target, 0));
1252 	default:
1253 		return (EINVAL);
1254 	}
1255 
1256 	return (ips_setstate(sc, bs->bs_channel, bs->bs_target, state, 0));
1257 }
1258 #endif	/* NBIO > 0 */
1259 
1260 int
1261 ips_load_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsipi_xfer *xs)
1262 {
1263 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1264 	struct ips_cmd *cmd = &cmdb->cmd;
1265 	struct ips_sg *sg = cmdb->sg;
1266 	int nsegs, i;
1267 
1268 	if (xs->datalen == 0)
1269 		return (0);
1270 
1271 	/* Map data buffer into DMA segments */
1272 	if (bus_dmamap_load(sc->sc_dmat, ccb->c_dmam, xs->data, xs->datalen,
1273 	    NULL, (xs->xs_control & XS_CTL_NOSLEEP ? BUS_DMA_NOWAIT : 0)))
1274 		return (1);
1275 	bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,ccb->c_dmam->dm_mapsize,
1276 	    xs->xs_control & XS_CTL_DATA_IN ? BUS_DMASYNC_PREREAD :
1277 	    BUS_DMASYNC_PREWRITE);
1278 
1279 	if ((nsegs = ccb->c_dmam->dm_nsegs) > IPS_MAXSGS)
1280 		return (1);
1281 
1282 	if (nsegs > 1) {
1283 		cmd->sgcnt = nsegs;
1284 		cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb,
1285 		    sg));
1286 
1287 		/* Fill in scatter-gather array */
1288 		for (i = 0; i < nsegs; i++) {
1289 			sg[i].addr = htole32(ccb->c_dmam->dm_segs[i].ds_addr);
1290 			sg[i].size = htole32(ccb->c_dmam->dm_segs[i].ds_len);
1291 		}
1292 	} else {
1293 		cmd->sgcnt = 0;
1294 		cmd->sgaddr = htole32(ccb->c_dmam->dm_segs[0].ds_addr);
1295 	}
1296 
1297 	return (0);
1298 }
1299 
1300 void
1301 ips_start_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsipi_xfer *xs)
1302 {
1303 	ccb->c_flags = xs->xs_control;
1304 	ccb->c_xfer = xs;
1305 	int ispoll = xs->xs_control & XS_CTL_POLL;
1306 
1307 	if (!ispoll) {
1308 		int timeout = mstohz(xs->timeout);
1309 		if (timeout == 0)
1310 			timeout = 1;
1311 
1312 		callout_reset(&xs->xs_callout, timeout, ips_timeout, ccb);
1313 	}
1314 
1315 	/*
1316 	 * Return value not used here because ips_cmd() must complete
1317 	 * scsipi_xfer on any failure and SCSI layer will handle possible
1318 	 * errors.
1319 	 */
1320 	ips_cmd(sc, ccb);
1321 }
1322 
/*
 * Post a command to the controller.  If the CCB requests polling
 * (XS_CTL_POLL) the routine waits for completion and returns the
 * command's error; otherwise it returns 0 immediately and the
 * interrupt handler finishes the command.
 */
int
ips_cmd(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmd *cmd = ccb->c_cmdbva;
	int s, error = 0;

	DPRINTF(IPS_D_XFER, ("%s: ips_cmd: id 0x%02x, flags 0x%x, xs %p, "
	    "code 0x%02x, drive %d, sgcnt %d, lba %d, sgaddr 0x%08x, "
	    "seccnt %d\n", sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags,
	    ccb->c_xfer, cmd->code, cmd->drive, cmd->sgcnt, htole32(cmd->lba),
	    htole32(cmd->sgaddr), htole16(cmd->seccnt)));

	/* The id links completion status words back to this CCB. */
	cmd->id = ccb->c_id;

	/* Post command to controller and optionally wait for completion */
	s = splbio();
	ips_exec(sc, ccb);
	/* Marking QUEUED after ips_exec() is safe: splbio() blocks the
	 * completion interrupt until splx() below. */
	ccb->c_state = IPS_CCB_QUEUED;
	if (ccb->c_flags & XS_CTL_POLL)
		error = ips_poll(sc, ccb);
	splx(s);

	return (error);
}
1347 
/*
 * Wait for a queued command to complete.  With XS_CTL_NOSLEEP the
 * wait busy-polls the interrupt handler (up to ~1 second); otherwise
 * it sleeps until woken by ips_intr() or the timeout expires.
 * Finishes the command via ips_done() and returns its error code.
 */
int
ips_poll(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct timeval tv;
	int error, timo;

	if (ccb->c_flags & XS_CTL_NOSLEEP) {
		/* busy-wait */
		DPRINTF(IPS_D_XFER, ("%s: ips_poll: busy-wait\n",
		    sc->sc_dev.dv_xname));

		/* 10000 iterations * 100us = 1 second total. */
		for (timo = 10000; timo > 0; timo--) {
			delay(100);
			ips_intr(sc);
			if (ccb->c_state == IPS_CCB_DONE)
				break;
		}
	} else {
		/* sleep; use the transfer's timeout when there is one */
		timo = ccb->c_xfer ? ccb->c_xfer->timeout : IPS_TIMEOUT;
		tv.tv_sec = timo / 1000;
		tv.tv_usec = (timo % 1000) * 1000;
		timo = tvtohz(&tv);

		DPRINTF(IPS_D_XFER, ("%s: ips_poll: sleep %d hz\n",
		    sc->sc_dev.dv_xname, timo));
		tsleep(ccb, PRIBIO + 1, "ipscmd", timo);
	}
	DPRINTF(IPS_D_XFER, ("%s: ips_poll: state %d\n", sc->sc_dev.dv_xname,
	    ccb->c_state));

	if (ccb->c_state != IPS_CCB_DONE)
		/*
		 * Command never completed. Fake hardware status byte
		 * to indicate timeout.
		 */
		ccb->c_stat = IPS_STAT_TIMO;

	ips_done(sc, ccb);
	error = ccb->c_error;

	return (error);
}
1391 
1392 void
1393 ips_done(struct ips_softc *sc, struct ips_ccb *ccb)
1394 {
1395 	DPRINTF(IPS_D_XFER, ("%s: ips_done: id 0x%02x, flags 0x%x, xs %p\n",
1396 	    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags, ccb->c_xfer));
1397 
1398 	ccb->c_error = ips_error(sc, ccb);
1399 	ccb->c_done(sc, ccb);
1400 }
1401 
1402 void
1403 ips_done_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1404 {
1405 	struct scsipi_xfer *xs = ccb->c_xfer;
1406 
1407 	if (!(xs->xs_control & XS_CTL_POLL))
1408 		callout_stop(&xs->xs_callout);
1409 
1410 	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1411 		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
1412 		    ccb->c_dmam->dm_mapsize, xs->xs_control & XS_CTL_DATA_IN ?
1413 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1414 		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
1415 	}
1416 
1417 	xs->resid = 0;
1418 	xs->error = ips_error_xs(sc, ccb);
1419 	ips_ccb_put(sc, ccb);
1420 	scsipi_done(xs);
1421 }
1422 
/*
 * Completion handler for DCDB pass-through commands addressed to
 * physical devices.
 */
void
ips_done_pt(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->c_xfer;
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	/* NOTE: htole16() doubles as le16toh() here (swap on BE only). */
	int done = htole16(dcdb->datalen);

	if (!(xs->xs_control & XS_CTL_POLL))
		callout_stop(&xs->xs_callout);

	/* Tear down the data DMA mapping, if any. */
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
		    ccb->c_dmam->dm_mapsize, xs->xs_control & XS_CTL_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
	}

	/* Compute the residual from the reported transfer count. */
	if (done && done < xs->datalen)
		xs->resid = xs->datalen - done;
	else
		xs->resid = 0;
	xs->error = ips_error_xs(sc, ccb);
	xs->status = dcdb->status;

	/* Copy back sense data on check condition. */
	if (xs->error == XS_SENSE)
		memcpy(&xs->sense, dcdb->sense, MIN(sizeof(xs->sense),
		    sizeof(dcdb->sense)));

	/*
	 * Hide physical direct-access devices from INQUIRY: they are
	 * accessed through logical volumes, not directly.
	 */
	if (xs->cmd->opcode == INQUIRY && xs->error == XS_NOERROR) {
		int type = ((struct scsipi_inquiry_data *)xs->data)->device &
		    SID_TYPE;

		if (type == T_DIRECT)
			/* mask physical drives */
			xs->error = XS_DRIVER_STUFFUP;
	}

	ips_ccb_put(sc, ccb);
	scsipi_done(xs);
}
1464 
1465 void
1466 ips_done_mgmt(struct ips_softc *sc, struct ips_ccb *ccb)
1467 {
1468 	if (ccb->c_flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1469 		bus_dmamap_sync(sc->sc_dmat, sc->sc_infom.dm_map, 0,
1470 		    sc->sc_infom.dm_map->dm_mapsize,
1471 		    ccb->c_flags & XS_CTL_DATA_IN ? BUS_DMASYNC_POSTREAD :
1472 		    BUS_DMASYNC_POSTWRITE);
1473 
1474 	ips_ccb_put(sc, ccb);
1475 }
1476 
/*
 * Decode a finished command's hardware status into an errno value.
 * Returns 0 for success and for recovered errors.
 */
int
ips_error(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_cmd *cmd = &cmdb->cmd;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	struct scsipi_xfer *xs = ccb->c_xfer;
	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);

	if (gsc == IPS_STAT_OK)
		return (0);

	/* Dump diagnostic details about the failed command. */
	DPRINTF(IPS_D_ERR, ("%s: ips_error: stat 0x%02x, estat 0x%02x, "
	    "cmd code 0x%02x, drive %d, sgcnt %d, lba %u, seccnt %d",
	    sc->sc_dev.dv_xname, ccb->c_stat, ccb->c_estat, cmd->code,
	    cmd->drive, cmd->sgcnt, htole32(cmd->lba), htole16(cmd->seccnt)));
	if (cmd->code == IPS_CMD_DCDB || cmd->code == IPS_CMD_DCDB_SG) {
		int i;

		DPRINTF(IPS_D_ERR, (", dcdb device 0x%02x, attr 0x%02x, "
		    "datalen %d, sgcnt %d, status 0x%02x",
		    dcdb->device, dcdb->attr, htole16(dcdb->datalen),
		    dcdb->sgcnt, dcdb->status));

		DPRINTF(IPS_D_ERR, (", cdb"));
		for (i = 0; i < dcdb->cdblen; i++)
			DPRINTF(IPS_D_ERR, (" %x", dcdb->cdb[i]));
		if (ccb->c_estat == IPS_ESTAT_CKCOND) {
			DPRINTF(IPS_D_ERR, (", sense"));
			for (i = 0; i < dcdb->senselen; i++)
				DPRINTF(IPS_D_ERR, (" %x", dcdb->sense[i]));
		}
	}
	DPRINTF(IPS_D_ERR, ("\n"));

	/* Map the general status code to an errno. */
	switch (gsc) {
	case IPS_STAT_RECOV:
		return (0);
	case IPS_STAT_INVOP:
	case IPS_STAT_INVCMD:
	case IPS_STAT_INVPARM:
		return (EINVAL);
	case IPS_STAT_BUSY:
		return (EBUSY);
	case IPS_STAT_TIMO:
		return (ETIMEDOUT);
	case IPS_STAT_PDRVERR:
		/* Physical drive error: consult the extended status. */
		switch (ccb->c_estat) {
		case IPS_ESTAT_SELTIMO:
			return (ENODEV);
		case IPS_ESTAT_OURUN:
			if (xs && htole16(dcdb->datalen) < xs->datalen)
				/* underrun */
				return (0);
			break;
		case IPS_ESTAT_RECOV:
			return (0);
		}
		break;
	}

	return (EIO);
}
1540 
1541 int
1542 ips_error_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1543 {
1544 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1545 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1546 	struct scsipi_xfer *xs = ccb->c_xfer;
1547 	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);
1548 
1549 	/* Map hardware error codes to SCSI ones */
1550 	switch (gsc) {
1551 	case IPS_STAT_OK:
1552 	case IPS_STAT_RECOV:
1553 		return (XS_NOERROR);
1554 	case IPS_STAT_BUSY:
1555 		return (XS_BUSY);
1556 	case IPS_STAT_TIMO:
1557 		return (XS_TIMEOUT);
1558 	case IPS_STAT_PDRVERR:
1559 		switch (ccb->c_estat) {
1560 		case IPS_ESTAT_SELTIMO:
1561 			return (XS_SELTIMEOUT);
1562 		case IPS_ESTAT_OURUN:
1563 			if (xs && htole16(dcdb->datalen) < xs->datalen)
1564 				/* underrun */
1565 				return (XS_NOERROR);
1566 			break;
1567 		case IPS_ESTAT_HOSTRST:
1568 		case IPS_ESTAT_DEVRST:
1569 			return (XS_RESET);
1570 		case IPS_ESTAT_RECOV:
1571 			return (XS_NOERROR);
1572 		case IPS_ESTAT_CKCOND:
1573 			return (XS_SENSE);
1574 		}
1575 		break;
1576 	}
1577 
1578 	return (XS_DRIVER_STUFFUP);
1579 }
1580 
/*
 * Interrupt handler: drain the controller's completion status queue
 * and finish the corresponding commands.  Returns non-zero if the
 * interrupt was ours.
 */
int
ips_intr(void *arg)
{
	struct ips_softc *sc = arg;
	struct ips_ccb *ccb;
	u_int32_t status;
	int id;

	DPRINTF(IPS_D_XFER, ("%s: ips_intr", sc->sc_dev.dv_xname));
	if (!ips_isintr(sc)) {
		DPRINTF(IPS_D_XFER, (": not ours\n"));
		return (0);
	}
	DPRINTF(IPS_D_XFER, ("\n"));

	/* Process completed commands */
	while ((status = ips_status(sc)) != 0xffffffff) {
		DPRINTF(IPS_D_XFER, ("%s: ips_intr: status 0x%08x\n",
		    sc->sc_dev.dv_xname, status));

		/* Ignore status words that do not map to a valid CCB. */
		id = IPS_STAT_ID(status);
		if (id >= sc->sc_nccbs) {
			DPRINTF(IPS_D_ERR, ("%s: ips_intr: invalid id %d\n",
			    sc->sc_dev.dv_xname, id));
			continue;
		}

		/* Ignore completions for CCBs that are not in flight. */
		ccb = &sc->sc_ccb[id];
		if (ccb->c_state != IPS_CCB_QUEUED) {
			DPRINTF(IPS_D_ERR, ("%s: ips_intr: cmd 0x%02x not "
			    "queued, state %d, status 0x%08x\n",
			    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_state,
			    status));
			continue;
		}

		/* Record the completion status in the CCB. */
		ccb->c_state = IPS_CCB_DONE;
		ccb->c_stat = IPS_STAT_BASIC(status);
		ccb->c_estat = IPS_STAT_EXT(status);

		if (ccb->c_flags & XS_CTL_POLL) {
			/* Polled commands are finished by ips_poll(). */
			wakeup(ccb);
		} else {
			ips_done(sc, ccb);
		}
	}

	return (1);
}
1630 
1631 void
1632 ips_timeout(void *arg)
1633 {
1634 	struct ips_ccb *ccb = arg;
1635 	struct ips_softc *sc = ccb->c_sc;
1636 	struct scsipi_xfer *xs = ccb->c_xfer;
1637 	int s;
1638 
1639 	s = splbio();
1640 	if (xs)
1641 		scsi_print_addr(xs->xs_periph);
1642 	else
1643 		printf("%s: ", sc->sc_dev.dv_xname);
1644 	printf("timeout\n");
1645 
1646 	/*
1647 	 * Command never completed. Fake hardware status byte
1648 	 * to indicate timeout.
1649 	 * XXX: need to remove command from controller.
1650 	 */
1651 	ccb->c_stat = IPS_STAT_TIMO;
1652 	ips_done(sc, ccb);
1653 	splx(s);
1654 }
1655 
1656 int
1657 ips_getadapterinfo(struct ips_softc *sc, int flags)
1658 {
1659 	struct ips_ccb *ccb;
1660 	struct ips_cmd *cmd;
1661 
1662 	ccb = ips_ccb_get(sc);
1663 	if (ccb == NULL)
1664 		return (1);
1665 
1666 	ccb->c_flags = XS_CTL_DATA_IN | XS_CTL_POLL | flags;
1667 	ccb->c_done = ips_done_mgmt;
1668 
1669 	cmd = ccb->c_cmdbva;
1670 	cmd->code = IPS_CMD_GETADAPTERINFO;
1671 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1672 	    adapter));
1673 
1674 	return (ips_cmd(sc, ccb));
1675 }
1676 
1677 int
1678 ips_getdriveinfo(struct ips_softc *sc, int flags)
1679 {
1680 	struct ips_ccb *ccb;
1681 	struct ips_cmd *cmd;
1682 
1683 	ccb = ips_ccb_get(sc);
1684 	if (ccb == NULL)
1685 		return (1);
1686 
1687 	ccb->c_flags = XS_CTL_DATA_IN | XS_CTL_POLL | flags;
1688 	ccb->c_done = ips_done_mgmt;
1689 
1690 	cmd = ccb->c_cmdbva;
1691 	cmd->code = IPS_CMD_GETDRIVEINFO;
1692 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1693 	    drive));
1694 
1695 	return (ips_cmd(sc, ccb));
1696 }
1697 
1698 int
1699 ips_getconf(struct ips_softc *sc, int flags)
1700 {
1701 	struct ips_ccb *ccb;
1702 	struct ips_cmd *cmd;
1703 
1704 	ccb = ips_ccb_get(sc);
1705 	if (ccb == NULL)
1706 		return (1);
1707 
1708 	ccb->c_flags = XS_CTL_DATA_IN | XS_CTL_POLL | flags;
1709 	ccb->c_done = ips_done_mgmt;
1710 
1711 	cmd = ccb->c_cmdbva;
1712 	cmd->code = IPS_CMD_READCONF;
1713 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1714 	    conf));
1715 
1716 	return (ips_cmd(sc, ccb));
1717 }
1718 
1719 int
1720 ips_getpg5(struct ips_softc *sc, int flags)
1721 {
1722 	struct ips_ccb *ccb;
1723 	struct ips_cmd *cmd;
1724 
1725 	ccb = ips_ccb_get(sc);
1726 	if (ccb == NULL)
1727 		return (1);
1728 
1729 	ccb->c_flags = XS_CTL_DATA_IN | XS_CTL_POLL | flags;
1730 	ccb->c_done = ips_done_mgmt;
1731 
1732 	cmd = ccb->c_cmdbva;
1733 	cmd->code = IPS_CMD_RWNVRAM;
1734 	cmd->drive = 5;
1735 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1736 	    pg5));
1737 
1738 	return (ips_cmd(sc, ccb));
1739 }
1740 
1741 #if NBIO > 0
1742 int
1743 ips_getrblstat(struct ips_softc *sc, int flags)
1744 {
1745 	struct ips_ccb *ccb;
1746 	struct ips_cmd *cmd;
1747 
1748 	ccb = ips_ccb_get(sc);
1749 	if (ccb == NULL)
1750 		return (1);
1751 
1752 	ccb->c_flags = XS_CTL_DATA_IN | XS_CTL_POLL | flags;
1753 	ccb->c_done = ips_done_mgmt;
1754 
1755 	cmd = ccb->c_cmdbva;
1756 	cmd->code = IPS_CMD_REBUILDSTATUS;
1757 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1758 	    rblstat));
1759 
1760 	return (ips_cmd(sc, ccb));
1761 }
1762 
1763 int
1764 ips_setstate(struct ips_softc *sc, int chan, int target, int state, int flags)
1765 {
1766 	struct ips_ccb *ccb;
1767 	struct ips_cmd *cmd;
1768 
1769 	ccb = ips_ccb_get(sc);
1770 	if (ccb == NULL)
1771 		return (1);
1772 
1773 	ccb->c_flags = XS_CTL_POLL | flags;
1774 	ccb->c_done = ips_done_mgmt;
1775 
1776 	cmd = ccb->c_cmdbva;
1777 	cmd->code = IPS_CMD_SETSTATE;
1778 	cmd->drive = chan;
1779 	cmd->sgcnt = target;
1780 	cmd->seg4g = state;
1781 
1782 	return (ips_cmd(sc, ccb));
1783 }
1784 
1785 int
1786 ips_rebuild(struct ips_softc *sc, int chan, int target, int nchan,
1787     int ntarget, int flags)
1788 {
1789 	struct ips_ccb *ccb;
1790 	struct ips_cmd *cmd;
1791 
1792 	ccb = ips_ccb_get(sc);
1793 	if (ccb == NULL)
1794 		return (1);
1795 
1796 	ccb->c_flags = XS_CTL_POLL | flags;
1797 	ccb->c_done = ips_done_mgmt;
1798 
1799 	cmd = ccb->c_cmdbva;
1800 	cmd->code = IPS_CMD_REBUILD;
1801 	cmd->drive = chan;
1802 	cmd->sgcnt = target;
1803 	cmd->seccnt = htole16(ntarget << 8 | nchan);
1804 
1805 	return (ips_cmd(sc, ccb));
1806 }
1807 #endif	/* NBIO > 0 */
1808 
/*
 * Post a command's physical address to a Copperhead-class controller.
 * Waits for the command channel semaphore before writing the address.
 */
void
ips_copperhead_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	u_int32_t reg;
	int timeout;

	/* Spin until the semaphore bit clears (100 * 100us = 10ms max). */
	for (timeout = 100; timeout-- > 0; delay(100)) {
		reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC);
		if ((reg & IPS_REG_CCC_SEM) == 0)
			break;
	}
	/* timeout is -1 only when the loop exhausted all iterations;
	 * a break leaves it >= 0. */
	if (timeout < 0) {
		printf("%s: semaphore timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/* Write the command address, then kick off execution. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCSA, ccb->c_cmdbpa);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC,
	    IPS_REG_CCC_START);
}
1829 
/*
 * Enable interrupts on a Copperhead-class controller.
 */
void
ips_copperhead_intren(struct ips_softc *sc)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, IPS_REG_HIS_EN);
}
1835 
1836 int
1837 ips_copperhead_isintr(struct ips_softc *sc)
1838 {
1839 	u_int8_t reg;
1840 
1841 	reg = bus_space_read_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS);
1842 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, reg);
1843 	if (reg != 0xff && (reg & IPS_REG_HIS_SCE))
1844 		return (1);
1845 
1846 	return (0);
1847 }
1848 
/*
 * Fetch the next completion status word from the Copperhead status
 * queue, advancing the driver's tail pointer.  Returns 0xffffffff
 * when the queue is empty.
 */
u_int32_t
ips_copperhead_status(struct ips_softc *sc)
{
	u_int32_t sqhead, sqtail, status;

	sqhead = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH);
	DPRINTF(IPS_D_XFER, ("%s: sqhead 0x%08x, sqtail 0x%08x\n",
	    sc->sc_dev.dv_xname, sqhead, sc->sc_sqtail));

	/* Advance the tail, wrapping at the end of the queue buffer. */
	sqtail = sc->sc_sqtail + sizeof(u_int32_t);
	if (sqtail == sc->sc_sqm.dm_paddr + IPS_SQSZ)
		sqtail = sc->sc_sqm.dm_paddr;
	/* Queue is empty when the advanced tail catches up to the head. */
	if (sqtail == sqhead)
		return (0xffffffff);

	sc->sc_sqtail = sqtail;
	if (++sc->sc_sqidx == IPS_MAXCMDS)
		sc->sc_sqidx = 0;
	/* NOTE: htole32() doubles as le32toh() here (swap on BE only). */
	status = htole32(sc->sc_sqbuf[sc->sc_sqidx]);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT, sqtail);

	return (status);
}
1872 
/*
 * Post a command's physical address to a Morpheus-class controller
 * via the inbound queue port.
 */
void
ips_morpheus_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_IQP, ccb->c_cmdbpa);
}
1878 
1879 void
1880 ips_morpheus_intren(struct ips_softc *sc)
1881 {
1882 	u_int32_t reg;
1883 
1884 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM);
1885 	reg &= ~IPS_REG_OIM_DS;
1886 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM, reg);
1887 }
1888 
1889 int
1890 ips_morpheus_isintr(struct ips_softc *sc)
1891 {
1892 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIS) &
1893 	    IPS_REG_OIS_PEND);
1894 }
1895 
1896 u_int32_t
1897 ips_morpheus_status(struct ips_softc *sc)
1898 {
1899 	u_int32_t reg;
1900 
1901 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OQP);
1902 	DPRINTF(IPS_D_XFER, ("%s: status 0x%08x\n", sc->sc_dev.dv_xname, reg));
1903 
1904 	return (reg);
1905 }
1906 
/*
 * Allocate and initialize an array of n CCBs, carving per-CCB command
 * buffers out of the preallocated command DMA area and creating a
 * data DMA map for each.  Returns NULL on failure.
 */
struct ips_ccb *
ips_ccb_alloc(struct ips_softc *sc, int n)
{
	struct ips_ccb *ccb;
	int i;

	ccb = malloc(n * sizeof(*ccb), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < n; i++) {
		ccb[i].c_sc = sc;
		ccb[i].c_id = i;
		/* Each CCB owns one slot of the command buffer area. */
		ccb[i].c_cmdbva = (char *)sc->sc_cmdbm.dm_vaddr +
		    i * sizeof(struct ips_cmdb);
		ccb[i].c_cmdbpa = sc->sc_cmdbm.dm_paddr +
		    i * sizeof(struct ips_cmdb);
		if (bus_dmamap_create(sc->sc_dmat, IPS_MAXFER, IPS_MAXSGS,
		    IPS_MAXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb[i].c_dmam))
			goto fail;
	}

	return (ccb);
fail:
	/* Destroy only the maps created so far: indices 0 .. i-1. */
	for (; i > 0; i--)
		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
	free(ccb, M_DEVBUF);
	return (NULL);
}
1934 
1935 void
1936 ips_ccb_free(struct ips_softc *sc, struct ips_ccb *ccb, int n)
1937 {
1938 	int i;
1939 
1940 	for (i = 0; i < n; i++)
1941 		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
1942 	free(ccb, M_DEVBUF);
1943 }
1944 
1945 struct ips_ccb *
1946 ips_ccb_get(struct ips_softc *sc)
1947 {
1948 	struct ips_ccb *ccb;
1949 
1950 	mutex_enter(&sc->sc_ccb_mtx);
1951 	if ((ccb = SLIST_FIRST(&sc->sc_ccbq_free)) != NULL) {
1952 		SLIST_REMOVE_HEAD(&sc->sc_ccbq_free, c_link);
1953 		ccb->c_flags = 0;
1954 		ccb->c_xfer = NULL;
1955 		bzero(ccb->c_cmdbva, sizeof(struct ips_cmdb));
1956 	}
1957 	mutex_exit(&sc->sc_ccb_mtx);
1958 
1959 	return (ccb);
1960 }
1961 
/*
 * Return a CCB to the free list.
 */
void
ips_ccb_put(struct ips_softc *sc, struct ips_ccb *ccb)
{
	ccb->c_state = IPS_CCB_FREE;
	mutex_enter(&sc->sc_ccb_mtx);
	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, ccb, c_link);
	mutex_exit(&sc->sc_ccb_mtx);
}
1970 
1971 int
1972 ips_dmamem_alloc(struct dmamem *dm, bus_dma_tag_t tag, bus_size_t size)
1973 {
1974 	int nsegs;
1975 
1976 	dm->dm_tag = tag;
1977 	dm->dm_size = size;
1978 
1979 	if (bus_dmamap_create(tag, size, 1, size, 0,
1980 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map))
1981 		return (1);
1982 	if (bus_dmamem_alloc(tag, size, 0, 0, &dm->dm_seg, 1, &nsegs,
1983 	    BUS_DMA_NOWAIT))
1984 		goto fail1;
1985 	if (bus_dmamem_map(tag, &dm->dm_seg, 1, size, &dm->dm_vaddr,
1986 	    BUS_DMA_NOWAIT))
1987 		goto fail2;
1988 	if (bus_dmamap_load(tag, dm->dm_map, dm->dm_vaddr, size, NULL,
1989 	    BUS_DMA_NOWAIT))
1990 		goto fail3;
1991 
1992 	return (0);
1993 
1994 fail3:
1995 	bus_dmamem_unmap(tag, dm->dm_vaddr, size);
1996 fail2:
1997 	bus_dmamem_free(tag, &dm->dm_seg, 1);
1998 fail1:
1999 	bus_dmamap_destroy(tag, dm->dm_map);
2000 	return (1);
2001 }
2002 
/*
 * Tear down a DMA memory region set up by ips_dmamem_alloc(),
 * releasing resources in reverse order of allocation.
 */
void
ips_dmamem_free(struct dmamem *dm)
{
	bus_dmamap_unload(dm->dm_tag, dm->dm_map);
	bus_dmamem_unmap(dm->dm_tag, dm->dm_vaddr, dm->dm_size);
	bus_dmamem_free(dm->dm_tag, &dm->dm_seg, 1);
	bus_dmamap_destroy(dm->dm_tag, dm->dm_map);
}
2011