1 /*	$OpenBSD: ips.c,v 1.118 2020/02/19 01:31:38 cheloha Exp $	*/
2 
3 /*
4  * Copyright (c) 2006, 2007, 2009 Alexander Yurchenko <grange@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for IBM (Adaptec) ServeRAID controllers.
21  */
22 
23 #include "bio.h"
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/device.h>
28 #include <sys/ioctl.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/sensors.h>
32 #include <sys/timeout.h>
33 #include <sys/queue.h>
34 
35 #include <machine/bus.h>
36 
37 #include <scsi/scsi_all.h>
38 #include <scsi/scsi_disk.h>
39 #include <scsi/scsiconf.h>
40 
41 #include <dev/biovar.h>
42 
43 #include <dev/pci/pcidevs.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 
47 /* Debug levels */
48 #define IPS_D_ERR	0x0001	/* errors */
49 #define IPS_D_INFO	0x0002	/* information */
50 #define IPS_D_XFER	0x0004	/* transfers */
51 
52 #ifdef IPS_DEBUG
53 #define DPRINTF(a, b)	do { if (ips_debug & (a)) printf b; } while (0)
54 int ips_debug = IPS_D_ERR;
55 #else
56 #define DPRINTF(a, b)
57 #endif
58 
59 #define IPS_MAXDRIVES		8
60 #define IPS_MAXCHANS		4
61 #define IPS_MAXTARGETS		16
62 #define IPS_MAXCHUNKS		16
63 #define IPS_MAXCMDS		128
64 
65 #define IPS_MAXSGS		16
66 #define IPS_MAXCDB		12
67 
68 #define IPS_SECSZ		512
69 #define IPS_NVRAMPGSZ		128
70 #define IPS_SQSZ		(IPS_MAXCMDS * sizeof(u_int32_t))
71 
72 #define	IPS_TIMEOUT		60000	/* ms */
73 
74 /* Command codes */
75 #define IPS_CMD_READ		0x02
76 #define IPS_CMD_WRITE		0x03
77 #define IPS_CMD_DCDB		0x04
78 #define IPS_CMD_GETADAPTERINFO	0x05
79 #define IPS_CMD_FLUSH		0x0a
80 #define IPS_CMD_REBUILDSTATUS	0x0c
81 #define IPS_CMD_SETSTATE	0x10
82 #define IPS_CMD_REBUILD		0x16
83 #define IPS_CMD_ERRORTABLE	0x17
84 #define IPS_CMD_GETDRIVEINFO	0x19
85 #define IPS_CMD_RESETCHAN	0x1a
86 #define IPS_CMD_DOWNLOAD	0x20
87 #define IPS_CMD_RWBIOSFW	0x22
88 #define IPS_CMD_READCONF	0x38
89 #define IPS_CMD_GETSUBSYS	0x40
90 #define IPS_CMD_CONFIGSYNC	0x58
91 #define IPS_CMD_READ_SG		0x82
92 #define IPS_CMD_WRITE_SG	0x83
93 #define IPS_CMD_DCDB_SG		0x84
94 #define IPS_CMD_EDCDB		0x95
95 #define IPS_CMD_EDCDB_SG	0x96
96 #define IPS_CMD_RWNVRAMPAGE	0xbc
97 #define IPS_CMD_GETVERINFO	0xc6
98 #define IPS_CMD_FFDC		0xd7
99 #define IPS_CMD_SG		0x80	/* modifier OR'ed into a command code */
100 #define IPS_CMD_RWNVRAM		0xbc
101 
102 /* DCDB attributes */
103 #define IPS_DCDB_DATAIN		0x01	/* data input */
104 #define IPS_DCDB_DATAOUT	0x02	/* data output */
105 #define IPS_DCDB_XFER64K	0x08	/* 64K transfer */
106 #define IPS_DCDB_TIMO10		0x10	/* 10 secs timeout */
107 #define IPS_DCDB_TIMO60		0x20	/* 60 secs timeout */
108 #define IPS_DCDB_TIMO20M	0x30	/* 20 mins timeout */
109 #define IPS_DCDB_NOAUTOREQSEN	0x40	/* no auto request sense */
110 #define IPS_DCDB_DISCON		0x80	/* disconnect allowed */
111 
112 /* Register definitions */
113 #define IPS_REG_HIS		0x08	/* host interrupt status */
114 #define IPS_REG_HIS_SCE			0x01	/* status channel enqueue */
115 #define IPS_REG_HIS_EN			0x80	/* enable interrupts */
116 #define IPS_REG_CCSA		0x10	/* command channel system address */
117 #define IPS_REG_CCC		0x14	/* command channel control */
118 #define IPS_REG_CCC_SEM			0x0008	/* semaphore */
119 #define IPS_REG_CCC_START		0x101a	/* start command */
120 #define IPS_REG_SQH		0x20	/* status queue head */
121 #define IPS_REG_SQT		0x24	/* status queue tail */
122 #define IPS_REG_SQE		0x28	/* status queue end */
123 #define IPS_REG_SQS		0x2c	/* status queue start */
124 
125 #define IPS_REG_OIS		0x30	/* outbound interrupt status */
126 #define IPS_REG_OIS_PEND		0x0008	/* interrupt is pending */
127 #define IPS_REG_OIM		0x34	/* outbound interrupt mask */
128 #define IPS_REG_OIM_DS			0x0008	/* disable interrupts */
129 #define IPS_REG_IQP		0x40	/* inbound queue port */
130 #define IPS_REG_OQP		0x44	/* outbound queue port */
131 
132 /* Status word fields */
133 #define IPS_STAT_ID(x)		(((x) >> 8) & 0xff)	/* command id */
134 #define IPS_STAT_BASIC(x)	(((x) >> 16) & 0xff)	/* basic status */
135 #define IPS_STAT_EXT(x)		(((x) >> 24) & 0xff)	/* ext status */
136 #define IPS_STAT_GSC(x)		((x) & 0x0f)
137 
138 /* Basic status codes */
139 #define IPS_STAT_OK		0x00	/* success */
140 #define IPS_STAT_RECOV		0x01	/* recovered error */
141 #define IPS_STAT_INVOP		0x03	/* invalid opcode */
142 #define IPS_STAT_INVCMD		0x04	/* invalid command block */
143 #define IPS_STAT_INVPARM	0x05	/* invalid parameters block */
144 #define IPS_STAT_BUSY		0x08	/* busy */
145 #define IPS_STAT_CMPLERR	0x0c	/* completed with error */
146 #define IPS_STAT_LDERR		0x0d	/* logical drive error */
147 #define IPS_STAT_TIMO		0x0e	/* timeout */
148 #define IPS_STAT_PDRVERR	0x0f	/* physical drive error */
149 
150 /* Extended status codes */
151 #define IPS_ESTAT_SELTIMO	0xf0	/* select timeout */
152 #define IPS_ESTAT_OURUN		0xf2	/* over/underrun */
153 #define IPS_ESTAT_HOSTRST	0xf7	/* host reset */
154 #define IPS_ESTAT_DEVRST	0xf8	/* device reset */
155 #define IPS_ESTAT_RECOV		0xfc	/* recovered error */
156 #define IPS_ESTAT_CKCOND	0xff	/* check condition */
157 
158 #define IPS_IOSIZE		128	/* max space size to map */
159 
160 /* Command frame */
161 struct ips_cmd {
162 	u_int8_t	code;
163 	u_int8_t	id;
164 	u_int8_t	drive;
165 	u_int8_t	sgcnt;
166 	u_int32_t	lba;
167 	u_int32_t	sgaddr;
168 	u_int16_t	seccnt;
169 	u_int8_t	seg4g;
170 	u_int8_t	esg;
171 	u_int32_t	ccsar;
172 	u_int32_t	cccr;
173 };
174 
175 /* Direct CDB (SCSI pass-through) frame */
176 struct ips_dcdb {
177 	u_int8_t	device;
178 	u_int8_t	attr;
179 	u_int16_t	datalen;
180 	u_int32_t	sgaddr;
181 	u_int8_t	cdblen;
182 	u_int8_t	senselen;
183 	u_int8_t	sgcnt;
184 	u_int8_t	__reserved1;
185 	u_int8_t	cdb[IPS_MAXCDB];
186 	u_int8_t	sense[64];
187 	u_int8_t	status;
188 	u_int8_t	__reserved2[3];
189 };
190 
191 /* Scatter-gather array element */
192 struct ips_sg {
193 	u_int32_t	addr;
194 	u_int32_t	size;
195 };
196 
197 /* Command block */
198 struct ips_cmdb {
199 	struct ips_cmd	cmd;
200 	struct ips_dcdb	dcdb;
201 	struct ips_sg	sg[IPS_MAXSGS];
202 };
203 
204 /* Data frames */
205 struct ips_adapterinfo {
206 	u_int8_t	drivecnt;
207 	u_int8_t	miscflag;
208 	u_int8_t	sltflag;
209 	u_int8_t	bstflag;
210 	u_int8_t	pwrchgcnt;
211 	u_int8_t	wrongaddrcnt;
212 	u_int8_t	unidentcnt;
213 	u_int8_t	nvramdevchgcnt;
214 	u_int8_t	firmware[8];
215 	u_int8_t	bios[8];
216 	u_int32_t	drivesize[IPS_MAXDRIVES];
217 	u_int8_t	cmdcnt;
218 	u_int8_t	maxphysdevs;
219 	u_int16_t	flashrepgmcnt;
220 	u_int8_t	defunctdiskcnt;
221 	u_int8_t	rebuildflag;
222 	u_int8_t	offdrivecnt;
223 	u_int8_t	critdrivecnt;
224 	u_int16_t	confupdcnt;
225 	u_int8_t	blkflag;
226 	u_int8_t	__reserved;
227 	u_int16_t	deaddisk[IPS_MAXCHANS][IPS_MAXTARGETS];
228 };
229 
230 struct ips_driveinfo {
231 	u_int8_t	drivecnt;
232 	u_int8_t	__reserved[3];
233 	struct ips_drive {
234 		u_int8_t	id;
235 		u_int8_t	__reserved;
236 		u_int8_t	raid;
237 		u_int8_t	state;
238 #define IPS_DS_FREE	0x00
239 #define IPS_DS_OFFLINE	0x02
240 #define IPS_DS_ONLINE	0x03
241 #define IPS_DS_DEGRADED	0x04
242 #define IPS_DS_SYS	0x06
243 #define IPS_DS_CRS	0x24
244 
245 		u_int32_t	seccnt;
246 	}		drive[IPS_MAXDRIVES];
247 };
248 
249 struct ips_conf {
250 	u_int8_t	ldcnt;
251 	u_int8_t	day;
252 	u_int8_t	month;
253 	u_int8_t	year;
254 	u_int8_t	initid[4];
255 	u_int8_t	hostid[12];
256 	u_int8_t	time[8];
257 	u_int32_t	useropt;
258 	u_int16_t	userfield;
259 	u_int8_t	rebuildrate;
260 	u_int8_t	__reserved1;
261 
262 	struct ips_hw {
263 		u_int8_t	board[8];
264 		u_int8_t	cpu[8];
265 		u_int8_t	nchantype;
266 		u_int8_t	nhostinttype;
267 		u_int8_t	compression;
268 		u_int8_t	nvramtype;
269 		u_int32_t	nvramsize;
270 	}		hw;
271 
272 	struct ips_ld {
273 		u_int16_t	userfield;
274 		u_int8_t	state;
275 		u_int8_t	raidcacheparam;
276 		u_int8_t	chunkcnt;
277 		u_int8_t	stripesize;
278 		u_int8_t	params;
279 		u_int8_t	__reserved;
280 		u_int32_t	size;
281 
282 		struct ips_chunk {
283 			u_int8_t	channel;
284 			u_int8_t	target;
285 			u_int16_t	__reserved;
286 			u_int32_t	startsec;
287 			u_int32_t	seccnt;
288 		}		chunk[IPS_MAXCHUNKS];
289 	}		ld[IPS_MAXDRIVES];
290 
291 	struct ips_dev {
292 		u_int8_t	initiator;
293 		u_int8_t	params;
294 		u_int8_t	miscflag;
295 		u_int8_t	state;
296 #define IPS_DVS_STANDBY	0x01
297 #define IPS_DVS_REBUILD	0x02
298 #define IPS_DVS_SPARE	0x04
299 #define IPS_DVS_MEMBER	0x08
300 #define IPS_DVS_ONLINE	0x80
301 #define IPS_DVS_READY	(IPS_DVS_STANDBY | IPS_DVS_ONLINE)
302 
303 		u_int32_t	seccnt;
304 		u_int8_t	devid[28];
305 	}		dev[IPS_MAXCHANS][IPS_MAXTARGETS];
306 
307 	u_int8_t	reserved[512];
308 };
309 
310 struct ips_rblstat {
311 	u_int8_t	__unknown[20];
312 	struct {
313 		u_int8_t	__unknown[4];
314 		u_int32_t	total;
315 		u_int32_t	remain;
316 	}		ld[IPS_MAXDRIVES];
317 };
318 
319 struct ips_pg5 {
320 	u_int32_t	signature;
321 	u_int8_t	__reserved1;
322 	u_int8_t	slot;
323 	u_int16_t	type;
324 	u_int8_t	bioshi[4];
325 	u_int8_t	bioslo[4];
326 	u_int16_t	__reserved2;
327 	u_int8_t	__reserved3;
328 	u_int8_t	os;
329 	u_int8_t	driverhi[4];
330 	u_int8_t	driverlo[4];
331 	u_int8_t	__reserved4[100];
332 };
333 
334 struct ips_info {
335 	struct ips_adapterinfo	adapter;
336 	struct ips_driveinfo	drive;
337 	struct ips_conf		conf;
338 	struct ips_rblstat	rblstat;
339 	struct ips_pg5		pg5;
340 };
341 
342 /* Command control block */
343 struct ips_softc;
344 struct ips_ccb {
345 	struct ips_softc *	c_sc;		/* driver softc */
346 	int			c_id;		/* command id */
347 	int			c_flags;	/* SCSI_* flags */
348 	enum {
349 		IPS_CCB_FREE,
350 		IPS_CCB_QUEUED,
351 		IPS_CCB_DONE
352 	}			c_state;	/* command state */
353 
354 	void *			c_cmdbva;	/* command block virt addr */
355 	paddr_t			c_cmdbpa;	/* command block phys addr */
356 	bus_dmamap_t		c_dmam;		/* data buffer DMA map */
357 
358 	struct scsi_xfer *	c_xfer;		/* corresponding SCSI xfer */
359 
360 	u_int8_t		c_stat;		/* status byte copy */
361 	u_int8_t		c_estat;	/* ext status byte copy */
362 	int			c_error;	/* completion error */
363 
364 	void			(*c_done)(struct ips_softc *,	/* cmd done */
365 				    struct ips_ccb *);		/* callback */
366 
367 	SLIST_ENTRY(ips_ccb)	c_link;		/* queue link */
368 };
369 
370 /* CCB queue */
371 SLIST_HEAD(ips_ccbq, ips_ccb);
372 
373 /* DMA-able chunk of memory */
374 struct dmamem {
375 	bus_dma_tag_t		dm_tag;
376 	bus_dmamap_t		dm_map;
377 	bus_dma_segment_t	dm_seg;
378 	bus_size_t		dm_size;
379 	void *			dm_vaddr;
380 #define dm_paddr dm_seg.ds_addr
381 };
382 
383 struct ips_softc {
384 	struct device		sc_dev;
385 
386 	struct scsi_link	sc_scsi_link;
387 	struct scsibus_softc *	sc_scsibus;
388 
389 	struct ips_pt {
390 		struct ips_softc *	pt_sc;
391 		int			pt_chan;
392 
393 		struct scsi_link	pt_link;
394 
395 		int			pt_proctgt;
396 		char			pt_procdev[16];
397 	}			sc_pt[IPS_MAXCHANS];
398 
399 	struct ksensordev	sc_sensordev;
400 	struct ksensor *	sc_sensors;
401 
402 	bus_space_tag_t		sc_iot;
403 	bus_space_handle_t	sc_ioh;
404 	bus_dma_tag_t		sc_dmat;
405 
406 	const struct ips_chipset *sc_chip;
407 
408 	struct ips_info *	sc_info;
409 	struct dmamem		sc_infom;
410 
411 	int			sc_nunits;
412 
413 	struct dmamem		sc_cmdbm;
414 
415 	struct ips_ccb *	sc_ccb;
416 	int			sc_nccbs;
417 	struct ips_ccbq		sc_ccbq_free;
418 	struct mutex		sc_ccb_mtx;
419 	struct scsi_iopool	sc_iopool;
420 
421 	struct dmamem		sc_sqm;
422 	paddr_t			sc_sqtail;
423 	u_int32_t *		sc_sqbuf;
424 	int			sc_sqidx;
425 };
426 
427 int	ips_match(struct device *, void *, void *);
428 void	ips_attach(struct device *, struct device *, void *);
429 
430 void	ips_scsi_cmd(struct scsi_xfer *);
431 void	ips_scsi_pt_cmd(struct scsi_xfer *);
432 int	ips_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
433 
434 #if NBIO > 0
435 int	ips_ioctl(struct device *, u_long, caddr_t);
436 int	ips_ioctl_inq(struct ips_softc *, struct bioc_inq *);
437 int	ips_ioctl_vol(struct ips_softc *, struct bioc_vol *);
438 int	ips_ioctl_disk(struct ips_softc *, struct bioc_disk *);
439 int	ips_ioctl_setstate(struct ips_softc *, struct bioc_setstate *);
440 #endif
441 
442 #ifndef SMALL_KERNEL
443 void	ips_sensors(void *);
444 #endif
445 
446 int	ips_load_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
447 void	ips_start_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
448 
449 int	ips_cmd(struct ips_softc *, struct ips_ccb *);
450 int	ips_poll(struct ips_softc *, struct ips_ccb *);
451 void	ips_done(struct ips_softc *, struct ips_ccb *);
452 void	ips_done_xs(struct ips_softc *, struct ips_ccb *);
453 void	ips_done_pt(struct ips_softc *, struct ips_ccb *);
454 void	ips_done_mgmt(struct ips_softc *, struct ips_ccb *);
455 int	ips_error(struct ips_softc *, struct ips_ccb *);
456 int	ips_error_xs(struct ips_softc *, struct ips_ccb *);
457 int	ips_intr(void *);
458 void	ips_timeout(void *);
459 
460 int	ips_getadapterinfo(struct ips_softc *, int);
461 int	ips_getdriveinfo(struct ips_softc *, int);
462 int	ips_getconf(struct ips_softc *, int);
463 int	ips_getpg5(struct ips_softc *, int);
464 
465 #if NBIO > 0
466 int	ips_getrblstat(struct ips_softc *, int);
467 int	ips_setstate(struct ips_softc *, int, int, int, int);
468 int	ips_rebuild(struct ips_softc *, int, int, int, int, int);
469 #endif
470 
471 void	ips_copperhead_exec(struct ips_softc *, struct ips_ccb *);
472 void	ips_copperhead_intren(struct ips_softc *);
473 int	ips_copperhead_isintr(struct ips_softc *);
474 u_int32_t ips_copperhead_status(struct ips_softc *);
475 
476 void	ips_morpheus_exec(struct ips_softc *, struct ips_ccb *);
477 void	ips_morpheus_intren(struct ips_softc *);
478 int	ips_morpheus_isintr(struct ips_softc *);
479 u_int32_t ips_morpheus_status(struct ips_softc *);
480 
481 struct ips_ccb *ips_ccb_alloc(struct ips_softc *, int);
482 void	ips_ccb_free(struct ips_softc *, struct ips_ccb *, int);
483 void	*ips_ccb_get(void *);
484 void	ips_ccb_put(void *, void *);
485 
486 int	ips_dmamem_alloc(struct dmamem *, bus_dma_tag_t, bus_size_t);
487 void	ips_dmamem_free(struct dmamem *);
488 
489 struct cfattach ips_ca = {
490 	sizeof(struct ips_softc),
491 	ips_match,
492 	ips_attach
493 };
494 
495 struct cfdriver ips_cd = {
496 	NULL, "ips", DV_DULL
497 };
498 
499 static struct scsi_adapter ips_switch = {
500 	ips_scsi_cmd, NULL, NULL, NULL, ips_scsi_ioctl
501 };
502 
503 static struct scsi_adapter ips_pt_switch = {
504 	ips_scsi_pt_cmd, NULL, NULL, NULL, NULL
505 };
506 
507 static const struct pci_matchid ips_ids[] = {
508 	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID },
509 	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID2 },
510 	{ PCI_VENDOR_ADP2,	PCI_PRODUCT_ADP2_SERVERAID }
511 };
512 
513 static const struct ips_chipset {
514 	enum {
515 		IPS_CHIP_COPPERHEAD = 0,
516 		IPS_CHIP_MORPHEUS
517 	}		ic_id;
518 
519 	int		ic_bar;
520 
521 	void		(*ic_exec)(struct ips_softc *, struct ips_ccb *);
522 	void		(*ic_intren)(struct ips_softc *);
523 	int		(*ic_isintr)(struct ips_softc *);
524 	u_int32_t	(*ic_status)(struct ips_softc *);
525 } ips_chips[] = {
526 	{
527 		IPS_CHIP_COPPERHEAD,
528 		0x14,
529 		ips_copperhead_exec,
530 		ips_copperhead_intren,
531 		ips_copperhead_isintr,
532 		ips_copperhead_status
533 	},
534 	{
535 		IPS_CHIP_MORPHEUS,
536 		0x10,
537 		ips_morpheus_exec,
538 		ips_morpheus_intren,
539 		ips_morpheus_isintr,
540 		ips_morpheus_status
541 	}
542 };
543 
544 #define ips_exec(s, c)	(s)->sc_chip->ic_exec((s), (c))
545 #define ips_intren(s)	(s)->sc_chip->ic_intren((s))
546 #define ips_isintr(s)	(s)->sc_chip->ic_isintr((s))
547 #define ips_status(s)	(s)->sc_chip->ic_status((s))
548 
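/* Adapter model names, indexed by the type field read from NVRAM page 5 */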
549 static const char *ips_names[] = {
550 	NULL,
551 	NULL,
552 	"II",
553 	"onboard",
554 	"onboard",
555 	"3H",
556 	"3L",
557 	"4H",
558 	"4M",
559 	"4L",
560 	"4Mx",
561 	"4Lx",
562 	"5i",
563 	"5i",
564 	"6M",
565 	"6i",
566 	"7t",
567 	"7k",
568 	"7M"
569 };
570 
571 int
572 ips_match(struct device *parent, void *match, void *aux)
573 {
574 	return (pci_matchbyid(aux, ips_ids,
575 	    sizeof(ips_ids) / sizeof(ips_ids[0])));
576 }
577 
578 void
579 ips_attach(struct device *parent, struct device *self, void *aux)
580 {
581 	struct ips_softc *sc = (struct ips_softc *)self;
582 	struct pci_attach_args *pa = aux;
583 	struct ips_ccb ccb0;
584 	struct scsibus_attach_args saa;
585 	struct ips_adapterinfo *ai;
586 	struct ips_driveinfo *di;
587 	struct ips_pg5 *pg5;
588 	pcireg_t maptype;
589 	bus_size_t iosize;
590 	pci_intr_handle_t ih;
591 	const char *intrstr;
592 	int type, i;
593 
594 	sc->sc_dmat = pa->pa_dmat;
595 
596 	/* Identify chipset */
597 	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_IBM_SERVERAID)
598 		sc->sc_chip = &ips_chips[IPS_CHIP_COPPERHEAD];
599 	else
600 		sc->sc_chip = &ips_chips[IPS_CHIP_MORPHEUS];
601 
602 	/* Map registers */
603 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_chip->ic_bar);
604 	if (pci_mapreg_map(pa, sc->sc_chip->ic_bar, maptype, 0, &sc->sc_iot,
605 	    &sc->sc_ioh, NULL, &iosize, IPS_IOSIZE)) {
606 		printf(": can't map regs\n");
607 		return;
608 	}
609 
610 	/* Allocate command buffer */
611 	if (ips_dmamem_alloc(&sc->sc_cmdbm, sc->sc_dmat,
612 	    IPS_MAXCMDS * sizeof(struct ips_cmdb))) {
613 		printf(": can't alloc cmd buffer\n");
614 		goto fail1;
615 	}
616 
617 	/* Allocate info buffer */
618 	if (ips_dmamem_alloc(&sc->sc_infom, sc->sc_dmat,
619 	    sizeof(struct ips_info))) {
620 		printf(": can't alloc info buffer\n");
621 		goto fail2;
622 	}
623 	sc->sc_info = sc->sc_infom.dm_vaddr;
624 	ai = &sc->sc_info->adapter;
625 	di = &sc->sc_info->drive;
626 	pg5 = &sc->sc_info->pg5;
627 
628 	/* Allocate status queue for the Copperhead chipset */
629 	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD) {
630 		if (ips_dmamem_alloc(&sc->sc_sqm, sc->sc_dmat, IPS_SQSZ)) {
631 			printf(": can't alloc status queue\n");
632 			goto fail3;
633 		}
634 		sc->sc_sqtail = sc->sc_sqm.dm_paddr;
635 		sc->sc_sqbuf = sc->sc_sqm.dm_vaddr;
636 		sc->sc_sqidx = 0;
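		/*
		 * Program the status queue registers: SQS/SQE bound the
		 * ring, SQH is where the controller posts the next status
		 * word and SQT is where the driver last consumed one.
		 */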
637 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQS,
638 		    sc->sc_sqm.dm_paddr);
639 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQE,
640 		    sc->sc_sqm.dm_paddr + IPS_SQSZ);
641 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH,
642 		    sc->sc_sqm.dm_paddr + sizeof(u_int32_t));
643 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT,
644 		    sc->sc_sqm.dm_paddr);
645 	}
646 
647 	/* Bootstrap CCB queue */
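	/*
	 * A single CCB on the stack is enough to issue the management
	 * commands below; the real CCB array is allocated once the
	 * adapter has reported how many commands it supports.
	 */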
648 	sc->sc_nccbs = 1;
649 	sc->sc_ccb = &ccb0;
650 	bzero(&ccb0, sizeof(ccb0));
651 	ccb0.c_cmdbva = sc->sc_cmdbm.dm_vaddr;
652 	ccb0.c_cmdbpa = sc->sc_cmdbm.dm_paddr;
653 	SLIST_INIT(&sc->sc_ccbq_free);
654 	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, &ccb0, c_link);
655 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
656 	scsi_iopool_init(&sc->sc_iopool, sc, ips_ccb_get, ips_ccb_put);
657 
658 	/* Get adapter info */
659 	if (ips_getadapterinfo(sc, SCSI_NOSLEEP)) {
660 		printf(": can't get adapter info\n");
661 		goto fail4;
662 	}
663 
664 	/* Get logical drives info */
665 	if (ips_getdriveinfo(sc, SCSI_NOSLEEP)) {
666 		printf(": can't get ld info\n");
667 		goto fail4;
668 	}
669 	sc->sc_nunits = di->drivecnt;
670 
671 	/* Get configuration */
672 	if (ips_getconf(sc, SCSI_NOSLEEP)) {
673 		printf(": can't get config\n");
674 		goto fail4;
675 	}
676 
677 	/* Read NVRAM page 5 for additional info */
678 	(void)ips_getpg5(sc, SCSI_NOSLEEP);
679 
680 	/* Initialize CCB queue */
681 	sc->sc_nccbs = ai->cmdcnt;
682 	if ((sc->sc_ccb = ips_ccb_alloc(sc, sc->sc_nccbs)) == NULL) {
683 		printf(": can't alloc ccb queue\n");
684 		goto fail4;
685 	}
686 	SLIST_INIT(&sc->sc_ccbq_free);
687 	for (i = 0; i < sc->sc_nccbs; i++)
688 		SLIST_INSERT_HEAD(&sc->sc_ccbq_free,
689 		    &sc->sc_ccb[i], c_link);
690 
691 	/* Install interrupt handler */
692 	if (pci_intr_map(pa, &ih)) {
693 		printf(": can't map interrupt\n");
694 		goto fail5;
695 	}
696 	intrstr = pci_intr_string(pa->pa_pc, ih);
697 	if (pci_intr_establish(pa->pa_pc, ih, IPL_BIO, ips_intr, sc,
698 	    sc->sc_dev.dv_xname) == NULL) {
699 		printf(": can't establish interrupt");
700 		if (intrstr != NULL)
701 			printf(" at %s", intrstr);
702 		printf("\n");
703 		goto fail5;
704 	}
705 	printf(": %s\n", intrstr);
706 
707 	/* Display adapter info */
708 	printf("%s: ServeRAID", sc->sc_dev.dv_xname);
709 	type = letoh16(pg5->type);
710 	if (type < sizeof(ips_names) / sizeof(ips_names[0]) && ips_names[type])
711 		printf(" %s", ips_names[type]);
712 	printf(", FW %c%c%c%c%c%c%c", ai->firmware[0], ai->firmware[1],
713 	    ai->firmware[2], ai->firmware[3], ai->firmware[4], ai->firmware[5],
714 	    ai->firmware[6]);
715 	printf(", BIOS %c%c%c%c%c%c%c", ai->bios[0], ai->bios[1], ai->bios[2],
716 	    ai->bios[3], ai->bios[4], ai->bios[5], ai->bios[6]);
717 	printf(", %d cmds, %d LD%s", sc->sc_nccbs, sc->sc_nunits,
718 	    (sc->sc_nunits == 1 ? "" : "s"));
719 	printf("\n");
720 
721 	/* Attach SCSI bus */
722 	if (sc->sc_nunits > 0)
723 		sc->sc_scsi_link.openings = sc->sc_nccbs / sc->sc_nunits;
724 	sc->sc_scsi_link.adapter_target = sc->sc_nunits;
725 	sc->sc_scsi_link.adapter_buswidth = sc->sc_nunits;
726 	sc->sc_scsi_link.adapter = &ips_switch;
727 	sc->sc_scsi_link.adapter_softc = sc;
728 	sc->sc_scsi_link.pool = &sc->sc_iopool;
729 
730 	bzero(&saa, sizeof(saa));
731 	saa.saa_sc_link = &sc->sc_scsi_link;
732 	sc->sc_scsibus = (struct scsibus_softc *)config_found(self, &saa,
733 	    scsiprint);
734 
735 	/* Attach a SCSI pass-through bus for each channel */
736 	bzero(&saa, sizeof(saa));
737 	for (i = 0; i < IPS_MAXCHANS; i++) {
738 		struct ips_pt *pt;
739 		struct scsi_link *link;
740 		int target, lastarget;
741 
742 		pt = &sc->sc_pt[i];
743 		pt->pt_sc = sc;
744 		pt->pt_chan = i;
745 		pt->pt_proctgt = -1;
746 
747 		/* Check if channel has any devices besides disks */
748 		for (target = 0, lastarget = -1; target < IPS_MAXTARGETS;
749 		    target++) {
750 			struct ips_dev *idev;
751 			int type;
752 
753 			idev = &sc->sc_info->conf.dev[i][target];
754 			type = idev->params & SID_TYPE;
755 			if (idev->state && type != T_DIRECT) {
756 				lastarget = target;
757 				if (type == T_PROCESSOR ||
758 				    type == T_ENCLOSURE)
759 					/* remember processor/enclosure target */
760 					pt->pt_proctgt = target;
761 			}
762 		}
763 		if (lastarget == -1)
764 			continue;
765 
766 		link = &pt->pt_link;
767 		link->openings = 1;
768 		link->adapter_target = IPS_MAXTARGETS;
769 		link->adapter_buswidth = lastarget + 1;
770 		link->adapter = &ips_pt_switch;
771 		link->adapter_softc = pt;
772 		link->pool = &sc->sc_iopool;
773 
774 		saa.saa_sc_link = link;
775 		config_found(self, &saa, scsiprint);
776 	}
777 
778 	/* Enable interrupts */
779 	ips_intren(sc);
780 
781 #if NBIO > 0
782 	/* Install ioctl handler */
783 	if (bio_register(&sc->sc_dev, ips_ioctl))
784 		printf("%s: no ioctl support\n", sc->sc_dev.dv_xname);
785 #endif
786 
787 #ifndef SMALL_KERNEL
788 	/* Add sensors */
789 	if ((sc->sc_sensors = mallocarray(sc->sc_nunits, sizeof(struct ksensor),
790 	    M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
791 		printf(": can't alloc sensors\n");
792 		return;
793 	}
794 	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
795 	    sizeof(sc->sc_sensordev.xname));
796 	for (i = 0; i < sc->sc_nunits; i++) {
797 		struct device *dev;
798 
799 		sc->sc_sensors[i].type = SENSOR_DRIVE;
800 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
801 		dev = scsi_get_link(sc->sc_scsibus, i, 0)->device_softc;
802 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
803 		    sizeof(sc->sc_sensors[i].desc));
804 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
805 	}
806 	if (sensor_task_register(sc, ips_sensors, 10) == NULL) {
807 		printf(": no sensors support\n");
808 		free(sc->sc_sensors, M_DEVBUF,
809 		    sc->sc_nunits * sizeof(struct ksensor));
810 		return;
811 	}
812 	sensordev_install(&sc->sc_sensordev);
813 #endif	/* !SMALL_KERNEL */
814 
815 	return;
816 fail5:
817 	ips_ccb_free(sc, sc->sc_ccb, sc->sc_nccbs);
818 fail4:
819 	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD)
820 		ips_dmamem_free(&sc->sc_sqm);
821 fail3:
822 	ips_dmamem_free(&sc->sc_infom);
823 fail2:
824 	ips_dmamem_free(&sc->sc_cmdbm);
825 fail1:
826 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
827 }
828 
829 void
830 ips_scsi_cmd(struct scsi_xfer *xs)
831 {
832 	struct scsi_link *link = xs->sc_link;
833 	struct ips_softc *sc = link->adapter_softc;
834 	struct ips_driveinfo *di = &sc->sc_info->drive;
835 	struct ips_drive *drive;
836 	struct scsi_inquiry_data inq;
837 	struct scsi_read_cap_data rcd;
838 	struct scsi_sense_data sd;
839 	struct scsi_rw *rw;
840 	struct scsi_rw_big *rwb;
841 	struct ips_ccb *ccb = xs->io;
842 	struct ips_cmd *cmd;
843 	int target = link->target;
844 	u_int32_t blkno, blkcnt;
845 	int code;
846 
847 	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_cmd: xs %p, target %d, "
848 	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, target,
849 	    xs->cmd->opcode, xs->flags));
850 
851 	if (target >= sc->sc_nunits || link->lun != 0) {
852 		DPRINTF(IPS_D_INFO, ("%s: ips_scsi_cmd: invalid params "
853 		    "target %d, lun %d\n", sc->sc_dev.dv_xname,
854 		    target, link->lun));
855 		xs->error = XS_DRIVER_STUFFUP;
856 		scsi_done(xs);
857 		return;
858 	}
859 
860 	drive = &di->drive[target];
861 	xs->error = XS_NOERROR;
862 
863 	/* Fake SCSI commands */
864 	switch (xs->cmd->opcode) {
865 	case READ_BIG:
866 	case READ_COMMAND:
867 	case WRITE_BIG:
868 	case WRITE_COMMAND:
869 		if (xs->cmdlen == sizeof(struct scsi_rw)) {
870 			rw = (void *)xs->cmd;
871 			blkno = _3btol(rw->addr) &
872 			    (SRW_TOPADDR << 16 | 0xffff);
873 			blkcnt = rw->length ? rw->length : 0x100;
874 		} else {
875 			rwb = (void *)xs->cmd;
876 			blkno = _4btol(rwb->addr);
877 			blkcnt = _2btol(rwb->length);
878 		}
879 
880 		if (blkno >= letoh32(drive->seccnt) || blkno + blkcnt >
881 		    letoh32(drive->seccnt)) {
882 			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: invalid params "
883 			    "blkno %u, blkcnt %u\n", sc->sc_dev.dv_xname,
884 			    blkno, blkcnt));
885 			xs->error = XS_DRIVER_STUFFUP;
886 			break;
887 		}
888 
889 		if (xs->flags & SCSI_DATA_IN)
890 			code = IPS_CMD_READ;
891 		else
892 			code = IPS_CMD_WRITE;
893 
894 		ccb = xs->io;
895 
896 		cmd = ccb->c_cmdbva;
897 		cmd->code = code;
898 		cmd->drive = target;
899 		cmd->lba = htole32(blkno);
900 		cmd->seccnt = htole16(blkcnt);
901 
902 		if (ips_load_xs(sc, ccb, xs)) {
903 			DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: ips_load_xs "
904 			    "failed\n", sc->sc_dev.dv_xname));
905 			xs->error = XS_DRIVER_STUFFUP;
906 			scsi_done(xs);
907 			return;
908 		}
909 
910 		if (cmd->sgcnt > 0)
911 			cmd->code |= IPS_CMD_SG;
912 
913 		ccb->c_done = ips_done_xs;
914 		ips_start_xs(sc, ccb, xs);
915 		return;
916 	case INQUIRY:
917 		bzero(&inq, sizeof(inq));
918 		inq.device = T_DIRECT;
919 		inq.version = 2;
920 		inq.response_format = 2;
921 		inq.additional_length = 32;
922 		inq.flags |= SID_CmdQue;
923 		strlcpy(inq.vendor, "IBM", sizeof(inq.vendor));
924 		snprintf(inq.product, sizeof(inq.product),
925 		    "LD%d RAID%d", target, drive->raid);
926 		strlcpy(inq.revision, "1.0", sizeof(inq.revision));
927 		memcpy(xs->data, &inq, MIN(xs->datalen, sizeof(inq)));
928 		break;
929 	case READ_CAPACITY:
930 		bzero(&rcd, sizeof(rcd));
931 		_lto4b(letoh32(drive->seccnt) - 1, rcd.addr);
932 		_lto4b(IPS_SECSZ, rcd.length);
933 		memcpy(xs->data, &rcd, MIN(xs->datalen, sizeof(rcd)));
934 		break;
935 	case REQUEST_SENSE:
936 		bzero(&sd, sizeof(sd));
937 		sd.error_code = SSD_ERRCODE_CURRENT;
938 		sd.flags = SKEY_NO_SENSE;
939 		memcpy(xs->data, &sd, MIN(xs->datalen, sizeof(sd)));
940 		break;
941 	case SYNCHRONIZE_CACHE:
942 		cmd = ccb->c_cmdbva;
943 		cmd->code = IPS_CMD_FLUSH;
944 
945 		ccb->c_done = ips_done_xs;
946 		ips_start_xs(sc, ccb, xs);
947 		return;
948 	case PREVENT_ALLOW:
949 	case START_STOP:
950 	case TEST_UNIT_READY:
951 		break;
952 	default:
953 		DPRINTF(IPS_D_INFO, ("%s: unsupported scsi command 0x%02x\n",
954 		    sc->sc_dev.dv_xname, xs->cmd->opcode));
955 		xs->error = XS_DRIVER_STUFFUP;
956 	}
957 
958 	scsi_done(xs);
959 }
960 
961 void
962 ips_scsi_pt_cmd(struct scsi_xfer *xs)
963 {
964 	struct scsi_link *link = xs->sc_link;
965 	struct ips_pt *pt = link->adapter_softc;
966 	struct ips_softc *sc = pt->pt_sc;
967 	struct device *dev = link->device_softc;
968 	struct ips_ccb *ccb = xs->io;
969 	struct ips_cmdb *cmdb;
970 	struct ips_cmd *cmd;
971 	struct ips_dcdb *dcdb;
972 	int chan = pt->pt_chan, target = link->target;
973 
974 	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_pt_cmd: xs %p, chan %d, target %d, "
975 	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, chan,
976 	    target, xs->cmd->opcode, xs->flags));
977 
978 	if (pt->pt_procdev[0] == '\0' && target == pt->pt_proctgt && dev)
979 		strlcpy(pt->pt_procdev, dev->dv_xname, sizeof(pt->pt_procdev));
980 
981 	if (xs->cmdlen > IPS_MAXCDB) {
982 		DPRINTF(IPS_D_ERR, ("%s: cmdlen %d too big\n",
983 		    sc->sc_dev.dv_xname, xs->cmdlen));
984 
985 		bzero(&xs->sense, sizeof(xs->sense));
986 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
987 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
988 		xs->sense.add_sense_code = 0x20; /* illcmd, 0x24 illfield */
989 		xs->error = XS_SENSE;
990 		scsi_done(xs);
991 		return;
992 	}
993 
994 	xs->error = XS_NOERROR;
995 
996 	cmdb = ccb->c_cmdbva;
997 	cmd = &cmdb->cmd;
998 	dcdb = &cmdb->dcdb;
999 
1000 	cmd->code = IPS_CMD_DCDB;
1001 
1002 	dcdb->device = (chan << 4) | target;
1003 	if (xs->flags & SCSI_DATA_IN)
1004 		dcdb->attr |= IPS_DCDB_DATAIN;
1005 	if (xs->flags & SCSI_DATA_OUT)
1006 		dcdb->attr |= IPS_DCDB_DATAOUT;
1007 
1008 	/*
1009 	 * Adjust the timeout value to what the controller supports. Make
1010 	 * sure our timeout fires after the controller has given up.
1011 	 */
1012 	if (xs->timeout <= 10000) {
1013 		dcdb->attr |= IPS_DCDB_TIMO10;
1014 		xs->timeout = 11000;
1015 	} else if (xs->timeout <= 60000) {
1016 		dcdb->attr |= IPS_DCDB_TIMO60;
1017 		xs->timeout = 61000;
1018 	} else {
1019 		dcdb->attr |= IPS_DCDB_TIMO20M;
1020 		xs->timeout = 20 * 60000 + 1000;
1021 	}
1022 
1023 	dcdb->attr |= IPS_DCDB_DISCON;
1024 	dcdb->datalen = htole16(xs->datalen);
1025 	dcdb->cdblen = xs->cmdlen;
1026 	dcdb->senselen = MIN(sizeof(xs->sense), sizeof(dcdb->sense));
1027 	memcpy(dcdb->cdb, xs->cmd, xs->cmdlen);
1028 
1029 	if (ips_load_xs(sc, ccb, xs)) {
1030 		DPRINTF(IPS_D_ERR, ("%s: ips_scsi_pt_cmd: ips_load_xs "
1031 		    "failed\n", sc->sc_dev.dv_xname));
1032 		xs->error = XS_DRIVER_STUFFUP;
1033 		scsi_done(xs);
1034 		return;
1035 	}
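	/*
	 * ips_load_xs() set up the command's SG fields for the data buffer.
	 * Move them into the DCDB and point the command's sgaddr at the
	 * DCDB block within the command buffer instead.
	 */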
1036 	if (cmd->sgcnt > 0)
1037 		cmd->code |= IPS_CMD_SG;
1038 	dcdb->sgaddr = cmd->sgaddr;
1039 	dcdb->sgcnt = cmd->sgcnt;
1040 	cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb, dcdb));
1041 	cmd->sgcnt = 0;
1042 
1043 	ccb->c_done = ips_done_pt;
1044 	ips_start_xs(sc, ccb, xs);
1045 }
1046 
1047 int
1048 ips_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1049 {
1050 #if NBIO > 0
1051 	return (ips_ioctl(link->adapter_softc, cmd, addr));
1052 #else
1053 	return (ENOTTY);
1054 #endif
1055 }
1056 
1057 #if NBIO > 0
1058 int
1059 ips_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1060 {
1061 	struct ips_softc *sc = (struct ips_softc *)dev;
1062 
1063 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl: cmd %lu\n",
1064 	    sc->sc_dev.dv_xname, cmd));
1065 
1066 	switch (cmd) {
1067 	case BIOCINQ:
1068 		return (ips_ioctl_inq(sc, (struct bioc_inq *)addr));
1069 	case BIOCVOL:
1070 		return (ips_ioctl_vol(sc, (struct bioc_vol *)addr));
1071 	case BIOCDISK:
1072 		return (ips_ioctl_disk(sc, (struct bioc_disk *)addr));
1073 	case BIOCSETSTATE:
1074 		return (ips_ioctl_setstate(sc, (struct bioc_setstate *)addr));
1075 	default:
1076 		return (ENOTTY);
1077 	}
1078 }
1079 
1080 int
1081 ips_ioctl_inq(struct ips_softc *sc, struct bioc_inq *bi)
1082 {
1083 	struct ips_conf *conf = &sc->sc_info->conf;
1084 	int i;
1085 
1086 	strlcpy(bi->bi_dev, sc->sc_dev.dv_xname, sizeof(bi->bi_dev));
1087 	bi->bi_novol = sc->sc_nunits;
1088 	for (i = 0, bi->bi_nodisk = 0; i < sc->sc_nunits; i++)
1089 		bi->bi_nodisk += conf->ld[i].chunkcnt;
1090 
1091 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_inq: novol %d, nodisk %d\n",
1092 	    bi->bi_dev, bi->bi_novol, bi->bi_nodisk));
1093 
1094 	return (0);
1095 }
1096 
1097 int
1098 ips_ioctl_vol(struct ips_softc *sc, struct bioc_vol *bv)
1099 {
1100 	struct ips_driveinfo *di = &sc->sc_info->drive;
1101 	struct ips_conf *conf = &sc->sc_info->conf;
1102 	struct ips_rblstat *rblstat = &sc->sc_info->rblstat;
1103 	struct ips_ld *ld;
1104 	int vid = bv->bv_volid;
1105 	struct device *dv;
1106 	int error, rebuild = 0;
1107 	u_int32_t total = 0, done = 0;
1108 
1109 	if (vid >= sc->sc_nunits)
1110 		return (EINVAL);
1111 	if ((error = ips_getconf(sc, 0)))
1112 		return (error);
1113 	ld = &conf->ld[vid];
1114 
1115 	switch (ld->state) {
1116 	case IPS_DS_ONLINE:
1117 		bv->bv_status = BIOC_SVONLINE;
1118 		break;
1119 	case IPS_DS_DEGRADED:
1120 		bv->bv_status = BIOC_SVDEGRADED;
1121 		rebuild++;
1122 		break;
1123 	case IPS_DS_OFFLINE:
1124 		bv->bv_status = BIOC_SVOFFLINE;
1125 		break;
1126 	default:
1127 		bv->bv_status = BIOC_SVINVALID;
1128 	}
1129 
1130 	if (rebuild && ips_getrblstat(sc, 0) == 0) {
1131 		total = letoh32(rblstat->ld[vid].total);
1132 		done = total - letoh32(rblstat->ld[vid].remain);
1133 		if (total && total > done) {
1134 			bv->bv_status = BIOC_SVREBUILD;
1135 			bv->bv_percent = 100 * done / total;
1136 		}
1137 	}
1138 
1139 	bv->bv_size = (uint64_t)letoh32(ld->size) * IPS_SECSZ;
1140 	bv->bv_level = di->drive[vid].raid;
1141 	bv->bv_nodisk = ld->chunkcnt;
1142 
1143 	/* Associate all unused and spare drives with first volume */
1144 	if (vid == 0) {
1145 		struct ips_dev *dev;
1146 		int chan, target;
1147 
1148 		for (chan = 0; chan < IPS_MAXCHANS; chan++)
1149 			for (target = 0; target < IPS_MAXTARGETS; target++) {
1150 				dev = &conf->dev[chan][target];
1151 				if (dev->state && !(dev->state &
1152 				    IPS_DVS_MEMBER) &&
1153 				    (dev->params & SID_TYPE) == T_DIRECT)
1154 					bv->bv_nodisk++;
1155 			}
1156 	}
1157 
1158 	dv = scsi_get_link(sc->sc_scsibus, vid, 0)->device_softc;
1159 	strlcpy(bv->bv_dev, dv->dv_xname, sizeof(bv->bv_dev));
1160 	strlcpy(bv->bv_vendor, "IBM", sizeof(bv->bv_vendor));
1161 
1162 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_vol: vid %d, state 0x%02x, "
1163 	    "total %u, done %u, size %llu, level %d, nodisk %d, dev %s\n",
1164 	    sc->sc_dev.dv_xname, vid, ld->state, total, done, bv->bv_size,
1165 	    bv->bv_level, bv->bv_nodisk, bv->bv_dev));
1166 
1167 	return (0);
1168 }
1169 
1170 int
1171 ips_ioctl_disk(struct ips_softc *sc, struct bioc_disk *bd)
1172 {
1173 	struct ips_conf *conf = &sc->sc_info->conf;
1174 	struct ips_ld *ld;
1175 	struct ips_chunk *chunk;
1176 	struct ips_dev *dev;
1177 	int vid = bd->bd_volid, did = bd->bd_diskid;
1178 	int chan, target, error, i;
1179 
1180 	if (vid >= sc->sc_nunits)
1181 		return (EINVAL);
1182 	if ((error = ips_getconf(sc, 0)))
1183 		return (error);
1184 	ld = &conf->ld[vid];
1185 
1186 	if (did >= ld->chunkcnt) {
1187 		/* Probably unused or spare drives */
1188 		if (vid != 0)
1189 			return (EINVAL);
1190 
1191 		i = ld->chunkcnt;
1192 		for (chan = 0; chan < IPS_MAXCHANS; chan++)
1193 			for (target = 0; target < IPS_MAXTARGETS; target++) {
1194 				dev = &conf->dev[chan][target];
1195 				if (dev->state && !(dev->state &
1196 				    IPS_DVS_MEMBER) &&
1197 				    (dev->params & SID_TYPE) == T_DIRECT)
1198 					if (i++ == did)
1199 						goto out;
1200 			}
1201 	} else {
1202 		chunk = &ld->chunk[did];
1203 		chan = chunk->channel;
1204 		target = chunk->target;
1205 	}
1206 
1207 out:
1208 	if (chan >= IPS_MAXCHANS || target >= IPS_MAXTARGETS)
1209 		return (EINVAL);
1210 	dev = &conf->dev[chan][target];
1211 
1212 	bd->bd_channel = chan;
1213 	bd->bd_target = target;
1214 	bd->bd_lun = 0;
1215 	bd->bd_size = (uint64_t)letoh32(dev->seccnt) * IPS_SECSZ;
1216 
1217 	bzero(bd->bd_vendor, sizeof(bd->bd_vendor));
1218 	memcpy(bd->bd_vendor, dev->devid, MIN(sizeof(bd->bd_vendor),
1219 	    sizeof(dev->devid)));
1220 	strlcpy(bd->bd_procdev, sc->sc_pt[chan].pt_procdev,
1221 	    sizeof(bd->bd_procdev));
1222 
1223 	if (dev->state & IPS_DVS_READY) {
1224 		bd->bd_status = BIOC_SDUNUSED;
1225 		if (dev->state & IPS_DVS_MEMBER)
1226 			bd->bd_status = BIOC_SDONLINE;
1227 		if (dev->state & IPS_DVS_SPARE)
1228 			bd->bd_status = BIOC_SDHOTSPARE;
1229 		if (dev->state & IPS_DVS_REBUILD)
1230 			bd->bd_status = BIOC_SDREBUILD;
1231 	} else {
1232 		bd->bd_status = BIOC_SDOFFLINE;
1233 	}
1234 
1235 	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_disk: vid %d, did %d, channel %d, "
1236 	    "target %d, size %llu, state 0x%02x\n", sc->sc_dev.dv_xname,
1237 	    vid, did, bd->bd_channel, bd->bd_target, bd->bd_size, dev->state));
1238 
1239 	return (0);
1240 }
1241 
1242 int
1243 ips_ioctl_setstate(struct ips_softc *sc, struct bioc_setstate *bs)
1244 {
1245 	struct ips_conf *conf = &sc->sc_info->conf;
1246 	struct ips_dev *dev;
1247 	int state, error;
1248 
1249 	if (bs->bs_channel >= IPS_MAXCHANS || bs->bs_target >= IPS_MAXTARGETS)
1250 		return (EINVAL);
1251 	if ((error = ips_getconf(sc, 0)))
1252 		return (error);
1253 	dev = &conf->dev[bs->bs_channel][bs->bs_target];
1254 	state = dev->state;
1255 
1256 	switch (bs->bs_status) {
1257 	case BIOC_SSONLINE:
1258 		state |= IPS_DVS_READY;
1259 		break;
1260 	case BIOC_SSOFFLINE:
1261 		state &= ~IPS_DVS_READY;
1262 		break;
1263 	case BIOC_SSHOTSPARE:
1264 		state |= IPS_DVS_SPARE;
1265 		break;
1266 	case BIOC_SSREBUILD:
1267 		return (ips_rebuild(sc, bs->bs_channel, bs->bs_target,
1268 		    bs->bs_channel, bs->bs_target, 0));
1269 	default:
1270 		return (EINVAL);
1271 	}
1272 
1273 	return (ips_setstate(sc, bs->bs_channel, bs->bs_target, state, 0));
1274 }
1275 #endif	/* NBIO > 0 */
1276 
1277 #ifndef SMALL_KERNEL
1278 void
1279 ips_sensors(void *arg)
1280 {
1281 	struct ips_softc *sc = arg;
1282 	struct ips_conf *conf = &sc->sc_info->conf;
1283 	struct ips_ld *ld;
1284 	int i;
1285 
1286 	/* ips_sensors() runs from a work queue and is thus allowed to sleep */
1287 	if (ips_getconf(sc, 0)) {
1288 		DPRINTF(IPS_D_ERR, ("%s: ips_sensors: ips_getconf failed\n",
1289 		    sc->sc_dev.dv_xname));
1290 
1291 		for (i = 0; i < sc->sc_nunits; i++) {
1292 			sc->sc_sensors[i].value = 0;
1293 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1294 		}
1295 		return;
1296 	}
1297 
1298 	DPRINTF(IPS_D_INFO, ("%s: ips_sensors:", sc->sc_dev.dv_xname));
1299 	for (i = 0; i < sc->sc_nunits; i++) {
1300 		ld = &conf->ld[i];
1301 		DPRINTF(IPS_D_INFO, (" ld%d.state 0x%02x", i, ld->state));
1302 		switch (ld->state) {
1303 		case IPS_DS_ONLINE:
1304 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
1305 			sc->sc_sensors[i].status = SENSOR_S_OK;
1306 			break;
1307 		case IPS_DS_DEGRADED:
1308 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
1309 			sc->sc_sensors[i].status = SENSOR_S_WARN;
1310 			break;
1311 		case IPS_DS_OFFLINE:
1312 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
1313 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
1314 			break;
1315 		default:
1316 			sc->sc_sensors[i].value = 0;
1317 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1318 		}
1319 	}
1320 	DPRINTF(IPS_D_INFO, ("\n"));
1321 }
1322 #endif	/* !SMALL_KERNEL */
1323 
1324 int
1325 ips_load_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
1326 {
1327 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1328 	struct ips_cmd *cmd = &cmdb->cmd;
1329 	struct ips_sg *sg = cmdb->sg;
1330 	int nsegs, i;
1331 
1332 	if (xs->datalen == 0)
1333 		return (0);
1334 
1335 	/* Map data buffer into DMA segments */
1336 	if (bus_dmamap_load(sc->sc_dmat, ccb->c_dmam, xs->data, xs->datalen,
1337 	    NULL, (xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : 0)))
1338 		return (1);
1339 	bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0, ccb->c_dmam->dm_mapsize,
1340 	    xs->flags & SCSI_DATA_IN ? BUS_DMASYNC_PREREAD :
1341 	    BUS_DMASYNC_PREWRITE);
1342 
1343 	if ((nsegs = ccb->c_dmam->dm_nsegs) > IPS_MAXSGS)
1344 		return (1);
1345 
1346 	if (nsegs > 1) {
1347 		cmd->sgcnt = nsegs;
1348 		cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb,
1349 		    sg));
1350 
1351 		/* Fill in scatter-gather array */
1352 		for (i = 0; i < nsegs; i++) {
1353 			sg[i].addr = htole32(ccb->c_dmam->dm_segs[i].ds_addr);
1354 			sg[i].size = htole32(ccb->c_dmam->dm_segs[i].ds_len);
1355 		}
1356 	} else {
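		/* A single segment needs no SG list; point the command at it. */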
1357 		cmd->sgcnt = 0;
1358 		cmd->sgaddr = htole32(ccb->c_dmam->dm_segs[0].ds_addr);
1359 	}
1360 
1361 	return (0);
1362 }
1363 
1364 void
1365 ips_start_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
1366 {
1367 	int ispoll = xs->flags & SCSI_POLL;
1368 	ccb->c_flags = xs->flags;
1369 	ccb->c_xfer = xs;
1370 
1371 	if (!ispoll) {
1372 		timeout_set(&xs->stimeout, ips_timeout, ccb);
1373 		timeout_add_msec(&xs->stimeout, xs->timeout);
1374 	}
1375 
1376 	/*
1377 	 * The return value is not checked here because ips_cmd() completes
1378 	 * the scsi_xfer on any failure and the SCSI layer handles the
1379 	 * resulting errors.
1380 	 */
1381 	ips_cmd(sc, ccb);
1382 }
1383 
1384 int
1385 ips_cmd(struct ips_softc *sc, struct ips_ccb *ccb)
1386 {
1387 	struct ips_cmd *cmd = ccb->c_cmdbva;
1388 	int s, error = 0;
1389 
1390 	DPRINTF(IPS_D_XFER, ("%s: ips_cmd: id 0x%02x, flags 0x%x, xs %p, "
1391 	    "code 0x%02x, drive %d, sgcnt %d, lba %d, sgaddr 0x%08x, "
1392 	    "seccnt %d\n", sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags,
1393 	    ccb->c_xfer, cmd->code, cmd->drive, cmd->sgcnt, letoh32(cmd->lba),
1394 	    letoh32(cmd->sgaddr), letoh16(cmd->seccnt)));
1395 
1396 	cmd->id = ccb->c_id;
1397 
1398 	/* Post command to controller and optionally wait for completion */
1399 	s = splbio();
1400 	ips_exec(sc, ccb);
1401 	ccb->c_state = IPS_CCB_QUEUED;
1402 	if (ccb->c_flags & SCSI_POLL)
1403 		error = ips_poll(sc, ccb);
1404 	splx(s);
1405 
1406 	return (error);
1407 }
1408 
1409 int
1410 ips_poll(struct ips_softc *sc, struct ips_ccb *ccb)
1411 {
1412 	int error, msecs, usecs;
1413 
1414 	splassert(IPL_BIO);
1415 
1416 	if (ccb->c_flags & SCSI_NOSLEEP) {
1417 		/* busy-wait */
1418 		DPRINTF(IPS_D_XFER, ("%s: ips_poll: busy-wait\n",
1419 		    sc->sc_dev.dv_xname));
1420 
1421 		for (usecs = 1000000; usecs > 0; usecs -= 100) {
1422 			delay(100);
1423 			ips_intr(sc);
1424 			if (ccb->c_state == IPS_CCB_DONE)
1425 				break;
1426 		}
1427 	} else {
1428 		/* sleep */
1429 		msecs = ccb->c_xfer ? ccb->c_xfer->timeout : IPS_TIMEOUT;
1430 
1431 		DPRINTF(IPS_D_XFER, ("%s: ips_poll: sleep %d ms\n",
1432 		    sc->sc_dev.dv_xname, msecs));
1433 		tsleep_nsec(ccb, PRIBIO + 1, "ipscmd", MSEC_TO_NSEC(msecs));
1434 	}
1435 	DPRINTF(IPS_D_XFER, ("%s: ips_poll: state %d\n", sc->sc_dev.dv_xname,
1436 	    ccb->c_state));
1437 
1438 	if (ccb->c_state != IPS_CCB_DONE)
1439 		/*
1440 		 * Command never completed. Fake hardware status byte
1441 		 * to indicate timeout.
1442 		 */
1443 		ccb->c_stat = IPS_STAT_TIMO;
1444 
1445 	ips_done(sc, ccb);
1446 	error = ccb->c_error;
1447 
1448 	return (error);
1449 }
1450 
1451 void
1452 ips_done(struct ips_softc *sc, struct ips_ccb *ccb)
1453 {
1454 	splassert(IPL_BIO);
1455 
1456 	DPRINTF(IPS_D_XFER, ("%s: ips_done: id 0x%02x, flags 0x%x, xs %p\n",
1457 	    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags, ccb->c_xfer));
1458 
1459 	ccb->c_error = ips_error(sc, ccb);
1460 	ccb->c_done(sc, ccb);
1461 }
1462 
1463 void
1464 ips_done_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1465 {
1466 	struct scsi_xfer *xs = ccb->c_xfer;
1467 
1468 	if (!(xs->flags & SCSI_POLL))
1469 		timeout_del(&xs->stimeout);
1470 
1471 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1472 		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
1473 		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
1474 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1475 		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
1476 	}
1477 
1478 	xs->resid = 0;
1479 	xs->error = ips_error_xs(sc, ccb);
1480 	scsi_done(xs);
1481 }
1482 
1483 void
1484 ips_done_pt(struct ips_softc *sc, struct ips_ccb *ccb)
1485 {
1486 	struct scsi_xfer *xs = ccb->c_xfer;
1487 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1488 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1489 	int done = letoh16(dcdb->datalen);
1490 
1491 	if (!(xs->flags & SCSI_POLL))
1492 		timeout_del(&xs->stimeout);
1493 
1494 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1495 		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
1496 		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
1497 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1498 		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
1499 	}
1500 
1501 	if (done && done < xs->datalen)
1502 		xs->resid = xs->datalen - done;
1503 	else
1504 		xs->resid = 0;
1505 	xs->error = ips_error_xs(sc, ccb);
1506 	xs->status = dcdb->status;
1507 
1508 	if (xs->error == XS_SENSE)
1509 		memcpy(&xs->sense, dcdb->sense, MIN(sizeof(xs->sense),
1510 		    sizeof(dcdb->sense)));
1511 
1512 	if (xs->cmd->opcode == INQUIRY && xs->error == XS_NOERROR) {
1513 		int type = ((struct scsi_inquiry_data *)xs->data)->device &
1514 		    SID_TYPE;
1515 
1516 		if (type == T_DIRECT)
1517 			/* mask physical drives */
1518 			xs->error = XS_DRIVER_STUFFUP;
1519 	}
1520 
1521 	scsi_done(xs);
1522 }
1523 
1524 void
1525 ips_done_mgmt(struct ips_softc *sc, struct ips_ccb *ccb)
1526 {
1527 	if (ccb->c_flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
1528 		bus_dmamap_sync(sc->sc_dmat, sc->sc_infom.dm_map, 0,
1529 		    sc->sc_infom.dm_map->dm_mapsize,
1530 		    ccb->c_flags & SCSI_DATA_IN ? BUS_DMASYNC_POSTREAD :
1531 		    BUS_DMASYNC_POSTWRITE);
1532 	scsi_io_put(&sc->sc_iopool, ccb);
1533 }
1534 
1535 int
1536 ips_error(struct ips_softc *sc, struct ips_ccb *ccb)
1537 {
1538 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1539 	struct ips_cmd *cmd = &cmdb->cmd;
1540 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1541 	struct scsi_xfer *xs = ccb->c_xfer;
1542 	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);
1543 
1544 	if (gsc == IPS_STAT_OK)
1545 		return (0);
1546 
1547 	DPRINTF(IPS_D_ERR, ("%s: ips_error: stat 0x%02x, estat 0x%02x, "
1548 	    "cmd code 0x%02x, drive %d, sgcnt %d, lba %u, seccnt %d",
1549 	    sc->sc_dev.dv_xname, ccb->c_stat, ccb->c_estat, cmd->code,
1550 	    cmd->drive, cmd->sgcnt, letoh32(cmd->lba), letoh16(cmd->seccnt)));
1551 	if (cmd->code == IPS_CMD_DCDB || cmd->code == IPS_CMD_DCDB_SG) {
1552 		int i;
1553 
1554 		DPRINTF(IPS_D_ERR, (", dcdb device 0x%02x, attr 0x%02x, "
1555 		    "datalen %d, sgcnt %d, status 0x%02x",
1556 		    dcdb->device, dcdb->attr, letoh16(dcdb->datalen),
1557 		    dcdb->sgcnt, dcdb->status));
1558 
1559 		DPRINTF(IPS_D_ERR, (", cdb"));
1560 		for (i = 0; i < dcdb->cdblen; i++)
1561 			DPRINTF(IPS_D_ERR, (" %x", dcdb->cdb[i]));
1562 		if (ccb->c_estat == IPS_ESTAT_CKCOND) {
1563 			DPRINTF(IPS_D_ERR, (", sense"));
1564 			for (i = 0; i < dcdb->senselen; i++)
1565 				DPRINTF(IPS_D_ERR, (" %x", dcdb->sense[i]));
1566 		}
1567 	}
1568 	DPRINTF(IPS_D_ERR, ("\n"));
1569 
1570 	switch (gsc) {
1571 	case IPS_STAT_RECOV:
1572 		return (0);
1573 	case IPS_STAT_INVOP:
1574 	case IPS_STAT_INVCMD:
1575 	case IPS_STAT_INVPARM:
1576 		return (EINVAL);
1577 	case IPS_STAT_BUSY:
1578 		return (EBUSY);
1579 	case IPS_STAT_TIMO:
1580 		return (ETIMEDOUT);
1581 	case IPS_STAT_PDRVERR:
1582 		switch (ccb->c_estat) {
1583 		case IPS_ESTAT_SELTIMO:
1584 			return (ENODEV);
1585 		case IPS_ESTAT_OURUN:
1586 			if (xs && letoh16(dcdb->datalen) < xs->datalen)
1587 				/* underrun */
1588 				return (0);
1589 			break;
1590 		case IPS_ESTAT_RECOV:
1591 			return (0);
1592 		}
1593 		break;
1594 	}
1595 
1596 	return (EIO);
1597 }
1598 
1599 int
1600 ips_error_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1601 {
1602 	struct ips_cmdb *cmdb = ccb->c_cmdbva;
1603 	struct ips_dcdb *dcdb = &cmdb->dcdb;
1604 	struct scsi_xfer *xs = ccb->c_xfer;
1605 	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);
1606 
1607 	/* Map hardware error codes to SCSI ones */
1608 	switch (gsc) {
1609 	case IPS_STAT_OK:
1610 	case IPS_STAT_RECOV:
1611 		return (XS_NOERROR);
1612 	case IPS_STAT_BUSY:
1613 		return (XS_BUSY);
1614 	case IPS_STAT_TIMO:
1615 		return (XS_TIMEOUT);
1616 	case IPS_STAT_PDRVERR:
1617 		switch (ccb->c_estat) {
1618 		case IPS_ESTAT_SELTIMO:
1619 			return (XS_SELTIMEOUT);
1620 		case IPS_ESTAT_OURUN:
1621 			if (xs && letoh16(dcdb->datalen) < xs->datalen)
1622 				/* underrun */
1623 				return (XS_NOERROR);
1624 			break;
1625 		case IPS_ESTAT_HOSTRST:
1626 		case IPS_ESTAT_DEVRST:
1627 			return (XS_RESET);
1628 		case IPS_ESTAT_RECOV:
1629 			return (XS_NOERROR);
1630 		case IPS_ESTAT_CKCOND:
1631 			return (XS_SENSE);
1632 		}
1633 		break;
1634 	}
1635 
1636 	return (XS_DRIVER_STUFFUP);
1637 }
1638 
1639 int
1640 ips_intr(void *arg)
1641 {
1642 	struct ips_softc *sc = arg;
1643 	struct ips_ccb *ccb;
1644 	u_int32_t status;
1645 	int id;
1646 
1647 	DPRINTF(IPS_D_XFER, ("%s: ips_intr", sc->sc_dev.dv_xname));
1648 	if (!ips_isintr(sc)) {
1649 		DPRINTF(IPS_D_XFER, (": not ours\n"));
1650 		return (0);
1651 	}
1652 	DPRINTF(IPS_D_XFER, ("\n"));
1653 
1654 	/* Process completed commands */
1655 	while ((status = ips_status(sc)) != 0xffffffff) {
1656 		DPRINTF(IPS_D_XFER, ("%s: ips_intr: status 0x%08x\n",
1657 		    sc->sc_dev.dv_xname, status));
1658 
1659 		id = IPS_STAT_ID(status);
1660 		if (id >= sc->sc_nccbs) {
1661 			DPRINTF(IPS_D_ERR, ("%s: ips_intr: invalid id %d\n",
1662 			    sc->sc_dev.dv_xname, id));
1663 			continue;
1664 		}
1665 
1666 		ccb = &sc->sc_ccb[id];
1667 		if (ccb->c_state != IPS_CCB_QUEUED) {
1668 			DPRINTF(IPS_D_ERR, ("%s: ips_intr: cmd 0x%02x not "
1669 			    "queued, state %d, status 0x%08x\n",
1670 			    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_state,
1671 			    status));
1672 			continue;
1673 		}
1674 
1675 		ccb->c_state = IPS_CCB_DONE;
1676 		ccb->c_stat = IPS_STAT_BASIC(status);
1677 		ccb->c_estat = IPS_STAT_EXT(status);
1678 
1679 		if (ccb->c_flags & SCSI_POLL) {
1680 			wakeup(ccb);
1681 		} else {
1682 			ips_done(sc, ccb);
1683 		}
1684 	}
1685 
1686 	return (1);
1687 }
1688 
1689 void
1690 ips_timeout(void *arg)
1691 {
1692 	struct ips_ccb *ccb = arg;
1693 	struct ips_softc *sc = ccb->c_sc;
1694 	struct scsi_xfer *xs = ccb->c_xfer;
1695 	int s;
1696 
1697 	s = splbio();
1698 	if (xs)
1699 		sc_print_addr(xs->sc_link);
1700 	else
1701 		printf("%s: ", sc->sc_dev.dv_xname);
1702 	printf("timeout\n");
1703 
1704 	/*
1705 	 * Command never completed. Fake hardware status byte
1706 	 * to indicate timeout.
1707 	 * XXX: need to remove command from controller.
1708 	 */
1709 	ccb->c_stat = IPS_STAT_TIMO;
1710 	ips_done(sc, ccb);
1711 	splx(s);
1712 }
1713 
1714 int
1715 ips_getadapterinfo(struct ips_softc *sc, int flags)
1716 {
1717 	struct ips_ccb *ccb;
1718 	struct ips_cmd *cmd;
1719 
1720 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1721 	if (ccb == NULL)
1722 		return (1);
1723 
1724 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1725 	ccb->c_done = ips_done_mgmt;
1726 
1727 	cmd = ccb->c_cmdbva;
1728 	cmd->code = IPS_CMD_GETADAPTERINFO;
1729 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1730 	    adapter));
1731 
1732 	return (ips_cmd(sc, ccb));
1733 }
1734 
1735 int
1736 ips_getdriveinfo(struct ips_softc *sc, int flags)
1737 {
1738 	struct ips_ccb *ccb;
1739 	struct ips_cmd *cmd;
1740 
1741 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1742 	if (ccb == NULL)
1743 		return (1);
1744 
1745 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1746 	ccb->c_done = ips_done_mgmt;
1747 
1748 	cmd = ccb->c_cmdbva;
1749 	cmd->code = IPS_CMD_GETDRIVEINFO;
1750 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1751 	    drive));
1752 
1753 	return (ips_cmd(sc, ccb));
1754 }
1755 
1756 int
1757 ips_getconf(struct ips_softc *sc, int flags)
1758 {
1759 	struct ips_ccb *ccb;
1760 	struct ips_cmd *cmd;
1761 
1762 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1763 	if (ccb == NULL)
1764 		return (1);
1765 
1766 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1767 	ccb->c_done = ips_done_mgmt;
1768 
1769 	cmd = ccb->c_cmdbva;
1770 	cmd->code = IPS_CMD_READCONF;
1771 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1772 	    conf));
1773 
1774 	return (ips_cmd(sc, ccb));
1775 }
1776 
1777 int
1778 ips_getpg5(struct ips_softc *sc, int flags)
1779 {
1780 	struct ips_ccb *ccb;
1781 	struct ips_cmd *cmd;
1782 
1783 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1784 	if (ccb == NULL)
1785 		return (1);
1786 
1787 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1788 	ccb->c_done = ips_done_mgmt;
1789 
1790 	cmd = ccb->c_cmdbva;
1791 	cmd->code = IPS_CMD_RWNVRAM;
1792 	cmd->drive = 5;
1793 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1794 	    pg5));
1795 
1796 	return (ips_cmd(sc, ccb));
1797 }
1798 
1799 #if NBIO > 0
1800 int
1801 ips_getrblstat(struct ips_softc *sc, int flags)
1802 {
1803 	struct ips_ccb *ccb;
1804 	struct ips_cmd *cmd;
1805 
1806 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1807 	if (ccb == NULL)
1808 		return (1);
1809 
1810 	ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1811 	ccb->c_done = ips_done_mgmt;
1812 
1813 	cmd = ccb->c_cmdbva;
1814 	cmd->code = IPS_CMD_REBUILDSTATUS;
1815 	cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1816 	    rblstat));
1817 
1818 	return (ips_cmd(sc, ccb));
1819 }
1820 
1821 int
1822 ips_setstate(struct ips_softc *sc, int chan, int target, int state, int flags)
1823 {
1824 	struct ips_ccb *ccb;
1825 	struct ips_cmd *cmd;
1826 
1827 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1828 	if (ccb == NULL)
1829 		return (1);
1830 
1831 	ccb->c_flags = SCSI_POLL | flags;
1832 	ccb->c_done = ips_done_mgmt;
1833 
1834 	cmd = ccb->c_cmdbva;
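	/*
	 * The set state command carries its arguments in otherwise unused
	 * command fields: drive holds the channel, sgcnt the target and
	 * seg4g the new device state.
	 */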
1835 	cmd->code = IPS_CMD_SETSTATE;
1836 	cmd->drive = chan;
1837 	cmd->sgcnt = target;
1838 	cmd->seg4g = state;
1839 
1840 	return (ips_cmd(sc, ccb));
1841 }
1842 
1843 int
1844 ips_rebuild(struct ips_softc *sc, int chan, int target, int nchan,
1845     int ntarget, int flags)
1846 {
1847 	struct ips_ccb *ccb;
1848 	struct ips_cmd *cmd;
1849 
1850 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1851 	if (ccb == NULL)
1852 		return (1);
1853 
1854 	ccb->c_flags = SCSI_POLL | flags;
1855 	ccb->c_done = ips_done_mgmt;
1856 
1857 	cmd = ccb->c_cmdbva;
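	/*
	 * The rebuild command likewise reuses command fields: drive and
	 * sgcnt carry chan and target, while seccnt packs ntarget and
	 * nchan.
	 */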
1858 	cmd->code = IPS_CMD_REBUILD;
1859 	cmd->drive = chan;
1860 	cmd->sgcnt = target;
1861 	cmd->seccnt = htole16(ntarget << 8 | nchan);
1862 
1863 	return (ips_cmd(sc, ccb));
1864 }
1865 #endif	/* NBIO > 0 */
1866 
1867 void
1868 ips_copperhead_exec(struct ips_softc *sc, struct ips_ccb *ccb)
1869 {
1870 	u_int32_t reg;
1871 	int timeout;
1872 
1873 	for (timeout = 100; timeout-- > 0; delay(100)) {
1874 		reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC);
1875 		if ((reg & IPS_REG_CCC_SEM) == 0)
1876 			break;
1877 	}
1878 	if (timeout < 0) {
1879 		printf("%s: semaphore timeout\n", sc->sc_dev.dv_xname);
1880 		return;
1881 	}
1882 
1883 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCSA, ccb->c_cmdbpa);
1884 	bus_space_write_2(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC,
1885 	    IPS_REG_CCC_START);
1886 }
1887 
1888 void
1889 ips_copperhead_intren(struct ips_softc *sc)
1890 {
1891 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, IPS_REG_HIS_EN);
1892 }
1893 
1894 int
1895 ips_copperhead_isintr(struct ips_softc *sc)
1896 {
1897 	u_int8_t reg;
1898 
1899 	reg = bus_space_read_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS);
1900 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, reg);
1901 	if (reg != 0xff && (reg & IPS_REG_HIS_SCE))
1902 		return (1);
1903 
1904 	return (0);
1905 }
1906 
1907 u_int32_t
1908 ips_copperhead_status(struct ips_softc *sc)
1909 {
1910 	u_int32_t sqhead, sqtail, status;
1911 
1912 	sqhead = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH);
1913 	DPRINTF(IPS_D_XFER, ("%s: sqhead 0x%08x, sqtail 0x%08x\n",
1914 	    sc->sc_dev.dv_xname, sqhead, sc->sc_sqtail));
1915 
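	/*
	 * Advance our copy of the tail pointer, wrapping at the end of the
	 * queue.  If it catches up with the controller's head pointer there
	 * is nothing new and 0xffffffff terminates the caller's loop.
	 */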
1916 	sqtail = sc->sc_sqtail + sizeof(u_int32_t);
1917 	if (sqtail == sc->sc_sqm.dm_paddr + IPS_SQSZ)
1918 		sqtail = sc->sc_sqm.dm_paddr;
1919 	if (sqtail == sqhead)
1920 		return (0xffffffff);
1921 
1922 	sc->sc_sqtail = sqtail;
1923 	if (++sc->sc_sqidx == IPS_MAXCMDS)
1924 		sc->sc_sqidx = 0;
1925 	status = letoh32(sc->sc_sqbuf[sc->sc_sqidx]);
1926 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT, sqtail);
1927 
1928 	return (status);
1929 }
1930 
1931 void
1932 ips_morpheus_exec(struct ips_softc *sc, struct ips_ccb *ccb)
1933 {
1934 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_IQP, ccb->c_cmdbpa);
1935 }
1936 
1937 void
1938 ips_morpheus_intren(struct ips_softc *sc)
1939 {
1940 	u_int32_t reg;
1941 
1942 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM);
1943 	reg &= ~IPS_REG_OIM_DS;
1944 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM, reg);
1945 }
1946 
1947 int
1948 ips_morpheus_isintr(struct ips_softc *sc)
1949 {
1950 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIS) &
1951 	    IPS_REG_OIS_PEND);
1952 }
1953 
1954 u_int32_t
1955 ips_morpheus_status(struct ips_softc *sc)
1956 {
1957 	u_int32_t reg;
1958 
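	/*
	 * Reading OQP dequeues the next completion status; an empty queue
	 * reads back as 0xffffffff.
	 */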
1959 	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OQP);
1960 	DPRINTF(IPS_D_XFER, ("%s: status 0x%08x\n", sc->sc_dev.dv_xname, reg));
1961 
1962 	return (reg);
1963 }
1964 
1965 struct ips_ccb *
1966 ips_ccb_alloc(struct ips_softc *sc, int n)
1967 {
1968 	struct ips_ccb *ccb;
1969 	int i;
1970 
1971 	if ((ccb = mallocarray(n, sizeof(*ccb), M_DEVBUF,
1972 	    M_NOWAIT | M_ZERO)) == NULL)
1973 		return (NULL);
1974 
1975 	for (i = 0; i < n; i++) {
1976 		ccb[i].c_sc = sc;
1977 		ccb[i].c_id = i;
1978 		ccb[i].c_cmdbva = (char *)sc->sc_cmdbm.dm_vaddr +
1979 		    i * sizeof(struct ips_cmdb);
1980 		ccb[i].c_cmdbpa = sc->sc_cmdbm.dm_paddr +
1981 		    i * sizeof(struct ips_cmdb);
1982 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, IPS_MAXSGS,
1983 		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1984 		    &ccb[i].c_dmam))
1985 			goto fail;
1986 	}
1987 
1988 	return (ccb);
1989 fail:
1990 	for (; i > 0; i--)
1991 		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
1992 	free(ccb, M_DEVBUF, n * sizeof(*ccb));
1993 	return (NULL);
1994 }
1995 
1996 void
1997 ips_ccb_free(struct ips_softc *sc, struct ips_ccb *ccb, int n)
1998 {
1999 	int i;
2000 
2001 	for (i = 0; i < n; i++)
2002 		bus_dmamap_destroy(sc->sc_dmat, ccb[i].c_dmam);
2003 	free(ccb, M_DEVBUF, n * sizeof(*ccb));
2004 }
2005 
2006 void *
2007 ips_ccb_get(void *xsc)
2008 {
2009 	struct ips_softc *sc = xsc;
2010 	struct ips_ccb *ccb;
2011 
2012 	mtx_enter(&sc->sc_ccb_mtx);
2013 	if ((ccb = SLIST_FIRST(&sc->sc_ccbq_free)) != NULL) {
2014 		SLIST_REMOVE_HEAD(&sc->sc_ccbq_free, c_link);
2015 		ccb->c_flags = 0;
2016 		ccb->c_xfer = NULL;
2017 		bzero(ccb->c_cmdbva, sizeof(struct ips_cmdb));
2018 	}
2019 	mtx_leave(&sc->sc_ccb_mtx);
2020 
2021 	return (ccb);
2022 }
2023 
2024 void
2025 ips_ccb_put(void *xsc, void *xccb)
2026 {
2027 	struct ips_softc *sc = xsc;
2028 	struct ips_ccb *ccb = xccb;
2029 
2030 	ccb->c_state = IPS_CCB_FREE;
2031 	mtx_enter(&sc->sc_ccb_mtx);
2032 	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, ccb, c_link);
2033 	mtx_leave(&sc->sc_ccb_mtx);
2034 }
2035 
2036 int
2037 ips_dmamem_alloc(struct dmamem *dm, bus_dma_tag_t tag, bus_size_t size)
2038 {
2039 	int nsegs;
2040 
2041 	dm->dm_tag = tag;
2042 	dm->dm_size = size;
2043 
2044 	if (bus_dmamap_create(tag, size, 1, size, 0,
2045 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map))
2046 		return (1);
2047 	if (bus_dmamem_alloc(tag, size, 0, 0, &dm->dm_seg, 1, &nsegs,
2048 	    BUS_DMA_NOWAIT))
2049 		goto fail1;
2050 	if (bus_dmamem_map(tag, &dm->dm_seg, 1, size, (caddr_t *)&dm->dm_vaddr,
2051 	    BUS_DMA_NOWAIT))
2052 		goto fail2;
2053 	if (bus_dmamap_load(tag, dm->dm_map, dm->dm_vaddr, size, NULL,
2054 	    BUS_DMA_NOWAIT))
2055 		goto fail3;
2056 
2057 	return (0);
2058 
2059 fail3:
2060 	bus_dmamem_unmap(tag, dm->dm_vaddr, size);
2061 fail2:
2062 	bus_dmamem_free(tag, &dm->dm_seg, 1);
2063 fail1:
2064 	bus_dmamap_destroy(tag, dm->dm_map);
2065 	return (1);
2066 }
2067 
2068 void
2069 ips_dmamem_free(struct dmamem *dm)
2070 {
2071 	bus_dmamap_unload(dm->dm_tag, dm->dm_map);
2072 	bus_dmamem_unmap(dm->dm_tag, dm->dm_vaddr, dm->dm_size);
2073 	bus_dmamem_free(dm->dm_tag, &dm->dm_seg, 1);
2074 	bus_dmamap_destroy(dm->dm_tag, dm->dm_map);
2075 }
2076