1 /* $OpenBSD: ips.c,v 1.93 2009/03/23 17:40:56 grange Exp $ */ 2 3 /* 4 * Copyright (c) 2006, 2007, 2009 Alexander Yurchenko <grange@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /* 20 * IBM (Adaptec) ServeRAID controllers driver. 21 */ 22 23 #include "bio.h" 24 25 #include <sys/param.h> 26 #include <sys/systm.h> 27 #include <sys/buf.h> 28 #include <sys/device.h> 29 #include <sys/ioctl.h> 30 #include <sys/kernel.h> 31 #include <sys/malloc.h> 32 #include <sys/proc.h> 33 #include <sys/sensors.h> 34 #include <sys/timeout.h> 35 #include <sys/queue.h> 36 37 #include <machine/bus.h> 38 39 #include <scsi/scsi_all.h> 40 #include <scsi/scsi_disk.h> 41 #include <scsi/scsiconf.h> 42 43 #include <dev/biovar.h> 44 45 #include <dev/pci/pcidevs.h> 46 #include <dev/pci/pcireg.h> 47 #include <dev/pci/pcivar.h> 48 49 /* Debug levels */ 50 #define IPS_D_ERR 0x0001 /* errors */ 51 #define IPS_D_INFO 0x0002 /* information */ 52 #define IPS_D_XFER 0x0004 /* transfers */ 53 54 #ifdef IPS_DEBUG 55 #define DPRINTF(a, b) do { if (ips_debug & (a)) printf b; } while (0) 56 int ips_debug = IPS_D_ERR; 57 #else 58 #define DPRINTF(a, b) 59 #endif 60 61 #define IPS_MAXDRIVES 8 62 #define IPS_MAXCHANS 4 63 #define IPS_MAXTARGETS 16 64 #define IPS_MAXCHUNKS 16 65 #define IPS_MAXCMDS 128 66 67 #define IPS_MAXFER (64 * 1024) 68 #define IPS_MAXSGS 16 69 #define IPS_MAXCDB 12 70 71 #define IPS_SECSZ 512 72 #define IPS_NVRAMPGSZ 128 73 #define IPS_SQSZ (IPS_MAXCMDS * sizeof(u_int32_t)) 74 75 #define IPS_TIMEOUT 60000 /* ms */ 76 77 /* Command codes */ 78 #define IPS_CMD_READ 0x02 79 #define IPS_CMD_WRITE 0x03 80 #define IPS_CMD_DCDB 0x04 81 #define IPS_CMD_GETADAPTERINFO 0x05 82 #define IPS_CMD_FLUSH 0x0a 83 #define IPS_CMD_REBUILDSTATUS 0x0c 84 #define IPS_CMD_SETSTATE 0x10 85 #define IPS_CMD_REBUILD 0x16 86 #define IPS_CMD_ERRORTABLE 0x17 87 #define IPS_CMD_GETDRIVEINFO 0x19 88 #define IPS_CMD_RESETCHAN 0x1a 89 #define IPS_CMD_DOWNLOAD 0x20 90 #define IPS_CMD_RWBIOSFW 0x22 91 #define IPS_CMD_READCONF 0x38 92 #define IPS_CMD_GETSUBSYS 0x40 93 #define IPS_CMD_CONFIGSYNC 0x58 94 #define IPS_CMD_READ_SG 0x82 95 #define IPS_CMD_WRITE_SG 0x83 96 #define IPS_CMD_DCDB_SG 0x84 97 #define IPS_CMD_EDCDB 0x95 98 #define IPS_CMD_EDCDB_SG 0x96 99 #define IPS_CMD_RWNVRAMPAGE 0xbc 100 #define IPS_CMD_GETVERINFO 0xc6 101 #define IPS_CMD_FFDC 0xd7 102 #define IPS_CMD_SG 0x80 103 #define IPS_CMD_RWNVRAM 0xbc 104 105 /* DCDB attributes */ 106 #define IPS_DCDB_DATAIN 0x01 /* data input */ 107 #define IPS_DCDB_DATAOUT 0x02 /* data output */ 108 #define IPS_DCDB_XFER64K 0x08 /* 64K transfer */ 109 #define IPS_DCDB_TIMO10 0x10 /* 10 secs timeout */ 110 #define IPS_DCDB_TIMO60 0x20 /* 60 secs timeout */ 111 #define IPS_DCDB_TIMO20M 0x30 /* 20 mins timeout */ 112 #define IPS_DCDB_NOAUTOREQSEN 0x40 /* no auto request sense */ 113 #define IPS_DCDB_DISCON 0x80 /* disconnect allowed */ 114 115 /* Register definitions */ 116 #define IPS_REG_HIS 0x08 /* host interrupt status */ 117 #define IPS_REG_HIS_SCE 0x01 /* status channel enqueue */ 118 #define IPS_REG_HIS_EN 0x80 /* enable interrupts */ 119 #define IPS_REG_CCSA 0x10 /* command channel system address */ 120 #define IPS_REG_CCC 0x14 /* command channel control */ 121 #define IPS_REG_CCC_SEM 0x0008 /* semaphore */ 122 #define IPS_REG_CCC_START 0x101a /* start command */ 123 #define IPS_REG_SQH 0x20 /* status queue head */ 124 #define IPS_REG_SQT 0x24 /* status queue tail */ 125 #define IPS_REG_SQE 0x28 /* status queue end */ 126 #define IPS_REG_SQS 0x2c /* status queue start */ 127 128 #define IPS_REG_OIS 0x30 /* outbound interrupt status */ 129 #define IPS_REG_OIS_PEND 0x0008 /* interrupt is pending */ 130 #define IPS_REG_OIM 0x34 /* outbound interrupt mask */ 131 #define IPS_REG_OIM_DS 0x0008 /* disable interrupts */ 132 #define IPS_REG_IQP 0x40 /* inbound queue port */ 133 #define IPS_REG_OQP 0x44 /* outbound queue port */ 134 135 /* Status word fields */ 136 #define IPS_STAT_ID(x) (((x) >> 8) & 0xff) /* command id */ 137 #define IPS_STAT_BASIC(x) (((x) >> 16) & 0xff) /* basic status */ 138 #define IPS_STAT_EXT(x) (((x) >> 24) & 0xff) /* ext status */ 139 #define IPS_STAT_GSC(x) ((x) & 0x0f) 140 141 /* Basic status codes */ 142 #define IPS_STAT_OK 0x00 /* success */ 143 #define IPS_STAT_RECOV 0x01 /* recovered error */ 144 #define IPS_STAT_INVOP 0x03 /* invalid opcode */ 145 #define IPS_STAT_INVCMD 0x04 /* invalid command block */ 146 #define IPS_STAT_INVPARM 0x05 /* invalid parameters block */ 147 #define IPS_STAT_BUSY 0x08 /* busy */ 148 #define IPS_STAT_CMPLERR 0x0c /* completed with error */ 149 #define IPS_STAT_LDERR 0x0d /* logical drive error */ 150 #define IPS_STAT_TIMO 0x0e /* timeout */ 151 #define IPS_STAT_PDRVERR 0x0f /* physical drive error */ 152 153 /* Extended status codes */ 154 #define IPS_ESTAT_SELTIMO 0xf0 /* select timeout */ 155 #define IPS_ESTAT_OURUN 0xf2 /* over/underrun */ 156 #define IPS_ESTAT_HOSTRST 0xf7 /* host reset */ 157 #define IPS_ESTAT_DEVRST 0xf8 /* device reset */ 158 #define IPS_ESTAT_RECOV 0xfc /* recovered error */ 159 #define IPS_ESTAT_CKCOND 0xff /* check condition */ 160 161 #define IPS_IOSIZE 128 /* max space size to map */ 162 163 /* Command frame */ 164 struct ips_cmd { 165 u_int8_t code; 166 u_int8_t id; 167 u_int8_t drive; 168 u_int8_t sgcnt; 169 u_int32_t lba; 170 u_int32_t sgaddr; 171 u_int16_t seccnt; 172 u_int8_t seg4g; 173 u_int8_t esg; 174 u_int32_t ccsar; 175 u_int32_t cccr; 176 }; 177 178 /* Direct CDB (SCSI pass-through) frame */ 179 struct ips_dcdb { 180 u_int8_t device; 181 u_int8_t attr; 182 u_int16_t datalen; 183 u_int32_t sgaddr; 184 u_int8_t cdblen; 185 u_int8_t senselen; 186 u_int8_t sgcnt; 187 u_int8_t __reserved1; 188 u_int8_t cdb[IPS_MAXCDB]; 189 u_int8_t sense[64]; 190 u_int8_t status; 191 u_int8_t __reserved2[3]; 192 }; 193 194 /* Scatter-gather array element */ 195 struct ips_sg { 196 u_int32_t addr; 197 u_int32_t size; 198 }; 199 200 /* Command block */ 201 struct ips_cmdb { 202 struct ips_cmd cmd; 203 struct ips_dcdb dcdb; 204 struct ips_sg sg[IPS_MAXSGS]; 205 }; 206 207 /* Data frames */ 208 struct ips_adapterinfo { 209 u_int8_t drivecnt; 210 u_int8_t miscflag; 211 u_int8_t sltflag; 212 u_int8_t bstflag; 213 u_int8_t pwrchgcnt; 214 u_int8_t wrongaddrcnt; 215 u_int8_t unidentcnt; 216 u_int8_t nvramdevchgcnt; 217 u_int8_t firmware[8]; 218 u_int8_t bios[8]; 219 u_int32_t drivesize[IPS_MAXDRIVES]; 220 u_int8_t cmdcnt; 221 u_int8_t maxphysdevs; 222 u_int16_t flashrepgmcnt; 223 u_int8_t defunctdiskcnt; 224 u_int8_t rebuildflag; 225 u_int8_t offdrivecnt; 226 u_int8_t critdrivecnt; 227 u_int16_t confupdcnt; 228 u_int8_t blkflag; 229 u_int8_t __reserved; 230 u_int16_t deaddisk[IPS_MAXCHANS][IPS_MAXTARGETS]; 231 }; 232 233 struct ips_driveinfo { 234 u_int8_t drivecnt; 235 u_int8_t __reserved[3]; 236 struct ips_drive { 237 u_int8_t id; 238 u_int8_t __reserved; 239 u_int8_t raid; 240 u_int8_t state; 241 #define IPS_DS_FREE 0x00 242 #define IPS_DS_OFFLINE 0x02 243 #define IPS_DS_ONLINE 0x03 244 #define IPS_DS_DEGRADED 0x04 245 #define IPS_DS_SYS 0x06 246 #define IPS_DS_CRS 0x24 247 248 u_int32_t seccnt; 249 } drive[IPS_MAXDRIVES]; 250 }; 251 252 struct ips_conf { 253 u_int8_t ldcnt; 254 u_int8_t day; 255 u_int8_t month; 256 u_int8_t year; 257 u_int8_t initid[4]; 258 u_int8_t hostid[12]; 259 u_int8_t time[8]; 260 u_int32_t useropt; 261 u_int16_t userfield; 262 u_int8_t rebuildrate; 263 u_int8_t __reserved1; 264 265 struct ips_hw { 266 u_int8_t board[8]; 267 u_int8_t cpu[8]; 268 u_int8_t nchantype; 269 u_int8_t nhostinttype; 270 u_int8_t compression; 271 u_int8_t nvramtype; 272 u_int32_t nvramsize; 273 } hw; 274 275 struct ips_ld { 276 u_int16_t userfield; 277 u_int8_t state; 278 u_int8_t raidcacheparam; 279 u_int8_t chunkcnt; 280 u_int8_t stripesize; 281 u_int8_t params; 282 u_int8_t __reserved; 283 u_int32_t size; 284 285 struct ips_chunk { 286 u_int8_t channel; 287 u_int8_t target; 288 u_int16_t __reserved; 289 u_int32_t startsec; 290 u_int32_t seccnt; 291 } chunk[IPS_MAXCHUNKS]; 292 } ld[IPS_MAXDRIVES]; 293 294 struct ips_dev { 295 u_int8_t initiator; 296 u_int8_t params; 297 u_int8_t miscflag; 298 u_int8_t state; 299 #define IPS_DVS_STANDBY 0x01 300 #define IPS_DVS_REBUILD 0x02 301 #define IPS_DVS_SPARE 0x04 302 #define IPS_DVS_MEMBER 0x08 303 #define IPS_DVS_ONLINE 0x80 304 #define IPS_DVS_READY (IPS_DVS_STANDBY | IPS_DVS_ONLINE) 305 306 u_int32_t seccnt; 307 u_int8_t devid[28]; 308 } dev[IPS_MAXCHANS][IPS_MAXTARGETS]; 309 310 u_int8_t reserved[512]; 311 }; 312 313 struct ips_rblstat { 314 u_int8_t __unknown[20]; 315 struct { 316 u_int8_t __unknown[4]; 317 u_int32_t total; 318 u_int32_t remain; 319 } ld[IPS_MAXDRIVES]; 320 }; 321 322 struct ips_pg5 { 323 u_int32_t signature; 324 u_int8_t __reserved1; 325 u_int8_t slot; 326 u_int16_t type; 327 u_int8_t bioshi[4]; 328 u_int8_t bioslo[4]; 329 u_int16_t __reserved2; 330 u_int8_t __reserved3; 331 u_int8_t os; 332 u_int8_t driverhi[4]; 333 u_int8_t driverlo[4]; 334 u_int8_t __reserved4[100]; 335 }; 336 337 struct ips_info { 338 struct ips_adapterinfo adapter; 339 struct ips_driveinfo drive; 340 struct ips_conf conf; 341 struct ips_rblstat rblstat; 342 struct ips_pg5 pg5; 343 }; 344 345 /* Command control block */ 346 struct ips_softc; 347 struct ips_ccb { 348 struct ips_softc * c_sc; /* driver softc */ 349 int c_id; /* command id */ 350 int c_flags; /* SCSI_* flags */ 351 enum { 352 IPS_CCB_FREE, 353 IPS_CCB_QUEUED, 354 IPS_CCB_DONE 355 } c_state; /* command state */ 356 357 void * c_cmdbva; /* command block virt addr */ 358 paddr_t c_cmdbpa; /* command block phys addr */ 359 bus_dmamap_t c_dmam; /* data buffer DMA map */ 360 361 struct scsi_xfer * c_xfer; /* corresponding SCSI xfer */ 362 363 u_int8_t c_stat; /* status byte copy */ 364 u_int8_t c_estat; /* ext status byte copy */ 365 int c_error; /* completion error */ 366 367 void (*c_done)(struct ips_softc *, /* cmd done */ 368 struct ips_ccb *); /* callback */ 369 370 TAILQ_ENTRY(ips_ccb) c_link; /* queue link */ 371 }; 372 373 /* CCB queue */ 374 TAILQ_HEAD(ips_ccbq, ips_ccb); 375 376 /* DMA-able chunk of memory */ 377 struct dmamem { 378 bus_dma_tag_t dm_tag; 379 bus_dmamap_t dm_map; 380 bus_dma_segment_t dm_seg; 381 bus_size_t dm_size; 382 void * dm_vaddr; 383 #define dm_paddr dm_seg.ds_addr 384 }; 385 386 struct ips_softc { 387 struct device sc_dev; 388 389 struct scsi_link sc_scsi_link; 390 struct scsibus_softc * sc_scsibus; 391 392 struct ips_pt { 393 struct ips_softc * pt_sc; 394 int pt_chan; 395 396 struct scsi_link pt_link; 397 398 int pt_proctgt; 399 char pt_procdev[16]; 400 } sc_pt[IPS_MAXCHANS]; 401 402 struct ksensordev sc_sensordev; 403 struct ksensor * sc_sensors; 404 405 bus_space_tag_t sc_iot; 406 bus_space_handle_t sc_ioh; 407 bus_dma_tag_t sc_dmat; 408 409 const struct ips_chipset *sc_chip; 410 411 struct ips_info * sc_info; 412 struct dmamem sc_infom; 413 414 int sc_nunits; 415 416 struct dmamem sc_cmdbm; 417 418 struct ips_ccb * sc_ccb; 419 int sc_nccbs; 420 struct ips_ccbq sc_ccbq_free; 421 422 struct dmamem sc_sqm; 423 paddr_t sc_sqtail; 424 u_int32_t * sc_sqbuf; 425 int sc_sqidx; 426 }; 427 428 int ips_match(struct device *, void *, void *); 429 void ips_attach(struct device *, struct device *, void *); 430 431 int ips_scsi_cmd(struct scsi_xfer *); 432 int ips_scsi_pt_cmd(struct scsi_xfer *); 433 int ips_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int, 434 struct proc *); 435 436 #if NBIO > 0 437 int ips_ioctl(struct device *, u_long, caddr_t); 438 int ips_ioctl_inq(struct ips_softc *, struct bioc_inq *); 439 int ips_ioctl_vol(struct ips_softc *, struct bioc_vol *); 440 int ips_ioctl_disk(struct ips_softc *, struct bioc_disk *); 441 int ips_ioctl_setstate(struct ips_softc *, struct bioc_setstate *); 442 #endif 443 444 #ifndef SMALL_KERNEL 445 void ips_sensors(void *); 446 #endif 447 448 int ips_load_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *); 449 int ips_start_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *); 450 451 int ips_cmd(struct ips_softc *, struct ips_ccb *); 452 int ips_poll(struct ips_softc *, struct ips_ccb *); 453 void ips_done(struct ips_softc *, struct ips_ccb *); 454 void ips_done_xs(struct ips_softc *, struct ips_ccb *); 455 void ips_done_pt(struct ips_softc *, struct ips_ccb *); 456 void ips_done_mgmt(struct ips_softc *, struct ips_ccb *); 457 int ips_error(struct ips_softc *, struct ips_ccb *); 458 int ips_error_xs(struct ips_softc *, struct ips_ccb *); 459 int ips_intr(void *); 460 void ips_timeout(void *); 461 462 int ips_getadapterinfo(struct ips_softc *, int); 463 int ips_getdriveinfo(struct ips_softc *, int); 464 int ips_getconf(struct ips_softc *, int); 465 int ips_getpg5(struct ips_softc *, int); 466 467 #if NBIO > 0 468 int ips_getrblstat(struct ips_softc *, int); 469 int ips_setstate(struct ips_softc *, int, int, int, int); 470 int ips_rebuild(struct ips_softc *, int, int, int, int, int); 471 #endif 472 473 void ips_copperhead_exec(struct ips_softc *, struct ips_ccb *); 474 void ips_copperhead_intren(struct ips_softc *); 475 int ips_copperhead_isintr(struct ips_softc *); 476 u_int32_t ips_copperhead_status(struct ips_softc *); 477 478 void ips_morpheus_exec(struct ips_softc *, struct ips_ccb *); 479 void ips_morpheus_intren(struct ips_softc *); 480 int ips_morpheus_isintr(struct ips_softc *); 481 u_int32_t ips_morpheus_status(struct ips_softc *); 482 483 struct ips_ccb *ips_ccb_alloc(struct ips_softc *, int); 484 void ips_ccb_free(struct ips_softc *, struct ips_ccb *, int); 485 struct ips_ccb *ips_ccb_get(struct ips_softc *); 486 void ips_ccb_put(struct ips_softc *, struct ips_ccb *); 487 488 int ips_dmamem_alloc(struct dmamem *, bus_dma_tag_t, bus_size_t); 489 void ips_dmamem_free(struct dmamem *); 490 491 struct cfattach ips_ca = { 492 sizeof(struct ips_softc), 493 ips_match, 494 ips_attach 495 }; 496 497 struct cfdriver ips_cd = { 498 NULL, "ips", DV_DULL 499 }; 500 501 static struct scsi_adapter ips_scsi_adapter = { 502 ips_scsi_cmd, 503 scsi_minphys, 504 NULL, 505 NULL, 506 ips_scsi_ioctl 507 }; 508 509 static struct scsi_device ips_scsi_device = { 510 NULL, 511 NULL, 512 NULL, 513 NULL 514 }; 515 516 static struct scsi_adapter ips_scsi_pt_adapter = { 517 ips_scsi_pt_cmd, 518 scsi_minphys, 519 NULL, 520 NULL, 521 NULL 522 }; 523 524 static struct scsi_device ips_scsi_pt_device = { 525 NULL, 526 NULL, 527 NULL, 528 NULL 529 }; 530 531 static const struct pci_matchid ips_ids[] = { 532 { PCI_VENDOR_IBM, PCI_PRODUCT_IBM_SERVERAID }, 533 { PCI_VENDOR_IBM, PCI_PRODUCT_IBM_SERVERAID2 }, 534 { PCI_VENDOR_ADP2, PCI_PRODUCT_ADP2_SERVERAID } 535 }; 536 537 static const struct ips_chipset { 538 enum { 539 IPS_CHIP_COPPERHEAD = 0, 540 IPS_CHIP_MORPHEUS 541 } ic_id; 542 543 int ic_bar; 544 545 void (*ic_exec)(struct ips_softc *, struct ips_ccb *); 546 void (*ic_intren)(struct ips_softc *); 547 int (*ic_isintr)(struct ips_softc *); 548 u_int32_t (*ic_status)(struct ips_softc *); 549 } ips_chips[] = { 550 { 551 IPS_CHIP_COPPERHEAD, 552 0x14, 553 ips_copperhead_exec, 554 ips_copperhead_intren, 555 ips_copperhead_isintr, 556 ips_copperhead_status 557 }, 558 { 559 IPS_CHIP_MORPHEUS, 560 0x10, 561 ips_morpheus_exec, 562 ips_morpheus_intren, 563 ips_morpheus_isintr, 564 ips_morpheus_status 565 } 566 }; 567 568 #define ips_exec(s, c) (s)->sc_chip->ic_exec((s), (c)) 569 #define ips_intren(s) (s)->sc_chip->ic_intren((s)) 570 #define ips_isintr(s) (s)->sc_chip->ic_isintr((s)) 571 #define ips_status(s) (s)->sc_chip->ic_status((s)) 572 573 static const char *ips_names[] = { 574 NULL, 575 NULL, 576 "II", 577 "onboard", 578 "onboard", 579 "3H", 580 "3L", 581 "4H", 582 "4M", 583 "4L", 584 "4Mx", 585 "4Lx", 586 "5i", 587 "5i", 588 "6M", 589 "6i", 590 "7t", 591 "7k", 592 "7M" 593 }; 594 595 int 596 ips_match(struct device *parent, void *match, void *aux) 597 { 598 return (pci_matchbyid(aux, ips_ids, 599 sizeof(ips_ids) / sizeof(ips_ids[0]))); 600 } 601 602 void 603 ips_attach(struct device *parent, struct device *self, void *aux) 604 { 605 struct ips_softc *sc = (struct ips_softc *)self; 606 struct pci_attach_args *pa = aux; 607 struct ips_ccb ccb0; 608 struct scsibus_attach_args saa; 609 struct ips_adapterinfo *ai; 610 struct ips_driveinfo *di; 611 struct ips_pg5 *pg5; 612 pcireg_t maptype; 613 bus_size_t iosize; 614 pci_intr_handle_t ih; 615 const char *intrstr; 616 int type, i; 617 618 sc->sc_dmat = pa->pa_dmat; 619 620 /* Identify chipset */ 621 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_IBM_SERVERAID) 622 sc->sc_chip = &ips_chips[IPS_CHIP_COPPERHEAD]; 623 else 624 sc->sc_chip = &ips_chips[IPS_CHIP_MORPHEUS]; 625 626 /* Map registers */ 627 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_chip->ic_bar); 628 if (pci_mapreg_map(pa, sc->sc_chip->ic_bar, maptype, 0, &sc->sc_iot, 629 &sc->sc_ioh, NULL, &iosize, IPS_IOSIZE)) { 630 printf(": can't map regs\n"); 631 return; 632 } 633 634 /* Allocate command buffer */ 635 if (ips_dmamem_alloc(&sc->sc_cmdbm, sc->sc_dmat, 636 IPS_MAXCMDS * sizeof(struct ips_cmdb))) { 637 printf(": can't alloc cmd buffer\n"); 638 goto fail1; 639 } 640 641 /* Allocate info buffer */ 642 if (ips_dmamem_alloc(&sc->sc_infom, sc->sc_dmat, 643 sizeof(struct ips_info))) { 644 printf(": can't alloc info buffer\n"); 645 goto fail2; 646 } 647 sc->sc_info = sc->sc_infom.dm_vaddr; 648 ai = &sc->sc_info->adapter; 649 di = &sc->sc_info->drive; 650 pg5 = &sc->sc_info->pg5; 651 652 /* Allocate status queue for the Copperhead chipset */ 653 if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD) { 654 if (ips_dmamem_alloc(&sc->sc_sqm, sc->sc_dmat, IPS_SQSZ)) { 655 printf(": can't alloc status queue\n"); 656 goto fail3; 657 } 658 sc->sc_sqtail = sc->sc_sqm.dm_paddr; 659 sc->sc_sqbuf = sc->sc_sqm.dm_vaddr; 660 sc->sc_sqidx = 0; 661 bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQS, 662 sc->sc_sqm.dm_paddr); 663 bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQE, 664 sc->sc_sqm.dm_paddr + IPS_SQSZ); 665 bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH, 666 sc->sc_sqm.dm_paddr + sizeof(u_int32_t)); 667 bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT, 668 sc->sc_sqm.dm_paddr); 669 } 670 671 /* Bootstrap CCB queue */ 672 sc->sc_nccbs = 1; 673 sc->sc_ccb = &ccb0; 674 bzero(&ccb0, sizeof(ccb0)); 675 ccb0.c_cmdbva = sc->sc_cmdbm.dm_vaddr; 676 ccb0.c_cmdbpa = sc->sc_cmdbm.dm_paddr; 677 TAILQ_INIT(&sc->sc_ccbq_free); 678 TAILQ_INSERT_TAIL(&sc->sc_ccbq_free, &ccb0, c_link); 679 680 /* Get adapter info */ 681 if (ips_getadapterinfo(sc, SCSI_NOSLEEP)) { 682 printf(": can't get adapter info\n"); 683 goto fail4; 684 } 685 686 /* Get logical drives info */ 687 if (ips_getdriveinfo(sc, SCSI_NOSLEEP)) { 688 printf(": can't get ld info\n"); 689 goto fail4; 690 } 691 sc->sc_nunits = di->drivecnt; 692 693 /* Get configuration */ 694 if (ips_getconf(sc, SCSI_NOSLEEP)) { 695 printf(": can't get config\n"); 696 goto fail4; 697 } 698 699 /* Read NVRAM page 5 for additional info */ 700 (void)ips_getpg5(sc, SCSI_NOSLEEP); 701 702 /* Initialize CCB queue */ 703 sc->sc_nccbs = ai->cmdcnt; 704 if ((sc->sc_ccb = ips_ccb_alloc(sc, sc->sc_nccbs)) == NULL) { 705 printf(": can't alloc ccb queue\n"); 706 goto fail4; 707 } 708 TAILQ_INIT(&sc->sc_ccbq_free); 709 for (i = 0; i < sc->sc_nccbs; i++) 710 TAILQ_INSERT_TAIL(&sc->sc_ccbq_free, 711 &sc->sc_ccb[i], c_link); 712 713 /* Install interrupt handler */ 714 if (pci_intr_map(pa, &ih)) { 715 printf(": can't map interrupt\n"); 716 goto fail5; 717 } 718 intrstr = pci_intr_string(pa->pa_pc, ih); 719 if (pci_intr_establish(pa->pa_pc, ih, IPL_BIO, ips_intr, sc, 720 sc->sc_dev.dv_xname) == NULL) { 721 printf(": can't establish interrupt"); 722 if (intrstr != NULL) 723 printf(" at %s", intrstr); 724 printf("\n"); 725 goto fail5; 726 } 727 printf(": %s\n", intrstr); 728 729 /* Display adapter info */ 730 printf("%s: ServeRAID", sc->sc_dev.dv_xname); 731 type = letoh16(pg5->type); 732 if (type < sizeof(ips_names) / sizeof(ips_names[0]) && ips_names[type]) 733 printf(" %s", ips_names[type]); 734 printf(", FW %c%c%c%c%c%c%c", ai->firmware[0], ai->firmware[1], 735 ai->firmware[2], ai->firmware[3], ai->firmware[4], ai->firmware[5], 736 ai->firmware[6]); 737 printf(", BIOS %c%c%c%c%c%c%c", ai->bios[0], ai->bios[1], ai->bios[2], 738 ai->bios[3], ai->bios[4], ai->bios[5], ai->bios[6]); 739 printf(", %d cmds, %d LD%s", sc->sc_nccbs, sc->sc_nunits, 740 (sc->sc_nunits == 1 ? "" : "s")); 741 printf("\n"); 742 743 /* Attach SCSI bus */ 744 if (sc->sc_nunits > 0) 745 sc->sc_scsi_link.openings = sc->sc_nccbs / sc->sc_nunits; 746 sc->sc_scsi_link.adapter_target = sc->sc_nunits; 747 sc->sc_scsi_link.adapter_buswidth = sc->sc_nunits; 748 sc->sc_scsi_link.device = &ips_scsi_device; 749 sc->sc_scsi_link.adapter = &ips_scsi_adapter; 750 sc->sc_scsi_link.adapter_softc = sc; 751 752 bzero(&saa, sizeof(saa)); 753 saa.saa_sc_link = &sc->sc_scsi_link; 754 sc->sc_scsibus = (struct scsibus_softc *)config_found(self, &saa, 755 scsiprint); 756 757 /* For each channel attach SCSI pass-through bus */ 758 bzero(&saa, sizeof(saa)); 759 for (i = 0; i < IPS_MAXCHANS; i++) { 760 struct ips_pt *pt; 761 struct scsi_link *link; 762 int target, lastarget; 763 764 pt = &sc->sc_pt[i]; 765 pt->pt_sc = sc; 766 pt->pt_chan = i; 767 pt->pt_proctgt = -1; 768 769 /* Check if channel has any devices besides disks */ 770 for (target = 0, lastarget = -1; target < IPS_MAXTARGETS; 771 target++) { 772 struct ips_dev *dev; 773 int type; 774 775 dev = &sc->sc_info->conf.dev[i][target]; 776 type = dev->params & SID_TYPE; 777 if (dev->state && type != T_DIRECT) { 778 lastarget = target; 779 if (type == T_PROCESSOR || 780 type == T_ENCLOSURE) 781 /* remember enclosure address */ 782 pt->pt_proctgt = target; 783 } 784 } 785 if (lastarget == -1) 786 continue; 787 788 link = &pt->pt_link; 789 link->openings = 1; 790 link->adapter_target = IPS_MAXTARGETS; 791 link->adapter_buswidth = lastarget + 1; 792 link->device = &ips_scsi_pt_device; 793 link->adapter = &ips_scsi_pt_adapter; 794 link->adapter_softc = pt; 795 796 saa.saa_sc_link = link; 797 config_found(self, &saa, scsiprint); 798 } 799 800 /* Enable interrupts */ 801 ips_intren(sc); 802 803 #if NBIO > 0 804 /* Install ioctl handler */ 805 if (bio_register(&sc->sc_dev, ips_ioctl)) 806 printf("%s: no ioctl support\n", sc->sc_dev.dv_xname); 807 #endif 808 809 #ifndef SMALL_KERNEL 810 /* Add sensors */ 811 if ((sc->sc_sensors = malloc(sizeof(struct ksensor) * sc->sc_nunits, 812 M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { 813 printf(": can't alloc sensors\n"); 814 return; 815 } 816 strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname, 817 sizeof(sc->sc_sensordev.xname)); 818 for (i = 0; i < sc->sc_nunits; i++) { 819 sc->sc_sensors[i].type = SENSOR_DRIVE; 820 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN; 821 strlcpy(sc->sc_sensors[i].desc, ((struct device *) 822 sc->sc_scsibus->sc_link[i][0]->device_softc)->dv_xname, 823 sizeof(sc->sc_sensors[i].desc)); 824 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]); 825 } 826 if (sensor_task_register(sc, ips_sensors, 10) == NULL) { 827 printf(": no sensors support\n"); 828 free(sc->sc_sensors, M_DEVBUF); 829 return; 830 } 831 sensordev_install(&sc->sc_sensordev); 832 #endif /* !SMALL_KERNEL */ 833 834 return; 835 fail5: 836 ips_ccb_free(sc, sc->sc_ccb, sc->sc_nccbs); 837 fail4: 838 if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD) 839 ips_dmamem_free(&sc->sc_sqm); 840 fail3: 841 ips_dmamem_free(&sc->sc_infom); 842 fail2: 843 ips_dmamem_free(&sc->sc_cmdbm); 844 fail1: 845 bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize); 846 } 847 848 int 849 ips_scsi_cmd(struct scsi_xfer *xs) 850 { 851 struct scsi_link *link = xs->sc_link; 852 struct ips_softc *sc = link->adapter_softc; 853 struct ips_driveinfo *di = &sc->sc_info->drive; 854 struct ips_drive *drive; 855 struct scsi_inquiry_data inq; 856 struct scsi_read_cap_data rcd; 857 struct scsi_sense_data sd; 858 struct scsi_rw *rw; 859 struct scsi_rw_big *rwb; 860 struct ips_ccb *ccb; 861 struct ips_cmd *cmd; 862 int target = link->target; 863 u_int32_t blkno, blkcnt; 864 int code, s; 865 866 DPRINTF(IPS_D_XFER, ("%s: ips_scsi_cmd: xs %p, target %d, " 867 "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, target, 868 xs->cmd->opcode, xs->flags)); 869 870 if (target >= sc->sc_nunits || link->lun != 0) { 871 DPRINTF(IPS_D_INFO, ("%s: ips_scsi_cmd: invalid params " 872 "target %d, lun %d\n", sc->sc_dev.dv_xname, 873 target, link->lun)); 874 xs->error = XS_DRIVER_STUFFUP; 875 s = splbio(); 876 scsi_done(xs); 877 splx(s); 878 return (COMPLETE); 879 } 880 881 drive = &di->drive[target]; 882 xs->error = XS_NOERROR; 883 884 /* Fake SCSI commands */ 885 switch (xs->cmd->opcode) { 886 case READ_BIG: 887 case READ_COMMAND: 888 case WRITE_BIG: 889 case WRITE_COMMAND: 890 if (xs->cmdlen == sizeof(struct scsi_rw)) { 891 rw = (void *)xs->cmd; 892 blkno = _3btol(rw->addr) & 893 (SRW_TOPADDR << 16 | 0xffff); 894 blkcnt = rw->length ? rw->length : 0x100; 895 } else { 896 rwb = (void *)xs->cmd; 897 blkno = _4btol(rwb->addr); 898 blkcnt = _2btol(rwb->length); 899 } 900 901 if (blkno >= letoh32(drive->seccnt) || blkno + blkcnt > 902 letoh32(drive->seccnt)) { 903 DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: invalid params " 904 "blkno %u, blkcnt %u\n", sc->sc_dev.dv_xname, 905 blkno, blkcnt)); 906 xs->error = XS_DRIVER_STUFFUP; 907 break; 908 } 909 910 if (xs->flags & SCSI_DATA_IN) 911 code = IPS_CMD_READ; 912 else 913 code = IPS_CMD_WRITE; 914 915 s = splbio(); 916 ccb = ips_ccb_get(sc); 917 splx(s); 918 if (ccb == NULL) { 919 DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: no ccb\n", 920 sc->sc_dev.dv_xname)); 921 return (NO_CCB); 922 } 923 924 cmd = ccb->c_cmdbva; 925 cmd->code = code; 926 cmd->drive = target; 927 cmd->lba = htole32(blkno); 928 cmd->seccnt = htole16(blkcnt); 929 930 if (ips_load_xs(sc, ccb, xs)) { 931 DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: ips_load_xs " 932 "failed\n", sc->sc_dev.dv_xname)); 933 934 s = splbio(); 935 ips_ccb_put(sc, ccb); 936 splx(s); 937 xs->error = XS_DRIVER_STUFFUP; 938 break; 939 } 940 941 if (cmd->sgcnt > 0) 942 cmd->code |= IPS_CMD_SG; 943 944 ccb->c_done = ips_done_xs; 945 return (ips_start_xs(sc, ccb, xs)); 946 case INQUIRY: 947 bzero(&inq, sizeof(inq)); 948 inq.device = T_DIRECT; 949 inq.version = 2; 950 inq.response_format = 2; 951 inq.additional_length = 32; 952 strlcpy(inq.vendor, "IBM", sizeof(inq.vendor)); 953 snprintf(inq.product, sizeof(inq.product), 954 "LD%d RAID%d", target, drive->raid); 955 strlcpy(inq.revision, "1.0", sizeof(inq.revision)); 956 memcpy(xs->data, &inq, MIN(xs->datalen, sizeof(inq))); 957 break; 958 case READ_CAPACITY: 959 bzero(&rcd, sizeof(rcd)); 960 _lto4b(letoh32(drive->seccnt) - 1, rcd.addr); 961 _lto4b(IPS_SECSZ, rcd.length); 962 memcpy(xs->data, &rcd, MIN(xs->datalen, sizeof(rcd))); 963 break; 964 case REQUEST_SENSE: 965 bzero(&sd, sizeof(sd)); 966 sd.error_code = SSD_ERRCODE_CURRENT; 967 sd.flags = SKEY_NO_SENSE; 968 memcpy(xs->data, &sd, MIN(xs->datalen, sizeof(sd))); 969 break; 970 case SYNCHRONIZE_CACHE: 971 s = splbio(); 972 ccb = ips_ccb_get(sc); 973 splx(s); 974 if (ccb == NULL) { 975 DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: no ccb\n", 976 sc->sc_dev.dv_xname)); 977 return (NO_CCB); 978 } 979 980 cmd = ccb->c_cmdbva; 981 cmd->code = IPS_CMD_FLUSH; 982 983 ccb->c_done = ips_done_xs; 984 return (ips_start_xs(sc, ccb, xs)); 985 case PREVENT_ALLOW: 986 case START_STOP: 987 case TEST_UNIT_READY: 988 break; 989 default: 990 DPRINTF(IPS_D_INFO, ("%s: unsupported scsi command 0x%02x\n", 991 sc->sc_dev.dv_xname, xs->cmd->opcode)); 992 xs->error = XS_DRIVER_STUFFUP; 993 } 994 995 s = splbio(); 996 scsi_done(xs); 997 splx(s); 998 999 return (COMPLETE); 1000 } 1001 1002 int 1003 ips_scsi_pt_cmd(struct scsi_xfer *xs) 1004 { 1005 struct scsi_link *link = xs->sc_link; 1006 struct ips_pt *pt = link->adapter_softc; 1007 struct ips_softc *sc = pt->pt_sc; 1008 struct device *dev = link->device_softc; 1009 struct ips_ccb *ccb; 1010 struct ips_cmdb *cmdb; 1011 struct ips_cmd *cmd; 1012 struct ips_dcdb *dcdb; 1013 int chan = pt->pt_chan, target = link->target; 1014 int s; 1015 1016 DPRINTF(IPS_D_XFER, ("%s: ips_scsi_pt_cmd: xs %p, chan %d, target %d, " 1017 "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, chan, 1018 target, xs->cmd->opcode, xs->flags)); 1019 1020 if (pt->pt_procdev[0] == '\0' && target == pt->pt_proctgt && dev) 1021 strlcpy(pt->pt_procdev, dev->dv_xname, sizeof(pt->pt_procdev)); 1022 1023 if (xs->cmdlen > IPS_MAXCDB) { 1024 DPRINTF(IPS_D_ERR, ("%s: cmdlen %d too big\n", 1025 sc->sc_dev.dv_xname, xs->cmdlen)); 1026 1027 bzero(&xs->sense, sizeof(xs->sense)); 1028 xs->sense.error_code = SSD_ERRCODE_VALID | 0x70; 1029 xs->sense.flags = SKEY_ILLEGAL_REQUEST; 1030 xs->sense.add_sense_code = 0x20; /* illcmd, 0x24 illfield */ 1031 xs->error = XS_SENSE; 1032 s = splbio(); 1033 scsi_done(xs); 1034 splx(s); 1035 return (COMPLETE); 1036 } 1037 1038 xs->error = XS_NOERROR; 1039 1040 s = splbio(); 1041 ccb = ips_ccb_get(sc); 1042 splx(s); 1043 if (ccb == NULL) { 1044 DPRINTF(IPS_D_ERR, ("%s: ips_scsi_pt_cmd: no ccb\n", 1045 sc->sc_dev.dv_xname)); 1046 return (NO_CCB); 1047 } 1048 1049 cmdb = ccb->c_cmdbva; 1050 cmd = &cmdb->cmd; 1051 dcdb = &cmdb->dcdb; 1052 1053 cmd->code = IPS_CMD_DCDB; 1054 1055 dcdb->device = (chan << 4) | target; 1056 if (xs->flags & SCSI_DATA_IN) 1057 dcdb->attr |= IPS_DCDB_DATAIN; 1058 if (xs->flags & SCSI_DATA_OUT) 1059 dcdb->attr |= IPS_DCDB_DATAOUT; 1060 1061 /* 1062 * Adjust timeout value to what controller supports. Make sure our 1063 * timeout will be fired after controller gives up. 1064 */ 1065 if (xs->timeout <= 10000) { 1066 dcdb->attr |= IPS_DCDB_TIMO10; 1067 xs->timeout = 11000; 1068 } else if (xs->timeout <= 60000) { 1069 dcdb->attr |= IPS_DCDB_TIMO60; 1070 xs->timeout = 61000; 1071 } else { 1072 dcdb->attr |= IPS_DCDB_TIMO20M; 1073 xs->timeout = 20 * 60000 + 1000; 1074 } 1075 1076 dcdb->attr |= IPS_DCDB_DISCON; 1077 dcdb->datalen = htole16(xs->datalen); 1078 dcdb->cdblen = xs->cmdlen; 1079 dcdb->senselen = MIN(sizeof(xs->sense), sizeof(dcdb->sense)); 1080 memcpy(dcdb->cdb, xs->cmd, xs->cmdlen); 1081 1082 if (ips_load_xs(sc, ccb, xs)) { 1083 DPRINTF(IPS_D_ERR, ("%s: ips_scsi_pt_cmd: ips_load_xs " 1084 "failed\n", sc->sc_dev.dv_xname)); 1085 1086 s = splbio(); 1087 ips_ccb_put(sc, ccb); 1088 splx(s); 1089 xs->error = XS_DRIVER_STUFFUP; 1090 s = splbio(); 1091 scsi_done(xs); 1092 splx(s); 1093 return (COMPLETE); 1094 } 1095 if (cmd->sgcnt > 0) 1096 cmd->code |= IPS_CMD_SG; 1097 dcdb->sgaddr = cmd->sgaddr; 1098 dcdb->sgcnt = cmd->sgcnt; 1099 cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb, dcdb)); 1100 cmd->sgcnt = 0; 1101 1102 ccb->c_done = ips_done_pt; 1103 return (ips_start_xs(sc, ccb, xs)); 1104 } 1105 1106 int 1107 ips_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag, 1108 struct proc *p) 1109 { 1110 #if NBIO > 0 1111 return (ips_ioctl(link->adapter_softc, cmd, addr)); 1112 #else 1113 return (ENOTTY); 1114 #endif 1115 } 1116 1117 #if NBIO > 0 1118 int 1119 ips_ioctl(struct device *dev, u_long cmd, caddr_t addr) 1120 { 1121 struct ips_softc *sc = (struct ips_softc *)dev; 1122 1123 DPRINTF(IPS_D_INFO, ("%s: ips_ioctl: cmd %lu\n", 1124 sc->sc_dev.dv_xname, cmd)); 1125 1126 switch (cmd) { 1127 case BIOCINQ: 1128 return (ips_ioctl_inq(sc, (struct bioc_inq *)addr)); 1129 case BIOCVOL: 1130 return (ips_ioctl_vol(sc, (struct bioc_vol *)addr)); 1131 case BIOCDISK: 1132 return (ips_ioctl_disk(sc, (struct bioc_disk *)addr)); 1133 case BIOCSETSTATE: 1134 return (ips_ioctl_setstate(sc, (struct bioc_setstate *)addr)); 1135 default: 1136 return (ENOTTY); 1137 } 1138 } 1139 1140 int 1141 ips_ioctl_inq(struct ips_softc *sc, struct bioc_inq *bi) 1142 { 1143 struct ips_conf *conf = &sc->sc_info->conf; 1144 int i; 1145 1146 strlcpy(bi->bi_dev, sc->sc_dev.dv_xname, sizeof(bi->bi_dev)); 1147 bi->bi_novol = sc->sc_nunits; 1148 for (i = 0, bi->bi_nodisk = 0; i < sc->sc_nunits; i++) 1149 bi->bi_nodisk += conf->ld[i].chunkcnt; 1150 1151 DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_inq: novol %d, nodisk %d\n", 1152 bi->bi_dev, bi->bi_novol, bi->bi_nodisk)); 1153 1154 return (0); 1155 } 1156 1157 int 1158 ips_ioctl_vol(struct ips_softc *sc, struct bioc_vol *bv) 1159 { 1160 struct ips_driveinfo *di = &sc->sc_info->drive; 1161 struct ips_conf *conf = &sc->sc_info->conf; 1162 struct ips_rblstat *rblstat = &sc->sc_info->rblstat; 1163 struct ips_ld *ld; 1164 int vid = bv->bv_volid; 1165 struct device *dv; 1166 int error, rebuild = 0; 1167 u_int32_t total = 0, done = 0; 1168 1169 if (vid >= sc->sc_nunits) 1170 return (EINVAL); 1171 if ((error = ips_getconf(sc, 0))) 1172 return (error); 1173 ld = &conf->ld[vid]; 1174 1175 switch (ld->state) { 1176 case IPS_DS_ONLINE: 1177 bv->bv_status = BIOC_SVONLINE; 1178 break; 1179 case IPS_DS_DEGRADED: 1180 bv->bv_status = BIOC_SVDEGRADED; 1181 rebuild++; 1182 break; 1183 case IPS_DS_OFFLINE: 1184 bv->bv_status = BIOC_SVOFFLINE; 1185 break; 1186 default: 1187 bv->bv_status = BIOC_SVINVALID; 1188 } 1189 1190 if (rebuild && ips_getrblstat(sc, 0) == 0) { 1191 total = letoh32(rblstat->ld[vid].total); 1192 done = total - letoh32(rblstat->ld[vid].remain); 1193 if (total && total > done) { 1194 bv->bv_status = BIOC_SVREBUILD; 1195 bv->bv_percent = 100 * done / total; 1196 } 1197 } 1198 1199 bv->bv_size = (u_quad_t)letoh32(ld->size) * IPS_SECSZ; 1200 bv->bv_level = di->drive[vid].raid; 1201 bv->bv_nodisk = ld->chunkcnt; 1202 1203 /* Associate all unused and spare drives with first volume */ 1204 if (vid == 0) { 1205 struct ips_dev *dev; 1206 int chan, target; 1207 1208 for (chan = 0; chan < IPS_MAXCHANS; chan++) 1209 for (target = 0; target < IPS_MAXTARGETS; target++) { 1210 dev = &conf->dev[chan][target]; 1211 if (dev->state && !(dev->state & 1212 IPS_DVS_MEMBER) && 1213 (dev->params & SID_TYPE) == T_DIRECT) 1214 bv->bv_nodisk++; 1215 } 1216 } 1217 1218 dv = sc->sc_scsibus->sc_link[vid][0]->device_softc; 1219 strlcpy(bv->bv_dev, dv->dv_xname, sizeof(bv->bv_dev)); 1220 strlcpy(bv->bv_vendor, "IBM", sizeof(bv->bv_vendor)); 1221 1222 DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_vol: vid %d, state 0x%02x, " 1223 "total %u, done %u, size %llu, level %d, nodisk %d, dev %s\n", 1224 sc->sc_dev.dv_xname, vid, ld->state, total, done, bv->bv_size, 1225 bv->bv_level, bv->bv_nodisk, bv->bv_dev)); 1226 1227 return (0); 1228 } 1229 1230 int 1231 ips_ioctl_disk(struct ips_softc *sc, struct bioc_disk *bd) 1232 { 1233 struct ips_conf *conf = &sc->sc_info->conf; 1234 struct ips_ld *ld; 1235 struct ips_chunk *chunk; 1236 struct ips_dev *dev; 1237 int vid = bd->bd_volid, did = bd->bd_diskid; 1238 int chan, target, error, i; 1239 1240 if (vid >= sc->sc_nunits) 1241 return (EINVAL); 1242 if ((error = ips_getconf(sc, 0))) 1243 return (error); 1244 ld = &conf->ld[vid]; 1245 1246 if (did >= ld->chunkcnt) { 1247 /* Probably unused or spare drives */ 1248 if (vid != 0) 1249 return (EINVAL); 1250 1251 i = ld->chunkcnt; 1252 for (chan = 0; chan < IPS_MAXCHANS; chan++) 1253 for (target = 0; target < IPS_MAXTARGETS; target++) { 1254 dev = &conf->dev[chan][target]; 1255 if (dev->state && !(dev->state & 1256 IPS_DVS_MEMBER) && 1257 (dev->params & SID_TYPE) == T_DIRECT) 1258 if (i++ == did) 1259 goto out; 1260 } 1261 } else { 1262 chunk = &ld->chunk[did]; 1263 chan = chunk->channel; 1264 target = chunk->target; 1265 } 1266 1267 out: 1268 if (chan >= IPS_MAXCHANS || target >= IPS_MAXTARGETS) 1269 return (EINVAL); 1270 dev = &conf->dev[chan][target]; 1271 1272 bd->bd_channel = chan; 1273 bd->bd_target = target; 1274 bd->bd_lun = 0; 1275 bd->bd_size = (u_quad_t)letoh32(dev->seccnt) * IPS_SECSZ; 1276 1277 bzero(bd->bd_vendor, sizeof(bd->bd_vendor)); 1278 memcpy(bd->bd_vendor, dev->devid, MIN(sizeof(bd->bd_vendor), 1279 sizeof(dev->devid))); 1280 strlcpy(bd->bd_procdev, sc->sc_pt[chan].pt_procdev, 1281 sizeof(bd->bd_procdev)); 1282 1283 if (dev->state & IPS_DVS_READY) { 1284 bd->bd_status = BIOC_SDUNUSED; 1285 if (dev->state & IPS_DVS_MEMBER) 1286 bd->bd_status = BIOC_SDONLINE; 1287 if (dev->state & IPS_DVS_SPARE) 1288 bd->bd_status = BIOC_SDHOTSPARE; 1289 if (dev->state & IPS_DVS_REBUILD) 1290 bd->bd_status = BIOC_SDREBUILD; 1291 } else { 1292 bd->bd_status = BIOC_SDOFFLINE; 1293 } 1294 1295 DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_disk: vid %d, did %d, channel %d, " 1296 "target %d, size %llu, state 0x%02x\n", sc->sc_dev.dv_xname, 1297 vid, did, bd->bd_channel, bd->bd_target, bd->bd_size, dev->state)); 1298 1299 return (0); 1300 } 1301 1302 int 1303 ips_ioctl_setstate(struct ips_softc *sc, struct bioc_setstate *bs) 1304 { 1305 struct ips_conf *conf = &sc->sc_info->conf; 1306 struct ips_dev *dev; 1307 int state, error; 1308 1309 if (bs->bs_channel >= IPS_MAXCHANS || bs->bs_target >= IPS_MAXTARGETS) 1310 return (EINVAL); 1311 if ((error = ips_getconf(sc, 0))) 1312 return (error); 1313 dev = &conf->dev[bs->bs_channel][bs->bs_target]; 1314 state = dev->state; 1315 1316 switch (bs->bs_status) { 1317 case BIOC_SSONLINE: 1318 state |= IPS_DVS_READY; 1319 break; 1320 case BIOC_SSOFFLINE: 1321 state &= ~IPS_DVS_READY; 1322 break; 1323 case BIOC_SSHOTSPARE: 1324 state |= IPS_DVS_SPARE; 1325 break; 1326 case BIOC_SSREBUILD: 1327 return (ips_rebuild(sc, bs->bs_channel, bs->bs_target, 1328 bs->bs_channel, bs->bs_target, 0)); 1329 default: 1330 return (EINVAL); 1331 } 1332 1333 return (ips_setstate(sc, bs->bs_channel, bs->bs_target, state, 0)); 1334 } 1335 #endif /* NBIO > 0 */ 1336 1337 #ifndef SMALL_KERNEL 1338 void 1339 ips_sensors(void *arg) 1340 { 1341 struct ips_softc *sc = arg; 1342 struct ips_conf *conf = &sc->sc_info->conf; 1343 struct ips_ld *ld; 1344 int i; 1345 1346 /* ips_sensors() runs from work queue thus allowed to sleep */ 1347 if (ips_getconf(sc, 0)) { 1348 DPRINTF(IPS_D_ERR, ("%s: ips_sensors: ips_getconf failed\n", 1349 sc->sc_dev.dv_xname)); 1350 1351 for (i = 0; i < sc->sc_nunits; i++) { 1352 sc->sc_sensors[i].value = 0; 1353 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN; 1354 } 1355 return; 1356 } 1357 1358 DPRINTF(IPS_D_INFO, ("%s: ips_sensors:", sc->sc_dev.dv_xname)); 1359 for (i = 0; i < sc->sc_nunits; i++) { 1360 ld = &conf->ld[i]; 1361 DPRINTF(IPS_D_INFO, (" ld%d.state 0x%02x", i, ld->state)); 1362 switch (ld->state) { 1363 case IPS_DS_ONLINE: 1364 sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE; 1365 sc->sc_sensors[i].status = SENSOR_S_OK; 1366 break; 1367 case IPS_DS_DEGRADED: 1368 sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL; 1369 sc->sc_sensors[i].status = SENSOR_S_WARN; 1370 break; 1371 case IPS_DS_OFFLINE: 1372 sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL; 1373 sc->sc_sensors[i].status = SENSOR_S_CRIT; 1374 break; 1375 default: 1376 sc->sc_sensors[i].value = 0; 1377 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN; 1378 } 1379 } 1380 DPRINTF(IPS_D_INFO, ("\n")); 1381 } 1382 #endif /* !SMALL_KERNEL */ 1383 1384 int 1385 ips_load_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs) 1386 { 1387 struct ips_cmdb *cmdb = ccb->c_cmdbva; 1388 struct ips_cmd *cmd = &cmdb->cmd; 1389 struct ips_sg *sg = cmdb->sg; 1390 int nsegs, i; 1391 1392 if (xs->datalen == 0) 1393 return (0); 1394 1395 /* Map data buffer into DMA segments */ 1396 if (bus_dmamap_load(sc->sc_dmat, ccb->c_dmam, xs->data, xs->datalen, 1397 NULL, (xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : 0))) 1398 return (1); 1399 bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,ccb->c_dmam->dm_mapsize, 1400 xs->flags & SCSI_DATA_IN ? BUS_DMASYNC_PREREAD : 1401 BUS_DMASYNC_PREWRITE); 1402 1403 if ((nsegs = ccb->c_dmam->dm_nsegs) > IPS_MAXSGS) 1404 return (1); 1405 1406 if (nsegs > 1) { 1407 cmd->sgcnt = nsegs; 1408 cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb, 1409 sg)); 1410 1411 /* Fill in scatter-gather array */ 1412 for (i = 0; i < nsegs; i++) { 1413 sg[i].addr = htole32(ccb->c_dmam->dm_segs[i].ds_addr); 1414 sg[i].size = htole32(ccb->c_dmam->dm_segs[i].ds_len); 1415 } 1416 } else { 1417 cmd->sgcnt = 0; 1418 cmd->sgaddr = htole32(ccb->c_dmam->dm_segs[0].ds_addr); 1419 } 1420 1421 return (0); 1422 } 1423 1424 int 1425 ips_start_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs) 1426 { 1427 ccb->c_flags = xs->flags; 1428 ccb->c_xfer = xs; 1429 int ispoll = xs->flags & SCSI_POLL; 1430 1431 if (!ispoll) { 1432 timeout_set(&xs->stimeout, ips_timeout, ccb); 1433 timeout_add_msec(&xs->stimeout, xs->timeout); 1434 } 1435 1436 /* 1437 * Return value not used here because ips_cmd() must complete 1438 * scsi_xfer on any failure and SCSI layer will handle possible 1439 * errors. 1440 */ 1441 (void)ips_cmd(sc, ccb); 1442 1443 if (ispoll) 1444 return (COMPLETE); 1445 else 1446 return (SUCCESSFULLY_QUEUED); 1447 } 1448 1449 int 1450 ips_cmd(struct ips_softc *sc, struct ips_ccb *ccb) 1451 { 1452 struct ips_cmd *cmd = ccb->c_cmdbva; 1453 int s, error = 0; 1454 1455 DPRINTF(IPS_D_XFER, ("%s: ips_cmd: id 0x%02x, flags 0x%x, xs %p, " 1456 "code 0x%02x, drive %d, sgcnt %d, lba %d, sgaddr 0x%08x, " 1457 "seccnt %d\n", sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags, 1458 ccb->c_xfer, cmd->code, cmd->drive, cmd->sgcnt, letoh32(cmd->lba), 1459 letoh32(cmd->sgaddr), letoh16(cmd->seccnt))); 1460 1461 cmd->id = ccb->c_id; 1462 1463 /* Post command to controller and optionally wait for completion */ 1464 s = splbio(); 1465 ips_exec(sc, ccb); 1466 ccb->c_state = IPS_CCB_QUEUED; 1467 if (ccb->c_flags & SCSI_POLL) 1468 error = ips_poll(sc, ccb); 1469 splx(s); 1470 1471 return (error); 1472 } 1473 1474 int 1475 ips_poll(struct ips_softc *sc, struct ips_ccb *ccb) 1476 { 1477 struct timeval tv; 1478 int error, timo; 1479 1480 splassert(IPL_BIO); 1481 1482 if (ccb->c_flags & SCSI_NOSLEEP) { 1483 /* busy-wait */ 1484 DPRINTF(IPS_D_XFER, ("%s: ips_poll: busy-wait\n", 1485 sc->sc_dev.dv_xname)); 1486 1487 for (timo = 10000; timo > 0; timo--) { 1488 delay(100); 1489 ips_intr(sc); 1490 if (ccb->c_state == IPS_CCB_DONE) 1491 break; 1492 } 1493 } else { 1494 /* sleep */ 1495 timo = ccb->c_xfer ? ccb->c_xfer->timeout : IPS_TIMEOUT; 1496 tv.tv_sec = timo / 1000; 1497 tv.tv_usec = (timo % 1000) * 1000; 1498 timo = tvtohz(&tv); 1499 1500 DPRINTF(IPS_D_XFER, ("%s: ips_poll: sleep %d hz\n", 1501 sc->sc_dev.dv_xname, timo)); 1502 tsleep(ccb, PRIBIO + 1, "ipscmd", timo); 1503 } 1504 DPRINTF(IPS_D_XFER, ("%s: ips_poll: state %d\n", sc->sc_dev.dv_xname, 1505 ccb->c_state)); 1506 1507 if (ccb->c_state != IPS_CCB_DONE) 1508 /* 1509 * Command never completed. Fake hardware status byte 1510 * to indicate timeout. 1511 */ 1512 ccb->c_stat = IPS_STAT_TIMO; 1513 1514 ips_done(sc, ccb); 1515 error = ccb->c_error; 1516 ips_ccb_put(sc, ccb); 1517 1518 return (error); 1519 } 1520 1521 void 1522 ips_done(struct ips_softc *sc, struct ips_ccb *ccb) 1523 { 1524 splassert(IPL_BIO); 1525 1526 DPRINTF(IPS_D_XFER, ("%s: ips_done: id 0x%02x, flags 0x%x, xs %p\n", 1527 sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags, ccb->c_xfer)); 1528 1529 ccb->c_error = ips_error(sc, ccb); 1530 ccb->c_done(sc, ccb); 1531 } 1532 1533 void 1534 ips_done_xs(struct ips_softc *sc, struct ips_ccb *ccb) 1535 { 1536 struct scsi_xfer *xs = ccb->c_xfer; 1537 1538 if (!(xs->flags & SCSI_POLL)) 1539 timeout_del(&xs->stimeout); 1540 1541 if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 1542 bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0, 1543 ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ? 1544 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1545 bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam); 1546 } 1547 1548 xs->resid = 0; 1549 xs->error = ips_error_xs(sc, ccb); 1550 xs->flags |= ITSDONE; 1551 scsi_done(xs); 1552 } 1553 1554 void 1555 ips_done_pt(struct ips_softc *sc, struct ips_ccb *ccb) 1556 { 1557 struct scsi_xfer *xs = ccb->c_xfer; 1558 struct ips_cmdb *cmdb = ccb->c_cmdbva; 1559 struct ips_dcdb *dcdb = &cmdb->dcdb; 1560 int done = letoh16(dcdb->datalen); 1561 1562 if (!(xs->flags & SCSI_POLL)) 1563 timeout_del(&xs->stimeout); 1564 1565 if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 1566 bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0, 1567 ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ? 1568 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1569 bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam); 1570 } 1571 1572 if (done && done < xs->datalen) 1573 xs->resid = xs->datalen - done; 1574 else 1575 xs->resid = 0; 1576 xs->error = ips_error_xs(sc, ccb); 1577 xs->status = dcdb->status; 1578 1579 if (xs->error == XS_SENSE) 1580 memcpy(&xs->sense, dcdb->sense, MIN(sizeof(xs->sense), 1581 sizeof(dcdb->sense))); 1582 1583 if (xs->cmd->opcode == INQUIRY && xs->error == XS_NOERROR) { 1584 int type = ((struct scsi_inquiry_data *)xs->data)->device & 1585 SID_TYPE; 1586 1587 if (type == T_DIRECT) 1588 /* mask physical drives */ 1589 xs->error = XS_DRIVER_STUFFUP; 1590 } 1591 1592 xs->flags |= ITSDONE; 1593 scsi_done(xs); 1594 } 1595 1596 void 1597 ips_done_mgmt(struct ips_softc *sc, struct ips_ccb *ccb) 1598 { 1599 if (ccb->c_flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) 1600 bus_dmamap_sync(sc->sc_dmat, sc->sc_infom.dm_map, 0, 1601 sc->sc_infom.dm_map->dm_mapsize, 1602 ccb->c_flags & SCSI_DATA_IN ? BUS_DMASYNC_POSTREAD : 1603 BUS_DMASYNC_POSTWRITE); 1604 } 1605 1606 int 1607 ips_error(struct ips_softc *sc, struct ips_ccb *ccb) 1608 { 1609 struct ips_cmdb *cmdb = ccb->c_cmdbva; 1610 struct ips_cmd *cmd = &cmdb->cmd; 1611 struct ips_dcdb *dcdb = &cmdb->dcdb; 1612 struct scsi_xfer *xs = ccb->c_xfer; 1613 u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat); 1614 1615 if (gsc == IPS_STAT_OK) 1616 return (0); 1617 1618 DPRINTF(IPS_D_ERR, ("%s: ips_error: stat 0x%02x, estat 0x%02x, " 1619 "cmd code 0x%02x, drive %d, sgcnt %d, lba %u, seccnt %d", 1620 sc->sc_dev.dv_xname, ccb->c_stat, ccb->c_estat, cmd->code, 1621 cmd->drive, cmd->sgcnt, letoh32(cmd->lba), letoh16(cmd->seccnt))); 1622 if (cmd->code == IPS_CMD_DCDB || cmd->code == IPS_CMD_DCDB_SG) { 1623 int i; 1624 1625 DPRINTF(IPS_D_ERR, (", dcdb device 0x%02x, attr 0x%02x, " 1626 "datalen %d, sgcnt %d, status 0x%02x", 1627 dcdb->device, dcdb->attr, letoh16(dcdb->datalen), 1628 dcdb->sgcnt, dcdb->status)); 1629 1630 DPRINTF(IPS_D_ERR, (", cdb")); 1631 for (i = 0; i < dcdb->cdblen; i++) 1632 DPRINTF(IPS_D_ERR, (" %x", dcdb->cdb[i])); 1633 if (ccb->c_estat == IPS_ESTAT_CKCOND) { 1634 DPRINTF(IPS_D_ERR, (", sense")); 1635 for (i = 0; i < dcdb->senselen; i++) 1636 DPRINTF(IPS_D_ERR, (" %x", dcdb->sense[i])); 1637 } 1638 } 1639 DPRINTF(IPS_D_ERR, ("\n")); 1640 1641 switch (gsc) { 1642 case IPS_STAT_RECOV: 1643 return (0); 1644 case IPS_STAT_INVOP: 1645 case IPS_STAT_INVCMD: 1646 case IPS_STAT_INVPARM: 1647 return (EINVAL); 1648 case IPS_STAT_BUSY: 1649 return (EBUSY); 1650 case IPS_STAT_TIMO: 1651 return (ETIMEDOUT); 1652 case IPS_STAT_PDRVERR: 1653 switch (ccb->c_estat) { 1654 case IPS_ESTAT_SELTIMO: 1655 return (ENODEV); 1656 case IPS_ESTAT_OURUN: 1657 if (xs && letoh16(dcdb->datalen) < xs->datalen) 1658 /* underrun */ 1659 return (0); 1660 break; 1661 case IPS_ESTAT_RECOV: 1662 return (0); 1663 } 1664 break; 1665 } 1666 1667 return (EIO); 1668 } 1669 1670 int 1671 ips_error_xs(struct ips_softc *sc, struct ips_ccb *ccb) 1672 { 1673 struct ips_cmdb *cmdb = ccb->c_cmdbva; 1674 struct ips_dcdb *dcdb = &cmdb->dcdb; 1675 struct scsi_xfer *xs = ccb->c_xfer; 1676 u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat); 1677 1678 /* Map hardware error codes to SCSI ones */ 1679 switch (gsc) { 1680 case IPS_STAT_OK: 1681 case IPS_STAT_RECOV: 1682 return (XS_NOERROR); 1683 case IPS_STAT_BUSY: 1684 return (XS_BUSY); 1685 case IPS_STAT_TIMO: 1686 return (XS_TIMEOUT); 1687 case IPS_STAT_PDRVERR: 1688 switch (ccb->c_estat) { 1689 case IPS_ESTAT_SELTIMO: 1690 return (XS_SELTIMEOUT); 1691 case IPS_ESTAT_OURUN: 1692 if (xs && letoh16(dcdb->datalen) < xs->datalen) 1693 /* underrun */ 1694 return (XS_NOERROR); 1695 break; 1696 case IPS_ESTAT_HOSTRST: 1697 case IPS_ESTAT_DEVRST: 1698 return (XS_RESET); 1699 case IPS_ESTAT_RECOV: 1700 return (XS_NOERROR); 1701 case IPS_ESTAT_CKCOND: 1702 return (XS_SENSE); 1703 } 1704 break; 1705 } 1706 1707 return (XS_DRIVER_STUFFUP); 1708 } 1709 1710 int 1711 ips_intr(void *arg) 1712 { 1713 struct ips_softc *sc = arg; 1714 struct ips_ccb *ccb; 1715 u_int32_t status; 1716 int id; 1717 1718 DPRINTF(IPS_D_XFER, ("%s: ips_intr", sc->sc_dev.dv_xname)); 1719 if (!ips_isintr(sc)) { 1720 DPRINTF(IPS_D_XFER, (": not ours\n")); 1721 return (0); 1722 } 1723 DPRINTF(IPS_D_XFER, ("\n")); 1724 1725 /* Process completed commands */ 1726 while ((status = ips_status(sc)) != 0xffffffff) { 1727 DPRINTF(IPS_D_XFER, ("%s: ips_intr: status 0x%08x\n", 1728 sc->sc_dev.dv_xname, status)); 1729 1730 id = IPS_STAT_ID(status); 1731 if (id >= sc->sc_nccbs) { 1732 DPRINTF(IPS_D_ERR, ("%s: ips_intr: invalid id %d\n", 1733 sc->sc_dev.dv_xname, id)); 1734 continue; 1735 } 1736 1737 ccb = &sc->sc_ccb[id]; 1738 if (ccb->c_state != IPS_CCB_QUEUED) { 1739 DPRINTF(IPS_D_ERR, ("%s: ips_intr: cmd 0x%02x not " 1740 "queued, state %d, status 0x%08x\n", 1741 sc->sc_dev.dv_xname, ccb->c_id, ccb->c_state, 1742 status)); 1743 continue; 1744 } 1745 1746 ccb->c_state = IPS_CCB_DONE; 1747 ccb->c_stat = IPS_STAT_BASIC(status); 1748 ccb->c_estat = IPS_STAT_EXT(status); 1749 1750 if (ccb->c_flags & SCSI_POLL) { 1751 wakeup(ccb); 1752 } else { 1753 ips_done(sc, ccb); 1754 ips_ccb_put(sc, ccb); 1755 } 1756 } 1757 1758 return (1); 1759 } 1760 1761 void 1762 ips_timeout(void *arg) 1763 { 1764 struct ips_ccb *ccb = arg; 1765 struct ips_softc *sc = ccb->c_sc; 1766 struct scsi_xfer *xs = ccb->c_xfer; 1767 int s; 1768 1769 s = splbio(); 1770 if (xs) 1771 sc_print_addr(xs->sc_link); 1772 else 1773 printf("%s: ", sc->sc_dev.dv_xname); 1774 printf("timeout\n"); 1775 1776 /* 1777 * Command never completed. Fake hardware status byte 1778 * to indicate timeout. 1779 * XXX: need to remove command from controller. 1780 */ 1781 ccb->c_stat = IPS_STAT_TIMO; 1782 ips_done(sc, ccb); 1783 ips_ccb_put(sc, ccb); 1784 splx(s); 1785 } 1786 1787 int 1788 ips_getadapterinfo(struct ips_softc *sc, int flags) 1789 { 1790 struct ips_ccb *ccb; 1791 struct ips_cmd *cmd; 1792 int s; 1793 1794 s = splbio(); 1795 ccb = ips_ccb_get(sc); 1796 splx(s); 1797 if (ccb == NULL) 1798 return (1); 1799 1800 ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags; 1801 ccb->c_done = ips_done_mgmt; 1802 1803 cmd = ccb->c_cmdbva; 1804 cmd->code = IPS_CMD_GETADAPTERINFO; 1805 cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info, 1806 adapter)); 1807 1808 return (ips_cmd(sc, ccb)); 1809 } 1810 1811 int 1812 ips_getdriveinfo(struct ips_softc *sc, int flags) 1813 { 1814 struct ips_ccb *ccb; 1815 struct ips_cmd *cmd; 1816 int s; 1817 1818 s = splbio(); 1819 ccb = ips_ccb_get(sc); 1820 splx(s); 1821 if (ccb == NULL) 1822 return (1); 1823 1824 ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags; 1825 ccb->c_done = ips_done_mgmt; 1826 1827 cmd = ccb->c_cmdbva; 1828 cmd->code = IPS_CMD_GETDRIVEINFO; 1829 cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info, 1830 drive)); 1831 1832 return (ips_cmd(sc, ccb)); 1833 } 1834 1835 int 1836 ips_getconf(struct ips_softc *sc, int flags) 1837 { 1838 struct ips_ccb *ccb; 1839 struct ips_cmd *cmd; 1840 int s; 1841 1842 s = splbio(); 1843 ccb = ips_ccb_get(sc); 1844 splx(s); 1845 if (ccb == NULL) 1846 return (1); 1847 1848 ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags; 1849 ccb->c_done = ips_done_mgmt; 1850 1851 cmd = ccb->c_cmdbva; 1852 cmd->code = IPS_CMD_READCONF; 1853 cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info, 1854 conf)); 1855 1856 return (ips_cmd(sc, ccb)); 1857 } 1858 1859 int 1860 ips_getpg5(struct ips_softc *sc, int flags) 1861 { 1862 struct ips_ccb *ccb; 1863 struct ips_cmd *cmd; 1864 int s; 1865 1866 s = splbio(); 1867 ccb = ips_ccb_get(sc); 1868 splx(s); 1869 if (ccb == NULL) 1870 return (1); 1871 1872 ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags; 1873 ccb->c_done = ips_done_mgmt; 1874 1875 cmd = ccb->c_cmdbva; 1876 cmd->code = IPS_CMD_RWNVRAM; 1877 cmd->drive = 5; 1878 cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info, 1879 pg5)); 1880 1881 return (ips_cmd(sc, ccb)); 1882 } 1883 1884 #if NBIO > 0 1885 int 1886 ips_getrblstat(struct ips_softc *sc, int flags) 1887 { 1888 struct ips_ccb *ccb; 1889 struct ips_cmd *cmd; 1890 int s; 1891 1892 s = splbio(); 1893 ccb = ips_ccb_get(sc); 1894 splx(s); 1895 if (ccb == NULL) 1896 return (1); 1897 1898 ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags; 1899 ccb->c_done = ips_done_mgmt; 1900 1901 cmd = ccb->c_cmdbva; 1902 cmd->code = IPS_CMD_REBUILDSTATUS; 1903 cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info, 1904 rblstat)); 1905 1906 return (ips_cmd(sc, ccb)); 1907 } 1908 1909 int 1910 ips_setstate(struct ips_softc *sc, int chan, int target, int state, int flags) 1911 { 1912 struct ips_ccb *ccb; 1913 struct ips_cmd *cmd; 1914 int s; 1915 1916 s = splbio(); 1917 ccb = ips_ccb_get(sc); 1918 splx(s); 1919 if (ccb == NULL) 1920 return (1); 1921 1922 ccb->c_flags = SCSI_POLL | flags; 1923 ccb->c_done = ips_done_mgmt; 1924 1925 cmd = ccb->c_cmdbva; 1926 cmd->code = IPS_CMD_SETSTATE; 1927 cmd->drive = chan; 1928 cmd->sgcnt = target; 1929 cmd->seg4g = state; 1930 1931 return (ips_cmd(sc, ccb)); 1932 } 1933 1934 int 1935 ips_rebuild(struct ips_softc *sc, int chan, int target, int nchan, 1936 int ntarget, int flags) 1937 { 1938 struct ips_ccb *ccb; 1939 struct ips_cmd *cmd; 1940 int s; 1941 1942 s = splbio(); 1943 ccb = ips_ccb_get(sc); 1944 splx(s); 1945 if (ccb == NULL) 1946 return (1); 1947 1948 ccb->c_flags = SCSI_POLL | flags; 1949 ccb->c_done = ips_done_mgmt; 1950 1951 cmd = ccb->c_cmdbva; 1952 cmd->code = IPS_CMD_REBUILD; 1953 cmd->drive = chan; 1954 cmd->sgcnt = target; 1955 cmd->seccnt = htole16(ntarget << 8 | nchan); 1956 1957 return (ips_cmd(sc, ccb)); 1958 } 1959 #endif /* NBIO > 0 */ 1960 1961 void 1962 ips_copperhead_exec(struct ips_softc *sc, struct ips_ccb *ccb) 1963 { 1964 u_int32_t reg; 1965 int timeout; 1966 1967 for (timeout = 100; timeout-- > 0; delay(100)) { 1968 reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC); 1969 if ((reg & IPS_REG_CCC_SEM) == 0) 1970 break; 1971 } 1972 if (timeout < 0) { 1973 printf("%s: semaphore timeout\n", sc->sc_dev.dv_xname); 1974 return; 1975 } 1976 1977 bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCSA, ccb->c_cmdbpa); 1978 bus_space_write_2(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC, 1979 IPS_REG_CCC_START); 1980 } 1981 1982 void 1983 ips_copperhead_intren(struct ips_softc *sc) 1984 { 1985 bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, IPS_REG_HIS_EN); 1986 } 1987 1988 int 1989 ips_copperhead_isintr(struct ips_softc *sc) 1990 { 1991 u_int8_t reg; 1992 1993 reg = bus_space_read_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS); 1994 bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, reg); 1995 if (reg != 0xff && (reg & IPS_REG_HIS_SCE)) 1996 return (1); 1997 1998 return (0); 1999 } 2000 2001 u_int32_t 2002 ips_copperhead_status(struct ips_softc *sc) 2003 { 2004 u_int32_t sqhead, sqtail, status; 2005 2006 sqhead = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH); 2007 DPRINTF(IPS_D_XFER, ("%s: sqhead 0x%08x, sqtail 0x%08x\n", 2008 sc->sc_dev.dv_xname, sqhead, sc->sc_sqtail)); 2009 2010 sqtail = sc->sc_sqtail + sizeof(u_int32_t); 2011 if (sqtail == sc->sc_sqm.dm_paddr + IPS_SQSZ) 2012 sqtail = sc->sc_sqm.dm_paddr; 2013 if (sqtail == sqhead) 2014 return (0xffffffff); 2015 2016 sc->sc_sqtail = sqtail; 2017 if (++sc->sc_sqidx == IPS_MAXCMDS) 2018 sc->sc_sqidx = 0; 2019 status = letoh32(sc->sc_sqbuf[sc->sc_sqidx]); 2020 bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT, sqtail); 2021 2022 return (status); 2023 } 2024 2025 void 2026 ips_morpheus_exec(struct ips_softc *sc, struct ips_ccb *ccb) 2027 { 2028 bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_IQP, ccb->c_cmdbpa); 2029 } 2030 2031 void 2032 ips_morpheus_intren(struct ips_softc *sc) 2033 { 2034 u_int32_t reg; 2035 2036 reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM); 2037 reg &= ~IPS_REG_OIM_DS; 2038 bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM, reg); 2039 } 2040 2041 int 2042 ips_morpheus_isintr(struct ips_softc *sc) 2043 { 2044 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIS) & 2045 IPS_REG_OIS_PEND); 2046 } 2047 2048 u_int32_t 2049 ips_morpheus_status(struct ips_softc *sc) 2050 { 2051 u_int32_t reg; 2052 2053 reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OQP); 2054 DPRINTF(IPS_D_XFER, ("%s: status 0x%08x\n", sc->sc_dev.dv_xname, reg)); 2055 2056 return (reg); 2057 } 2058 2059 struct ips_ccb * 2060 ips_ccb_alloc(struct ips_softc *sc, int n) 2061 { 2062 struct ips_ccb *ccb; 2063 int i; 2064 2065 if ((ccb = malloc(n * sizeof(*ccb), M_DEVBUF, 2066 M_NOWAIT | M_ZERO)) == NULL) 2067 return (NULL); 2068 2069 for (i = 0; i < n; i++) { 2070 ccb[i].c_sc = sc; 2071 ccb[i].c_id = i; 2072 ccb[i].c_cmdbva = (char *)sc->sc_cmdbm.dm_vaddr + 2073 i * sizeof(struct ips_cmdb); 2074 ccb[i].c_cmdbpa = sc->sc_cmdbm.dm_paddr + 2075 i * sizeof(struct ips_cmdb); 2076 if (bus_dmamap_create(sc->sc_dmat, IPS_MAXFER, IPS_MAXSGS, 2077 IPS_MAXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 2078 &ccb[i].c_dmam)) 2079 goto fail; 2080 } 2081 2082 return (ccb); 2083 fail: 2084 for (; i > 0; i--) 2085 bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam); 2086 free(ccb, M_DEVBUF); 2087 return (NULL); 2088 } 2089 2090 void 2091 ips_ccb_free(struct ips_softc *sc, struct ips_ccb *ccb, int n) 2092 { 2093 int i; 2094 2095 for (i = 0; i < n; i++) 2096 bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam); 2097 free(ccb, M_DEVBUF); 2098 } 2099 2100 struct ips_ccb * 2101 ips_ccb_get(struct ips_softc *sc) 2102 { 2103 struct ips_ccb *ccb; 2104 2105 splassert(IPL_BIO); 2106 2107 if ((ccb = TAILQ_FIRST(&sc->sc_ccbq_free)) != NULL) { 2108 TAILQ_REMOVE(&sc->sc_ccbq_free, ccb, c_link); 2109 ccb->c_flags = 0; 2110 ccb->c_xfer = NULL; 2111 bzero(ccb->c_cmdbva, sizeof(struct ips_cmdb)); 2112 } 2113 2114 return (ccb); 2115 } 2116 2117 void 2118 ips_ccb_put(struct ips_softc *sc, struct ips_ccb *ccb) 2119 { 2120 splassert(IPL_BIO); 2121 2122 ccb->c_state = IPS_CCB_FREE; 2123 TAILQ_INSERT_TAIL(&sc->sc_ccbq_free, ccb, c_link); 2124 } 2125 2126 int 2127 ips_dmamem_alloc(struct dmamem *dm, bus_dma_tag_t tag, bus_size_t size) 2128 { 2129 int nsegs; 2130 2131 dm->dm_tag = tag; 2132 dm->dm_size = size; 2133 2134 if (bus_dmamap_create(tag, size, 1, size, 0, 2135 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map)) 2136 return (1); 2137 if (bus_dmamem_alloc(tag, size, 0, 0, &dm->dm_seg, 1, &nsegs, 2138 BUS_DMA_NOWAIT)) 2139 goto fail1; 2140 if (bus_dmamem_map(tag, &dm->dm_seg, 1, size, (caddr_t *)&dm->dm_vaddr, 2141 BUS_DMA_NOWAIT)) 2142 goto fail2; 2143 if (bus_dmamap_load(tag, dm->dm_map, dm->dm_vaddr, size, NULL, 2144 BUS_DMA_NOWAIT)) 2145 goto fail3; 2146 2147 return (0); 2148 2149 fail3: 2150 bus_dmamem_unmap(tag, dm->dm_vaddr, size); 2151 fail2: 2152 bus_dmamem_free(tag, &dm->dm_seg, 1); 2153 fail1: 2154 bus_dmamap_destroy(tag, dm->dm_map); 2155 return (1); 2156 } 2157 2158 void 2159 ips_dmamem_free(struct dmamem *dm) 2160 { 2161 bus_dmamap_unload(dm->dm_tag, dm->dm_map); 2162 bus_dmamem_unmap(dm->dm_tag, dm->dm_vaddr, dm->dm_size); 2163 bus_dmamem_free(dm->dm_tag, &dm->dm_seg, 1); 2164 bus_dmamap_destroy(dm->dm_tag, dm->dm_map); 2165 } 2166