/*	$OpenBSD: ips.c,v 1.30 2007/09/17 01:33:33 krw Exp $	*/

/*
 * Copyright (c) 2006, 2007 Alexander Yurchenko <grange@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * IBM (Adaptec) ServeRAID controller driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/queue.h>

#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define IPS_DEBUG	/* XXX: remove when driver becomes stable */

/* Debug levels */
#define IPS_D_ERR	0x0001	/* errors */
#define IPS_D_INFO	0x0002	/* information */
#define IPS_D_XFER	0x0004	/* transfers */

#ifdef IPS_DEBUG
#define DPRINTF(a, b)	do { if (ips_debug & (a)) printf b; } while (0)
int ips_debug = IPS_D_ERR;
#else
#define DPRINTF(a, b)
#endif

#define IPS_MAXDRIVES		8
#define IPS_MAXCHANS		4
#define IPS_MAXTARGETS		15
#define IPS_MAXCMDS		128

#define IPS_MAXFER		(64 * 1024)
#define IPS_MAXSGS		16
#define IPS_MAXCMDSZ		(IPS_CMDSZ + IPS_MAXSGS * IPS_SGSZ)

#define IPS_CMDSZ		sizeof(struct ips_cmd)
#define IPS_SGSZ		sizeof(struct ips_sg)
#define IPS_SECSZ		512

/* Command codes */
#define IPS_CMD_READ		0x02
#define IPS_CMD_WRITE		0x03
#define IPS_CMD_DCDB		0x04
#define IPS_CMD_GETADAPTERINFO	0x05
#define IPS_CMD_FLUSH		0x0a
#define IPS_CMD_ERRORTABLE	0x17
#define IPS_CMD_GETDRIVEINFO	0x19
#define IPS_CMD_RESETCHAN	0x1a
#define IPS_CMD_DOWNLOAD	0x20
#define IPS_CMD_RWBIOSFW	0x22
#define IPS_CMD_READCONF	0x38
#define IPS_CMD_GETSUBSYS	0x40
#define IPS_CMD_CONFIGSYNC	0x58
#define IPS_CMD_READ_SG		0x82
#define IPS_CMD_WRITE_SG	0x83
#define IPS_CMD_DCDB_SG		0x84
#define IPS_CMD_EXT_DCDB	0x95
#define IPS_CMD_EXT_DCDB_SG	0x96
#define IPS_CMD_RWNVRAMPAGE	0xbc
#define IPS_CMD_GETVERINFO	0xc6
#define IPS_CMD_FFDC		0xd7
#define IPS_CMD_SG		0x80

/* Register definitions */
#define IPS_REG_HIS		0x08	/* host interrupt status */
#define IPS_REG_HIS_SCE		0x01	/* status channel enqueue */
#define IPS_REG_HIS_EN		0x80	/* enable interrupts */
#define IPS_REG_CCSA		0x10	/* command channel system address */
#define IPS_REG_CCC		0x14	/* command channel control */
#define IPS_REG_CCC_SEM		0x0008	/* semaphore */
#define IPS_REG_CCC_START	0x101a	/* start command */
#define IPS_REG_OIS		0x30	/* outbound interrupt status */
#define IPS_REG_OIS_PEND	0x0008	/* interrupt is pending */
#define IPS_REG_OIM		0x34	/* outbound interrupt mask */
#define IPS_REG_OIM_DS		0x0008	/* disable interrupts */
#define IPS_REG_IQP		0x40	/* inbound queue port */
#define IPS_REG_OQP		0x44	/* outbound queue port */

#define IPS_REG_STAT_ID(x)	(((x) >> 8) & 0xff)
#define IPS_REG_STAT_BASIC(x)	(((x) >> 16) & 0xff)
#define IPS_REG_STAT_GSC(x)	(((x) >> 16) & 0x0f)
#define IPS_REG_STAT_EXT(x)	(((x) >> 24) & 0xff)

/* Command frame */
struct ips_cmd {
	u_int8_t	code;
	u_int8_t	id;
	u_int8_t	drive;
	u_int8_t	sgcnt;
	u_int32_t	lba;
	u_int32_t	sgaddr;
	u_int16_t	seccnt;
	u_int8_t	seg4g;
	u_int8_t	esg;
	u_int32_t	ccsar;
	u_int32_t	cccr;
};

/* Scatter-gather array element */
struct ips_sg {
	u_int32_t	addr;
	u_int32_t	size;
};

/* Data frames */
struct ips_adapterinfo {
	u_int8_t	drivecnt;
	u_int8_t	miscflag;
	u_int8_t	sltflag;
	u_int8_t	bstflag;
	u_int8_t	pwrchgcnt;
	u_int8_t	wrongaddrcnt;
	u_int8_t	unidentcnt;
	u_int8_t	nvramdevchgcnt;
	u_int8_t	codeblkver[8];
	u_int8_t	bootblkver[8];
	u_int32_t	drivesize[IPS_MAXDRIVES];
	u_int8_t	cmdcnt;
	u_int8_t	maxphysdevs;
	u_int16_t	flashrepgmcnt;
	u_int8_t	defunctdiskcnt;
	u_int8_t	rebuildflag;
	u_int8_t	offdrivecnt;
	u_int8_t	critdrivecnt;
	u_int16_t	confupdcnt;
	u_int8_t	blkflag;
	u_int8_t	__reserved;
	u_int16_t	deaddisk[IPS_MAXCHANS * (IPS_MAXTARGETS + 1)];
};

struct ips_driveinfo {
	u_int8_t	drivecnt;
	u_int8_t	__reserved[3];
	struct ips_drive {
		u_int8_t	id;
		u_int8_t	__reserved;
		u_int8_t	raid;
		u_int8_t	state;
		u_int32_t	seccnt;
	}		drive[IPS_MAXDRIVES];
};

/* Command control block */
struct ips_ccb {
	int		c_id;		/* command id */
	int		c_flags;	/* flags */
#define IPS_CCB_READ	0x0001
#define IPS_CCB_WRITE	0x0002
#define IPS_CCB_POLL	0x0004
#define IPS_CCB_RUN	0x0008

	void *		c_cmdva;	/* command frame virt addr */
	paddr_t		c_cmdpa;	/* command frame phys addr */
	bus_dmamap_t	c_dmam;		/* data buffer DMA map */
	struct scsi_xfer *c_xfer;	/* corresponding SCSI xfer */
	int		c_stat;		/* status word copy */
	int		c_estat;	/* ext status word copy */

	TAILQ_ENTRY(ips_ccb)	c_link;	/* queue link */
};

/* CCB queue */
TAILQ_HEAD(ips_ccbq, ips_ccb);

/* DMA-able chunk of memory */
struct dmamem {
	bus_dma_tag_t	dm_tag;
	bus_dmamap_t	dm_map;
	bus_dma_segment_t dm_seg;
	bus_size_t	dm_size;
	void *		dm_vaddr;
#define dm_paddr	dm_seg.ds_addr
};

struct ips_softc {
	struct device	sc_dev;

	struct scsi_link sc_scsi_link;

	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t	sc_dmat;

	const struct ips_chipset *sc_chip;

	struct ips_driveinfo sc_di;
	int		sc_nunits;

	struct dmamem	sc_cmdm;

	struct ips_ccb *sc_ccb;
	int		sc_nccbs;
	struct ips_ccbq	sc_ccbq_free;
	struct ips_ccbq	sc_ccbq_run;
};

int	ips_match(struct device *, void *, void *);
void	ips_attach(struct device *, struct device *, void *);

int	ips_scsi_cmd(struct scsi_xfer *);

int	ips_cmd(struct ips_softc *, int, int, u_int32_t, void *, size_t, int,
	    struct scsi_xfer *);
int	ips_poll(struct ips_softc *, struct ips_ccb *);
void	ips_done(struct ips_softc *, struct ips_ccb *);
int	ips_intr(void *);

int	ips_getadapterinfo(struct ips_softc *, struct ips_adapterinfo *);
int	ips_getdriveinfo(struct ips_softc *, struct ips_driveinfo *);
int	ips_flush(struct ips_softc *);

void	ips_copperhead_exec(struct ips_softc *, struct ips_ccb *);
void	ips_copperhead_init(struct ips_softc *);
void	ips_copperhead_intren(struct ips_softc *);
int	ips_copperhead_isintr(struct ips_softc *);
int	ips_copperhead_reset(struct ips_softc *);
u_int32_t ips_copperhead_status(struct ips_softc *);

void	ips_morpheus_exec(struct ips_softc *, struct ips_ccb *);
void	ips_morpheus_init(struct ips_softc *);
void	ips_morpheus_intren(struct ips_softc *);
int	ips_morpheus_isintr(struct ips_softc *);
int	ips_morpheus_reset(struct ips_softc *);
u_int32_t ips_morpheus_status(struct ips_softc *);

struct ips_ccb *ips_ccb_alloc(struct ips_softc *, int);
void	ips_ccb_free(struct ips_softc *, struct ips_ccb *, int);
struct ips_ccb *ips_ccb_get(struct ips_softc *);
void	ips_ccb_put(struct ips_softc *, struct ips_ccb *);

int	ips_dmamem_alloc(struct dmamem *, bus_dma_tag_t, bus_size_t);
void	ips_dmamem_free(struct dmamem *);

struct cfattach ips_ca = {
	sizeof(struct ips_softc),
	ips_match,
	ips_attach
};

struct cfdriver ips_cd = {
	NULL, "ips", DV_DULL
};

static struct scsi_adapter ips_scsi_adapter = {
	ips_scsi_cmd,
	minphys,
	NULL,
	NULL,
	NULL
};

static struct scsi_device ips_scsi_device = {
	NULL,
	NULL,
	NULL,
	NULL
};

static const struct pci_matchid ips_ids[] = {
	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID },
	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID2 },
	{ PCI_VENDOR_ADP2,	PCI_PRODUCT_ADP2_SERVERAID }
};

static const struct ips_chipset {
	const char *	ic_name;
	int		ic_bar;

	void		(*ic_exec)(struct ips_softc *, struct ips_ccb *);
	void		(*ic_init)(struct ips_softc *);
	void		(*ic_intren)(struct ips_softc *);
	int		(*ic_isintr)(struct ips_softc *);
	int		(*ic_reset)(struct ips_softc *);
	u_int32_t	(*ic_status)(struct ips_softc *);
} ips_chips[] = {
	{
		"Copperhead",
		0x14,
		ips_copperhead_exec,
		ips_copperhead_init,
		ips_copperhead_intren,
		ips_copperhead_isintr,
		ips_copperhead_reset,
		ips_copperhead_status
	},
	{
		"Morpheus",
		0x10,
		ips_morpheus_exec,
		ips_morpheus_init,
		ips_morpheus_intren,
		ips_morpheus_isintr,
		ips_morpheus_reset,
		ips_morpheus_status
	}
};

enum {
	IPS_CHIP_COPPERHEAD = 0,
	IPS_CHIP_MORPHEUS
};

#define ips_exec(s, c)	(s)->sc_chip->ic_exec((s), (c))
#define ips_init(s)	(s)->sc_chip->ic_init((s))
#define ips_intren(s)	(s)->sc_chip->ic_intren((s))
#define ips_isintr(s)	(s)->sc_chip->ic_isintr((s))
#define ips_reset(s)	(s)->sc_chip->ic_reset((s))
#define ips_status(s)	(s)->sc_chip->ic_status((s))

int
ips_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, ips_ids,
	    sizeof(ips_ids) / sizeof(ips_ids[0])));
}

void
ips_attach(struct device *parent, struct device *self, void *aux)
{
	struct ips_softc *sc = (struct ips_softc *)self;
	struct pci_attach_args *pa = aux;
	struct ips_ccb ccb0;
	struct scsibus_attach_args saa;
	struct ips_adapterinfo ai;
	pcireg_t maptype;
	bus_size_t iosize;
	pci_intr_handle_t ih;
	const char *intrstr;
	int i;

	sc->sc_dmat = pa->pa_dmat;

	/* Identify chipset */
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_IBM_SERVERAID:
		sc->sc_chip = &ips_chips[IPS_CHIP_COPPERHEAD];
		break;
	case PCI_PRODUCT_IBM_SERVERAID2:
	case PCI_PRODUCT_ADP2_SERVERAID:
		sc->sc_chip = &ips_chips[IPS_CHIP_MORPHEUS];
		break;
	default:
		printf(": unsupported chipset\n");
		return;
	}

	/* Map registers */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_chip->ic_bar);
	if (pci_mapreg_map(pa, sc->sc_chip->ic_bar, maptype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &iosize, 0)) {
		printf(": can't map registers\n");
		return;
	}

	/* Initialize hardware */
	ips_init(sc);

	/* Allocate command buffer */
	if (ips_dmamem_alloc(&sc->sc_cmdm, sc->sc_dmat,
	    IPS_MAXCMDS * IPS_MAXCMDSZ)) {
		printf(": can't allocate command buffer\n");
		goto fail1;
	}

	/* Bootstrap CCB queue */
	sc->sc_nccbs = 1;
	sc->sc_ccb = &ccb0;
	bzero(&ccb0, sizeof(ccb0));
	ccb0.c_cmdva = sc->sc_cmdm.dm_vaddr;
	ccb0.c_cmdpa = sc->sc_cmdm.dm_paddr;
	if (bus_dmamap_create(sc->sc_dmat, IPS_MAXFER, IPS_MAXSGS,
	    IPS_MAXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &ccb0.c_dmam)) {
		printf(": can't bootstrap CCB queue\n");
		goto fail2;
	}
	TAILQ_INIT(&sc->sc_ccbq_free);
	TAILQ_INIT(&sc->sc_ccbq_run);
	TAILQ_INSERT_TAIL(&sc->sc_ccbq_free, &ccb0, c_link);

	/* Get adapter info */
	if (ips_getadapterinfo(sc, &ai)) {
		printf(": can't get adapter info\n");
		bus_dmamap_destroy(sc->sc_dmat, ccb0.c_dmam);
		goto fail2;
	}

	/* Get logical drives info */
	if (ips_getdriveinfo(sc, &sc->sc_di)) {
		printf(": can't get logical drives info\n");
		bus_dmamap_destroy(sc->sc_dmat, ccb0.c_dmam);
		goto fail2;
	}
	sc->sc_nunits = sc->sc_di.drivecnt;

	bus_dmamap_destroy(sc->sc_dmat, ccb0.c_dmam);

	/* Initialize CCB queue */
	sc->sc_nccbs = ai.cmdcnt;
	if ((sc->sc_ccb = ips_ccb_alloc(sc, sc->sc_nccbs)) == NULL) {
		printf(": can't allocate CCB queue\n");
		goto fail2;
	}
	TAILQ_INIT(&sc->sc_ccbq_free);
	TAILQ_INIT(&sc->sc_ccbq_run);
	for (i = 0; i < sc->sc_nccbs; i++)
		TAILQ_INSERT_TAIL(&sc->sc_ccbq_free,
		    &sc->sc_ccb[i], c_link);

	/* Install interrupt handler */
	if (pci_intr_map(pa, &ih)) {
		printf(": can't map interrupt\n");
		goto fail3;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (pci_intr_establish(pa->pa_pc, ih, IPL_BIO, ips_intr, sc,
	    sc->sc_dev.dv_xname) == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail3;
	}
	printf(": %s\n", intrstr);

	/* Display adapter info */
	printf("%s", sc->sc_dev.dv_xname);
	printf(": %s", sc->sc_chip->ic_name);
	printf(", firmware %c%c%c%c%c%c%c",
	    ai.codeblkver[0], ai.codeblkver[1], ai.codeblkver[2],
	    ai.codeblkver[3], ai.codeblkver[4], ai.codeblkver[5],
	    ai.codeblkver[6]);
	printf(", bootblock %c%c%c%c%c%c%c",
	    ai.bootblkver[0], ai.bootblkver[1], ai.bootblkver[2],
	    ai.bootblkver[3], ai.bootblkver[4], ai.bootblkver[5],
	    ai.bootblkver[6]);
	printf(", %d CCBs, %d units", sc->sc_nccbs, sc->sc_nunits);
	printf("\n");

	/* Attach SCSI bus */
	if (sc->sc_nunits > 0)
		sc->sc_scsi_link.openings = sc->sc_nccbs / sc->sc_nunits;
	sc->sc_scsi_link.adapter_target = sc->sc_nunits;
	sc->sc_scsi_link.adapter_buswidth = sc->sc_nunits;
	sc->sc_scsi_link.device = &ips_scsi_device;
	sc->sc_scsi_link.adapter = &ips_scsi_adapter;
	sc->sc_scsi_link.adapter_softc = sc;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_scsi_link;
	config_found(self, &saa, scsiprint);

	/* Enable interrupts */
	ips_intren(sc);

	return;
fail3:
	ips_ccb_free(sc, sc->sc_ccb, sc->sc_nccbs);
fail2:
	ips_dmamem_free(&sc->sc_cmdm);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
}

int
ips_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct ips_softc *sc = link->adapter_softc;
	struct ips_drive *drive;
	struct scsi_inquiry_data *id;
	struct scsi_read_cap_data *rcd;
	struct scsi_sense_data *sd;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	int target = link->target;
	u_int32_t blkno, blkcnt;
	int cmd, error, flags, s;

	if (target >= sc->sc_nunits || link->lun != 0) {
		DPRINTF(IPS_D_INFO, ("%s: invalid scsi command, "
		    "target %d, lun %d\n", sc->sc_dev.dv_xname,
		    target, link->lun));
		xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		scsi_done(xs);
		splx(s);
		return (COMPLETE);
	}

	s = splbio();
	drive = &sc->sc_di.drive[target];
	xs->error = XS_NOERROR;

	/* Fake SCSI commands */
	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case WRITE_BIG:
	case WRITE_COMMAND:
		if (xs->cmdlen == sizeof(struct scsi_rw)) {
			rw = (void *)xs->cmd;
			blkno = _3btol(rw->addr) &
			    (SRW_TOPADDR << 16 | 0xffff);
			blkcnt = rw->length ? rw->length : 0x100;
		} else {
			rwb = (void *)xs->cmd;
			blkno = _4btol(rwb->addr);
			blkcnt = _2btol(rwb->length);
		}

		if (blkno >= letoh32(drive->seccnt) || blkno + blkcnt >
		    letoh32(drive->seccnt)) {
			DPRINTF(IPS_D_ERR, ("%s: invalid scsi command, "
			    "blkno %u, blkcnt %u\n", sc->sc_dev.dv_xname,
			    blkno, blkcnt));
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			break;
		}

		if (xs->flags & SCSI_DATA_IN) {
			cmd = IPS_CMD_READ;
			flags = IPS_CCB_READ;
		} else {
			cmd = IPS_CMD_WRITE;
			flags = IPS_CCB_WRITE;
		}
		if (xs->flags & SCSI_POLL)
			flags |= IPS_CCB_POLL;

		if ((error = ips_cmd(sc, cmd, target, blkno, xs->data,
		    blkcnt * IPS_SECSZ, flags, xs))) {
			if (error == ENOMEM) {
				splx(s);
				return (NO_CCB);
			} else if (flags & IPS_CCB_POLL) {
				splx(s);
				return (TRY_AGAIN_LATER);
			} else {
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				break;
			}
		}

		splx(s);
		if (flags & IPS_CCB_POLL)
			return (COMPLETE);
		else
			return (SUCCESSFULLY_QUEUED);
	case INQUIRY:
		id = (void *)xs->data;
		bzero(id, sizeof(*id));
		id->device = T_DIRECT;
		id->version = 2;
		id->response_format = 2;
		id->additional_length = 32;
		strlcpy(id->vendor, "IBM ", sizeof(id->vendor));
		snprintf(id->product, sizeof(id->product),
		    "ServeRAID RAID%d #%02d", drive->raid, target);
		strlcpy(id->revision, " ", sizeof(id->revision));
		break;
	case READ_CAPACITY:
		rcd = (void *)xs->data;
		bzero(rcd, sizeof(*rcd));
		_lto4b(letoh32(drive->seccnt) - 1, rcd->addr);
		_lto4b(IPS_SECSZ, rcd->length);
		break;
	case REQUEST_SENSE:
		sd = (void *)xs->data;
		bzero(sd, sizeof(*sd));
		sd->error_code = SSD_ERRCODE_CURRENT;
		sd->flags = SKEY_NO_SENSE;
		break;
	case SYNCHRONIZE_CACHE:
		if (ips_flush(sc))
			xs->error = XS_DRIVER_STUFFUP;
		break;
	case PREVENT_ALLOW:
	case START_STOP:
	case TEST_UNIT_READY:
		break;
	default:
		DPRINTF(IPS_D_INFO, ("%s: unsupported scsi command 0x%02x\n",
		    sc->sc_dev.dv_xname, xs->cmd->opcode));
		xs->error = XS_DRIVER_STUFFUP;
	}
	scsi_done(xs);
	splx(s);

	return (COMPLETE);
}

int
ips_cmd(struct ips_softc *sc, int code, int drive, u_int32_t lba, void *data,
    size_t size, int flags, struct scsi_xfer *xs)
{
	struct ips_cmd *cmd;
	struct ips_sg *sg;
	struct ips_ccb *ccb;
	int nsegs, i, error = 0;

	DPRINTF(IPS_D_XFER, ("%s: cmd code 0x%02x, drive %d, lba %u, "
	    "size %lu, flags 0x%02x\n", sc->sc_dev.dv_xname, code, drive, lba,
	    (u_long)size, flags));

	/* Grab free CCB */
	if ((ccb = ips_ccb_get(sc)) == NULL) {
		DPRINTF(IPS_D_ERR, ("%s: no free CCB\n", sc->sc_dev.dv_xname));
		return (ENOMEM);
	}

	ccb->c_flags = flags;
	ccb->c_xfer = xs;

	/* Fill in command frame */
	cmd = ccb->c_cmdva;
	cmd->code = code;
	cmd->id = ccb->c_id;
	cmd->drive = drive;
	cmd->lba = htole32(lba);
	cmd->seccnt = htole16(howmany(size, IPS_SECSZ));

	if (size > 0) {
		/* Map data buffer into DMA segments */
		if (bus_dmamap_load(sc->sc_dmat, ccb->c_dmam, data, size,
		    NULL, BUS_DMA_NOWAIT)) {
			printf("%s: can't load DMA map\n",
			    sc->sc_dev.dv_xname);
			return (1);	/* XXX: return code */
		}
		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
		    ccb->c_dmam->dm_mapsize,
		    flags & IPS_CCB_READ ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		if ((nsegs = ccb->c_dmam->dm_nsegs) > IPS_MAXSGS) {
			printf("%s: too many DMA segments\n",
			    sc->sc_dev.dv_xname);
			return (1);	/* XXX: return code */
		}

		if (nsegs > 1) {
			cmd->code |= IPS_CMD_SG;
			cmd->sgcnt = nsegs;
			cmd->sgaddr = htole32(ccb->c_cmdpa + IPS_CMDSZ);

			/* Fill in scatter-gather array */
			sg = (void *)(cmd + 1);
			for (i = 0; i < nsegs; i++) {
				sg[i].addr =
				    htole32(ccb->c_dmam->dm_segs[i].ds_addr);
				sg[i].size =
				    htole32(ccb->c_dmam->dm_segs[i].ds_len);
			}
		} else {
			cmd->sgcnt = 0;
			cmd->sgaddr = htole32(ccb->c_dmam->dm_segs[0].ds_addr);
		}
	}

	/* Pass command to hardware */
	DPRINTF(IPS_D_XFER, ("%s: run command 0x%02x\n", sc->sc_dev.dv_xname,
	    ccb->c_id));
	ccb->c_flags |= IPS_CCB_RUN;
	TAILQ_INSERT_TAIL(&sc->sc_ccbq_run, ccb, c_link);
	ips_exec(sc, ccb);

	if (flags & IPS_CCB_POLL)
		/* Wait for command to complete */
		error = ips_poll(sc, ccb);

	return (error);
}

int
ips_poll(struct ips_softc *sc, struct ips_ccb *c)
{
	struct ips_ccb *ccb = NULL;
	u_int32_t status;
	int id, timeout;

	while (ccb != c) {
		for (timeout = 100; timeout-- > 0; delay(100)) {
			if ((status = ips_status(sc)) == 0xffffffff)
				continue;
			id = IPS_REG_STAT_ID(status);
			if (id >= sc->sc_nccbs) {
				DPRINTF(IPS_D_ERR, ("%s: invalid command "
				    "0x%02x\n", sc->sc_dev.dv_xname, id));
				continue;
			}
			break;
		}
		if (timeout < 0) {
			printf("%s: poll timeout\n", sc->sc_dev.dv_xname);
			return (EBUSY);
		}
		ccb = &sc->sc_ccb[id];
		ccb->c_stat = IPS_REG_STAT_GSC(status);
		ccb->c_estat = IPS_REG_STAT_EXT(status);
		ips_done(sc, ccb);
	}

	return (0);
}

void
ips_done(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->c_xfer;
	int flags = ccb->c_flags;
	int error = 0;

	if ((flags & IPS_CCB_RUN) == 0) {
		printf("%s: command 0x%02x not run\n", sc->sc_dev.dv_xname,
		    ccb->c_id);
		if (xs != NULL) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
		}
		return;
	}

	if (flags & (IPS_CCB_READ | IPS_CCB_WRITE)) {
		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
		    ccb->c_dmam->dm_mapsize, flags & IPS_CCB_READ ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
	}

	if (ccb->c_stat) {
		printf("%s: ", sc->sc_dev.dv_xname);
		if (ccb->c_stat == 1) {
			printf("recovered error\n");
		} else {
			printf("error\n");
			error = 1;
		}
	}

	/* Release CCB */
	TAILQ_REMOVE(&sc->sc_ccbq_run, ccb, c_link);
	ips_ccb_put(sc, ccb);

	if (xs != NULL) {
		if (error)
			xs->error = XS_DRIVER_STUFFUP;
		else
			xs->resid = 0;
		xs->flags |= ITSDONE;
		scsi_done(xs);
	}
}

int
ips_intr(void *arg)
{
	struct ips_softc *sc = arg;
	struct ips_ccb *ccb;
	u_int32_t status;
	int id;

	if (!ips_isintr(sc))
		return (0);

	/* Process completed commands */
	while ((status = ips_status(sc)) != 0xffffffff) {
		DPRINTF(IPS_D_XFER, ("%s: intr status 0x%08x\n",
		    sc->sc_dev.dv_xname, status));

		id = IPS_REG_STAT_ID(status);
		if (id >= sc->sc_nccbs) {
			DPRINTF(IPS_D_ERR, ("%s: invalid command %d\n",
			    sc->sc_dev.dv_xname, id));
			continue;
		}
		ccb = &sc->sc_ccb[id];
		ccb->c_stat = IPS_REG_STAT_GSC(status);
		ccb->c_estat = IPS_REG_STAT_EXT(status);
		ips_done(sc, ccb);
	}

	return (1);
}

int
ips_getadapterinfo(struct ips_softc *sc, struct ips_adapterinfo *ai)
{
	return (ips_cmd(sc, IPS_CMD_GETADAPTERINFO, 0, 0, ai, sizeof(*ai),
	    IPS_CCB_READ | IPS_CCB_POLL, NULL));
}

int
ips_getdriveinfo(struct ips_softc *sc, struct ips_driveinfo *di)
{
	return (ips_cmd(sc, IPS_CMD_GETDRIVEINFO, 0, 0, di, sizeof(*di),
	    IPS_CCB_READ | IPS_CCB_POLL, NULL));
}

int
ips_flush(struct ips_softc *sc)
{
	return (ips_cmd(sc, IPS_CMD_FLUSH, 0, 0, NULL, 0, IPS_CCB_POLL, NULL));
}

void
ips_copperhead_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	u_int32_t reg;
	int timeout;

	for (timeout = 100; timeout-- > 0; delay(100)) {
		reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC);
		if ((reg & IPS_REG_CCC_SEM) == 0)
			break;
	}
	if (timeout < 0) {
		printf("%s: semaphore timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCSA, ccb->c_cmdpa);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC,
	    IPS_REG_CCC_START);
}

void
ips_copperhead_init(struct ips_softc *sc)
{
	/* XXX: not implemented */
}

void
ips_copperhead_intren(struct ips_softc *sc)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, IPS_REG_HIS_EN);
}

int
ips_copperhead_isintr(struct ips_softc *sc)
{
	u_int8_t reg;

	reg = bus_space_read_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, reg);
	if (reg != 0xff && (reg & IPS_REG_HIS_SCE))
		return (1);

	return (0);
}

int
ips_copperhead_reset(struct ips_softc *sc)
{
	/* XXX: not implemented */
	return (0);
}

u_int32_t
ips_copperhead_status(struct ips_softc *sc)
{
	/* XXX: not implemented */
	return (0);
}

void
ips_morpheus_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_IQP, ccb->c_cmdpa);
}

void
ips_morpheus_init(struct ips_softc *sc)
{
	/* XXX: not implemented */
}

void
ips_morpheus_intren(struct ips_softc *sc)
{
	u_int32_t reg;

	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM);
	reg &= ~IPS_REG_OIM_DS;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM, reg);
}

int
ips_morpheus_isintr(struct ips_softc *sc)
{
	u_int32_t reg;

	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIS);
	DPRINTF(IPS_D_XFER, ("%s: isintr 0x%08x\n", sc->sc_dev.dv_xname, reg));

	return (reg & IPS_REG_OIS_PEND);
}

int
ips_morpheus_reset(struct ips_softc *sc)
{
	/* XXX: not implemented */
	return (0);
}

u_int32_t
ips_morpheus_status(struct ips_softc *sc)
{
	u_int32_t reg;

	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OQP);
	DPRINTF(IPS_D_XFER, ("%s: status 0x%08x\n", sc->sc_dev.dv_xname, reg));

	return (reg);
}

struct ips_ccb *
ips_ccb_alloc(struct ips_softc *sc, int n)
{
	struct ips_ccb *ccb;
	int i;

	if ((ccb = malloc(n * sizeof(*ccb), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (NULL);

	for (i = 0; i < n; i++) {
		ccb[i].c_id = i;
		ccb[i].c_cmdva = (char *)sc->sc_cmdm.dm_vaddr +
		    i * IPS_MAXCMDSZ;
		ccb[i].c_cmdpa = sc->sc_cmdm.dm_paddr + i * IPS_MAXCMDSZ;
		if (bus_dmamap_create(sc->sc_dmat, IPS_MAXFER, IPS_MAXSGS,
		    IPS_MAXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb[i].c_dmam))
			goto fail;
	}

	return (ccb);
fail:
	for (; i > 0; i--)
		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
	free(ccb, M_DEVBUF);
	return (NULL);
}

void
ips_ccb_free(struct ips_softc *sc, struct ips_ccb *ccb, int n)
{
	int i;

	for (i = 0; i < n; i++)
		bus_dmamap_destroy(sc->sc_dmat, ccb[i].c_dmam);
	free(ccb, M_DEVBUF);
}

struct ips_ccb *
ips_ccb_get(struct ips_softc *sc)
{
	struct ips_ccb *ccb;

	if ((ccb = TAILQ_FIRST(&sc->sc_ccbq_free)) != NULL)
		TAILQ_REMOVE(&sc->sc_ccbq_free, ccb, c_link);

	return (ccb);
}

void
ips_ccb_put(struct ips_softc *sc, struct ips_ccb *ccb)
{
	ccb->c_flags = 0;
	ccb->c_xfer = NULL;
	TAILQ_INSERT_TAIL(&sc->sc_ccbq_free, ccb, c_link);
}

int
ips_dmamem_alloc(struct dmamem *dm, bus_dma_tag_t tag, bus_size_t size)
{
	int nsegs;

	dm->dm_tag = tag;
	dm->dm_size = size;

	if (bus_dmamap_create(tag, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map))
		return (1);
	if (bus_dmamem_alloc(tag, size, 0, 0, &dm->dm_seg, 1, &nsegs,
	    BUS_DMA_NOWAIT))
		goto fail1;
	if (bus_dmamem_map(tag, &dm->dm_seg, 1, size, (caddr_t *)&dm->dm_vaddr,
	    BUS_DMA_NOWAIT))
		goto fail2;
	if (bus_dmamap_load(tag, dm->dm_map, dm->dm_vaddr, size, NULL,
	    BUS_DMA_NOWAIT))
		goto fail3;

	return (0);

fail3:
	bus_dmamem_unmap(tag, dm->dm_vaddr, size);
fail2:
	bus_dmamem_free(tag, &dm->dm_seg, 1);
fail1:
	bus_dmamap_destroy(tag, dm->dm_map);
	return (1);
}

void
ips_dmamem_free(struct dmamem *dm)
{
	bus_dmamap_unload(dm->dm_tag, dm->dm_map);
	bus_dmamem_unmap(dm->dm_tag, dm->dm_vaddr, dm->dm_size);
	bus_dmamem_free(dm->dm_tag, &dm->dm_seg, 1);
	bus_dmamap_destroy(dm->dm_tag, dm->dm_map);
}