1 /* $NetBSD: xy.c,v 1.48 2003/09/29 09:50:22 wiz Exp $ */ 2 3 /* 4 * 5 * Copyright (c) 1995 Charles D. Cranor 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Charles D. Cranor. 19 * 4. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * 36 * x y . c x y l o g i c s 4 5 0 / 4 5 1 s m d d r i v e r 37 * 38 * author: Chuck Cranor <chuck@ccrc.wustl.edu> 39 * id: &Id: xy.c,v 1.1 1995/09/25 20:35:14 chuck Exp & 40 * started: 14-Sep-95 41 * references: [1] Xylogics Model 753 User's Manual 42 * part number: 166-753-001, Revision B, May 21, 1988. 43 * "Your Partner For Performance" 44 * [2] other NetBSD disk device drivers 45 * [3] Xylogics Model 450 User's Manual 46 * part number: 166-017-001, Revision B, 1983. 47 * [4] Addendum to Xylogics Model 450 Disk Controller User's 48 * Manual, Jan. 1985. 49 * [5] The 451 Controller, Rev. B3, September 2, 1986. 
50 * [6] David Jones <dej@achilles.net>'s unfinished 450/451 driver 51 * 52 */ 53 54 #include <sys/cdefs.h> 55 __KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.48 2003/09/29 09:50:22 wiz Exp $"); 56 57 #undef XYC_DEBUG /* full debug */ 58 #undef XYC_DIAG /* extra sanity checks */ 59 #if defined(DIAGNOSTIC) && !defined(XYC_DIAG) 60 #define XYC_DIAG /* link in with master DIAG option */ 61 #endif 62 63 #include <sys/param.h> 64 #include <sys/proc.h> 65 #include <sys/systm.h> 66 #include <sys/kernel.h> 67 #include <sys/file.h> 68 #include <sys/stat.h> 69 #include <sys/ioctl.h> 70 #include <sys/buf.h> 71 #include <sys/uio.h> 72 #include <sys/malloc.h> 73 #include <sys/device.h> 74 #include <sys/disklabel.h> 75 #include <sys/disk.h> 76 #include <sys/syslog.h> 77 #include <sys/dkbad.h> 78 #include <sys/conf.h> 79 80 #include <uvm/uvm_extern.h> 81 82 #include <dev/sun/disklabel.h> 83 84 #include <machine/autoconf.h> 85 #include <machine/dvma.h> 86 87 #include <sun3/dev/xyreg.h> 88 #include <sun3/dev/xyvar.h> 89 #include <sun3/dev/xio.h> 90 91 #include "locators.h" 92 93 /* 94 * Print a complaint when no xy children were specified 95 * in the config file. Better than a link error... 96 * 97 * XXX: Some folks say this driver should be split in two, 98 * but that seems pointless with ONLY one type of child. 99 */ 100 #include "xy.h" 101 #if NXY == 0 102 #error "xyc but no xy?" 103 #endif 104 105 /* 106 * macros 107 */ 108 109 /* 110 * XYC_GO: start iopb ADDR (DVMA addr in a u_long) on XYC 111 */ 112 #define XYC_GO(XYC, ADDR) { \ 113 (XYC)->xyc_addr_lo = ((ADDR) & 0xff); \ 114 (ADDR) = ((ADDR) >> 8); \ 115 (XYC)->xyc_addr_hi = ((ADDR) & 0xff); \ 116 (ADDR) = ((ADDR) >> 8); \ 117 (XYC)->xyc_reloc_lo = ((ADDR) & 0xff); \ 118 (ADDR) = ((ADDR) >> 8); \ 119 (XYC)->xyc_reloc_hi = (ADDR); \ 120 (XYC)->xyc_csr = XYC_GBSY; /* go! 
*/ \ 121 } 122 123 /* 124 * XYC_DONE: don't need IORQ, get error code and free (done after xyc_cmd) 125 */ 126 127 #define XYC_DONE(SC,ER) { \ 128 if ((ER) == XY_ERR_AOK) { \ 129 (ER) = (SC)->ciorq->errno; \ 130 (SC)->ciorq->mode = XY_SUB_FREE; \ 131 wakeup((SC)->ciorq); \ 132 } \ 133 } 134 135 /* 136 * XYC_ADVANCE: advance iorq's pointers by a number of sectors 137 */ 138 139 #define XYC_ADVANCE(IORQ, N) { \ 140 if (N) { \ 141 (IORQ)->sectcnt -= (N); \ 142 (IORQ)->blockno += (N); \ 143 (IORQ)->dbuf += ((N)*XYFM_BPS); \ 144 } \ 145 } 146 147 /* 148 * note - addresses you can sleep on: 149 * [1] & of xy_softc's "state" (waiting for a chance to attach a drive) 150 * [2] & an iorq (waiting for an XY_SUB_WAIT iorq to finish) 151 */ 152 153 154 /* 155 * function prototypes 156 * "xyc_*" functions are internal, all others are external interfaces 157 */ 158 159 /* internals */ 160 struct xy_iopb *xyc_chain __P((struct xyc_softc *, struct xy_iorq *)); 161 int xyc_cmd __P((struct xyc_softc *, int, int, int, int, int, char *, int)); 162 char *xyc_e2str __P((int)); 163 int xyc_entoact __P((int)); 164 int xyc_error __P((struct xyc_softc *, struct xy_iorq *, 165 struct xy_iopb *, int)); 166 int xyc_ioctlcmd __P((struct xy_softc *, dev_t dev, struct xd_iocmd *)); 167 void xyc_perror __P((struct xy_iorq *, struct xy_iopb *, int)); 168 int xyc_piodriver __P((struct xyc_softc *, struct xy_iorq *)); 169 int xyc_remove_iorq __P((struct xyc_softc *)); 170 int xyc_reset __P((struct xyc_softc *, int, struct xy_iorq *, int, 171 struct xy_softc *)); 172 inline void xyc_rqinit __P((struct xy_iorq *, struct xyc_softc *, 173 struct xy_softc *, int, u_long, int, 174 caddr_t, struct buf *)); 175 void xyc_rqtopb __P((struct xy_iorq *, struct xy_iopb *, int, int)); 176 void xyc_start __P((struct xyc_softc *, struct xy_iorq *)); 177 int xyc_startbuf __P((struct xyc_softc *, struct xy_softc *, struct buf *)); 178 int xyc_submit_iorq __P((struct xyc_softc *, struct xy_iorq *, int)); 179 void xyc_tick __P((void *)); 180 int xyc_unbusy __P((struct xyc *, int)); 181 void xyc_xyreset __P((struct xyc_softc *, struct xy_softc *)); 182 183 /* machine interrupt hook */ 184 int xycintr __P((void *)); 185 186 /* autoconf */ 187 static int xycmatch __P((struct device *, struct cfdata *, void *)); 188 static void xycattach __P((struct device *, struct device *, void *)); 189 static int xyc_print __P((void *, const char *name)); 190 191 static int xymatch __P((struct device *, struct cfdata *, void *)); 192 static void xyattach __P((struct device *, struct device *, void *)); 193 static void xy_init __P((struct xy_softc *)); 194 195 static void xydummystrat __P((struct buf *)); 196 int xygetdisklabel __P((struct xy_softc *, void *)); 197 198 /* 199 * cfattach's: device driver interface to autoconfig 200 */ 201 202 CFATTACH_DECL(xyc, sizeof(struct xyc_softc), 203 xycmatch, xycattach, NULL, NULL); 204 205 CFATTACH_DECL(xy, sizeof(struct xy_softc), 206 xymatch, xyattach, NULL, NULL); 207 208 extern struct cfdriver xy_cd; 209 210 struct xyc_attach_args { /* this is the "aux" args to xyattach */ 211 int driveno; /* unit number */ 212 }; 213 214 dev_type_open(xyopen); 215 dev_type_close(xyclose); 216 dev_type_read(xyread); 217 dev_type_write(xywrite); 218 dev_type_ioctl(xyioctl); 219 dev_type_strategy(xystrategy); 220 dev_type_dump(xydump); 221 dev_type_size(xysize); 222 223 const struct bdevsw xy_bdevsw = { 224 xyopen, xyclose, xystrategy, xyioctl, xydump, xysize, D_DISK 225 }; 226 227 const struct cdevsw xy_cdevsw = { 228 xyopen, xyclose, 
xyread, xywrite, xyioctl, 229 nostop, notty, nopoll, nommap, nokqfilter, D_DISK 230 }; 231 232 /* 233 * dkdriver 234 */ 235 236 struct dkdriver xydkdriver = { xystrategy }; 237 238 /* 239 * start: disk label fix code (XXX) 240 */ 241 242 static void *xy_labeldata; 243 244 static void 245 xydummystrat(bp) 246 struct buf *bp; 247 { 248 if (bp->b_bcount != XYFM_BPS) 249 panic("xydummystrat"); 250 memcpy(bp->b_data, xy_labeldata, XYFM_BPS); 251 bp->b_flags |= B_DONE; 252 bp->b_flags &= ~B_BUSY; 253 } 254 255 int 256 xygetdisklabel(xy, b) 257 struct xy_softc *xy; 258 void *b; 259 { 260 const char *err; 261 struct sun_disklabel *sdl; 262 263 /* We already have the label data in `b'; setup for dummy strategy */ 264 xy_labeldata = b; 265 266 /* Required parameter for readdisklabel() */ 267 xy->sc_dk.dk_label->d_secsize = XYFM_BPS; 268 269 err = readdisklabel(MAKEDISKDEV(0, xy->sc_dev.dv_unit, RAW_PART), 270 xydummystrat, 271 xy->sc_dk.dk_label, xy->sc_dk.dk_cpulabel); 272 if (err) { 273 printf("%s: %s\n", xy->sc_dev.dv_xname, err); 274 return(XY_ERR_FAIL); 275 } 276 277 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */ 278 sdl = (struct sun_disklabel *)xy->sc_dk.dk_cpulabel->cd_block; 279 if (sdl->sl_magic == SUN_DKMAGIC) 280 xy->pcyl = sdl->sl_pcyl; 281 else { 282 printf("%s: WARNING: no `pcyl' in disk label.\n", 283 xy->sc_dev.dv_xname); 284 xy->pcyl = xy->sc_dk.dk_label->d_ncylinders + 285 xy->sc_dk.dk_label->d_acylinders; 286 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n", 287 xy->sc_dev.dv_xname, xy->pcyl); 288 } 289 290 xy->ncyl = xy->sc_dk.dk_label->d_ncylinders; 291 xy->acyl = xy->sc_dk.dk_label->d_acylinders; 292 xy->nhead = xy->sc_dk.dk_label->d_ntracks; 293 xy->nsect = xy->sc_dk.dk_label->d_nsectors; 294 xy->sectpercyl = xy->nhead * xy->nsect; 295 xy->sc_dk.dk_label->d_secsize = XYFM_BPS; /* not handled by 296 * sun->bsd */ 297 return(XY_ERR_AOK); 298 } 299 300 /* 301 * end: disk label fix code (XXX) 302 */ 303 304 /* 305 * a u t o c o n f i g f u n c t i o n s 306 */ 307 308 /* 309 * xycmatch: determine if xyc is present or not. we do a 310 * soft reset to detect the xyc. 311 */ 312 static int 313 xycmatch(parent, cf, aux) 314 struct device *parent; 315 struct cfdata *cf; 316 void *aux; 317 { 318 struct confargs *ca = aux; 319 320 /* No default VME address. */ 321 if (ca->ca_paddr == -1) 322 return (0); 323 324 /* Make sure something is there... */ 325 if (bus_peek(ca->ca_bustype, ca->ca_paddr + 5, 1) == -1) 326 return (0); 327 328 /* Default interrupt priority. */ 329 if (ca->ca_intpri == -1) 330 ca->ca_intpri = 2; 331 332 return (1); 333 } 334 335 /* 336 * xycattach: attach controller 337 */ 338 static void 339 xycattach(parent, self, aux) 340 struct device *parent, *self; 341 void *aux; 342 { 343 struct xyc_softc *xyc = (void *) self; 344 struct confargs *ca = aux; 345 struct xyc_attach_args xa; 346 int lcv, err, res, pbsz; 347 void *tmp, *tmp2; 348 u_long ultmp; 349 350 /* get addressing and intr level stuff from autoconfig and load it 351 * into our xyc_softc. */ 352 353 xyc->xyc = (struct xyc *) 354 bus_mapin(ca->ca_bustype, ca->ca_paddr, sizeof(struct xyc)); 355 xyc->bustype = ca->ca_bustype; 356 xyc->ipl = ca->ca_intpri; 357 xyc->vector = ca->ca_intvec; 358 xyc->no_ols = 0; /* XXX should be from config */ 359 360 for (lcv = 0; lcv < XYC_MAXDEV; lcv++) 361 xyc->sc_drives[lcv] = (struct xy_softc *) 0; 362 363 /* 364 * allocate and zero buffers 365 * check boundaries of the KVA's ... all IOPBs must reside in 366 * the same 64K region. 
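 *
 * (why one 64K region: when IOPBs are chained, xyc_chain() stores only
 *  the low 16 bits of the next IOPB's DVMA address in the "nxtiopb"
 *  field, so all IOPBs must share the same upper address bits.  the
 *  check below is simply
 *	(kva & 0xffff0000) == ((kva + pbsz) & 0xffff0000)
 *  i.e. the first and last byte of the IOPB array fall in the same
 *  64K chunk; if not, we allocate a second chunk and free the first.)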
367 */ 368 369 pbsz = XYC_MAXIOPB * sizeof(struct xy_iopb); 370 tmp = tmp2 = (struct xy_iopb *) dvma_malloc(pbsz); /* KVA */ 371 ultmp = (u_long) tmp; 372 if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) { 373 tmp = (struct xy_iopb *) dvma_malloc(pbsz); /* retry! */ 374 dvma_free(tmp2, pbsz); 375 ultmp = (u_long) tmp; 376 if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) { 377 printf("%s: can't alloc IOPB mem in 64K\n", 378 xyc->sc_dev.dv_xname); 379 return; 380 } 381 } 382 memset(tmp, 0, pbsz); 383 xyc->iopbase = tmp; 384 xyc->dvmaiopb = (struct xy_iopb *) 385 dvma_kvtopa(xyc->iopbase, xyc->bustype); 386 xyc->reqs = (struct xy_iorq *) 387 malloc(XYC_MAXIOPB * sizeof(struct xy_iorq), M_DEVBUF, M_NOWAIT); 388 if (xyc->reqs == NULL) 389 panic("xyc malloc"); 390 memset(xyc->reqs, 0, XYC_MAXIOPB * sizeof(struct xy_iorq)); 391 392 /* 393 * init iorq to iopb pointers, and non-zero fields in the 394 * iopb which never change. 395 */ 396 397 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 398 xyc->xy_chain[lcv] = NULL; 399 xyc->reqs[lcv].iopb = &xyc->iopbase[lcv]; 400 xyc->iopbase[lcv].asr = 1; /* always the same */ 401 xyc->iopbase[lcv].eef = 1; /* always the same */ 402 xyc->iopbase[lcv].ecm = XY_ECM; /* always the same */ 403 xyc->iopbase[lcv].aud = 1; /* always the same */ 404 xyc->iopbase[lcv].relo = 1; /* always the same */ 405 xyc->iopbase[lcv].thro = XY_THRO;/* always the same */ 406 } 407 xyc->ciorq = &xyc->reqs[XYC_CTLIOPB]; /* short hand name */ 408 xyc->ciopb = &xyc->iopbase[XYC_CTLIOPB]; /* short hand name */ 409 xyc->xy_hand = 0; 410 411 /* read controller parameters and insure we have a 450/451 */ 412 413 err = xyc_cmd(xyc, XYCMD_ST, 0, 0, 0, 0, 0, XY_SUB_POLL); 414 res = xyc->ciopb->ctyp; 415 XYC_DONE(xyc, err); 416 if (res != XYCT_450) { 417 if (err) 418 printf(": %s: ", xyc_e2str(err)); 419 printf(": doesn't identify as a 450/451\n"); 420 return; 421 } 422 printf(": Xylogics 450/451"); 423 if (xyc->no_ols) 424 printf(" [OLS disabled]"); /* 450 doesn't overlap seek right */ 425 printf("\n"); 426 if (err) { 427 printf("%s: error: %s\n", xyc->sc_dev.dv_xname, 428 xyc_e2str(err)); 429 return; 430 } 431 if ((xyc->xyc->xyc_csr & XYC_ADRM) == 0) { 432 printf("%s: 24 bit addressing turned off\n", 433 xyc->sc_dev.dv_xname); 434 printf("please set hardware jumpers JM1-JM2=in, JM3-JM4=out\n"); 435 printf("to enable 24 bit mode and this driver\n"); 436 return; 437 } 438 439 /* link in interrupt with higher level software */ 440 isr_add_vectored(xycintr, (void *)xyc, 441 ca->ca_intpri, ca->ca_intvec); 442 evcnt_attach_dynamic(&xyc->sc_intrcnt, EVCNT_TYPE_INTR, NULL, 443 xyc->sc_dev.dv_xname, "intr"); 444 445 callout_init(&xyc->sc_tick_ch); 446 447 /* now we must look for disks using autoconfig */ 448 for (xa.driveno = 0; xa.driveno < XYC_MAXDEV; xa.driveno++) 449 (void) config_found(self, (void *) &xa, xyc_print); 450 451 /* start the watchdog clock */ 452 callout_reset(&xyc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xyc); 453 } 454 455 static int 456 xyc_print(aux, name) 457 void *aux; 458 const char *name; 459 { 460 struct xyc_attach_args *xa = aux; 461 462 if (name != NULL) 463 aprint_normal("%s: ", name); 464 465 if (xa->driveno != -1) 466 aprint_normal(" drive %d", xa->driveno); 467 468 return UNCONF; 469 } 470 471 /* 472 * xymatch: probe for disk. 473 * 474 * note: we almost always say disk is present. this allows us to 475 * spin up and configure a disk after the system is booted (we can 476 * call xyattach!). 
Also, wire down the relationship between the 477 * xy* and xyc* devices, to simplify boot device identification. 478 */ 479 static int 480 xymatch(parent, cf, aux) 481 struct device *parent; 482 struct cfdata *cf; 483 void *aux; 484 { 485 struct xyc_attach_args *xa = aux; 486 int xy_unit; 487 488 /* Match only on the "wired-down" controller+disk. */ 489 xy_unit = parent->dv_unit * 2 + xa->driveno; 490 if (cf->cf_unit != xy_unit) 491 return (0); 492 493 return (1); 494 } 495 496 /* 497 * xyattach: attach a disk. 498 */ 499 static void 500 xyattach(parent, self, aux) 501 struct device *parent, *self; 502 void *aux; 503 { 504 struct xy_softc *xy = (void *) self; 505 struct xyc_softc *xyc = (void *) parent; 506 struct xyc_attach_args *xa = aux; 507 508 printf("\n"); 509 510 /* 511 * Always re-initialize the disk structure. We want statistics 512 * to start with a clean slate. 513 */ 514 memset(&xy->sc_dk, 0, sizeof(xy->sc_dk)); 515 xy->sc_dk.dk_driver = &xydkdriver; 516 xy->sc_dk.dk_name = xy->sc_dev.dv_xname; 517 518 xy->state = XY_DRIVE_UNKNOWN; /* to start */ 519 xy->flags = 0; 520 xy->parent = xyc; 521 522 /* init queue of waiting bufs */ 523 bufq_alloc(&xy->xyq, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK); 524 xy->xyrq = &xyc->reqs[xa->driveno]; 525 526 xy->xy_drive = xa->driveno; 527 xyc->sc_drives[xa->driveno] = xy; 528 529 /* Do init work common to attach and open. */ 530 xy_init(xy); 531 } 532 533 /* 534 * end of autoconfig functions 535 */ 536 537 /* 538 * Initialize a disk. This can be called from both autoconf and 539 * also from xyopen/xystrategy. 540 */ 541 static void 542 xy_init(xy) 543 struct xy_softc *xy; 544 { 545 struct xyc_softc *xyc; 546 struct dkbad *dkb; 547 void *dvmabuf; 548 int err, spt, mb, blk, lcv, fullmode, newstate; 549 550 xyc = xy->parent; 551 xy->state = XY_DRIVE_ATTACHING; 552 newstate = XY_DRIVE_UNKNOWN; 553 fullmode = (cold) ? XY_SUB_POLL : XY_SUB_WAIT; 554 dvmabuf = dvma_malloc(XYFM_BPS); 555 556 /* first try and reset the drive */ 557 558 err = xyc_cmd(xyc, XYCMD_RST, 0, xy->xy_drive, 0, 0, 0, fullmode); 559 XYC_DONE(xyc, err); 560 if (err == XY_ERR_DNRY) { 561 printf("%s: drive %d: off-line\n", 562 xy->sc_dev.dv_xname, xy->xy_drive); 563 goto done; 564 } 565 if (err) { 566 printf("%s: ERROR 0x%02x (%s)\n", 567 xy->sc_dev.dv_xname, err, xyc_e2str(err)); 568 goto done; 569 } 570 printf("%s: drive %d ready", 571 xy->sc_dev.dv_xname, xy->xy_drive); 572 573 /* 574 * now set drive parameters (to semi-bogus values) so we can read the 575 * disk label. 576 */ 577 xy->pcyl = xy->ncyl = 1; 578 xy->acyl = 0; 579 xy->nhead = 1; 580 xy->nsect = 1; 581 xy->sectpercyl = 1; 582 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 583 xy->dkb.bt_bad[lcv].bt_cyl = 584 xy->dkb.bt_bad[lcv].bt_trksec = 0xffff; 585 586 /* read disk label */ 587 for (xy->drive_type = 0 ; xy->drive_type <= XYC_MAXDT ; 588 xy->drive_type++) { 589 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, 0, 1, 590 dvmabuf, fullmode); 591 XYC_DONE(xyc, err); 592 if (err == XY_ERR_AOK) break; 593 } 594 595 if (err != XY_ERR_AOK) { 596 printf("%s: reading disk label failed: %s\n", 597 xy->sc_dev.dv_xname, xyc_e2str(err)); 598 goto done; 599 } 600 printf("%s: drive type %d\n", 601 xy->sc_dev.dv_xname, xy->drive_type); 602 603 newstate = XY_DRIVE_NOLABEL; 604 605 xy->hw_spt = spt = 0; /* XXX needed ? 
*/ 606 /* Attach the disk: must be before getdisklabel to malloc label */ 607 disk_attach(&xy->sc_dk); 608 609 if (xygetdisklabel(xy, dvmabuf) != XY_ERR_AOK) 610 goto done; 611 612 /* inform the user of what is up */ 613 printf("%s: <%s>, pcyl %d\n", 614 xy->sc_dev.dv_xname, 615 (char *)dvmabuf, xy->pcyl); 616 mb = xy->ncyl * (xy->nhead * xy->nsect) / (1048576 / XYFM_BPS); 617 printf("%s: %dMB, %d cyl, %d head, %d sec\n", 618 xy->sc_dev.dv_xname, mb, 619 xy->ncyl, xy->nhead, xy->nsect); 620 621 /* 622 * 450/451 stupidity: the drive type is encoded into the format 623 * of the disk. the drive type in the IOPB must match the drive 624 * type in the format, or you will not be able to do I/O to the 625 * disk (you get header not found errors). if you have two drives 626 * of different sizes that have the same drive type in their 627 * formatting then you are out of luck. 628 * 629 * this problem was corrected in the 753/7053. 630 */ 631 632 for (lcv = 0 ; lcv < XYC_MAXDEV ; lcv++) { 633 struct xy_softc *oxy; 634 635 oxy = xyc->sc_drives[lcv]; 636 if (oxy == NULL || oxy == xy) continue; 637 if (oxy->drive_type != xy->drive_type) continue; 638 if (xy->nsect != oxy->nsect || xy->pcyl != oxy->pcyl || 639 xy->nhead != oxy->nhead) { 640 printf("%s: %s and %s must be the same size!\n", 641 xyc->sc_dev.dv_xname, 642 xy ->sc_dev.dv_xname, 643 oxy->sc_dev.dv_xname); 644 panic("xy drive size mismatch"); 645 } 646 } 647 648 649 /* now set the real drive parameters! */ 650 blk = (xy->nsect - 1) + 651 ((xy->nhead - 1) * xy->nsect) + 652 ((xy->pcyl - 1) * xy->nsect * xy->nhead); 653 err = xyc_cmd(xyc, XYCMD_SDS, 0, xy->xy_drive, blk, 0, 0, fullmode); 654 XYC_DONE(xyc, err); 655 if (err) { 656 printf("%s: write drive size failed: %s\n", 657 xy->sc_dev.dv_xname, xyc_e2str(err)); 658 goto done; 659 } 660 newstate = XY_DRIVE_ONLINE; 661 662 /* 663 * read bad144 table. this table resides on the first sector of the 664 * last track of the disk (i.e. second cyl of "acyl" area). 
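 *
 * (spelled out, using the geometry just read from the label:
 *	blk = (ncyl + acyl - 1) * nhead * nsect		last cylinder
 *	    + (nhead - 1) * nsect			last track, sector 0
 *  which is what the code below computes.)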
665 */ 666 blk = (xy->ncyl + xy->acyl - 1) * (xy->nhead * xy->nsect) + 667 /* last cyl */ 668 (xy->nhead - 1) * xy->nsect; /* last head */ 669 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, blk, 1, 670 dvmabuf, fullmode); 671 XYC_DONE(xyc, err); 672 if (err) { 673 printf("%s: reading bad144 failed: %s\n", 674 xy->sc_dev.dv_xname, xyc_e2str(err)); 675 goto done; 676 } 677 678 /* check dkbad for sanity */ 679 dkb = (struct dkbad *) dvmabuf; 680 for (lcv = 0; lcv < 126; lcv++) { 681 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 682 dkb->bt_bad[lcv].bt_cyl == 0) && 683 dkb->bt_bad[lcv].bt_trksec == 0xffff) 684 continue; /* blank */ 685 if (dkb->bt_bad[lcv].bt_cyl >= xy->ncyl) 686 break; 687 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xy->nhead) 688 break; 689 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xy->nsect) 690 break; 691 } 692 if (lcv != 126) { 693 printf("%s: warning: invalid bad144 sector!\n", 694 xy->sc_dev.dv_xname); 695 } else { 696 memcpy(&xy->dkb, dvmabuf, XYFM_BPS); 697 } 698 699 done: 700 xy->state = newstate; 701 dvma_free(dvmabuf, XYFM_BPS); 702 } 703 704 /* 705 * { b , c } d e v s w f u n c t i o n s 706 */ 707 708 /* 709 * xyclose: close device 710 */ 711 int 712 xyclose(dev, flag, fmt, p) 713 dev_t dev; 714 int flag, fmt; 715 struct proc *p; 716 { 717 struct xy_softc *xy = xy_cd.cd_devs[DISKUNIT(dev)]; 718 int part = DISKPART(dev); 719 720 /* clear mask bits */ 721 722 switch (fmt) { 723 case S_IFCHR: 724 xy->sc_dk.dk_copenmask &= ~(1 << part); 725 break; 726 case S_IFBLK: 727 xy->sc_dk.dk_bopenmask &= ~(1 << part); 728 break; 729 } 730 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 731 732 return 0; 733 } 734 735 /* 736 * xydump: crash dump system 737 */ 738 int 739 xydump(dev, blkno, va, sz) 740 dev_t dev; 741 daddr_t blkno; 742 caddr_t va; 743 size_t sz; 744 { 745 int unit, part; 746 struct xy_softc *xy; 747 748 unit = DISKUNIT(dev); 749 if (unit >= xy_cd.cd_ndevs) 750 return ENXIO; 751 part = DISKPART(dev); 752 753 xy = xy_cd.cd_devs[unit]; 754 755 printf("%s%c: crash dump not supported (yet)\n", xy->sc_dev.dv_xname, 756 'a' + part); 757 758 return ENXIO; 759 760 /* outline: globals: "dumplo" == sector number of partition to start 761 * dump at (convert to physical sector with partition table) 762 * "dumpsize" == size of dump in clicks "physmem" == size of physical 763 * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 764 * physmem) 765 * 766 * dump a copy of physical memory to the dump device starting at sector 767 * "dumplo" in the swap partition (make sure > 0). map in pages as 768 * we go. use polled I/O. 769 * 770 * XXX how to handle NON_CONTIG? 771 */ 772 } 773 774 /* 775 * xyioctl: ioctls on XY drives. based on ioctl's of other netbsd disks. 
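 *
 * (handled here: DIOCSBAD, DIOCGDINFO, DIOCGPART, DIOCSDINFO,
 *  DIOCWLABEL and DIOCWDINFO, plus the driver-private DIOSXDCMD,
 *  which passes a struct xd_iocmd through to xyc_ioctlcmd().)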
776 */ 777 int 778 xyioctl(dev, command, addr, flag, p) 779 dev_t dev; 780 u_long command; 781 caddr_t addr; 782 int flag; 783 struct proc *p; 784 785 { 786 struct xy_softc *xy; 787 struct xd_iocmd *xio; 788 int error, s, unit; 789 790 unit = DISKUNIT(dev); 791 792 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == NULL) 793 return (ENXIO); 794 795 /* switch on ioctl type */ 796 797 switch (command) { 798 case DIOCSBAD: /* set bad144 info */ 799 if ((flag & FWRITE) == 0) 800 return EBADF; 801 s = splbio(); 802 memcpy(&xy->dkb, addr, sizeof(xy->dkb)); 803 splx(s); 804 return 0; 805 806 case DIOCGDINFO: /* get disk label */ 807 memcpy(addr, xy->sc_dk.dk_label, sizeof(struct disklabel)); 808 return 0; 809 810 case DIOCGPART: /* get partition info */ 811 ((struct partinfo *) addr)->disklab = xy->sc_dk.dk_label; 812 ((struct partinfo *) addr)->part = 813 &xy->sc_dk.dk_label->d_partitions[DISKPART(dev)]; 814 return 0; 815 816 case DIOCSDINFO: /* set disk label */ 817 if ((flag & FWRITE) == 0) 818 return EBADF; 819 error = setdisklabel(xy->sc_dk.dk_label, 820 (struct disklabel *) addr, /* xy->sc_dk.dk_openmask : */ 0, 821 xy->sc_dk.dk_cpulabel); 822 if (error == 0) { 823 if (xy->state == XY_DRIVE_NOLABEL) 824 xy->state = XY_DRIVE_ONLINE; 825 } 826 return error; 827 828 case DIOCWLABEL: /* change write status of disk label */ 829 if ((flag & FWRITE) == 0) 830 return EBADF; 831 if (*(int *) addr) 832 xy->flags |= XY_WLABEL; 833 else 834 xy->flags &= ~XY_WLABEL; 835 return 0; 836 837 case DIOCWDINFO: /* write disk label */ 838 if ((flag & FWRITE) == 0) 839 return EBADF; 840 error = setdisklabel(xy->sc_dk.dk_label, 841 (struct disklabel *) addr, /* xy->sc_dk.dk_openmask : */ 0, 842 xy->sc_dk.dk_cpulabel); 843 if (error == 0) { 844 if (xy->state == XY_DRIVE_NOLABEL) 845 xy->state = XY_DRIVE_ONLINE; 846 847 /* Simulate opening partition 0 so write succeeds. */ 848 xy->sc_dk.dk_openmask |= (1 << 0); 849 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART), 850 xystrategy, xy->sc_dk.dk_label, 851 xy->sc_dk.dk_cpulabel); 852 xy->sc_dk.dk_openmask = 853 xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 854 } 855 return error; 856 857 case DIOSXDCMD: 858 xio = (struct xd_iocmd *) addr; 859 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) 860 return (error); 861 return (xyc_ioctlcmd(xy, dev, xio)); 862 863 default: 864 return ENOTTY; 865 } 866 } 867 868 /* 869 * xyopen: open drive 870 */ 871 int 872 xyopen(dev, flag, fmt, p) 873 dev_t dev; 874 int flag, fmt; 875 struct proc *p; 876 { 877 int err, unit, part, s; 878 struct xy_softc *xy; 879 880 /* first, could it be a valid target? */ 881 unit = DISKUNIT(dev); 882 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == NULL) 883 return (ENXIO); 884 part = DISKPART(dev); 885 err = 0; 886 887 /* 888 * If some other processing is doing init, sleep. 889 */ 890 s = splbio(); 891 while (xy->state == XY_DRIVE_ATTACHING) { 892 if (tsleep(&xy->state, PRIBIO, "xyopen", 0)) { 893 err = EINTR; 894 goto done; 895 } 896 } 897 /* Do we need to init the drive? */ 898 if (xy->state == XY_DRIVE_UNKNOWN) { 899 xy_init(xy); 900 wakeup(&xy->state); 901 } 902 /* Was the init successful? 
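 * (xy_init() leaves xy->state at XY_DRIVE_UNKNOWN when it fails,
 * so "still unknown" below means the init did not take.)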
*/ 903 if (xy->state == XY_DRIVE_UNKNOWN) { 904 err = EIO; 905 goto done; 906 } 907 908 /* check for partition */ 909 if (part != RAW_PART && 910 (part >= xy->sc_dk.dk_label->d_npartitions || 911 xy->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) { 912 err = ENXIO; 913 goto done; 914 } 915 916 /* set open masks */ 917 switch (fmt) { 918 case S_IFCHR: 919 xy->sc_dk.dk_copenmask |= (1 << part); 920 break; 921 case S_IFBLK: 922 xy->sc_dk.dk_bopenmask |= (1 << part); 923 break; 924 } 925 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 926 927 done: 928 splx(s); 929 return (err); 930 } 931 932 int 933 xyread(dev, uio, flags) 934 dev_t dev; 935 struct uio *uio; 936 int flags; 937 { 938 939 return (physio(xystrategy, NULL, dev, B_READ, minphys, uio)); 940 } 941 942 int 943 xywrite(dev, uio, flags) 944 dev_t dev; 945 struct uio *uio; 946 int flags; 947 { 948 949 return (physio(xystrategy, NULL, dev, B_WRITE, minphys, uio)); 950 } 951 952 953 /* 954 * xysize: return size of a partition for a dump 955 */ 956 957 int 958 xysize(dev) 959 dev_t dev; 960 961 { 962 struct xy_softc *xysc; 963 int unit, part, size, omask; 964 965 /* valid unit? */ 966 unit = DISKUNIT(dev); 967 if (unit >= xy_cd.cd_ndevs || (xysc = xy_cd.cd_devs[unit]) == NULL) 968 return (-1); 969 970 part = DISKPART(dev); 971 omask = xysc->sc_dk.dk_openmask & (1 << part); 972 973 if (omask == 0 && xyopen(dev, 0, S_IFBLK, NULL) != 0) 974 return (-1); 975 976 /* do it */ 977 if (xysc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP) 978 size = -1; /* only give valid size for swap partitions */ 979 else 980 size = xysc->sc_dk.dk_label->d_partitions[part].p_size * 981 (xysc->sc_dk.dk_label->d_secsize / DEV_BSIZE); 982 if (omask == 0 && xyclose(dev, 0, S_IFBLK, NULL) != 0) 983 return (-1); 984 return (size); 985 } 986 987 /* 988 * xystrategy: buffering system interface to xy. 989 */ 990 void 991 xystrategy(bp) 992 struct buf *bp; 993 994 { 995 struct xy_softc *xy; 996 int s, unit; 997 struct disklabel *lp; 998 daddr_t blkno; 999 1000 unit = DISKUNIT(bp->b_dev); 1001 1002 /* check for live device */ 1003 1004 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == 0 || 1005 bp->b_blkno < 0 || 1006 (bp->b_bcount % xy->sc_dk.dk_label->d_secsize) != 0) { 1007 bp->b_error = EINVAL; 1008 goto bad; 1009 } 1010 1011 /* There should always be an open first. */ 1012 if (xy->state == XY_DRIVE_UNKNOWN) { 1013 bp->b_error = EIO; 1014 goto bad; 1015 } 1016 if (xy->state != XY_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) { 1017 /* no I/O to unlabeled disks, unless raw partition */ 1018 bp->b_error = EIO; 1019 goto bad; 1020 } 1021 /* short circuit zero length request */ 1022 1023 if (bp->b_bcount == 0) 1024 goto done; 1025 1026 /* check bounds with label (disksubr.c). Determine the size of the 1027 * transfer, and make sure it is within the boundaries of the 1028 * partition. Adjust transfer if needed, and signal errors or early 1029 * completion. */ 1030 1031 lp = xy->sc_dk.dk_label; 1032 1033 if (bounds_check_with_label(&xy->sc_dk, bp, 1034 (xy->flags & XY_WLABEL) != 0) <= 0) 1035 goto done; 1036 1037 /* 1038 * Now convert the block number to absolute and put it in 1039 * terms of the device's logical block size. 1040 */ 1041 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE); 1042 if (DISKPART(bp->b_dev) != RAW_PART) 1043 blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset; 1044 1045 bp->b_rawblkno = blkno; 1046 1047 /* 1048 * now we know we have a valid buf structure that we need to do I/O 1049 * on. 
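 *
 * (the buf simply goes on the per-drive queue; xyc_start() below scans
 *  the drives for a waiting buf whose iorq is free and feeds it to
 *  xyc_startbuf()/xyc_submit_iorq().  completion comes back through
 *  xycintr() -> xyc_remove_iorq() -> biodone().)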
1050 */ 1051 1052 s = splbio(); /* protect the queues */ 1053 1054 BUFQ_PUT(&xy->xyq, bp); /* XXX disksort_cylinder */ 1055 1056 /* start 'em up */ 1057 1058 xyc_start(xy->parent, NULL); 1059 1060 /* done! */ 1061 1062 splx(s); 1063 return; 1064 1065 bad: /* tells upper layers we have an error */ 1066 bp->b_flags |= B_ERROR; 1067 done: /* tells upper layers we are done with this 1068 * buf */ 1069 bp->b_resid = bp->b_bcount; 1070 biodone(bp); 1071 } 1072 /* 1073 * end of {b,c}devsw functions 1074 */ 1075 1076 /* 1077 * i n t e r r u p t f u n c t i o n 1078 * 1079 * xycintr: hardware interrupt. 1080 */ 1081 int 1082 xycintr(v) 1083 void *v; 1084 1085 { 1086 struct xyc_softc *xycsc = v; 1087 1088 /* kick the event counter */ 1089 xycsc->sc_intrcnt.ev_count++; 1090 1091 /* remove as many done IOPBs as possible */ 1092 xyc_remove_iorq(xycsc); 1093 1094 /* start any iorq's already waiting */ 1095 xyc_start(xycsc, NULL); 1096 1097 return (1); 1098 } 1099 /* 1100 * end of interrupt function 1101 */ 1102 1103 /* 1104 * i n t e r n a l f u n c t i o n s 1105 */ 1106 1107 /* 1108 * xyc_rqinit: fill out the fields of an I/O request 1109 */ 1110 1111 inline void 1112 xyc_rqinit(rq, xyc, xy, md, blk, cnt, db, bp) 1113 struct xy_iorq *rq; 1114 struct xyc_softc *xyc; 1115 struct xy_softc *xy; 1116 int md; 1117 u_long blk; 1118 int cnt; 1119 caddr_t db; 1120 struct buf *bp; 1121 { 1122 rq->xyc = xyc; 1123 rq->xy = xy; 1124 rq->ttl = XYC_MAXTTL + 10; 1125 rq->mode = md; 1126 rq->tries = rq->errno = rq->lasterror = 0; 1127 rq->blockno = blk; 1128 rq->sectcnt = cnt; 1129 rq->dbuf = rq->dbufbase = db; 1130 rq->buf = bp; 1131 } 1132 1133 /* 1134 * xyc_rqtopb: load up an IOPB based on an iorq 1135 */ 1136 1137 void 1138 xyc_rqtopb(iorq, iopb, cmd, subfun) 1139 struct xy_iorq *iorq; 1140 struct xy_iopb *iopb; 1141 int cmd, subfun; 1142 1143 { 1144 u_long block, dp; 1145 1146 /* normal IOPB case, standard stuff */ 1147 1148 /* chain bit handled later */ 1149 iopb->ien = (XY_STATE(iorq->mode) == XY_SUB_POLL) ? 0 : 1; 1150 iopb->com = cmd; 1151 iopb->errno = 0; 1152 iopb->errs = 0; 1153 iopb->done = 0; 1154 if (iorq->xy) { 1155 iopb->unit = iorq->xy->xy_drive; 1156 iopb->dt = iorq->xy->drive_type; 1157 } else { 1158 iopb->unit = 0; 1159 iopb->dt = 0; 1160 } 1161 block = iorq->blockno; 1162 if (iorq->xy == NULL || block == 0) { 1163 iopb->sect = iopb->head = iopb->cyl = 0; 1164 } else { 1165 iopb->sect = block % iorq->xy->nsect; 1166 block = block / iorq->xy->nsect; 1167 iopb->head = block % iorq->xy->nhead; 1168 block = block / iorq->xy->nhead; 1169 iopb->cyl = block; 1170 } 1171 iopb->scnt = iorq->sectcnt; 1172 if (iorq->dbuf == NULL) { 1173 iopb->dataa = 0; 1174 iopb->datar = 0; 1175 } else { 1176 dp = dvma_kvtopa(iorq->dbuf, iorq->xyc->bustype); 1177 iopb->dataa = (dp & 0xffff); 1178 iopb->datar = ((dp & 0xff0000) >> 16); 1179 } 1180 iopb->subfn = subfun; 1181 } 1182 1183 1184 /* 1185 * xyc_unbusy: wait for the xyc to go unbusy, or timeout. 1186 */ 1187 1188 int 1189 xyc_unbusy(xyc, del) 1190 1191 struct xyc *xyc; 1192 int del; 1193 1194 { 1195 while (del-- > 0) { 1196 if ((xyc->xyc_csr & XYC_GBSY) == 0) 1197 break; 1198 DELAY(1); 1199 } 1200 return(del == 0 ? XY_ERR_FAIL : XY_ERR_AOK); 1201 } 1202 1203 /* 1204 * xyc_cmd: front end for POLL'd and WAIT'd commands. Returns 0 or error. 1205 * note that NORM requests are handled separately. 
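 *
 * typical use, as in xy_init(): issue the command, then collect the
 * error code from the control iorq and free it with XYC_DONE, e.g.
 *
 *	err = xyc_cmd(xyc, XYCMD_RST, 0, xy->xy_drive, 0, 0, 0, fullmode);
 *	XYC_DONE(xyc, err);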
1206 */ 1207 int 1208 xyc_cmd(xycsc, cmd, subfn, unit, block, scnt, dptr, fullmode) 1209 struct xyc_softc *xycsc; 1210 int cmd, subfn, unit, block, scnt; 1211 char *dptr; 1212 int fullmode; 1213 { 1214 struct xy_iorq *iorq = xycsc->ciorq; 1215 struct xy_iopb *iopb = xycsc->ciopb; 1216 int submode = XY_STATE(fullmode); 1217 1218 /* 1219 * is someone else using the control iopq wait for it if we can 1220 */ 1221 start: 1222 if (submode == XY_SUB_WAIT && XY_STATE(iorq->mode) != XY_SUB_FREE) { 1223 if (tsleep(iorq, PRIBIO, "xyc_cmd", 0)) 1224 return(XY_ERR_FAIL); 1225 goto start; 1226 } 1227 1228 if (XY_STATE(iorq->mode) != XY_SUB_FREE) { 1229 DELAY(1000000); /* XY_SUB_POLL: steal the iorq */ 1230 iorq->mode = XY_SUB_FREE; 1231 printf("%s: stole control iopb\n", xycsc->sc_dev.dv_xname); 1232 } 1233 1234 /* init iorq/iopb */ 1235 1236 xyc_rqinit(iorq, xycsc, 1237 (unit == XYC_NOUNIT) ? NULL : xycsc->sc_drives[unit], 1238 fullmode, block, scnt, dptr, NULL); 1239 1240 /* load IOPB from iorq */ 1241 1242 xyc_rqtopb(iorq, iopb, cmd, subfn); 1243 1244 /* submit it for processing */ 1245 1246 xyc_submit_iorq(xycsc, iorq, fullmode); /* error code will be in iorq */ 1247 1248 return(XY_ERR_AOK); 1249 } 1250 1251 /* 1252 * xyc_startbuf 1253 * start a buffer for running 1254 */ 1255 1256 int 1257 xyc_startbuf(xycsc, xysc, bp) 1258 struct xyc_softc *xycsc; 1259 struct xy_softc *xysc; 1260 struct buf *bp; 1261 1262 { 1263 int partno; 1264 struct xy_iorq *iorq; 1265 struct xy_iopb *iopb; 1266 u_long block; 1267 caddr_t dbuf; 1268 1269 iorq = xysc->xyrq; 1270 iopb = iorq->iopb; 1271 1272 /* get buf */ 1273 1274 if (bp == NULL) 1275 panic("xyc_startbuf null buf"); 1276 1277 partno = DISKPART(bp->b_dev); 1278 #ifdef XYC_DEBUG 1279 printf("xyc_startbuf: %s%c: %s block %d\n", xysc->sc_dev.dv_xname, 1280 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno); 1281 printf("xyc_startbuf: b_bcount %d, b_data 0x%x\n", 1282 bp->b_bcount, bp->b_data); 1283 #endif 1284 1285 /* 1286 * load request. 1287 * 1288 * also, note that there are two kinds of buf structures, those with 1289 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is 1290 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users' 1291 * buffer which has already been mapped into DVMA space. (Not on sun3) 1292 * However, if B_PHYS is not set, then the buffer is a normal system 1293 * buffer which does *not* live in DVMA space. In that case we call 1294 * dvma_mapin to map it into DVMA space so we can do the DMA to it. 1295 * 1296 * in cases where we do a dvma_mapin, note that iorq points to the buffer 1297 * as mapped into DVMA space, where as the bp->b_data points to its 1298 * non-DVMA mapping. 1299 * 1300 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped 1301 * into dvma space, only that it was remapped into the kernel. 1302 * We ALWAYS have to remap the kernel buf into DVMA space. 1303 * (It is done inexpensively, using whole segments!) 1304 */ 1305 1306 block = bp->b_rawblkno; 1307 1308 dbuf = dvma_mapin(bp->b_data, bp->b_bcount, 0); 1309 if (dbuf == NULL) { /* out of DVMA space */ 1310 printf("%s: warning: out of DVMA space\n", 1311 xycsc->sc_dev.dv_xname); 1312 return (XY_ERR_FAIL); /* XXX: need some sort of 1313 * call-back scheme here? */ 1314 } 1315 1316 /* init iorq and load iopb from it */ 1317 1318 xyc_rqinit(iorq, xycsc, xysc, XY_SUB_NORM | XY_MODE_VERBO, block, 1319 bp->b_bcount / XYFM_BPS, dbuf, bp); 1320 1321 xyc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? 
XYCMD_RD : XYCMD_WR, 0);

	/* Instrumentation. */
	disk_busy(&xysc->sc_dk);

	return (XY_ERR_AOK);
}


/*
 * xyc_submit_iorq: submit an iorq for processing. returns XY_ERR_AOK
 * if ok. if it fails it returns an error code. type is XY_SUB_*.
 *
 * note: caller frees iorq in all cases except NORM
 *
 * return value:
 *   NORM: XY_AOK (req pending), XY_FAIL (couldn't submit request)
 *   WAIT: XY_AOK (success), <error-code> (failed)
 *   POLL: <same as WAIT>
 *   NOQ : <same as NORM>
 *
 * there are three sources for i/o requests:
 * [1] xystrategy: normal block I/O, using the "struct buf" system.
 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
 * [3] open/ioctl: these are I/O requests done in the context of a process,
 *     and the process should block until they are done.
 *
 * software state is stored in the iorq structure. each iorq has an
 * iopb structure. the hardware understands the iopb structure.
 * every command must go through an iopb. a 450 handles one iopb at a
 * time, whereas a 451 can take them in chains. [the 450 claims it
 * can handle chains, but it appears to be buggy...] iopbs are allocated
 * in DVMA space at boot up time. each disk gets one iopb, and the
 * controller gets one (for POLL and WAIT commands). what happens if
 * the iopb is busy? for i/o type [1], the buffers are queued at the
 * "buff" layer and picked up later by the interrupt routine. for case
 * [2] we can only be blocked if there is a WAIT type I/O request being
 * run. since this can only happen when we are crashing, we wait a sec
 * and then steal the IOPB. for case [3] the process can sleep
 * on the iorq free list until some iopbs are available.
 */


int
xyc_submit_iorq(xycsc, iorq, type)
	struct xyc_softc *xycsc;
	struct xy_iorq *iorq;
	int type;

{
	struct xy_iopb *iopb;
	u_long iopbaddr;

#ifdef XYC_DEBUG
	printf("xyc_submit_iorq(%s, addr=0x%x, type=%d)\n",
	    xycsc->sc_dev.dv_xname, iorq, type);
#endif

	/* first check and see if controller is busy */
	if ((xycsc->xyc->xyc_csr & XYC_GBSY) != 0) {
#ifdef XYC_DEBUG
		printf("xyc_submit_iorq: XYC not ready (BUSY)\n");
#endif
		if (type == XY_SUB_NOQ)
			return (XY_ERR_FAIL);	/* failed */
		switch (type) {
		case XY_SUB_NORM:
			return XY_ERR_AOK;	/* success */
		case XY_SUB_WAIT:
			while (iorq->iopb->done == 0) {
				(void) tsleep(iorq, PRIBIO, "xyciorq", 0);
			}
			return (iorq->errno);
		case XY_SUB_POLL:	/* steal controller */
			iopbaddr = xycsc->xyc->xyc_rsetup;	/* RESET */
			if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL)
				panic("xyc_submit_iorq: stuck xyc");
			printf("%s: stole controller\n",
			    xycsc->sc_dev.dv_xname);
			break;
		default:
			panic("xyc_submit_iorq adding");
		}
	}

	iopb = xyc_chain(xycsc, iorq);	/* build chain */
	if (iopb == NULL) {	/* nothing doing?
*/ 1408 if (type == XY_SUB_NORM || type == XY_SUB_NOQ) 1409 return(XY_ERR_AOK); 1410 panic("xyc_submit_iorq: xyc_chain failed!"); 1411 } 1412 iopbaddr = dvma_kvtopa(iopb, xycsc->bustype); 1413 1414 XYC_GO(xycsc->xyc, iopbaddr); 1415 1416 /* command now running, wrap it up */ 1417 switch (type) { 1418 case XY_SUB_NORM: 1419 case XY_SUB_NOQ: 1420 return (XY_ERR_AOK); /* success */ 1421 case XY_SUB_WAIT: 1422 while (iorq->iopb->done == 0) { 1423 (void) tsleep(iorq, PRIBIO, "xyciorq", 0); 1424 } 1425 return (iorq->errno); 1426 case XY_SUB_POLL: 1427 return (xyc_piodriver(xycsc, iorq)); 1428 default: 1429 panic("xyc_submit_iorq wrap up"); 1430 } 1431 panic("xyc_submit_iorq"); 1432 return 0; /* not reached */ 1433 } 1434 1435 1436 /* 1437 * xyc_chain: build a chain. return dvma address of first element in 1438 * the chain. iorq != NULL: means we only want that item on the chain. 1439 */ 1440 1441 struct xy_iopb * 1442 xyc_chain(xycsc, iorq) 1443 struct xyc_softc *xycsc; 1444 struct xy_iorq *iorq; 1445 { 1446 int togo, chain, hand; 1447 struct xy_iopb *iopb, *prev_iopb; 1448 memset(xycsc->xy_chain, 0, sizeof(xycsc->xy_chain)); 1449 1450 /* 1451 * promote control IOPB to the top 1452 */ 1453 if (iorq == NULL) { 1454 if ((XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_POLL || 1455 XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_WAIT) && 1456 xycsc->iopbase[XYC_CTLIOPB].done == 0) 1457 iorq = &xycsc->reqs[XYC_CTLIOPB]; 1458 } 1459 /* 1460 * special case: if iorq != NULL then we have a POLL or WAIT request. 1461 * we let these take priority and do them first. 1462 */ 1463 if (iorq) { 1464 xycsc->xy_chain[0] = iorq; 1465 iorq->iopb->chen = 0; 1466 return(iorq->iopb); 1467 } 1468 1469 /* 1470 * NORM case: do round robin and maybe chain (if allowed and possible) 1471 */ 1472 1473 chain = 0; 1474 hand = xycsc->xy_hand; 1475 xycsc->xy_hand = (xycsc->xy_hand + 1) % XYC_MAXIOPB; 1476 1477 for (togo = XYC_MAXIOPB ; 1478 togo > 0 ; 1479 togo--, hand = (hand + 1) % XYC_MAXIOPB) 1480 { 1481 1482 if (XY_STATE(xycsc->reqs[hand].mode) != XY_SUB_NORM || 1483 xycsc->iopbase[hand].done) 1484 continue; /* not ready-for-i/o */ 1485 1486 xycsc->xy_chain[chain] = &xycsc->reqs[hand]; 1487 iopb = xycsc->xy_chain[chain]->iopb; 1488 iopb->chen = 0; 1489 if (chain != 0) { /* adding a link to a chain? */ 1490 prev_iopb = xycsc->xy_chain[chain-1]->iopb; 1491 prev_iopb->chen = 1; 1492 prev_iopb->nxtiopb = 0xffff & 1493 dvma_kvtopa(iopb, xycsc->bustype); 1494 } else { /* head of chain */ 1495 iorq = xycsc->xy_chain[chain]; 1496 } 1497 chain++; 1498 if (xycsc->no_ols) break; /* quit if chaining dis-allowed */ 1499 } 1500 return(iorq ? iorq->iopb : NULL); 1501 } 1502 1503 /* 1504 * xyc_piodriver 1505 * 1506 * programmed i/o driver. this function takes over the computer 1507 * and drains off the polled i/o request. it returns the status of the iorq 1508 * the caller is interesting in. 
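 *
 * (in outline: spin in xyc_unbusy(); if the controller stays busy,
 *  try xyc_reset() -- giving up with XY_ERR_FAIL after two resets --
 *  otherwise reap finished IOPBs with xyc_remove_iorq() and restart
 *  the polled request until its iopb is marked done.)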
1509 */ 1510 int 1511 xyc_piodriver(xycsc, iorq) 1512 struct xyc_softc *xycsc; 1513 struct xy_iorq *iorq; 1514 1515 { 1516 int nreset = 0; 1517 int retval = 0; 1518 u_long res; 1519 1520 #ifdef XYC_DEBUG 1521 printf("xyc_piodriver(%s, 0x%x)\n", xycsc->sc_dev.dv_xname, iorq); 1522 #endif 1523 1524 while (iorq->iopb->done == 0) { 1525 1526 res = xyc_unbusy(xycsc->xyc, XYC_MAXTIME); 1527 1528 /* we expect some progress soon */ 1529 if (res == XY_ERR_FAIL && nreset >= 2) { 1530 xyc_reset(xycsc, 0, XY_RSET_ALL, XY_ERR_FAIL, 0); 1531 #ifdef XYC_DEBUG 1532 printf("xyc_piodriver: timeout\n"); 1533 #endif 1534 return (XY_ERR_FAIL); 1535 } 1536 if (res == XY_ERR_FAIL) { 1537 if (xyc_reset(xycsc, 0, 1538 (nreset++ == 0) ? XY_RSET_NONE : iorq, 1539 XY_ERR_FAIL, 1540 0) == XY_ERR_FAIL) 1541 return (XY_ERR_FAIL); /* flushes all but POLL 1542 * requests, resets */ 1543 continue; 1544 } 1545 1546 xyc_remove_iorq(xycsc); /* may resubmit request */ 1547 1548 if (iorq->iopb->done == 0) 1549 xyc_start(xycsc, iorq); 1550 } 1551 1552 /* get return value */ 1553 1554 retval = iorq->errno; 1555 1556 #ifdef XYC_DEBUG 1557 printf("xyc_piodriver: done, retval = 0x%x (%s)\n", 1558 iorq->errno, xyc_e2str(iorq->errno)); 1559 #endif 1560 1561 /* start up any bufs that have queued */ 1562 1563 xyc_start(xycsc, NULL); 1564 1565 return (retval); 1566 } 1567 1568 /* 1569 * xyc_xyreset: reset one drive. NOTE: assumes xyc was just reset. 1570 * we steal iopb[XYC_CTLIOPB] for this, but we put it back when we are done. 1571 */ 1572 void 1573 xyc_xyreset(xycsc, xysc) 1574 struct xyc_softc *xycsc; 1575 struct xy_softc *xysc; 1576 1577 { 1578 struct xy_iopb tmpiopb; 1579 u_long addr; 1580 int del; 1581 memcpy(&tmpiopb, xycsc->ciopb, sizeof(tmpiopb)); 1582 xycsc->ciopb->chen = xycsc->ciopb->done = xycsc->ciopb->errs = 0; 1583 xycsc->ciopb->ien = 0; 1584 xycsc->ciopb->com = XYCMD_RST; 1585 xycsc->ciopb->unit = xysc->xy_drive; 1586 addr = dvma_kvtopa(xycsc->ciopb, xycsc->bustype); 1587 1588 XYC_GO(xycsc->xyc, addr); 1589 1590 del = XYC_RESETUSEC; 1591 while (del > 0) { 1592 if ((xycsc->xyc->xyc_csr & XYC_GBSY) == 0) break; 1593 DELAY(1); 1594 del--; 1595 } 1596 1597 if (del <= 0 || xycsc->ciopb->errs) { 1598 printf("%s: off-line: %s\n", xycsc->sc_dev.dv_xname, 1599 xyc_e2str(xycsc->ciopb->errno)); 1600 del = xycsc->xyc->xyc_rsetup; 1601 if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL) 1602 panic("xyc_reset"); 1603 } else { 1604 xycsc->xyc->xyc_csr = XYC_IPND; /* clear IPND */ 1605 } 1606 memcpy(xycsc->ciopb, &tmpiopb, sizeof(tmpiopb)); 1607 } 1608 1609 1610 /* 1611 * xyc_reset: reset everything: requests are marked as errors except 1612 * a polled request (which is resubmitted) 1613 */ 1614 int 1615 xyc_reset(xycsc, quiet, blastmode, error, xysc) 1616 struct xyc_softc *xycsc; 1617 int quiet, error; 1618 struct xy_iorq *blastmode; 1619 struct xy_softc *xysc; 1620 1621 { 1622 int del = 0, lcv, retval = XY_ERR_AOK; 1623 struct xy_iorq *iorq; 1624 1625 /* soft reset hardware */ 1626 1627 if (!quiet) 1628 printf("%s: soft reset\n", xycsc->sc_dev.dv_xname); 1629 del = xycsc->xyc->xyc_rsetup; 1630 del = xyc_unbusy(xycsc->xyc, XYC_RESETUSEC); 1631 if (del == XY_ERR_FAIL) { 1632 blastmode = XY_RSET_ALL; /* dead, flush all requests */ 1633 retval = XY_ERR_FAIL; 1634 } 1635 if (xysc) 1636 xyc_xyreset(xycsc, xysc); 1637 1638 /* fix queues based on "blast-mode" */ 1639 1640 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 1641 iorq = &xycsc->reqs[lcv]; 1642 1643 if (XY_STATE(iorq->mode) != XY_SUB_POLL && 1644 XY_STATE(iorq->mode) != XY_SUB_WAIT 
&& 1645 XY_STATE(iorq->mode) != XY_SUB_NORM) 1646 /* is it active? */ 1647 continue; 1648 1649 if (blastmode == XY_RSET_ALL || 1650 blastmode != iorq) { 1651 /* failed */ 1652 iorq->errno = error; 1653 xycsc->iopbase[lcv].done = xycsc->iopbase[lcv].errs = 1; 1654 switch (XY_STATE(iorq->mode)) { 1655 case XY_SUB_NORM: 1656 iorq->buf->b_error = EIO; 1657 iorq->buf->b_flags |= B_ERROR; 1658 iorq->buf->b_resid = 1659 iorq->sectcnt * XYFM_BPS; 1660 /* Sun3: map/unmap regardless of B_PHYS */ 1661 dvma_mapout(iorq->dbufbase, 1662 iorq->buf->b_bcount); 1663 (void)BUFQ_GET(&iorq->xy->xyq); 1664 disk_unbusy(&iorq->xy->sc_dk, 1665 (iorq->buf->b_bcount - iorq->buf->b_resid), 1666 (iorq->buf->b_flags & B_READ)); 1667 biodone(iorq->buf); 1668 iorq->mode = XY_SUB_FREE; 1669 break; 1670 case XY_SUB_WAIT: 1671 wakeup(iorq); 1672 case XY_SUB_POLL: 1673 iorq->mode = 1674 XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1675 break; 1676 } 1677 1678 } else { 1679 1680 /* resubmit, no need to do anything here */ 1681 } 1682 } 1683 1684 /* 1685 * now, if stuff is waiting, start it. 1686 * since we just reset it should go 1687 */ 1688 xyc_start(xycsc, NULL); 1689 1690 return (retval); 1691 } 1692 1693 /* 1694 * xyc_start: start waiting buffers 1695 */ 1696 1697 void 1698 xyc_start(xycsc, iorq) 1699 struct xyc_softc *xycsc; 1700 struct xy_iorq *iorq; 1701 1702 { 1703 int lcv; 1704 struct xy_softc *xy; 1705 1706 if (iorq == NULL) { 1707 for (lcv = 0; lcv < XYC_MAXDEV ; lcv++) { 1708 if ((xy = xycsc->sc_drives[lcv]) == NULL) continue; 1709 if (BUFQ_PEEK(&xy->xyq) == NULL) continue; 1710 if (xy->xyrq->mode != XY_SUB_FREE) continue; 1711 xyc_startbuf(xycsc, xy, BUFQ_PEEK(&xy->xyq)); 1712 } 1713 } 1714 xyc_submit_iorq(xycsc, iorq, XY_SUB_NOQ); 1715 } 1716 1717 /* 1718 * xyc_remove_iorq: remove "done" IOPB's. 1719 */ 1720 1721 int 1722 xyc_remove_iorq(xycsc) 1723 struct xyc_softc *xycsc; 1724 1725 { 1726 int errno, rq, comm, errs; 1727 struct xyc *xyc = xycsc->xyc; 1728 u_long addr; 1729 struct xy_iopb *iopb; 1730 struct xy_iorq *iorq; 1731 struct buf *bp; 1732 1733 if (xyc->xyc_csr & XYC_DERR) { 1734 /* 1735 * DOUBLE ERROR: should never happen under normal use. This 1736 * error is so bad, you can't even tell which IOPB is bad, so 1737 * we dump them all. 1738 */ 1739 errno = XY_ERR_DERR; 1740 printf("%s: DOUBLE ERROR!\n", xycsc->sc_dev.dv_xname); 1741 if (xyc_reset(xycsc, 0, XY_RSET_ALL, errno, 0) != XY_ERR_AOK) { 1742 printf("%s: soft reset failed!\n", 1743 xycsc->sc_dev.dv_xname); 1744 panic("xyc_remove_iorq: controller DEAD"); 1745 } 1746 return (XY_ERR_AOK); 1747 } 1748 1749 /* 1750 * get iopb that is done, loop down the chain 1751 */ 1752 1753 if (xyc->xyc_csr & XYC_ERR) { 1754 xyc->xyc_csr = XYC_ERR; /* clear error condition */ 1755 } 1756 if (xyc->xyc_csr & XYC_IPND) { 1757 xyc->xyc_csr = XYC_IPND; /* clear interrupt */ 1758 } 1759 1760 for (rq = 0; rq < XYC_MAXIOPB; rq++) { 1761 iorq = xycsc->xy_chain[rq]; 1762 if (iorq == NULL) break; /* done ! 
*/ 1763 if (iorq->mode == 0 || XY_STATE(iorq->mode) == XY_SUB_DONE) 1764 continue; /* free, or done */ 1765 iopb = iorq->iopb; 1766 if (iopb->done == 0) 1767 continue; /* not done yet */ 1768 1769 comm = iopb->com; 1770 errs = iopb->errs; 1771 1772 if (errs) 1773 iorq->errno = iopb->errno; 1774 else 1775 iorq->errno = 0; 1776 1777 /* handle non-fatal errors */ 1778 1779 if (errs && 1780 xyc_error(xycsc, iorq, iopb, comm) == XY_ERR_AOK) 1781 continue; /* AOK: we resubmitted it */ 1782 1783 1784 /* this iorq is now done (hasn't been restarted or anything) */ 1785 1786 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror) 1787 xyc_perror(iorq, iopb, 0); 1788 1789 /* now, if read/write check to make sure we got all the data 1790 * we needed. (this may not be the case if we got an error in 1791 * the middle of a multisector request). */ 1792 1793 if ((iorq->mode & XY_MODE_B144) != 0 && errs == 0 && 1794 (comm == XYCMD_RD || comm == XYCMD_WR)) { 1795 /* we just successfully processed a bad144 sector 1796 * note: if we are in bad 144 mode, the pointers have 1797 * been advanced already (see above) and are pointing 1798 * at the bad144 sector. to exit bad144 mode, we 1799 * must advance the pointers 1 sector and issue a new 1800 * request if there are still sectors left to process 1801 * 1802 */ 1803 XYC_ADVANCE(iorq, 1); /* advance 1 sector */ 1804 1805 /* exit b144 mode */ 1806 iorq->mode = iorq->mode & (~XY_MODE_B144); 1807 1808 if (iorq->sectcnt) { /* more to go! */ 1809 iorq->lasterror = iorq->errno = iopb->errno = 0; 1810 iopb->errs = iopb->done = 0; 1811 iorq->tries = 0; 1812 iopb->scnt = iorq->sectcnt; 1813 iopb->cyl = iorq->blockno / 1814 iorq->xy->sectpercyl; 1815 iopb->head = 1816 (iorq->blockno / iorq->xy->nhead) % 1817 iorq->xy->nhead; 1818 iopb->sect = iorq->blockno % XYFM_BPS; 1819 addr = dvma_kvtopa(iorq->dbuf, xycsc->bustype); 1820 iopb->dataa = (addr & 0xffff); 1821 iopb->datar = ((addr & 0xff0000) >> 16); 1822 /* will resubit at end */ 1823 continue; 1824 } 1825 } 1826 /* final cleanup, totally done with this request */ 1827 1828 switch (XY_STATE(iorq->mode)) { 1829 case XY_SUB_NORM: 1830 bp = iorq->buf; 1831 if (errs) { 1832 bp->b_error = EIO; 1833 bp->b_flags |= B_ERROR; 1834 bp->b_resid = iorq->sectcnt * XYFM_BPS; 1835 } else { 1836 bp->b_resid = 0; /* done */ 1837 } 1838 /* Sun3: map/unmap regardless of B_PHYS */ 1839 dvma_mapout(iorq->dbufbase, 1840 iorq->buf->b_bcount); 1841 (void)BUFQ_GET(&iorq->xy->xyq); 1842 disk_unbusy(&iorq->xy->sc_dk, 1843 (bp->b_bcount - bp->b_resid), 1844 (bp->b_flags & B_READ)); 1845 iorq->mode = XY_SUB_FREE; 1846 biodone(bp); 1847 break; 1848 case XY_SUB_WAIT: 1849 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1850 wakeup(iorq); 1851 break; 1852 case XY_SUB_POLL: 1853 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1854 break; 1855 } 1856 } 1857 1858 return (XY_ERR_AOK); 1859 } 1860 1861 /* 1862 * xyc_perror: print error. 1863 * - if still_trying is true: we got an error, retried and got a 1864 * different error. in that case lasterror is the old error, 1865 * and errno is the new one. 1866 * - if still_trying is not true, then if we ever had an error it 1867 * is in lasterror. also, if iorq->errno == 0, then we recovered 1868 * from that error (otherwise iorq->errno == iorq->lasterror). 1869 */ 1870 void 1871 xyc_perror(iorq, iopb, still_trying) 1872 struct xy_iorq *iorq; 1873 struct xy_iopb *iopb; 1874 int still_trying; 1875 1876 { 1877 1878 int error = iorq->lasterror; 1879 1880 printf("%s", (iorq->xy) ? 
iorq->xy->sc_dev.dv_xname 1881 : iorq->xyc->sc_dev.dv_xname); 1882 if (iorq->buf) 1883 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev)); 1884 if (iopb->com == XYCMD_RD || iopb->com == XYCMD_WR) 1885 printf("%s %d/%d/%d: ", 1886 (iopb->com == XYCMD_RD) ? "read" : "write", 1887 iopb->cyl, iopb->head, iopb->sect); 1888 printf("%s", xyc_e2str(error)); 1889 1890 if (still_trying) 1891 printf(" [still trying, new error=%s]", xyc_e2str(iorq->errno)); 1892 else 1893 if (iorq->errno == 0) 1894 printf(" [recovered in %d tries]", iorq->tries); 1895 1896 printf("\n"); 1897 } 1898 1899 /* 1900 * xyc_error: non-fatal error encountered... recover. 1901 * return AOK if resubmitted, return FAIL if this iopb is done 1902 */ 1903 int 1904 xyc_error(xycsc, iorq, iopb, comm) 1905 struct xyc_softc *xycsc; 1906 struct xy_iorq *iorq; 1907 struct xy_iopb *iopb; 1908 int comm; 1909 1910 { 1911 int errno = iorq->errno; 1912 int erract = xyc_entoact(errno); 1913 int oldmode, advance, i; 1914 1915 if (erract == XY_ERA_RSET) { /* some errors require a reset */ 1916 oldmode = iorq->mode; 1917 iorq->mode = XY_SUB_DONE | (~XY_SUB_MASK & oldmode); 1918 /* make xyc_start ignore us */ 1919 xyc_reset(xycsc, 1, XY_RSET_NONE, errno, iorq->xy); 1920 iorq->mode = oldmode; 1921 } 1922 /* check for read/write to a sector in bad144 table if bad: redirect 1923 * request to bad144 area */ 1924 1925 if ((comm == XYCMD_RD || comm == XYCMD_WR) && 1926 (iorq->mode & XY_MODE_B144) == 0) { 1927 advance = iorq->sectcnt - iopb->scnt; 1928 XYC_ADVANCE(iorq, advance); 1929 if ((i = isbad(&iorq->xy->dkb, iorq->blockno / iorq->xy->sectpercyl, 1930 (iorq->blockno / iorq->xy->nsect) % iorq->xy->nhead, 1931 iorq->blockno % iorq->xy->nsect)) != -1) { 1932 iorq->mode |= XY_MODE_B144; /* enter bad144 mode & 1933 * redirect */ 1934 iopb->errno = iopb->done = iopb->errs = 0; 1935 iopb->scnt = 1; 1936 iopb->cyl = (iorq->xy->ncyl + iorq->xy->acyl) - 2; 1937 /* second to last acyl */ 1938 i = iorq->xy->sectpercyl - 1 - i; /* follow bad144 1939 * standard */ 1940 iopb->head = i / iorq->xy->nhead; 1941 iopb->sect = i % iorq->xy->nhead; 1942 /* will resubmit when we come out of remove_iorq */ 1943 return (XY_ERR_AOK); /* recovered! */ 1944 } 1945 } 1946 1947 /* 1948 * it isn't a bad144 sector, must be real error! see if we can retry 1949 * it? 1950 */ 1951 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror) 1952 xyc_perror(iorq, iopb, 1); /* inform of error state 1953 * change */ 1954 iorq->lasterror = errno; 1955 1956 if ((erract == XY_ERA_RSET || erract == XY_ERA_HARD) 1957 && iorq->tries < XYC_MAXTRIES) { /* retry? */ 1958 iorq->tries++; 1959 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0; 1960 /* will resubmit at end of remove_iorq */ 1961 return (XY_ERR_AOK); /* recovered! */ 1962 } 1963 1964 /* failed to recover from this error */ 1965 return (XY_ERR_FAIL); 1966 } 1967 1968 /* 1969 * xyc_tick: make sure xy is still alive and ticking (err, kicking). 
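 *
 * (called every XYC_TICKCNT ticks via the callout started in
 *  xycattach(); each active iorq's "ttl" is counted down, and if any
 *  hits zero the controller is assumed wedged and is reset with
 *  XY_ERR_FAIL.)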
1970 */ 1971 void 1972 xyc_tick(arg) 1973 void *arg; 1974 1975 { 1976 struct xyc_softc *xycsc = arg; 1977 int lcv, s, reset = 0; 1978 1979 /* reduce ttl for each request if one goes to zero, reset xyc */ 1980 s = splbio(); 1981 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 1982 if (xycsc->reqs[lcv].mode == 0 || 1983 XY_STATE(xycsc->reqs[lcv].mode) == XY_SUB_DONE) 1984 continue; 1985 xycsc->reqs[lcv].ttl--; 1986 if (xycsc->reqs[lcv].ttl == 0) 1987 reset = 1; 1988 } 1989 if (reset) { 1990 printf("%s: watchdog timeout\n", xycsc->sc_dev.dv_xname); 1991 xyc_reset(xycsc, 0, XY_RSET_NONE, XY_ERR_FAIL, NULL); 1992 } 1993 splx(s); 1994 1995 /* until next time */ 1996 1997 callout_reset(&xycsc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xycsc); 1998 } 1999 2000 /* 2001 * xyc_ioctlcmd: this function provides a user level interface to the 2002 * controller via ioctl. this allows "format" programs to be written 2003 * in user code, and is also useful for some debugging. we return 2004 * an error code. called at user priority. 2005 * 2006 * XXX missing a few commands (see the 7053 driver for ideas) 2007 */ 2008 int 2009 xyc_ioctlcmd(xy, dev, xio) 2010 struct xy_softc *xy; 2011 dev_t dev; 2012 struct xd_iocmd *xio; 2013 2014 { 2015 int s, err, rqno; 2016 void * dvmabuf = NULL; 2017 struct xyc_softc *xycsc; 2018 2019 /* check sanity of requested command */ 2020 2021 switch (xio->cmd) { 2022 2023 case XYCMD_NOP: /* no op: everything should be zero */ 2024 if (xio->subfn || xio->dptr || xio->dlen || 2025 xio->block || xio->sectcnt) 2026 return (EINVAL); 2027 break; 2028 2029 case XYCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */ 2030 case XYCMD_WR: 2031 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS || 2032 xio->sectcnt * XYFM_BPS != xio->dlen || xio->dptr == NULL) 2033 return (EINVAL); 2034 break; 2035 2036 case XYCMD_SK: /* seek: doesn't seem useful to export this */ 2037 return (EINVAL); 2038 2039 break; 2040 2041 default: 2042 return (EINVAL);/* ??? */ 2043 } 2044 2045 /* create DVMA buffer for request if needed */ 2046 2047 if (xio->dlen) { 2048 dvmabuf = dvma_malloc(xio->dlen); 2049 if (xio->cmd == XYCMD_WR) { 2050 err = copyin(xio->dptr, dvmabuf, xio->dlen); 2051 if (err) { 2052 dvma_free(dvmabuf, xio->dlen); 2053 return (err); 2054 } 2055 } 2056 } 2057 /* do it! 
*/ 2058 2059 err = 0; 2060 xycsc = xy->parent; 2061 s = splbio(); 2062 rqno = xyc_cmd(xycsc, xio->cmd, xio->subfn, xy->xy_drive, xio->block, 2063 xio->sectcnt, dvmabuf, XY_SUB_WAIT); 2064 if (rqno == XY_ERR_FAIL) { 2065 err = EIO; 2066 goto done; 2067 } 2068 xio->errno = xycsc->ciorq->errno; 2069 xio->tries = xycsc->ciorq->tries; 2070 XYC_DONE(xycsc, err); 2071 2072 if (xio->cmd == XYCMD_RD) 2073 err = copyout(dvmabuf, xio->dptr, xio->dlen); 2074 2075 done: 2076 splx(s); 2077 if (dvmabuf) 2078 dvma_free(dvmabuf, xio->dlen); 2079 return (err); 2080 } 2081 2082 /* 2083 * xyc_e2str: convert error code number into an error string 2084 */ 2085 char * 2086 xyc_e2str(no) 2087 int no; 2088 { 2089 switch (no) { 2090 case XY_ERR_FAIL: 2091 return ("Software fatal error"); 2092 case XY_ERR_DERR: 2093 return ("DOUBLE ERROR"); 2094 case XY_ERR_AOK: 2095 return ("Successful completion"); 2096 case XY_ERR_IPEN: 2097 return("Interrupt pending"); 2098 case XY_ERR_BCFL: 2099 return("Busy conflict"); 2100 case XY_ERR_TIMO: 2101 return("Operation timeout"); 2102 case XY_ERR_NHDR: 2103 return("Header not found"); 2104 case XY_ERR_HARD: 2105 return("Hard ECC error"); 2106 case XY_ERR_ICYL: 2107 return("Illegal cylinder address"); 2108 case XY_ERR_ISEC: 2109 return("Illegal sector address"); 2110 case XY_ERR_SMAL: 2111 return("Last sector too small"); 2112 case XY_ERR_SACK: 2113 return("Slave ACK error (non-existent memory)"); 2114 case XY_ERR_CHER: 2115 return("Cylinder and head/header error"); 2116 case XY_ERR_SRTR: 2117 return("Auto-seek retry successful"); 2118 case XY_ERR_WPRO: 2119 return("Write-protect error"); 2120 case XY_ERR_UIMP: 2121 return("Unimplemented command"); 2122 case XY_ERR_DNRY: 2123 return("Drive not ready"); 2124 case XY_ERR_SZER: 2125 return("Sector count zero"); 2126 case XY_ERR_DFLT: 2127 return("Drive faulted"); 2128 case XY_ERR_ISSZ: 2129 return("Illegal sector size"); 2130 case XY_ERR_SLTA: 2131 return("Self test A"); 2132 case XY_ERR_SLTB: 2133 return("Self test B"); 2134 case XY_ERR_SLTC: 2135 return("Self test C"); 2136 case XY_ERR_SOFT: 2137 return("Soft ECC error"); 2138 case XY_ERR_SFOK: 2139 return("Soft ECC error recovered"); 2140 case XY_ERR_IHED: 2141 return("Illegal head"); 2142 case XY_ERR_DSEQ: 2143 return("Disk sequencer error"); 2144 case XY_ERR_SEEK: 2145 return("Seek error"); 2146 default: 2147 return ("Unknown error"); 2148 } 2149 } 2150 2151 int 2152 xyc_entoact(errno) 2153 2154 int errno; 2155 2156 { 2157 switch (errno) { 2158 case XY_ERR_FAIL: case XY_ERR_DERR: case XY_ERR_IPEN: 2159 case XY_ERR_BCFL: case XY_ERR_ICYL: case XY_ERR_ISEC: 2160 case XY_ERR_UIMP: case XY_ERR_SZER: case XY_ERR_ISSZ: 2161 case XY_ERR_SLTA: case XY_ERR_SLTB: case XY_ERR_SLTC: 2162 case XY_ERR_IHED: case XY_ERR_SACK: case XY_ERR_SMAL: 2163 2164 return(XY_ERA_PROG); /* program error ! */ 2165 2166 case XY_ERR_TIMO: case XY_ERR_NHDR: case XY_ERR_HARD: 2167 case XY_ERR_DNRY: case XY_ERR_CHER: case XY_ERR_SEEK: 2168 case XY_ERR_SOFT: 2169 2170 return(XY_ERA_HARD); /* hard error, retry */ 2171 2172 case XY_ERR_DFLT: case XY_ERR_DSEQ: 2173 2174 return(XY_ERA_RSET); /* hard error reset */ 2175 2176 case XY_ERR_SRTR: case XY_ERR_SFOK: case XY_ERR_AOK: 2177 2178 return(XY_ERA_SOFT); /* an FYI error */ 2179 2180 case XY_ERR_WPRO: 2181 2182 return(XY_ERA_WPRO); /* write protect */ 2183 } 2184 2185 return(XY_ERA_PROG); /* ??? */ 2186 } 2187