1 /* $NetBSD: xy.c,v 1.56 2006/05/14 21:57:13 elad Exp $ */ 2 3 /* 4 * 5 * Copyright (c) 1995 Charles D. Cranor 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Charles D. Cranor. 19 * 4. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * 36 * x y . c x y l o g i c s 4 5 0 / 4 5 1 s m d d r i v e r 37 * 38 * author: Chuck Cranor <chuck@ccrc.wustl.edu> 39 * id: &Id: xy.c,v 1.1 1995/09/25 20:35:14 chuck Exp & 40 * started: 14-Sep-95 41 * references: [1] Xylogics Model 753 User's Manual 42 * part number: 166-753-001, Revision B, May 21, 1988. 43 * "Your Partner For Performance" 44 * [2] other NetBSD disk device drivers 45 * [3] Xylogics Model 450 User's Manual 46 * part number: 166-017-001, Revision B, 1983. 47 * [4] Addendum to Xylogics Model 450 Disk Controller User's 48 * Manual, Jan. 1985. 49 * [5] The 451 Controller, Rev. B3, September 2, 1986. 
50 * [6] David Jones <dej@achilles.net>'s unfinished 450/451 driver 51 * 52 */ 53 54 #include <sys/cdefs.h> 55 __KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.56 2006/05/14 21:57:13 elad Exp $"); 56 57 #undef XYC_DEBUG /* full debug */ 58 #undef XYC_DIAG /* extra sanity checks */ 59 #if defined(DIAGNOSTIC) && !defined(XYC_DIAG) 60 #define XYC_DIAG /* link in with master DIAG option */ 61 #endif 62 63 #include <sys/param.h> 64 #include <sys/proc.h> 65 #include <sys/systm.h> 66 #include <sys/kernel.h> 67 #include <sys/file.h> 68 #include <sys/stat.h> 69 #include <sys/ioctl.h> 70 #include <sys/buf.h> 71 #include <sys/bufq.h> 72 #include <sys/uio.h> 73 #include <sys/malloc.h> 74 #include <sys/device.h> 75 #include <sys/disklabel.h> 76 #include <sys/disk.h> 77 #include <sys/syslog.h> 78 #include <sys/dkbad.h> 79 #include <sys/conf.h> 80 #include <sys/kauth.h> 81 82 #include <uvm/uvm_extern.h> 83 84 #include <dev/sun/disklabel.h> 85 86 #include <machine/autoconf.h> 87 #include <machine/dvma.h> 88 89 #include <sun3/dev/xyreg.h> 90 #include <sun3/dev/xyvar.h> 91 #include <sun3/dev/xio.h> 92 93 #include "locators.h" 94 95 /* 96 * Print a complaint when no xy children were specified 97 * in the config file. Better than a link error... 98 * 99 * XXX: Some folks say this driver should be split in two, 100 * but that seems pointless with ONLY one type of child. 101 */ 102 #include "xy.h" 103 #if NXY == 0 104 #error "xyc but no xy?" 105 #endif 106 107 /* 108 * macros 109 */ 110 111 /* 112 * XYC_GO: start iopb ADDR (DVMA addr in a u_long) on XYC 113 */ 114 #define XYC_GO(XYC, ADDR) { \ 115 (XYC)->xyc_addr_lo = ((ADDR) & 0xff); \ 116 (ADDR) = ((ADDR) >> 8); \ 117 (XYC)->xyc_addr_hi = ((ADDR) & 0xff); \ 118 (ADDR) = ((ADDR) >> 8); \ 119 (XYC)->xyc_reloc_lo = ((ADDR) & 0xff); \ 120 (ADDR) = ((ADDR) >> 8); \ 121 (XYC)->xyc_reloc_hi = (ADDR); \ 122 (XYC)->xyc_csr = XYC_GBSY; /* go! 
*/ \ 123 } 124 125 /* 126 * XYC_DONE: don't need IORQ, get error code and free (done after xyc_cmd) 127 */ 128 129 #define XYC_DONE(SC,ER) { \ 130 if ((ER) == XY_ERR_AOK) { \ 131 (ER) = (SC)->ciorq->errno; \ 132 (SC)->ciorq->mode = XY_SUB_FREE; \ 133 wakeup((SC)->ciorq); \ 134 } \ 135 } 136 137 /* 138 * XYC_ADVANCE: advance iorq's pointers by a number of sectors 139 */ 140 141 #define XYC_ADVANCE(IORQ, N) { \ 142 if (N) { \ 143 (IORQ)->sectcnt -= (N); \ 144 (IORQ)->blockno += (N); \ 145 (IORQ)->dbuf += ((N)*XYFM_BPS); \ 146 } \ 147 } 148 149 /* 150 * note - addresses you can sleep on: 151 * [1] & of xy_softc's "state" (waiting for a chance to attach a drive) 152 * [2] & an iorq (waiting for an XY_SUB_WAIT iorq to finish) 153 */ 154 155 156 /* 157 * function prototypes 158 * "xyc_*" functions are internal, all others are external interfaces 159 */ 160 161 /* internals */ 162 struct xy_iopb *xyc_chain(struct xyc_softc *, struct xy_iorq *); 163 int xyc_cmd(struct xyc_softc *, int, int, int, int, int, char *, int); 164 const char *xyc_e2str(int); 165 int xyc_entoact(int); 166 int xyc_error(struct xyc_softc *, struct xy_iorq *, struct xy_iopb *, int); 167 int xyc_ioctlcmd(struct xy_softc *, dev_t dev, struct xd_iocmd *); 168 void xyc_perror(struct xy_iorq *, struct xy_iopb *, int); 169 int xyc_piodriver(struct xyc_softc *, struct xy_iorq *); 170 int xyc_remove_iorq(struct xyc_softc *); 171 int xyc_reset(struct xyc_softc *, int, struct xy_iorq *, int, 172 struct xy_softc *); 173 inline void xyc_rqinit(struct xy_iorq *, struct xyc_softc *, struct xy_softc *, 174 int, u_long, int, caddr_t, struct buf *); 175 void xyc_rqtopb(struct xy_iorq *, struct xy_iopb *, int, int); 176 void xyc_start(struct xyc_softc *, struct xy_iorq *); 177 int xyc_startbuf(struct xyc_softc *, struct xy_softc *, struct buf *); 178 int xyc_submit_iorq(struct xyc_softc *, struct xy_iorq *, int); 179 void xyc_tick(void *); 180 int xyc_unbusy(struct xyc *, int); 181 void xyc_xyreset(struct xyc_softc *, struct xy_softc *); 182 183 /* machine interrupt hook */ 184 int xycintr(void *); 185 186 /* autoconf */ 187 static int xycmatch(struct device *, struct cfdata *, void *); 188 static void xycattach(struct device *, struct device *, void *); 189 static int xyc_print(void *, const char *); 190 191 static int xymatch(struct device *, struct cfdata *, void *); 192 static void xyattach(struct device *, struct device *, void *); 193 static void xy_init(struct xy_softc *); 194 195 static void xydummystrat(struct buf *); 196 int xygetdisklabel(struct xy_softc *, void *); 197 198 /* 199 * cfattach's: device driver interface to autoconfig 200 */ 201 202 CFATTACH_DECL(xyc, sizeof(struct xyc_softc), 203 xycmatch, xycattach, NULL, NULL); 204 205 CFATTACH_DECL(xy, sizeof(struct xy_softc), 206 xymatch, xyattach, NULL, NULL); 207 208 extern struct cfdriver xy_cd; 209 210 struct xyc_attach_args { /* this is the "aux" args to xyattach */ 211 int driveno; /* unit number */ 212 }; 213 214 dev_type_open(xyopen); 215 dev_type_close(xyclose); 216 dev_type_read(xyread); 217 dev_type_write(xywrite); 218 dev_type_ioctl(xyioctl); 219 dev_type_strategy(xystrategy); 220 dev_type_dump(xydump); 221 dev_type_size(xysize); 222 223 const struct bdevsw xy_bdevsw = { 224 xyopen, xyclose, xystrategy, xyioctl, xydump, xysize, D_DISK 225 }; 226 227 const struct cdevsw xy_cdevsw = { 228 xyopen, xyclose, xyread, xywrite, xyioctl, 229 nostop, notty, nopoll, nommap, nokqfilter, D_DISK 230 }; 231 232 /* 233 * dkdriver 234 */ 235 236 struct dkdriver xydkdriver = { 
xystrategy }; 237 238 /* 239 * start: disk label fix code (XXX) 240 */ 241 242 static void *xy_labeldata; 243 244 static void 245 xydummystrat(struct buf *bp) 246 { 247 if (bp->b_bcount != XYFM_BPS) 248 panic("xydummystrat"); 249 memcpy(bp->b_data, xy_labeldata, XYFM_BPS); 250 bp->b_flags |= B_DONE; 251 bp->b_flags &= ~B_BUSY; 252 } 253 254 int 255 xygetdisklabel(struct xy_softc *xy, void *b) 256 { 257 const char *err; 258 struct sun_disklabel *sdl; 259 260 /* We already have the label data in `b'; setup for dummy strategy */ 261 xy_labeldata = b; 262 263 /* Required parameter for readdisklabel() */ 264 xy->sc_dk.dk_label->d_secsize = XYFM_BPS; 265 266 err = readdisklabel(MAKEDISKDEV(0, device_unit(&xy->sc_dev), RAW_PART), 267 xydummystrat, 268 xy->sc_dk.dk_label, xy->sc_dk.dk_cpulabel); 269 if (err) { 270 printf("%s: %s\n", xy->sc_dev.dv_xname, err); 271 return(XY_ERR_FAIL); 272 } 273 274 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */ 275 sdl = (struct sun_disklabel *)xy->sc_dk.dk_cpulabel->cd_block; 276 if (sdl->sl_magic == SUN_DKMAGIC) 277 xy->pcyl = sdl->sl_pcyl; 278 else { 279 printf("%s: WARNING: no `pcyl' in disk label.\n", 280 xy->sc_dev.dv_xname); 281 xy->pcyl = xy->sc_dk.dk_label->d_ncylinders + 282 xy->sc_dk.dk_label->d_acylinders; 283 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n", 284 xy->sc_dev.dv_xname, xy->pcyl); 285 } 286 287 xy->ncyl = xy->sc_dk.dk_label->d_ncylinders; 288 xy->acyl = xy->sc_dk.dk_label->d_acylinders; 289 xy->nhead = xy->sc_dk.dk_label->d_ntracks; 290 xy->nsect = xy->sc_dk.dk_label->d_nsectors; 291 xy->sectpercyl = xy->nhead * xy->nsect; 292 xy->sc_dk.dk_label->d_secsize = XYFM_BPS; /* not handled by 293 * sun->bsd */ 294 return(XY_ERR_AOK); 295 } 296 297 /* 298 * end: disk label fix code (XXX) 299 */ 300 301 /* 302 * a u t o c o n f i g f u n c t i o n s 303 */ 304 305 /* 306 * xycmatch: determine if xyc is present or not. we do a 307 * soft reset to detect the xyc. 308 */ 309 static int 310 xycmatch(struct device *parent, struct cfdata *cf, void *aux) 311 { 312 struct confargs *ca = aux; 313 314 /* No default VME address. */ 315 if (ca->ca_paddr == -1) 316 return (0); 317 318 /* Make sure something is there... */ 319 if (bus_peek(ca->ca_bustype, ca->ca_paddr + 5, 1) == -1) 320 return (0); 321 322 /* Default interrupt priority. */ 323 if (ca->ca_intpri == -1) 324 ca->ca_intpri = 2; 325 326 return (1); 327 } 328 329 /* 330 * xycattach: attach controller 331 */ 332 static void 333 xycattach(struct device *parent, struct device *self, void *aux) 334 { 335 struct xyc_softc *xyc = (void *) self; 336 struct confargs *ca = aux; 337 struct xyc_attach_args xa; 338 int lcv, err, res, pbsz; 339 void *tmp, *tmp2; 340 u_long ultmp; 341 342 /* get addressing and intr level stuff from autoconfig and load it 343 * into our xyc_softc. */ 344 345 xyc->xyc = (struct xyc *) 346 bus_mapin(ca->ca_bustype, ca->ca_paddr, sizeof(struct xyc)); 347 xyc->bustype = ca->ca_bustype; 348 xyc->ipl = ca->ca_intpri; 349 xyc->vector = ca->ca_intvec; 350 xyc->no_ols = 0; /* XXX should be from config */ 351 352 for (lcv = 0; lcv < XYC_MAXDEV; lcv++) 353 xyc->sc_drives[lcv] = (struct xy_softc *) 0; 354 355 /* 356 * allocate and zero buffers 357 * check boundaries of the KVA's ... all IOPBs must reside in 358 * the same 64K region. 
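 * (the 64K restriction appears to be because a chained iopb's nxtiopb
 * field carries only the low 16 bits of the DVMA address -- see
 * xyc_chain() below -- so every iopb must sit in the same relocation
 * window.  if the first dvma_malloc() happens to straddle a 64K
 * boundary we simply allocate a second buffer and free the first.)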
359 */ 360 361 pbsz = XYC_MAXIOPB * sizeof(struct xy_iopb); 362 tmp = tmp2 = (struct xy_iopb *) dvma_malloc(pbsz); /* KVA */ 363 ultmp = (u_long) tmp; 364 if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) { 365 tmp = (struct xy_iopb *) dvma_malloc(pbsz); /* retry! */ 366 dvma_free(tmp2, pbsz); 367 ultmp = (u_long) tmp; 368 if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) { 369 printf("%s: can't alloc IOPB mem in 64K\n", 370 xyc->sc_dev.dv_xname); 371 return; 372 } 373 } 374 memset(tmp, 0, pbsz); 375 xyc->iopbase = tmp; 376 xyc->dvmaiopb = (struct xy_iopb *) 377 dvma_kvtopa(xyc->iopbase, xyc->bustype); 378 xyc->reqs = (struct xy_iorq *) 379 malloc(XYC_MAXIOPB * sizeof(struct xy_iorq), M_DEVBUF, M_NOWAIT); 380 if (xyc->reqs == NULL) 381 panic("xyc malloc"); 382 memset(xyc->reqs, 0, XYC_MAXIOPB * sizeof(struct xy_iorq)); 383 384 /* 385 * init iorq to iopb pointers, and non-zero fields in the 386 * iopb which never change. 387 */ 388 389 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 390 xyc->xy_chain[lcv] = NULL; 391 xyc->reqs[lcv].iopb = &xyc->iopbase[lcv]; 392 xyc->iopbase[lcv].asr = 1; /* always the same */ 393 xyc->iopbase[lcv].eef = 1; /* always the same */ 394 xyc->iopbase[lcv].ecm = XY_ECM; /* always the same */ 395 xyc->iopbase[lcv].aud = 1; /* always the same */ 396 xyc->iopbase[lcv].relo = 1; /* always the same */ 397 xyc->iopbase[lcv].thro = XY_THRO;/* always the same */ 398 } 399 xyc->ciorq = &xyc->reqs[XYC_CTLIOPB]; /* short hand name */ 400 xyc->ciopb = &xyc->iopbase[XYC_CTLIOPB]; /* short hand name */ 401 xyc->xy_hand = 0; 402 403 /* read controller parameters and insure we have a 450/451 */ 404 405 err = xyc_cmd(xyc, XYCMD_ST, 0, 0, 0, 0, 0, XY_SUB_POLL); 406 res = xyc->ciopb->ctyp; 407 XYC_DONE(xyc, err); 408 if (res != XYCT_450) { 409 if (err) 410 printf(": %s: ", xyc_e2str(err)); 411 printf(": doesn't identify as a 450/451\n"); 412 return; 413 } 414 printf(": Xylogics 450/451"); 415 if (xyc->no_ols) 416 printf(" [OLS disabled]"); /* 450 doesn't overlap seek right */ 417 printf("\n"); 418 if (err) { 419 printf("%s: error: %s\n", xyc->sc_dev.dv_xname, 420 xyc_e2str(err)); 421 return; 422 } 423 if ((xyc->xyc->xyc_csr & XYC_ADRM) == 0) { 424 printf("%s: 24 bit addressing turned off\n", 425 xyc->sc_dev.dv_xname); 426 printf("please set hardware jumpers JM1-JM2=in, JM3-JM4=out\n"); 427 printf("to enable 24 bit mode and this driver\n"); 428 return; 429 } 430 431 /* link in interrupt with higher level software */ 432 isr_add_vectored(xycintr, (void *)xyc, 433 ca->ca_intpri, ca->ca_intvec); 434 evcnt_attach_dynamic(&xyc->sc_intrcnt, EVCNT_TYPE_INTR, NULL, 435 xyc->sc_dev.dv_xname, "intr"); 436 437 callout_init(&xyc->sc_tick_ch); 438 439 /* now we must look for disks using autoconfig */ 440 for (xa.driveno = 0; xa.driveno < XYC_MAXDEV; xa.driveno++) 441 (void) config_found(self, (void *) &xa, xyc_print); 442 443 /* start the watchdog clock */ 444 callout_reset(&xyc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xyc); 445 } 446 447 static int 448 xyc_print(void *aux, const char *name) 449 { 450 struct xyc_attach_args *xa = aux; 451 452 if (name != NULL) 453 aprint_normal("%s: ", name); 454 455 if (xa->driveno != -1) 456 aprint_normal(" drive %d", xa->driveno); 457 458 return UNCONF; 459 } 460 461 /* 462 * xymatch: probe for disk. 463 * 464 * note: we almost always say disk is present. this allows us to 465 * spin up and configure a disk after the system is booted (we can 466 * call xyattach!). 
Also, wire down the relationship between the 467 * xy* and xyc* devices, to simplify boot device identification. 468 */ 469 static int 470 xymatch(struct device *parent, struct cfdata *cf, void *aux) 471 { 472 struct xyc_attach_args *xa = aux; 473 int xy_unit; 474 475 /* Match only on the "wired-down" controller+disk. */ 476 xy_unit = device_unit(parent) * 2 + xa->driveno; 477 if (cf->cf_unit != xy_unit) 478 return (0); 479 480 return (1); 481 } 482 483 /* 484 * xyattach: attach a disk. 485 */ 486 static void 487 xyattach(struct device *parent, struct device *self, void *aux) 488 { 489 struct xy_softc *xy = (void *) self; 490 struct xyc_softc *xyc = (void *) parent; 491 struct xyc_attach_args *xa = aux; 492 493 printf("\n"); 494 495 /* 496 * Always re-initialize the disk structure. We want statistics 497 * to start with a clean slate. 498 */ 499 memset(&xy->sc_dk, 0, sizeof(xy->sc_dk)); 500 xy->sc_dk.dk_driver = &xydkdriver; 501 xy->sc_dk.dk_name = xy->sc_dev.dv_xname; 502 503 xy->state = XY_DRIVE_UNKNOWN; /* to start */ 504 xy->flags = 0; 505 xy->parent = xyc; 506 507 /* init queue of waiting bufs */ 508 bufq_alloc(&xy->xyq, "disksort", BUFQ_SORT_RAWBLOCK); 509 xy->xyrq = &xyc->reqs[xa->driveno]; 510 511 xy->xy_drive = xa->driveno; 512 xyc->sc_drives[xa->driveno] = xy; 513 514 /* Do init work common to attach and open. */ 515 xy_init(xy); 516 } 517 518 /* 519 * end of autoconfig functions 520 */ 521 522 /* 523 * Initialize a disk. This can be called from both autoconf and 524 * also from xyopen/xystrategy. 525 */ 526 static void 527 xy_init(struct xy_softc *xy) 528 { 529 struct xyc_softc *xyc; 530 struct dkbad *dkb; 531 void *dvmabuf; 532 int err, spt, mb, blk, lcv, fullmode, newstate; 533 534 xyc = xy->parent; 535 xy->state = XY_DRIVE_ATTACHING; 536 newstate = XY_DRIVE_UNKNOWN; 537 fullmode = (cold) ? XY_SUB_POLL : XY_SUB_WAIT; 538 dvmabuf = dvma_malloc(XYFM_BPS); 539 540 /* first try and reset the drive */ 541 542 err = xyc_cmd(xyc, XYCMD_RST, 0, xy->xy_drive, 0, 0, 0, fullmode); 543 XYC_DONE(xyc, err); 544 if (err == XY_ERR_DNRY) { 545 printf("%s: drive %d: off-line\n", 546 xy->sc_dev.dv_xname, xy->xy_drive); 547 goto done; 548 } 549 if (err) { 550 printf("%s: ERROR 0x%02x (%s)\n", 551 xy->sc_dev.dv_xname, err, xyc_e2str(err)); 552 goto done; 553 } 554 printf("%s: drive %d ready", 555 xy->sc_dev.dv_xname, xy->xy_drive); 556 557 /* 558 * now set drive parameters (to semi-bogus values) so we can read the 559 * disk label. 560 */ 561 xy->pcyl = xy->ncyl = 1; 562 xy->acyl = 0; 563 xy->nhead = 1; 564 xy->nsect = 1; 565 xy->sectpercyl = 1; 566 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 567 xy->dkb.bt_bad[lcv].bt_cyl = 568 xy->dkb.bt_bad[lcv].bt_trksec = 0xffff; 569 570 /* read disk label */ 571 for (xy->drive_type = 0 ; xy->drive_type <= XYC_MAXDT ; 572 xy->drive_type++) { 573 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, 0, 1, 574 dvmabuf, fullmode); 575 XYC_DONE(xyc, err); 576 if (err == XY_ERR_AOK) break; 577 } 578 579 if (err != XY_ERR_AOK) { 580 printf("%s: reading disk label failed: %s\n", 581 xy->sc_dev.dv_xname, xyc_e2str(err)); 582 goto done; 583 } 584 printf("%s: drive type %d\n", 585 xy->sc_dev.dv_xname, xy->drive_type); 586 587 newstate = XY_DRIVE_NOLABEL; 588 589 xy->hw_spt = spt = 0; /* XXX needed ? 
*/ 590 /* Attach the disk: must be before getdisklabel to malloc label */ 591 disk_attach(&xy->sc_dk); 592 593 if (xygetdisklabel(xy, dvmabuf) != XY_ERR_AOK) 594 goto done; 595 596 /* inform the user of what is up */ 597 printf("%s: <%s>, pcyl %d\n", 598 xy->sc_dev.dv_xname, 599 (char *)dvmabuf, xy->pcyl); 600 mb = xy->ncyl * (xy->nhead * xy->nsect) / (1048576 / XYFM_BPS); 601 printf("%s: %dMB, %d cyl, %d head, %d sec\n", 602 xy->sc_dev.dv_xname, mb, 603 xy->ncyl, xy->nhead, xy->nsect); 604 605 /* 606 * 450/451 stupidity: the drive type is encoded into the format 607 * of the disk. the drive type in the IOPB must match the drive 608 * type in the format, or you will not be able to do I/O to the 609 * disk (you get header not found errors). if you have two drives 610 * of different sizes that have the same drive type in their 611 * formatting then you are out of luck. 612 * 613 * this problem was corrected in the 753/7053. 614 */ 615 616 for (lcv = 0 ; lcv < XYC_MAXDEV ; lcv++) { 617 struct xy_softc *oxy; 618 619 oxy = xyc->sc_drives[lcv]; 620 if (oxy == NULL || oxy == xy) continue; 621 if (oxy->drive_type != xy->drive_type) continue; 622 if (xy->nsect != oxy->nsect || xy->pcyl != oxy->pcyl || 623 xy->nhead != oxy->nhead) { 624 printf("%s: %s and %s must be the same size!\n", 625 xyc->sc_dev.dv_xname, 626 xy ->sc_dev.dv_xname, 627 oxy->sc_dev.dv_xname); 628 panic("xy drive size mismatch"); 629 } 630 } 631 632 633 /* now set the real drive parameters! */ 634 blk = (xy->nsect - 1) + 635 ((xy->nhead - 1) * xy->nsect) + 636 ((xy->pcyl - 1) * xy->nsect * xy->nhead); 637 err = xyc_cmd(xyc, XYCMD_SDS, 0, xy->xy_drive, blk, 0, 0, fullmode); 638 XYC_DONE(xyc, err); 639 if (err) { 640 printf("%s: write drive size failed: %s\n", 641 xy->sc_dev.dv_xname, xyc_e2str(err)); 642 goto done; 643 } 644 newstate = XY_DRIVE_ONLINE; 645 646 /* 647 * read bad144 table. this table resides on the first sector of the 648 * last track of the disk (i.e. second cyl of "acyl" area). 
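 * in absolute sectors that is
 *	blk = (ncyl + acyl - 1) * nhead * nsect    (last cylinder)
 *	    + (nhead - 1) * nsect                  (last track)
 * which is what the computation below produces.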
649 */ 650 blk = (xy->ncyl + xy->acyl - 1) * (xy->nhead * xy->nsect) + 651 /* last cyl */ 652 (xy->nhead - 1) * xy->nsect; /* last head */ 653 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, blk, 1, 654 dvmabuf, fullmode); 655 XYC_DONE(xyc, err); 656 if (err) { 657 printf("%s: reading bad144 failed: %s\n", 658 xy->sc_dev.dv_xname, xyc_e2str(err)); 659 goto done; 660 } 661 662 /* check dkbad for sanity */ 663 dkb = (struct dkbad *) dvmabuf; 664 for (lcv = 0; lcv < 126; lcv++) { 665 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 666 dkb->bt_bad[lcv].bt_cyl == 0) && 667 dkb->bt_bad[lcv].bt_trksec == 0xffff) 668 continue; /* blank */ 669 if (dkb->bt_bad[lcv].bt_cyl >= xy->ncyl) 670 break; 671 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xy->nhead) 672 break; 673 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xy->nsect) 674 break; 675 } 676 if (lcv != 126) { 677 printf("%s: warning: invalid bad144 sector!\n", 678 xy->sc_dev.dv_xname); 679 } else { 680 memcpy(&xy->dkb, dvmabuf, XYFM_BPS); 681 } 682 683 done: 684 xy->state = newstate; 685 dvma_free(dvmabuf, XYFM_BPS); 686 } 687 688 /* 689 * { b , c } d e v s w f u n c t i o n s 690 */ 691 692 /* 693 * xyclose: close device 694 */ 695 int 696 xyclose(dev_t dev, int flag, int fmt, struct lwp *l) 697 { 698 struct xy_softc *xy = xy_cd.cd_devs[DISKUNIT(dev)]; 699 int part = DISKPART(dev); 700 701 /* clear mask bits */ 702 703 switch (fmt) { 704 case S_IFCHR: 705 xy->sc_dk.dk_copenmask &= ~(1 << part); 706 break; 707 case S_IFBLK: 708 xy->sc_dk.dk_bopenmask &= ~(1 << part); 709 break; 710 } 711 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 712 713 return 0; 714 } 715 716 /* 717 * xydump: crash dump system 718 */ 719 int 720 xydump(dev_t dev, daddr_t blkno, caddr_t va, size_t sz) 721 { 722 int unit, part; 723 struct xy_softc *xy; 724 725 unit = DISKUNIT(dev); 726 if (unit >= xy_cd.cd_ndevs) 727 return ENXIO; 728 part = DISKPART(dev); 729 730 xy = xy_cd.cd_devs[unit]; 731 732 printf("%s%c: crash dump not supported (yet)\n", xy->sc_dev.dv_xname, 733 'a' + part); 734 735 return ENXIO; 736 737 /* outline: globals: "dumplo" == sector number of partition to start 738 * dump at (convert to physical sector with partition table) 739 * "dumpsize" == size of dump in clicks "physmem" == size of physical 740 * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 741 * physmem) 742 * 743 * dump a copy of physical memory to the dump device starting at sector 744 * "dumplo" in the swap partition (make sure > 0). map in pages as 745 * we go. use polled I/O. 746 * 747 * XXX how to handle NON_CONTIG? 748 */ 749 } 750 751 /* 752 * xyioctl: ioctls on XY drives. based on ioctl's of other netbsd disks. 
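 * the driver-specific DIOSXDCMD pass-through is restricted to the
 * superuser (checked with kauth_authorize_generic) and is handed off
 * to xyc_ioctlcmd() near the bottom of this file.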
753 */ 754 int 755 xyioctl(dev_t dev, u_long command, caddr_t addr, int flag, struct lwp *l) 756 { 757 struct xy_softc *xy; 758 struct xd_iocmd *xio; 759 int error, s, unit; 760 761 unit = DISKUNIT(dev); 762 763 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == NULL) 764 return (ENXIO); 765 766 /* switch on ioctl type */ 767 768 switch (command) { 769 case DIOCSBAD: /* set bad144 info */ 770 if ((flag & FWRITE) == 0) 771 return EBADF; 772 s = splbio(); 773 memcpy(&xy->dkb, addr, sizeof(xy->dkb)); 774 splx(s); 775 return 0; 776 777 case DIOCGDINFO: /* get disk label */ 778 memcpy(addr, xy->sc_dk.dk_label, sizeof(struct disklabel)); 779 return 0; 780 781 case DIOCGPART: /* get partition info */ 782 ((struct partinfo *) addr)->disklab = xy->sc_dk.dk_label; 783 ((struct partinfo *) addr)->part = 784 &xy->sc_dk.dk_label->d_partitions[DISKPART(dev)]; 785 return 0; 786 787 case DIOCSDINFO: /* set disk label */ 788 if ((flag & FWRITE) == 0) 789 return EBADF; 790 error = setdisklabel(xy->sc_dk.dk_label, 791 (struct disklabel *) addr, /* xy->sc_dk.dk_openmask : */ 0, 792 xy->sc_dk.dk_cpulabel); 793 if (error == 0) { 794 if (xy->state == XY_DRIVE_NOLABEL) 795 xy->state = XY_DRIVE_ONLINE; 796 } 797 return error; 798 799 case DIOCWLABEL: /* change write status of disk label */ 800 if ((flag & FWRITE) == 0) 801 return EBADF; 802 if (*(int *) addr) 803 xy->flags |= XY_WLABEL; 804 else 805 xy->flags &= ~XY_WLABEL; 806 return 0; 807 808 case DIOCWDINFO: /* write disk label */ 809 if ((flag & FWRITE) == 0) 810 return EBADF; 811 error = setdisklabel(xy->sc_dk.dk_label, 812 (struct disklabel *) addr, /* xy->sc_dk.dk_openmask : */ 0, 813 xy->sc_dk.dk_cpulabel); 814 if (error == 0) { 815 if (xy->state == XY_DRIVE_NOLABEL) 816 xy->state = XY_DRIVE_ONLINE; 817 818 /* Simulate opening partition 0 so write succeeds. */ 819 xy->sc_dk.dk_openmask |= (1 << 0); 820 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART), 821 xystrategy, xy->sc_dk.dk_label, 822 xy->sc_dk.dk_cpulabel); 823 xy->sc_dk.dk_openmask = 824 xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 825 } 826 return error; 827 828 case DIOSXDCMD: 829 xio = (struct xd_iocmd *) addr; 830 if ((error = kauth_authorize_generic(l->l_proc->p_cred, 831 KAUTH_GENERIC_ISSUSER, 832 &l->l_proc->p_acflag)) != 0) 833 return (error); 834 return (xyc_ioctlcmd(xy, dev, xio)); 835 836 default: 837 return ENOTTY; 838 } 839 } 840 841 /* 842 * xyopen: open drive 843 */ 844 int 845 xyopen(dev_t dev, int flag, int fmt, struct lwp *l) 846 { 847 int err, unit, part, s; 848 struct xy_softc *xy; 849 850 /* first, could it be a valid target? */ 851 unit = DISKUNIT(dev); 852 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == NULL) 853 return (ENXIO); 854 part = DISKPART(dev); 855 err = 0; 856 857 /* 858 * If some other processing is doing init, sleep. 859 */ 860 s = splbio(); 861 while (xy->state == XY_DRIVE_ATTACHING) { 862 if (tsleep(&xy->state, PRIBIO, "xyopen", 0)) { 863 err = EINTR; 864 goto done; 865 } 866 } 867 /* Do we need to init the drive? */ 868 if (xy->state == XY_DRIVE_UNKNOWN) { 869 xy_init(xy); 870 wakeup(&xy->state); 871 } 872 /* Was the init successful? 
*/ 873 if (xy->state == XY_DRIVE_UNKNOWN) { 874 err = EIO; 875 goto done; 876 } 877 878 /* check for partition */ 879 if (part != RAW_PART && 880 (part >= xy->sc_dk.dk_label->d_npartitions || 881 xy->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) { 882 err = ENXIO; 883 goto done; 884 } 885 886 /* set open masks */ 887 switch (fmt) { 888 case S_IFCHR: 889 xy->sc_dk.dk_copenmask |= (1 << part); 890 break; 891 case S_IFBLK: 892 xy->sc_dk.dk_bopenmask |= (1 << part); 893 break; 894 } 895 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 896 897 done: 898 splx(s); 899 return (err); 900 } 901 902 int 903 xyread(dev_t dev, struct uio *uio, int flags) 904 { 905 906 return (physio(xystrategy, NULL, dev, B_READ, minphys, uio)); 907 } 908 909 int 910 xywrite(dev_t dev, struct uio *uio, int flags) 911 { 912 913 return (physio(xystrategy, NULL, dev, B_WRITE, minphys, uio)); 914 } 915 916 917 /* 918 * xysize: return size of a partition for a dump 919 */ 920 921 int 922 xysize(dev_t dev) 923 { 924 struct xy_softc *xysc; 925 int unit, part, size, omask; 926 927 /* valid unit? */ 928 unit = DISKUNIT(dev); 929 if (unit >= xy_cd.cd_ndevs || (xysc = xy_cd.cd_devs[unit]) == NULL) 930 return (-1); 931 932 part = DISKPART(dev); 933 omask = xysc->sc_dk.dk_openmask & (1 << part); 934 935 if (omask == 0 && xyopen(dev, 0, S_IFBLK, NULL) != 0) 936 return (-1); 937 938 /* do it */ 939 if (xysc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP) 940 size = -1; /* only give valid size for swap partitions */ 941 else 942 size = xysc->sc_dk.dk_label->d_partitions[part].p_size * 943 (xysc->sc_dk.dk_label->d_secsize / DEV_BSIZE); 944 if (omask == 0 && xyclose(dev, 0, S_IFBLK, NULL) != 0) 945 return (-1); 946 return (size); 947 } 948 949 /* 950 * xystrategy: buffering system interface to xy. 951 */ 952 void 953 xystrategy(struct buf *bp) 954 { 955 struct xy_softc *xy; 956 int s, unit; 957 struct disklabel *lp; 958 daddr_t blkno; 959 960 unit = DISKUNIT(bp->b_dev); 961 962 /* check for live device */ 963 964 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == 0 || 965 bp->b_blkno < 0 || 966 (bp->b_bcount % xy->sc_dk.dk_label->d_secsize) != 0) { 967 bp->b_error = EINVAL; 968 goto bad; 969 } 970 971 /* There should always be an open first. */ 972 if (xy->state == XY_DRIVE_UNKNOWN) { 973 bp->b_error = EIO; 974 goto bad; 975 } 976 if (xy->state != XY_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) { 977 /* no I/O to unlabeled disks, unless raw partition */ 978 bp->b_error = EIO; 979 goto bad; 980 } 981 /* short circuit zero length request */ 982 983 if (bp->b_bcount == 0) 984 goto done; 985 986 /* check bounds with label (disksubr.c). Determine the size of the 987 * transfer, and make sure it is within the boundaries of the 988 * partition. Adjust transfer if needed, and signal errors or early 989 * completion. */ 990 991 lp = xy->sc_dk.dk_label; 992 993 if (bounds_check_with_label(&xy->sc_dk, bp, 994 (xy->flags & XY_WLABEL) != 0) <= 0) 995 goto done; 996 997 /* 998 * Now convert the block number to absolute and put it in 999 * terms of the device's logical block size. 1000 */ 1001 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE); 1002 if (DISKPART(bp->b_dev) != RAW_PART) 1003 blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset; 1004 1005 bp->b_rawblkno = blkno; 1006 1007 /* 1008 * now we know we have a valid buf structure that we need to do I/O 1009 * on. 
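 * the buf is queued (sorted by b_rawblkno; see the bufq_alloc() call
 * in xyattach) and xyc_start() is poked.  completion happens later in
 * xycintr()/xyc_remove_iorq(), which calls biodone().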
1010 */ 1011 1012 s = splbio(); /* protect the queues */ 1013 1014 BUFQ_PUT(xy->xyq, bp); /* XXX disksort_cylinder */ 1015 1016 /* start 'em up */ 1017 1018 xyc_start(xy->parent, NULL); 1019 1020 /* done! */ 1021 1022 splx(s); 1023 return; 1024 1025 bad: /* tells upper layers we have an error */ 1026 bp->b_flags |= B_ERROR; 1027 done: /* tells upper layers we are done with this 1028 * buf */ 1029 bp->b_resid = bp->b_bcount; 1030 biodone(bp); 1031 } 1032 /* 1033 * end of {b,c}devsw functions 1034 */ 1035 1036 /* 1037 * i n t e r r u p t f u n c t i o n 1038 * 1039 * xycintr: hardware interrupt. 1040 */ 1041 int 1042 xycintr(void *v) 1043 { 1044 struct xyc_softc *xycsc = v; 1045 1046 /* kick the event counter */ 1047 xycsc->sc_intrcnt.ev_count++; 1048 1049 /* remove as many done IOPBs as possible */ 1050 xyc_remove_iorq(xycsc); 1051 1052 /* start any iorq's already waiting */ 1053 xyc_start(xycsc, NULL); 1054 1055 return (1); 1056 } 1057 /* 1058 * end of interrupt function 1059 */ 1060 1061 /* 1062 * i n t e r n a l f u n c t i o n s 1063 */ 1064 1065 /* 1066 * xyc_rqinit: fill out the fields of an I/O request 1067 */ 1068 1069 inline void 1070 xyc_rqinit(struct xy_iorq *rq, struct xyc_softc *xyc, struct xy_softc *xy, 1071 int md, u_long blk, int cnt, caddr_t db, struct buf *bp) 1072 { 1073 rq->xyc = xyc; 1074 rq->xy = xy; 1075 rq->ttl = XYC_MAXTTL + 10; 1076 rq->mode = md; 1077 rq->tries = rq->errno = rq->lasterror = 0; 1078 rq->blockno = blk; 1079 rq->sectcnt = cnt; 1080 rq->dbuf = rq->dbufbase = db; 1081 rq->buf = bp; 1082 } 1083 1084 /* 1085 * xyc_rqtopb: load up an IOPB based on an iorq 1086 */ 1087 1088 void 1089 xyc_rqtopb(struct xy_iorq *iorq, struct xy_iopb *iopb, int cmd, int subfun) 1090 { 1091 u_long block, dp; 1092 1093 /* normal IOPB case, standard stuff */ 1094 1095 /* chain bit handled later */ 1096 iopb->ien = (XY_STATE(iorq->mode) == XY_SUB_POLL) ? 0 : 1; 1097 iopb->com = cmd; 1098 iopb->errno = 0; 1099 iopb->errs = 0; 1100 iopb->done = 0; 1101 if (iorq->xy) { 1102 iopb->unit = iorq->xy->xy_drive; 1103 iopb->dt = iorq->xy->drive_type; 1104 } else { 1105 iopb->unit = 0; 1106 iopb->dt = 0; 1107 } 1108 block = iorq->blockno; 1109 if (iorq->xy == NULL || block == 0) { 1110 iopb->sect = iopb->head = iopb->cyl = 0; 1111 } else { 1112 iopb->sect = block % iorq->xy->nsect; 1113 block = block / iorq->xy->nsect; 1114 iopb->head = block % iorq->xy->nhead; 1115 block = block / iorq->xy->nhead; 1116 iopb->cyl = block; 1117 } 1118 iopb->scnt = iorq->sectcnt; 1119 if (iorq->dbuf == NULL) { 1120 iopb->dataa = 0; 1121 iopb->datar = 0; 1122 } else { 1123 dp = dvma_kvtopa(iorq->dbuf, iorq->xyc->bustype); 1124 iopb->dataa = (dp & 0xffff); 1125 iopb->datar = ((dp & 0xff0000) >> 16); 1126 } 1127 iopb->subfn = subfun; 1128 } 1129 1130 1131 /* 1132 * xyc_unbusy: wait for the xyc to go unbusy, or timeout. 1133 */ 1134 1135 int 1136 xyc_unbusy(struct xyc *xyc, int del) 1137 { 1138 while (del-- > 0) { 1139 if ((xyc->xyc_csr & XYC_GBSY) == 0) 1140 break; 1141 DELAY(1); 1142 } 1143 return(del == 0 ? XY_ERR_FAIL : XY_ERR_AOK); 1144 } 1145 1146 /* 1147 * xyc_cmd: front end for POLL'd and WAIT'd commands. Returns 0 or error. 1148 * note that NORM requests are handled separately. 
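 * typical usage (cf. xycattach/xy_init above):
 *	err = xyc_cmd(xyc, XYCMD_ST, 0, 0, 0, 0, 0, XY_SUB_POLL);
 *	XYC_DONE(xyc, err);
 * i.e. an XY_ERR_AOK return only means the command was submitted; the
 * real completion status sits in the control iorq and is picked up
 * (and the iorq freed) by XYC_DONE().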
1149 */ 1150 int 1151 xyc_cmd(struct xyc_softc *xycsc, int cmd, int subfn, int unit, int block, 1152 int scnt, char *dptr, int fullmode) 1153 { 1154 struct xy_iorq *iorq = xycsc->ciorq; 1155 struct xy_iopb *iopb = xycsc->ciopb; 1156 int submode = XY_STATE(fullmode); 1157 1158 /* 1159 * is someone else using the control iopq wait for it if we can 1160 */ 1161 start: 1162 if (submode == XY_SUB_WAIT && XY_STATE(iorq->mode) != XY_SUB_FREE) { 1163 if (tsleep(iorq, PRIBIO, "xyc_cmd", 0)) 1164 return(XY_ERR_FAIL); 1165 goto start; 1166 } 1167 1168 if (XY_STATE(iorq->mode) != XY_SUB_FREE) { 1169 DELAY(1000000); /* XY_SUB_POLL: steal the iorq */ 1170 iorq->mode = XY_SUB_FREE; 1171 printf("%s: stole control iopb\n", xycsc->sc_dev.dv_xname); 1172 } 1173 1174 /* init iorq/iopb */ 1175 1176 xyc_rqinit(iorq, xycsc, 1177 (unit == XYC_NOUNIT) ? NULL : xycsc->sc_drives[unit], 1178 fullmode, block, scnt, dptr, NULL); 1179 1180 /* load IOPB from iorq */ 1181 1182 xyc_rqtopb(iorq, iopb, cmd, subfn); 1183 1184 /* submit it for processing */ 1185 1186 xyc_submit_iorq(xycsc, iorq, fullmode); /* error code will be in iorq */ 1187 1188 return(XY_ERR_AOK); 1189 } 1190 1191 /* 1192 * xyc_startbuf 1193 * start a buffer for running 1194 */ 1195 1196 int 1197 xyc_startbuf(struct xyc_softc *xycsc, struct xy_softc *xysc, struct buf *bp) 1198 { 1199 int partno; 1200 struct xy_iorq *iorq; 1201 struct xy_iopb *iopb; 1202 u_long block; 1203 caddr_t dbuf; 1204 1205 iorq = xysc->xyrq; 1206 iopb = iorq->iopb; 1207 1208 /* get buf */ 1209 1210 if (bp == NULL) 1211 panic("xyc_startbuf null buf"); 1212 1213 partno = DISKPART(bp->b_dev); 1214 #ifdef XYC_DEBUG 1215 printf("xyc_startbuf: %s%c: %s block %d\n", xysc->sc_dev.dv_xname, 1216 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno); 1217 printf("xyc_startbuf: b_bcount %d, b_data 0x%x\n", 1218 bp->b_bcount, bp->b_data); 1219 #endif 1220 1221 /* 1222 * load request. 1223 * 1224 * also, note that there are two kinds of buf structures, those with 1225 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is 1226 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users' 1227 * buffer which has already been mapped into DVMA space. (Not on sun3) 1228 * However, if B_PHYS is not set, then the buffer is a normal system 1229 * buffer which does *not* live in DVMA space. In that case we call 1230 * dvma_mapin to map it into DVMA space so we can do the DMA to it. 1231 * 1232 * in cases where we do a dvma_mapin, note that iorq points to the buffer 1233 * as mapped into DVMA space, where as the bp->b_data points to its 1234 * non-DVMA mapping. 1235 * 1236 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped 1237 * into dvma space, only that it was remapped into the kernel. 1238 * We ALWAYS have to remap the kernel buf into DVMA space. 1239 * (It is done inexpensively, using whole segments!) 1240 */ 1241 1242 block = bp->b_rawblkno; 1243 1244 dbuf = dvma_mapin(bp->b_data, bp->b_bcount, 0); 1245 if (dbuf == NULL) { /* out of DVMA space */ 1246 printf("%s: warning: out of DVMA space\n", 1247 xycsc->sc_dev.dv_xname); 1248 return (XY_ERR_FAIL); /* XXX: need some sort of 1249 * call-back scheme here? */ 1250 } 1251 1252 /* init iorq and load iopb from it */ 1253 1254 xyc_rqinit(iorq, xycsc, xysc, XY_SUB_NORM | XY_MODE_VERBO, block, 1255 bp->b_bcount / XYFM_BPS, dbuf, bp); 1256 1257 xyc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XYCMD_RD : XYCMD_WR, 0); 1258 1259 /* Instrumentation. 
 */
	disk_busy(&xysc->sc_dk);

	return (XY_ERR_AOK);
}


/*
 * xyc_submit_iorq: submit an iorq for processing.  returns XY_ERR_AOK
 * if ok.  if it fails it returns an error code.  type is XY_SUB_*.
 *
 * note: caller frees iorq in all cases except NORM
 *
 * return value:
 *   NORM: XY_AOK (req pending), XY_FAIL (couldn't submit request)
 *   WAIT: XY_AOK (success), <error-code> (failed)
 *   POLL: <same as WAIT>
 *   NOQ : <same as NORM>
 *
 * there are three sources for i/o requests:
 * [1] xystrategy: normal block I/O, using "struct buf" system.
 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
 * [3] open/ioctl: these are I/O requests done in the context of a process,
 *     and the process should block until they are done.
 *
 * software state is stored in the iorq structure.  each iorq has an
 * iopb structure.  the hardware understands the iopb structure.
 * every command must go through an iopb.  a 450 handles one iopb at a
 * time, whereas a 451 can take them in chains.  [the 450 claims it
 * can handle chains, but it appears to be buggy...]  iopbs are allocated
 * in DVMA space at boot up time.  each disk gets one iopb, and the
 * controller gets one (for POLL and WAIT commands).  what happens if
 * the iopb is busy?  for i/o type [1], the buffers are queued at the
 * "buff" layer and picked up later by the interrupt routine.  for case
 * [2] we can only be blocked if there is a WAIT type I/O request being
 * run.  since this can only happen when we are crashing, we wait a sec
 * and then steal the IOPB.  for case [3] the process can sleep
 * on the iorq free list until some iopbs are available.
 */

int
xyc_submit_iorq(struct xyc_softc *xycsc, struct xy_iorq *iorq, int type)
{
	struct xy_iopb *iopb;
	u_long iopbaddr;

#ifdef XYC_DEBUG
	printf("xyc_submit_iorq(%s, addr=0x%x, type=%d)\n",
	    xycsc->sc_dev.dv_xname, iorq, type);
#endif

	/* first check and see if controller is busy */
	if ((xycsc->xyc->xyc_csr & XYC_GBSY) != 0) {
#ifdef XYC_DEBUG
		printf("xyc_submit_iorq: XYC not ready (BUSY)\n");
#endif
		if (type == XY_SUB_NOQ)
			return (XY_ERR_FAIL);	/* failed */
		switch (type) {
		case XY_SUB_NORM:
			return XY_ERR_AOK;	/* success */
		case XY_SUB_WAIT:
			while (iorq->iopb->done == 0) {
				(void) tsleep(iorq, PRIBIO, "xyciorq", 0);
			}
			return (iorq->errno);
		case XY_SUB_POLL:		/* steal controller */
			iopbaddr = xycsc->xyc->xyc_rsetup;	/* RESET */
			if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL)
				panic("xyc_submit_iorq: stuck xyc");
			printf("%s: stole controller\n",
			    xycsc->sc_dev.dv_xname);
			break;
		default:
			panic("xyc_submit_iorq adding");
		}
	}

	iopb = xyc_chain(xycsc, iorq);	/* build chain */
	if (iopb == NULL) { /* nothing doing?
*/ 1339 if (type == XY_SUB_NORM || type == XY_SUB_NOQ) 1340 return(XY_ERR_AOK); 1341 panic("xyc_submit_iorq: xyc_chain failed!"); 1342 } 1343 iopbaddr = dvma_kvtopa(iopb, xycsc->bustype); 1344 1345 XYC_GO(xycsc->xyc, iopbaddr); 1346 1347 /* command now running, wrap it up */ 1348 switch (type) { 1349 case XY_SUB_NORM: 1350 case XY_SUB_NOQ: 1351 return (XY_ERR_AOK); /* success */ 1352 case XY_SUB_WAIT: 1353 while (iorq->iopb->done == 0) { 1354 (void) tsleep(iorq, PRIBIO, "xyciorq", 0); 1355 } 1356 return (iorq->errno); 1357 case XY_SUB_POLL: 1358 return (xyc_piodriver(xycsc, iorq)); 1359 default: 1360 panic("xyc_submit_iorq wrap up"); 1361 } 1362 panic("xyc_submit_iorq"); 1363 return 0; /* not reached */ 1364 } 1365 1366 1367 /* 1368 * xyc_chain: build a chain. return dvma address of first element in 1369 * the chain. iorq != NULL: means we only want that item on the chain. 1370 */ 1371 1372 struct xy_iopb * 1373 xyc_chain(struct xyc_softc *xycsc, struct xy_iorq *iorq) 1374 { 1375 int togo, chain, hand; 1376 struct xy_iopb *iopb, *prev_iopb; 1377 1378 memset(xycsc->xy_chain, 0, sizeof(xycsc->xy_chain)); 1379 1380 /* 1381 * promote control IOPB to the top 1382 */ 1383 if (iorq == NULL) { 1384 if ((XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_POLL || 1385 XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_WAIT) && 1386 xycsc->iopbase[XYC_CTLIOPB].done == 0) 1387 iorq = &xycsc->reqs[XYC_CTLIOPB]; 1388 } 1389 1390 /* 1391 * special case: if iorq != NULL then we have a POLL or WAIT request. 1392 * we let these take priority and do them first. 1393 */ 1394 if (iorq) { 1395 xycsc->xy_chain[0] = iorq; 1396 iorq->iopb->chen = 0; 1397 return(iorq->iopb); 1398 } 1399 1400 /* 1401 * NORM case: do round robin and maybe chain (if allowed and possible) 1402 */ 1403 1404 chain = 0; 1405 hand = xycsc->xy_hand; 1406 xycsc->xy_hand = (xycsc->xy_hand + 1) % XYC_MAXIOPB; 1407 1408 for (togo = XYC_MAXIOPB ; 1409 togo > 0 ; 1410 togo--, hand = (hand + 1) % XYC_MAXIOPB) 1411 { 1412 1413 if (XY_STATE(xycsc->reqs[hand].mode) != XY_SUB_NORM || 1414 xycsc->iopbase[hand].done) 1415 continue; /* not ready-for-i/o */ 1416 1417 xycsc->xy_chain[chain] = &xycsc->reqs[hand]; 1418 iopb = xycsc->xy_chain[chain]->iopb; 1419 iopb->chen = 0; 1420 if (chain != 0) { /* adding a link to a chain? */ 1421 prev_iopb = xycsc->xy_chain[chain-1]->iopb; 1422 prev_iopb->chen = 1; 1423 prev_iopb->nxtiopb = 0xffff & 1424 dvma_kvtopa(iopb, xycsc->bustype); 1425 } else { /* head of chain */ 1426 iorq = xycsc->xy_chain[chain]; 1427 } 1428 chain++; 1429 if (xycsc->no_ols) break; /* quit if chaining dis-allowed */ 1430 } 1431 return(iorq ? iorq->iopb : NULL); 1432 } 1433 1434 /* 1435 * xyc_piodriver 1436 * 1437 * programmed i/o driver. this function takes over the computer 1438 * and drains off the polled i/o request. it returns the status of the iorq 1439 * the caller is interesting in. 
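 * (it spins with xyc_unbusy(), reaps finished iopbs via
 * xyc_remove_iorq(), and resets the controller if no progress is made;
 * it is used for autoconfig-time and other polled requests.)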
1440 */ 1441 int 1442 xyc_piodriver(struct xyc_softc *xycsc, struct xy_iorq *iorq) 1443 { 1444 int nreset = 0; 1445 int retval = 0; 1446 u_long res; 1447 1448 #ifdef XYC_DEBUG 1449 printf("xyc_piodriver(%s, 0x%x)\n", xycsc->sc_dev.dv_xname, iorq); 1450 #endif 1451 1452 while (iorq->iopb->done == 0) { 1453 1454 res = xyc_unbusy(xycsc->xyc, XYC_MAXTIME); 1455 1456 /* we expect some progress soon */ 1457 if (res == XY_ERR_FAIL && nreset >= 2) { 1458 xyc_reset(xycsc, 0, XY_RSET_ALL, XY_ERR_FAIL, 0); 1459 #ifdef XYC_DEBUG 1460 printf("xyc_piodriver: timeout\n"); 1461 #endif 1462 return (XY_ERR_FAIL); 1463 } 1464 if (res == XY_ERR_FAIL) { 1465 if (xyc_reset(xycsc, 0, 1466 (nreset++ == 0) ? XY_RSET_NONE : iorq, 1467 XY_ERR_FAIL, 1468 0) == XY_ERR_FAIL) 1469 return (XY_ERR_FAIL); /* flushes all but POLL 1470 * requests, resets */ 1471 continue; 1472 } 1473 1474 xyc_remove_iorq(xycsc); /* may resubmit request */ 1475 1476 if (iorq->iopb->done == 0) 1477 xyc_start(xycsc, iorq); 1478 } 1479 1480 /* get return value */ 1481 1482 retval = iorq->errno; 1483 1484 #ifdef XYC_DEBUG 1485 printf("xyc_piodriver: done, retval = 0x%x (%s)\n", 1486 iorq->errno, xyc_e2str(iorq->errno)); 1487 #endif 1488 1489 /* start up any bufs that have queued */ 1490 1491 xyc_start(xycsc, NULL); 1492 1493 return (retval); 1494 } 1495 1496 /* 1497 * xyc_xyreset: reset one drive. NOTE: assumes xyc was just reset. 1498 * we steal iopb[XYC_CTLIOPB] for this, but we put it back when we are done. 1499 */ 1500 void 1501 xyc_xyreset(struct xyc_softc *xycsc, struct xy_softc *xysc) 1502 { 1503 struct xy_iopb tmpiopb; 1504 u_long addr; 1505 int del; 1506 memcpy(&tmpiopb, xycsc->ciopb, sizeof(tmpiopb)); 1507 xycsc->ciopb->chen = xycsc->ciopb->done = xycsc->ciopb->errs = 0; 1508 xycsc->ciopb->ien = 0; 1509 xycsc->ciopb->com = XYCMD_RST; 1510 xycsc->ciopb->unit = xysc->xy_drive; 1511 addr = dvma_kvtopa(xycsc->ciopb, xycsc->bustype); 1512 1513 XYC_GO(xycsc->xyc, addr); 1514 1515 del = XYC_RESETUSEC; 1516 while (del > 0) { 1517 if ((xycsc->xyc->xyc_csr & XYC_GBSY) == 0) break; 1518 DELAY(1); 1519 del--; 1520 } 1521 1522 if (del <= 0 || xycsc->ciopb->errs) { 1523 printf("%s: off-line: %s\n", xycsc->sc_dev.dv_xname, 1524 xyc_e2str(xycsc->ciopb->errno)); 1525 del = xycsc->xyc->xyc_rsetup; 1526 if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL) 1527 panic("xyc_reset"); 1528 } else { 1529 xycsc->xyc->xyc_csr = XYC_IPND; /* clear IPND */ 1530 } 1531 memcpy(xycsc->ciopb, &tmpiopb, sizeof(tmpiopb)); 1532 } 1533 1534 1535 /* 1536 * xyc_reset: reset everything: requests are marked as errors except 1537 * a polled request (which is resubmitted) 1538 */ 1539 int 1540 xyc_reset(struct xyc_softc *xycsc, int quiet, struct xy_iorq *blastmode, 1541 int error, struct xy_softc *xysc) 1542 { 1543 int del = 0, lcv, retval = XY_ERR_AOK; 1544 struct xy_iorq *iorq; 1545 1546 /* soft reset hardware */ 1547 1548 if (!quiet) 1549 printf("%s: soft reset\n", xycsc->sc_dev.dv_xname); 1550 del = xycsc->xyc->xyc_rsetup; 1551 del = xyc_unbusy(xycsc->xyc, XYC_RESETUSEC); 1552 if (del == XY_ERR_FAIL) { 1553 blastmode = XY_RSET_ALL; /* dead, flush all requests */ 1554 retval = XY_ERR_FAIL; 1555 } 1556 if (xysc) 1557 xyc_xyreset(xycsc, xysc); 1558 1559 /* fix queues based on "blast-mode" */ 1560 1561 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 1562 iorq = &xycsc->reqs[lcv]; 1563 1564 if (XY_STATE(iorq->mode) != XY_SUB_POLL && 1565 XY_STATE(iorq->mode) != XY_SUB_WAIT && 1566 XY_STATE(iorq->mode) != XY_SUB_NORM) 1567 /* is it active? 
*/ 1568 continue; 1569 1570 if (blastmode == XY_RSET_ALL || 1571 blastmode != iorq) { 1572 /* failed */ 1573 iorq->errno = error; 1574 xycsc->iopbase[lcv].done = xycsc->iopbase[lcv].errs = 1; 1575 switch (XY_STATE(iorq->mode)) { 1576 case XY_SUB_NORM: 1577 iorq->buf->b_error = EIO; 1578 iorq->buf->b_flags |= B_ERROR; 1579 iorq->buf->b_resid = 1580 iorq->sectcnt * XYFM_BPS; 1581 /* Sun3: map/unmap regardless of B_PHYS */ 1582 dvma_mapout(iorq->dbufbase, 1583 iorq->buf->b_bcount); 1584 (void)BUFQ_GET(iorq->xy->xyq); 1585 disk_unbusy(&iorq->xy->sc_dk, 1586 (iorq->buf->b_bcount - iorq->buf->b_resid), 1587 (iorq->buf->b_flags & B_READ)); 1588 biodone(iorq->buf); 1589 iorq->mode = XY_SUB_FREE; 1590 break; 1591 case XY_SUB_WAIT: 1592 wakeup(iorq); 1593 case XY_SUB_POLL: 1594 iorq->mode = 1595 XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1596 break; 1597 } 1598 1599 } else { 1600 1601 /* resubmit, no need to do anything here */ 1602 } 1603 } 1604 1605 /* 1606 * now, if stuff is waiting, start it. 1607 * since we just reset it should go 1608 */ 1609 xyc_start(xycsc, NULL); 1610 1611 return (retval); 1612 } 1613 1614 /* 1615 * xyc_start: start waiting buffers 1616 */ 1617 1618 void 1619 xyc_start(struct xyc_softc *xycsc, struct xy_iorq *iorq) 1620 { 1621 int lcv; 1622 struct xy_softc *xy; 1623 1624 if (iorq == NULL) { 1625 for (lcv = 0; lcv < XYC_MAXDEV ; lcv++) { 1626 if ((xy = xycsc->sc_drives[lcv]) == NULL) continue; 1627 if (BUFQ_PEEK(xy->xyq) == NULL) continue; 1628 if (xy->xyrq->mode != XY_SUB_FREE) continue; 1629 xyc_startbuf(xycsc, xy, BUFQ_PEEK(xy->xyq)); 1630 } 1631 } 1632 xyc_submit_iorq(xycsc, iorq, XY_SUB_NOQ); 1633 } 1634 1635 /* 1636 * xyc_remove_iorq: remove "done" IOPB's. 1637 */ 1638 1639 int 1640 xyc_remove_iorq(struct xyc_softc *xycsc) 1641 { 1642 int errno, rq, comm, errs; 1643 struct xyc *xyc = xycsc->xyc; 1644 u_long addr; 1645 struct xy_iopb *iopb; 1646 struct xy_iorq *iorq; 1647 struct buf *bp; 1648 1649 if (xyc->xyc_csr & XYC_DERR) { 1650 /* 1651 * DOUBLE ERROR: should never happen under normal use. This 1652 * error is so bad, you can't even tell which IOPB is bad, so 1653 * we dump them all. 1654 */ 1655 errno = XY_ERR_DERR; 1656 printf("%s: DOUBLE ERROR!\n", xycsc->sc_dev.dv_xname); 1657 if (xyc_reset(xycsc, 0, XY_RSET_ALL, errno, 0) != XY_ERR_AOK) { 1658 printf("%s: soft reset failed!\n", 1659 xycsc->sc_dev.dv_xname); 1660 panic("xyc_remove_iorq: controller DEAD"); 1661 } 1662 return (XY_ERR_AOK); 1663 } 1664 1665 /* 1666 * get iopb that is done, loop down the chain 1667 */ 1668 1669 if (xyc->xyc_csr & XYC_ERR) { 1670 xyc->xyc_csr = XYC_ERR; /* clear error condition */ 1671 } 1672 if (xyc->xyc_csr & XYC_IPND) { 1673 xyc->xyc_csr = XYC_IPND; /* clear interrupt */ 1674 } 1675 1676 for (rq = 0; rq < XYC_MAXIOPB; rq++) { 1677 iorq = xycsc->xy_chain[rq]; 1678 if (iorq == NULL) break; /* done ! 
*/ 1679 if (iorq->mode == 0 || XY_STATE(iorq->mode) == XY_SUB_DONE) 1680 continue; /* free, or done */ 1681 iopb = iorq->iopb; 1682 if (iopb->done == 0) 1683 continue; /* not done yet */ 1684 1685 comm = iopb->com; 1686 errs = iopb->errs; 1687 1688 if (errs) 1689 iorq->errno = iopb->errno; 1690 else 1691 iorq->errno = 0; 1692 1693 /* handle non-fatal errors */ 1694 1695 if (errs && 1696 xyc_error(xycsc, iorq, iopb, comm) == XY_ERR_AOK) 1697 continue; /* AOK: we resubmitted it */ 1698 1699 1700 /* this iorq is now done (hasn't been restarted or anything) */ 1701 1702 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror) 1703 xyc_perror(iorq, iopb, 0); 1704 1705 /* now, if read/write check to make sure we got all the data 1706 * we needed. (this may not be the case if we got an error in 1707 * the middle of a multisector request). */ 1708 1709 if ((iorq->mode & XY_MODE_B144) != 0 && errs == 0 && 1710 (comm == XYCMD_RD || comm == XYCMD_WR)) { 1711 /* we just successfully processed a bad144 sector 1712 * note: if we are in bad 144 mode, the pointers have 1713 * been advanced already (see above) and are pointing 1714 * at the bad144 sector. to exit bad144 mode, we 1715 * must advance the pointers 1 sector and issue a new 1716 * request if there are still sectors left to process 1717 * 1718 */ 1719 XYC_ADVANCE(iorq, 1); /* advance 1 sector */ 1720 1721 /* exit b144 mode */ 1722 iorq->mode = iorq->mode & (~XY_MODE_B144); 1723 1724 if (iorq->sectcnt) { /* more to go! */ 1725 iorq->lasterror = iorq->errno = iopb->errno = 0; 1726 iopb->errs = iopb->done = 0; 1727 iorq->tries = 0; 1728 iopb->scnt = iorq->sectcnt; 1729 iopb->cyl = iorq->blockno / 1730 iorq->xy->sectpercyl; 1731 iopb->head = 1732 (iorq->blockno / iorq->xy->nhead) % 1733 iorq->xy->nhead; 1734 iopb->sect = iorq->blockno % XYFM_BPS; 1735 addr = dvma_kvtopa(iorq->dbuf, xycsc->bustype); 1736 iopb->dataa = (addr & 0xffff); 1737 iopb->datar = ((addr & 0xff0000) >> 16); 1738 /* will resubit at end */ 1739 continue; 1740 } 1741 } 1742 /* final cleanup, totally done with this request */ 1743 1744 switch (XY_STATE(iorq->mode)) { 1745 case XY_SUB_NORM: 1746 bp = iorq->buf; 1747 if (errs) { 1748 bp->b_error = EIO; 1749 bp->b_flags |= B_ERROR; 1750 bp->b_resid = iorq->sectcnt * XYFM_BPS; 1751 } else { 1752 bp->b_resid = 0; /* done */ 1753 } 1754 /* Sun3: map/unmap regardless of B_PHYS */ 1755 dvma_mapout(iorq->dbufbase, 1756 iorq->buf->b_bcount); 1757 (void)BUFQ_GET(iorq->xy->xyq); 1758 disk_unbusy(&iorq->xy->sc_dk, 1759 (bp->b_bcount - bp->b_resid), 1760 (bp->b_flags & B_READ)); 1761 iorq->mode = XY_SUB_FREE; 1762 biodone(bp); 1763 break; 1764 case XY_SUB_WAIT: 1765 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1766 wakeup(iorq); 1767 break; 1768 case XY_SUB_POLL: 1769 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1770 break; 1771 } 1772 } 1773 1774 return (XY_ERR_AOK); 1775 } 1776 1777 /* 1778 * xyc_perror: print error. 1779 * - if still_trying is true: we got an error, retried and got a 1780 * different error. in that case lasterror is the old error, 1781 * and errno is the new one. 1782 * - if still_trying is not true, then if we ever had an error it 1783 * is in lasterror. also, if iorq->errno == 0, then we recovered 1784 * from that error (otherwise iorq->errno == iorq->lasterror). 1785 */ 1786 void 1787 xyc_perror(struct xy_iorq *iorq, struct xy_iopb *iopb, int still_trying) 1788 { 1789 1790 int error = iorq->lasterror; 1791 1792 printf("%s", (iorq->xy) ? 
iorq->xy->sc_dev.dv_xname 1793 : iorq->xyc->sc_dev.dv_xname); 1794 if (iorq->buf) 1795 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev)); 1796 if (iopb->com == XYCMD_RD || iopb->com == XYCMD_WR) 1797 printf("%s %d/%d/%d: ", 1798 (iopb->com == XYCMD_RD) ? "read" : "write", 1799 iopb->cyl, iopb->head, iopb->sect); 1800 printf("%s", xyc_e2str(error)); 1801 1802 if (still_trying) 1803 printf(" [still trying, new error=%s]", xyc_e2str(iorq->errno)); 1804 else 1805 if (iorq->errno == 0) 1806 printf(" [recovered in %d tries]", iorq->tries); 1807 1808 printf("\n"); 1809 } 1810 1811 /* 1812 * xyc_error: non-fatal error encountered... recover. 1813 * return AOK if resubmitted, return FAIL if this iopb is done 1814 */ 1815 int 1816 xyc_error(struct xyc_softc *xycsc, struct xy_iorq *iorq, struct xy_iopb *iopb, 1817 int comm) 1818 { 1819 int errno = iorq->errno; 1820 int erract = xyc_entoact(errno); 1821 int oldmode, advance, i; 1822 1823 if (erract == XY_ERA_RSET) { /* some errors require a reset */ 1824 oldmode = iorq->mode; 1825 iorq->mode = XY_SUB_DONE | (~XY_SUB_MASK & oldmode); 1826 /* make xyc_start ignore us */ 1827 xyc_reset(xycsc, 1, XY_RSET_NONE, errno, iorq->xy); 1828 iorq->mode = oldmode; 1829 } 1830 /* check for read/write to a sector in bad144 table if bad: redirect 1831 * request to bad144 area */ 1832 1833 if ((comm == XYCMD_RD || comm == XYCMD_WR) && 1834 (iorq->mode & XY_MODE_B144) == 0) { 1835 advance = iorq->sectcnt - iopb->scnt; 1836 XYC_ADVANCE(iorq, advance); 1837 if ((i = isbad(&iorq->xy->dkb, iorq->blockno / iorq->xy->sectpercyl, 1838 (iorq->blockno / iorq->xy->nsect) % iorq->xy->nhead, 1839 iorq->blockno % iorq->xy->nsect)) != -1) { 1840 iorq->mode |= XY_MODE_B144; /* enter bad144 mode & 1841 * redirect */ 1842 iopb->errno = iopb->done = iopb->errs = 0; 1843 iopb->scnt = 1; 1844 iopb->cyl = (iorq->xy->ncyl + iorq->xy->acyl) - 2; 1845 /* second to last acyl */ 1846 i = iorq->xy->sectpercyl - 1 - i; /* follow bad144 1847 * standard */ 1848 iopb->head = i / iorq->xy->nhead; 1849 iopb->sect = i % iorq->xy->nhead; 1850 /* will resubmit when we come out of remove_iorq */ 1851 return (XY_ERR_AOK); /* recovered! */ 1852 } 1853 } 1854 1855 /* 1856 * it isn't a bad144 sector, must be real error! see if we can retry 1857 * it? 1858 */ 1859 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror) 1860 xyc_perror(iorq, iopb, 1); /* inform of error state 1861 * change */ 1862 iorq->lasterror = errno; 1863 1864 if ((erract == XY_ERA_RSET || erract == XY_ERA_HARD) 1865 && iorq->tries < XYC_MAXTRIES) { /* retry? */ 1866 iorq->tries++; 1867 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0; 1868 /* will resubmit at end of remove_iorq */ 1869 return (XY_ERR_AOK); /* recovered! */ 1870 } 1871 1872 /* failed to recover from this error */ 1873 return (XY_ERR_FAIL); 1874 } 1875 1876 /* 1877 * xyc_tick: make sure xy is still alive and ticking (err, kicking). 
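 * every active iorq gets a ttl of XYC_MAXTTL + 10 when it is set up in
 * xyc_rqinit(); this timer decrements the ttl of each pending request
 * and soft-resets the controller (failing the requests with
 * XY_ERR_FAIL) if any of them reaches zero.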
1878 */ 1879 void 1880 xyc_tick(void *arg) 1881 { 1882 struct xyc_softc *xycsc = arg; 1883 int lcv, s, reset = 0; 1884 1885 /* reduce ttl for each request if one goes to zero, reset xyc */ 1886 s = splbio(); 1887 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 1888 if (xycsc->reqs[lcv].mode == 0 || 1889 XY_STATE(xycsc->reqs[lcv].mode) == XY_SUB_DONE) 1890 continue; 1891 xycsc->reqs[lcv].ttl--; 1892 if (xycsc->reqs[lcv].ttl == 0) 1893 reset = 1; 1894 } 1895 if (reset) { 1896 printf("%s: watchdog timeout\n", xycsc->sc_dev.dv_xname); 1897 xyc_reset(xycsc, 0, XY_RSET_NONE, XY_ERR_FAIL, NULL); 1898 } 1899 splx(s); 1900 1901 /* until next time */ 1902 1903 callout_reset(&xycsc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xycsc); 1904 } 1905 1906 /* 1907 * xyc_ioctlcmd: this function provides a user level interface to the 1908 * controller via ioctl. this allows "format" programs to be written 1909 * in user code, and is also useful for some debugging. we return 1910 * an error code. called at user priority. 1911 * 1912 * XXX missing a few commands (see the 7053 driver for ideas) 1913 */ 1914 int 1915 xyc_ioctlcmd(struct xy_softc *xy, dev_t dev, struct xd_iocmd *xio) 1916 { 1917 int s, err, rqno; 1918 void * dvmabuf = NULL; 1919 struct xyc_softc *xycsc; 1920 1921 /* check sanity of requested command */ 1922 1923 switch (xio->cmd) { 1924 1925 case XYCMD_NOP: /* no op: everything should be zero */ 1926 if (xio->subfn || xio->dptr || xio->dlen || 1927 xio->block || xio->sectcnt) 1928 return (EINVAL); 1929 break; 1930 1931 case XYCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */ 1932 case XYCMD_WR: 1933 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS || 1934 xio->sectcnt * XYFM_BPS != xio->dlen || xio->dptr == NULL) 1935 return (EINVAL); 1936 break; 1937 1938 case XYCMD_SK: /* seek: doesn't seem useful to export this */ 1939 return (EINVAL); 1940 1941 break; 1942 1943 default: 1944 return (EINVAL);/* ??? */ 1945 } 1946 1947 /* create DVMA buffer for request if needed */ 1948 1949 if (xio->dlen) { 1950 dvmabuf = dvma_malloc(xio->dlen); 1951 if (xio->cmd == XYCMD_WR) { 1952 err = copyin(xio->dptr, dvmabuf, xio->dlen); 1953 if (err) { 1954 dvma_free(dvmabuf, xio->dlen); 1955 return (err); 1956 } 1957 } 1958 } 1959 /* do it! 
*/ 1960 1961 err = 0; 1962 xycsc = xy->parent; 1963 s = splbio(); 1964 rqno = xyc_cmd(xycsc, xio->cmd, xio->subfn, xy->xy_drive, xio->block, 1965 xio->sectcnt, dvmabuf, XY_SUB_WAIT); 1966 if (rqno == XY_ERR_FAIL) { 1967 err = EIO; 1968 goto done; 1969 } 1970 xio->errno = xycsc->ciorq->errno; 1971 xio->tries = xycsc->ciorq->tries; 1972 XYC_DONE(xycsc, err); 1973 1974 if (xio->cmd == XYCMD_RD) 1975 err = copyout(dvmabuf, xio->dptr, xio->dlen); 1976 1977 done: 1978 splx(s); 1979 if (dvmabuf) 1980 dvma_free(dvmabuf, xio->dlen); 1981 return (err); 1982 } 1983 1984 /* 1985 * xyc_e2str: convert error code number into an error string 1986 */ 1987 const char * 1988 xyc_e2str(int no) 1989 { 1990 switch (no) { 1991 case XY_ERR_FAIL: 1992 return ("Software fatal error"); 1993 case XY_ERR_DERR: 1994 return ("DOUBLE ERROR"); 1995 case XY_ERR_AOK: 1996 return ("Successful completion"); 1997 case XY_ERR_IPEN: 1998 return("Interrupt pending"); 1999 case XY_ERR_BCFL: 2000 return("Busy conflict"); 2001 case XY_ERR_TIMO: 2002 return("Operation timeout"); 2003 case XY_ERR_NHDR: 2004 return("Header not found"); 2005 case XY_ERR_HARD: 2006 return("Hard ECC error"); 2007 case XY_ERR_ICYL: 2008 return("Illegal cylinder address"); 2009 case XY_ERR_ISEC: 2010 return("Illegal sector address"); 2011 case XY_ERR_SMAL: 2012 return("Last sector too small"); 2013 case XY_ERR_SACK: 2014 return("Slave ACK error (non-existent memory)"); 2015 case XY_ERR_CHER: 2016 return("Cylinder and head/header error"); 2017 case XY_ERR_SRTR: 2018 return("Auto-seek retry successful"); 2019 case XY_ERR_WPRO: 2020 return("Write-protect error"); 2021 case XY_ERR_UIMP: 2022 return("Unimplemented command"); 2023 case XY_ERR_DNRY: 2024 return("Drive not ready"); 2025 case XY_ERR_SZER: 2026 return("Sector count zero"); 2027 case XY_ERR_DFLT: 2028 return("Drive faulted"); 2029 case XY_ERR_ISSZ: 2030 return("Illegal sector size"); 2031 case XY_ERR_SLTA: 2032 return("Self test A"); 2033 case XY_ERR_SLTB: 2034 return("Self test B"); 2035 case XY_ERR_SLTC: 2036 return("Self test C"); 2037 case XY_ERR_SOFT: 2038 return("Soft ECC error"); 2039 case XY_ERR_SFOK: 2040 return("Soft ECC error recovered"); 2041 case XY_ERR_IHED: 2042 return("Illegal head"); 2043 case XY_ERR_DSEQ: 2044 return("Disk sequencer error"); 2045 case XY_ERR_SEEK: 2046 return("Seek error"); 2047 default: 2048 return ("Unknown error"); 2049 } 2050 } 2051 2052 int 2053 xyc_entoact(int errno) 2054 { 2055 switch (errno) { 2056 case XY_ERR_FAIL: case XY_ERR_DERR: case XY_ERR_IPEN: 2057 case XY_ERR_BCFL: case XY_ERR_ICYL: case XY_ERR_ISEC: 2058 case XY_ERR_UIMP: case XY_ERR_SZER: case XY_ERR_ISSZ: 2059 case XY_ERR_SLTA: case XY_ERR_SLTB: case XY_ERR_SLTC: 2060 case XY_ERR_IHED: case XY_ERR_SACK: case XY_ERR_SMAL: 2061 2062 return(XY_ERA_PROG); /* program error ! */ 2063 2064 case XY_ERR_TIMO: case XY_ERR_NHDR: case XY_ERR_HARD: 2065 case XY_ERR_DNRY: case XY_ERR_CHER: case XY_ERR_SEEK: 2066 case XY_ERR_SOFT: 2067 2068 return(XY_ERA_HARD); /* hard error, retry */ 2069 2070 case XY_ERR_DFLT: case XY_ERR_DSEQ: 2071 2072 return(XY_ERA_RSET); /* hard error reset */ 2073 2074 case XY_ERR_SRTR: case XY_ERR_SFOK: case XY_ERR_AOK: 2075 2076 return(XY_ERA_SOFT); /* an FYI error */ 2077 2078 case XY_ERR_WPRO: 2079 2080 return(XY_ERA_WPRO); /* write protect */ 2081 } 2082 2083 return(XY_ERA_PROG); /* ??? */ 2084 } 2085