/*	$NetBSD: xy.c,v 1.63 2007/10/17 19:57:45 garbled Exp $	*/

/*
 *
 * Copyright (c) 1995 Charles D. Cranor
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *
 * x y . c   x y l o g i c s   4 5 0 / 4 5 1   s m d   d r i v e r
 *
 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
 * id: &Id: xy.c,v 1.1 1995/09/25 20:35:14 chuck Exp &
 * started: 14-Sep-95
 * references: [1] Xylogics Model 753 User's Manual
 *                 part number: 166-753-001, Revision B, May 21, 1988.
 *                 "Your Partner For Performance"
 *             [2] other NetBSD disk device drivers
 *             [3] Xylogics Model 450 User's Manual
 *                 part number: 166-017-001, Revision B, 1983.
 *             [4] Addendum to Xylogics Model 450 Disk Controller User's
 *                 Manual, Jan. 1985.
 *             [5] The 451 Controller, Rev. B3, September 2, 1986.
50 * [6] David Jones <dej@achilles.net>'s unfinished 450/451 driver 51 * 52 */ 53 54 #include <sys/cdefs.h> 55 __KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.63 2007/10/17 19:57:45 garbled Exp $"); 56 57 #undef XYC_DEBUG /* full debug */ 58 #undef XYC_DIAG /* extra sanity checks */ 59 #if defined(DIAGNOSTIC) && !defined(XYC_DIAG) 60 #define XYC_DIAG /* link in with master DIAG option */ 61 #endif 62 63 #include <sys/param.h> 64 #include <sys/proc.h> 65 #include <sys/systm.h> 66 #include <sys/kernel.h> 67 #include <sys/file.h> 68 #include <sys/stat.h> 69 #include <sys/ioctl.h> 70 #include <sys/buf.h> 71 #include <sys/bufq.h> 72 #include <sys/uio.h> 73 #include <sys/malloc.h> 74 #include <sys/device.h> 75 #include <sys/disklabel.h> 76 #include <sys/disk.h> 77 #include <sys/syslog.h> 78 #include <sys/dkbad.h> 79 #include <sys/conf.h> 80 #include <sys/kauth.h> 81 82 #include <uvm/uvm_extern.h> 83 84 #include <dev/sun/disklabel.h> 85 86 #include <machine/autoconf.h> 87 #include <machine/dvma.h> 88 89 #include <sun3/dev/xyreg.h> 90 #include <sun3/dev/xyvar.h> 91 #include <sun3/dev/xio.h> 92 93 #include "locators.h" 94 95 /* 96 * Print a complaint when no xy children were specified 97 * in the config file. Better than a link error... 98 * 99 * XXX: Some folks say this driver should be split in two, 100 * but that seems pointless with ONLY one type of child. 101 */ 102 #include "xy.h" 103 #if NXY == 0 104 #error "xyc but no xy?" 105 #endif 106 107 /* 108 * macros 109 */ 110 111 /* 112 * XYC_GO: start iopb ADDR (DVMA addr in a u_long) on XYC 113 */ 114 #define XYC_GO(XYC, ADDR) { \ 115 (XYC)->xyc_addr_lo = ((ADDR) & 0xff); \ 116 (ADDR) = ((ADDR) >> 8); \ 117 (XYC)->xyc_addr_hi = ((ADDR) & 0xff); \ 118 (ADDR) = ((ADDR) >> 8); \ 119 (XYC)->xyc_reloc_lo = ((ADDR) & 0xff); \ 120 (ADDR) = ((ADDR) >> 8); \ 121 (XYC)->xyc_reloc_hi = (ADDR); \ 122 (XYC)->xyc_csr = XYC_GBSY; /* go! 
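 * illustrative example (address value is hypothetical): if ADDR holds	\
 * the DVMA iopb address 0x00fe1234, this stores addr_lo=0x34,		\
 * addr_hi=0x12, reloc_lo=0xfe, reloc_hi=0x00 and then sets XYC_GBSY	\
 * to start the controller.						\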
*/ \ 123 } 124 125 /* 126 * XYC_DONE: don't need IORQ, get error code and free (done after xyc_cmd) 127 */ 128 129 #define XYC_DONE(SC,ER) { \ 130 if ((ER) == XY_ERR_AOK) { \ 131 (ER) = (SC)->ciorq->errno; \ 132 (SC)->ciorq->mode = XY_SUB_FREE; \ 133 wakeup((SC)->ciorq); \ 134 } \ 135 } 136 137 /* 138 * XYC_ADVANCE: advance iorq's pointers by a number of sectors 139 */ 140 141 #define XYC_ADVANCE(IORQ, N) { \ 142 if (N) { \ 143 (IORQ)->sectcnt -= (N); \ 144 (IORQ)->blockno += (N); \ 145 (IORQ)->dbuf += ((N)*XYFM_BPS); \ 146 } \ 147 } 148 149 /* 150 * note - addresses you can sleep on: 151 * [1] & of xy_softc's "state" (waiting for a chance to attach a drive) 152 * [2] & an iorq (waiting for an XY_SUB_WAIT iorq to finish) 153 */ 154 155 156 /* 157 * function prototypes 158 * "xyc_*" functions are internal, all others are external interfaces 159 */ 160 161 /* internals */ 162 struct xy_iopb *xyc_chain(struct xyc_softc *, struct xy_iorq *); 163 int xyc_cmd(struct xyc_softc *, int, int, int, int, int, char *, int); 164 const char *xyc_e2str(int); 165 int xyc_entoact(int); 166 int xyc_error(struct xyc_softc *, struct xy_iorq *, struct xy_iopb *, int); 167 int xyc_ioctlcmd(struct xy_softc *, dev_t dev, struct xd_iocmd *); 168 void xyc_perror(struct xy_iorq *, struct xy_iopb *, int); 169 int xyc_piodriver(struct xyc_softc *, struct xy_iorq *); 170 int xyc_remove_iorq(struct xyc_softc *); 171 int xyc_reset(struct xyc_softc *, int, struct xy_iorq *, int, 172 struct xy_softc *); 173 inline void xyc_rqinit(struct xy_iorq *, struct xyc_softc *, struct xy_softc *, 174 int, u_long, int, void *, struct buf *); 175 void xyc_rqtopb(struct xy_iorq *, struct xy_iopb *, int, int); 176 void xyc_start(struct xyc_softc *, struct xy_iorq *); 177 int xyc_startbuf(struct xyc_softc *, struct xy_softc *, struct buf *); 178 int xyc_submit_iorq(struct xyc_softc *, struct xy_iorq *, int); 179 void xyc_tick(void *); 180 int xyc_unbusy(struct xyc *, int); 181 void xyc_xyreset(struct xyc_softc *, struct xy_softc *); 182 183 /* machine interrupt hook */ 184 int xycintr(void *); 185 186 /* autoconf */ 187 static int xycmatch(struct device *, struct cfdata *, void *); 188 static void xycattach(struct device *, struct device *, void *); 189 static int xyc_print(void *, const char *); 190 191 static int xymatch(struct device *, struct cfdata *, void *); 192 static void xyattach(struct device *, struct device *, void *); 193 static void xy_init(struct xy_softc *); 194 195 static void xydummystrat(struct buf *); 196 int xygetdisklabel(struct xy_softc *, void *); 197 198 /* 199 * cfattach's: device driver interface to autoconfig 200 */ 201 202 CFATTACH_DECL(xyc, sizeof(struct xyc_softc), 203 xycmatch, xycattach, NULL, NULL); 204 205 CFATTACH_DECL(xy, sizeof(struct xy_softc), 206 xymatch, xyattach, NULL, NULL); 207 208 extern struct cfdriver xy_cd; 209 210 struct xyc_attach_args { /* this is the "aux" args to xyattach */ 211 int driveno; /* unit number */ 212 }; 213 214 dev_type_open(xyopen); 215 dev_type_close(xyclose); 216 dev_type_read(xyread); 217 dev_type_write(xywrite); 218 dev_type_ioctl(xyioctl); 219 dev_type_strategy(xystrategy); 220 dev_type_dump(xydump); 221 dev_type_size(xysize); 222 223 const struct bdevsw xy_bdevsw = { 224 xyopen, xyclose, xystrategy, xyioctl, xydump, xysize, D_DISK 225 }; 226 227 const struct cdevsw xy_cdevsw = { 228 xyopen, xyclose, xyread, xywrite, xyioctl, 229 nostop, notty, nopoll, nommap, nokqfilter, D_DISK 230 }; 231 232 /* 233 * dkdriver 234 */ 235 236 struct dkdriver xydkdriver = { xystrategy 
}; 237 238 /* 239 * start: disk label fix code (XXX) 240 */ 241 242 static void *xy_labeldata; 243 244 static void 245 xydummystrat(struct buf *bp) 246 { 247 if (bp->b_bcount != XYFM_BPS) 248 panic("xydummystrat"); 249 memcpy(bp->b_data, xy_labeldata, XYFM_BPS); 250 bp->b_flags |= B_DONE; 251 bp->b_flags &= ~B_BUSY; 252 } 253 254 int 255 xygetdisklabel(struct xy_softc *xy, void *b) 256 { 257 const char *err; 258 struct sun_disklabel *sdl; 259 260 /* We already have the label data in `b'; setup for dummy strategy */ 261 xy_labeldata = b; 262 263 /* Required parameter for readdisklabel() */ 264 xy->sc_dk.dk_label->d_secsize = XYFM_BPS; 265 266 err = readdisklabel(MAKEDISKDEV(0, device_unit(&xy->sc_dev), RAW_PART), 267 xydummystrat, 268 xy->sc_dk.dk_label, xy->sc_dk.dk_cpulabel); 269 if (err) { 270 printf("%s: %s\n", xy->sc_dev.dv_xname, err); 271 return(XY_ERR_FAIL); 272 } 273 274 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */ 275 sdl = (struct sun_disklabel *)xy->sc_dk.dk_cpulabel->cd_block; 276 if (sdl->sl_magic == SUN_DKMAGIC) 277 xy->pcyl = sdl->sl_pcyl; 278 else { 279 printf("%s: WARNING: no `pcyl' in disk label.\n", 280 xy->sc_dev.dv_xname); 281 xy->pcyl = xy->sc_dk.dk_label->d_ncylinders + 282 xy->sc_dk.dk_label->d_acylinders; 283 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n", 284 xy->sc_dev.dv_xname, xy->pcyl); 285 } 286 287 xy->ncyl = xy->sc_dk.dk_label->d_ncylinders; 288 xy->acyl = xy->sc_dk.dk_label->d_acylinders; 289 xy->nhead = xy->sc_dk.dk_label->d_ntracks; 290 xy->nsect = xy->sc_dk.dk_label->d_nsectors; 291 xy->sectpercyl = xy->nhead * xy->nsect; 292 xy->sc_dk.dk_label->d_secsize = XYFM_BPS; /* not handled by 293 * sun->bsd */ 294 return(XY_ERR_AOK); 295 } 296 297 /* 298 * end: disk label fix code (XXX) 299 */ 300 301 /* 302 * a u t o c o n f i g f u n c t i o n s 303 */ 304 305 /* 306 * xycmatch: determine if xyc is present or not. we do a 307 * soft reset to detect the xyc. 308 */ 309 static int 310 xycmatch(struct device *parent, struct cfdata *cf, void *aux) 311 { 312 struct confargs *ca = aux; 313 314 /* No default VME address. */ 315 if (ca->ca_paddr == -1) 316 return (0); 317 318 /* Make sure something is there... */ 319 if (bus_peek(ca->ca_bustype, ca->ca_paddr + 5, 1) == -1) 320 return (0); 321 322 /* Default interrupt priority. */ 323 if (ca->ca_intpri == -1) 324 ca->ca_intpri = 2; 325 326 return (1); 327 } 328 329 /* 330 * xycattach: attach controller 331 */ 332 static void 333 xycattach(struct device *parent, struct device *self, void *aux) 334 { 335 struct xyc_softc *xyc = (void *) self; 336 struct confargs *ca = aux; 337 struct xyc_attach_args xa; 338 int lcv, err, res, pbsz; 339 void *tmp, *tmp2; 340 u_long ultmp; 341 342 /* get addressing and intr level stuff from autoconfig and load it 343 * into our xyc_softc. */ 344 345 xyc->xyc = (struct xyc *) 346 bus_mapin(ca->ca_bustype, ca->ca_paddr, sizeof(struct xyc)); 347 xyc->bustype = ca->ca_bustype; 348 xyc->ipl = ca->ca_intpri; 349 xyc->vector = ca->ca_intvec; 350 xyc->no_ols = 0; /* XXX should be from config */ 351 352 for (lcv = 0; lcv < XYC_MAXDEV; lcv++) 353 xyc->sc_drives[lcv] = (struct xy_softc *) 0; 354 355 /* 356 * allocate and zero buffers 357 * check boundaries of the KVA's ... all IOPBs must reside in 358 * the same 64K region. 
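 *
 * (why 64K: as xyc_chain() and XYC_GO() below suggest, chained iopbs
 * are linked through the 16 bit nxtiopb field and the controller gets
 * the upper address bits from the relocation register loaded at
 * XYC_GO() time, so every iopb in a chain must sit in the same 64K
 * "relocation page".  the test below just compares the upper 16 bits
 * of the start and end of the allocation.)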
359 */ 360 361 pbsz = XYC_MAXIOPB * sizeof(struct xy_iopb); 362 tmp = tmp2 = (struct xy_iopb *) dvma_malloc(pbsz); /* KVA */ 363 ultmp = (u_long) tmp; 364 if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) { 365 tmp = (struct xy_iopb *) dvma_malloc(pbsz); /* retry! */ 366 dvma_free(tmp2, pbsz); 367 ultmp = (u_long) tmp; 368 if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) { 369 printf("%s: can't alloc IOPB mem in 64K\n", 370 xyc->sc_dev.dv_xname); 371 return; 372 } 373 } 374 memset(tmp, 0, pbsz); 375 xyc->iopbase = tmp; 376 xyc->dvmaiopb = (struct xy_iopb *) 377 dvma_kvtopa(xyc->iopbase, xyc->bustype); 378 xyc->reqs = (struct xy_iorq *) 379 malloc(XYC_MAXIOPB * sizeof(struct xy_iorq), M_DEVBUF, M_NOWAIT); 380 if (xyc->reqs == NULL) 381 panic("xyc malloc"); 382 memset(xyc->reqs, 0, XYC_MAXIOPB * sizeof(struct xy_iorq)); 383 384 /* 385 * init iorq to iopb pointers, and non-zero fields in the 386 * iopb which never change. 387 */ 388 389 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 390 xyc->xy_chain[lcv] = NULL; 391 xyc->reqs[lcv].iopb = &xyc->iopbase[lcv]; 392 xyc->iopbase[lcv].asr = 1; /* always the same */ 393 xyc->iopbase[lcv].eef = 1; /* always the same */ 394 xyc->iopbase[lcv].ecm = XY_ECM; /* always the same */ 395 xyc->iopbase[lcv].aud = 1; /* always the same */ 396 xyc->iopbase[lcv].relo = 1; /* always the same */ 397 xyc->iopbase[lcv].thro = XY_THRO;/* always the same */ 398 } 399 xyc->ciorq = &xyc->reqs[XYC_CTLIOPB]; /* short hand name */ 400 xyc->ciopb = &xyc->iopbase[XYC_CTLIOPB]; /* short hand name */ 401 xyc->xy_hand = 0; 402 403 /* read controller parameters and insure we have a 450/451 */ 404 405 err = xyc_cmd(xyc, XYCMD_ST, 0, 0, 0, 0, 0, XY_SUB_POLL); 406 res = xyc->ciopb->ctyp; 407 XYC_DONE(xyc, err); 408 if (res != XYCT_450) { 409 if (err) 410 printf(": %s: ", xyc_e2str(err)); 411 printf(": doesn't identify as a 450/451\n"); 412 return; 413 } 414 printf(": Xylogics 450/451"); 415 if (xyc->no_ols) 416 printf(" [OLS disabled]"); /* 450 doesn't overlap seek right */ 417 printf("\n"); 418 if (err) { 419 printf("%s: error: %s\n", xyc->sc_dev.dv_xname, 420 xyc_e2str(err)); 421 return; 422 } 423 if ((xyc->xyc->xyc_csr & XYC_ADRM) == 0) { 424 printf("%s: 24 bit addressing turned off\n", 425 xyc->sc_dev.dv_xname); 426 printf("please set hardware jumpers JM1-JM2=in, JM3-JM4=out\n"); 427 printf("to enable 24 bit mode and this driver\n"); 428 return; 429 } 430 431 /* link in interrupt with higher level software */ 432 isr_add_vectored(xycintr, (void *)xyc, 433 ca->ca_intpri, ca->ca_intvec); 434 evcnt_attach_dynamic(&xyc->sc_intrcnt, EVCNT_TYPE_INTR, NULL, 435 xyc->sc_dev.dv_xname, "intr"); 436 437 callout_init(&xyc->sc_tick_ch, 0); 438 439 /* now we must look for disks using autoconfig */ 440 for (xa.driveno = 0; xa.driveno < XYC_MAXDEV; xa.driveno++) 441 (void) config_found(self, (void *) &xa, xyc_print); 442 443 /* start the watchdog clock */ 444 callout_reset(&xyc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xyc); 445 } 446 447 static int 448 xyc_print(void *aux, const char *name) 449 { 450 struct xyc_attach_args *xa = aux; 451 452 if (name != NULL) 453 aprint_normal("%s: ", name); 454 455 if (xa->driveno != -1) 456 aprint_normal(" drive %d", xa->driveno); 457 458 return UNCONF; 459 } 460 461 /* 462 * xymatch: probe for disk. 463 * 464 * note: we almost always say disk is present. this allows us to 465 * spin up and configure a disk after the system is booted (we can 466 * call xyattach!). 
Also, wire down the relationship between the 467 * xy* and xyc* devices, to simplify boot device identification. 468 */ 469 static int 470 xymatch(struct device *parent, struct cfdata *cf, void *aux) 471 { 472 struct xyc_attach_args *xa = aux; 473 int xy_unit; 474 475 /* Match only on the "wired-down" controller+disk. */ 476 xy_unit = device_unit(parent) * 2 + xa->driveno; 477 if (cf->cf_unit != xy_unit) 478 return (0); 479 480 return (1); 481 } 482 483 /* 484 * xyattach: attach a disk. 485 */ 486 static void 487 xyattach(struct device *parent, struct device *self, void *aux) 488 { 489 struct xy_softc *xy = (void *) self; 490 struct xyc_softc *xyc = (void *) parent; 491 struct xyc_attach_args *xa = aux; 492 493 printf("\n"); 494 495 /* 496 * Always re-initialize the disk structure. We want statistics 497 * to start with a clean slate. 498 */ 499 memset(&xy->sc_dk, 0, sizeof(xy->sc_dk)); 500 disk_init(&xy->sc_dk, xy->sc_dev.dv_xname, &xydkdriver); 501 502 xy->state = XY_DRIVE_UNKNOWN; /* to start */ 503 xy->flags = 0; 504 xy->parent = xyc; 505 506 /* init queue of waiting bufs */ 507 bufq_alloc(&xy->xyq, "disksort", BUFQ_SORT_RAWBLOCK); 508 xy->xyrq = &xyc->reqs[xa->driveno]; 509 510 xy->xy_drive = xa->driveno; 511 xyc->sc_drives[xa->driveno] = xy; 512 513 /* Do init work common to attach and open. */ 514 xy_init(xy); 515 } 516 517 /* 518 * end of autoconfig functions 519 */ 520 521 /* 522 * Initialize a disk. This can be called from both autoconf and 523 * also from xyopen/xystrategy. 524 */ 525 static void 526 xy_init(struct xy_softc *xy) 527 { 528 struct xyc_softc *xyc; 529 struct dkbad *dkb; 530 void *dvmabuf; 531 int err, spt, mb, blk, lcv, fullmode, newstate; 532 533 xyc = xy->parent; 534 xy->state = XY_DRIVE_ATTACHING; 535 newstate = XY_DRIVE_UNKNOWN; 536 fullmode = (cold) ? XY_SUB_POLL : XY_SUB_WAIT; 537 dvmabuf = dvma_malloc(XYFM_BPS); 538 539 /* first try and reset the drive */ 540 541 err = xyc_cmd(xyc, XYCMD_RST, 0, xy->xy_drive, 0, 0, 0, fullmode); 542 XYC_DONE(xyc, err); 543 if (err == XY_ERR_DNRY) { 544 printf("%s: drive %d: off-line\n", 545 xy->sc_dev.dv_xname, xy->xy_drive); 546 goto done; 547 } 548 if (err) { 549 printf("%s: ERROR 0x%02x (%s)\n", 550 xy->sc_dev.dv_xname, err, xyc_e2str(err)); 551 goto done; 552 } 553 printf("%s: drive %d ready", 554 xy->sc_dev.dv_xname, xy->xy_drive); 555 556 /* 557 * now set drive parameters (to semi-bogus values) so we can read the 558 * disk label. 559 */ 560 xy->pcyl = xy->ncyl = 1; 561 xy->acyl = 0; 562 xy->nhead = 1; 563 xy->nsect = 1; 564 xy->sectpercyl = 1; 565 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 566 xy->dkb.bt_bad[lcv].bt_cyl = 567 xy->dkb.bt_bad[lcv].bt_trksec = 0xffff; 568 569 /* read disk label */ 570 for (xy->drive_type = 0 ; xy->drive_type <= XYC_MAXDT ; 571 xy->drive_type++) { 572 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, 0, 1, 573 dvmabuf, fullmode); 574 XYC_DONE(xyc, err); 575 if (err == XY_ERR_AOK) break; 576 } 577 578 if (err != XY_ERR_AOK) { 579 printf("%s: reading disk label failed: %s\n", 580 xy->sc_dev.dv_xname, xyc_e2str(err)); 581 goto done; 582 } 583 printf("%s: drive type %d\n", 584 xy->sc_dev.dv_xname, xy->drive_type); 585 586 newstate = XY_DRIVE_NOLABEL; 587 588 xy->hw_spt = spt = 0; /* XXX needed ? 
*/ 589 /* Attach the disk: must be before getdisklabel to malloc label */ 590 disk_attach(&xy->sc_dk); 591 592 if (xygetdisklabel(xy, dvmabuf) != XY_ERR_AOK) 593 goto done; 594 595 /* inform the user of what is up */ 596 printf("%s: <%s>, pcyl %d\n", 597 xy->sc_dev.dv_xname, 598 (char *)dvmabuf, xy->pcyl); 599 mb = xy->ncyl * (xy->nhead * xy->nsect) / (1048576 / XYFM_BPS); 600 printf("%s: %dMB, %d cyl, %d head, %d sec\n", 601 xy->sc_dev.dv_xname, mb, 602 xy->ncyl, xy->nhead, xy->nsect); 603 604 /* 605 * 450/451 stupidity: the drive type is encoded into the format 606 * of the disk. the drive type in the IOPB must match the drive 607 * type in the format, or you will not be able to do I/O to the 608 * disk (you get header not found errors). if you have two drives 609 * of different sizes that have the same drive type in their 610 * formatting then you are out of luck. 611 * 612 * this problem was corrected in the 753/7053. 613 */ 614 615 for (lcv = 0 ; lcv < XYC_MAXDEV ; lcv++) { 616 struct xy_softc *oxy; 617 618 oxy = xyc->sc_drives[lcv]; 619 if (oxy == NULL || oxy == xy) continue; 620 if (oxy->drive_type != xy->drive_type) continue; 621 if (xy->nsect != oxy->nsect || xy->pcyl != oxy->pcyl || 622 xy->nhead != oxy->nhead) { 623 printf("%s: %s and %s must be the same size!\n", 624 xyc->sc_dev.dv_xname, 625 xy ->sc_dev.dv_xname, 626 oxy->sc_dev.dv_xname); 627 panic("xy drive size mismatch"); 628 } 629 } 630 631 632 /* now set the real drive parameters! */ 633 blk = (xy->nsect - 1) + 634 ((xy->nhead - 1) * xy->nsect) + 635 ((xy->pcyl - 1) * xy->nsect * xy->nhead); 636 err = xyc_cmd(xyc, XYCMD_SDS, 0, xy->xy_drive, blk, 0, 0, fullmode); 637 XYC_DONE(xyc, err); 638 if (err) { 639 printf("%s: write drive size failed: %s\n", 640 xy->sc_dev.dv_xname, xyc_e2str(err)); 641 goto done; 642 } 643 newstate = XY_DRIVE_ONLINE; 644 645 /* 646 * read bad144 table. this table resides on the first sector of the 647 * last track of the disk (i.e. second cyl of "acyl" area). 
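 * for example (geometry values are hypothetical): with ncyl=840,
 * acyl=2, nhead=4 and nsect=48 the table would be read from block
 * (840 + 2 - 1) * (4 * 48) + (4 - 1) * 48 = 161472 + 144 = 161616.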
648 */ 649 blk = (xy->ncyl + xy->acyl - 1) * (xy->nhead * xy->nsect) + 650 /* last cyl */ 651 (xy->nhead - 1) * xy->nsect; /* last head */ 652 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, blk, 1, 653 dvmabuf, fullmode); 654 XYC_DONE(xyc, err); 655 if (err) { 656 printf("%s: reading bad144 failed: %s\n", 657 xy->sc_dev.dv_xname, xyc_e2str(err)); 658 goto done; 659 } 660 661 /* check dkbad for sanity */ 662 dkb = (struct dkbad *) dvmabuf; 663 for (lcv = 0; lcv < 126; lcv++) { 664 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 665 dkb->bt_bad[lcv].bt_cyl == 0) && 666 dkb->bt_bad[lcv].bt_trksec == 0xffff) 667 continue; /* blank */ 668 if (dkb->bt_bad[lcv].bt_cyl >= xy->ncyl) 669 break; 670 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xy->nhead) 671 break; 672 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xy->nsect) 673 break; 674 } 675 if (lcv != 126) { 676 printf("%s: warning: invalid bad144 sector!\n", 677 xy->sc_dev.dv_xname); 678 } else { 679 memcpy(&xy->dkb, dvmabuf, XYFM_BPS); 680 } 681 682 done: 683 xy->state = newstate; 684 dvma_free(dvmabuf, XYFM_BPS); 685 } 686 687 /* 688 * { b , c } d e v s w f u n c t i o n s 689 */ 690 691 /* 692 * xyclose: close device 693 */ 694 int 695 xyclose(dev_t dev, int flag, int fmt, struct lwp *l) 696 { 697 struct xy_softc *xy = xy_cd.cd_devs[DISKUNIT(dev)]; 698 int part = DISKPART(dev); 699 700 /* clear mask bits */ 701 702 switch (fmt) { 703 case S_IFCHR: 704 xy->sc_dk.dk_copenmask &= ~(1 << part); 705 break; 706 case S_IFBLK: 707 xy->sc_dk.dk_bopenmask &= ~(1 << part); 708 break; 709 } 710 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 711 712 return 0; 713 } 714 715 /* 716 * xydump: crash dump system 717 */ 718 int 719 xydump(dev_t dev, daddr_t blkno, void *va, size_t sz) 720 { 721 int unit, part; 722 struct xy_softc *xy; 723 724 unit = DISKUNIT(dev); 725 if (unit >= xy_cd.cd_ndevs) 726 return ENXIO; 727 part = DISKPART(dev); 728 729 xy = xy_cd.cd_devs[unit]; 730 731 printf("%s%c: crash dump not supported (yet)\n", xy->sc_dev.dv_xname, 732 'a' + part); 733 734 return ENXIO; 735 736 /* outline: globals: "dumplo" == sector number of partition to start 737 * dump at (convert to physical sector with partition table) 738 * "dumpsize" == size of dump in clicks "physmem" == size of physical 739 * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 740 * physmem) 741 * 742 * dump a copy of physical memory to the dump device starting at sector 743 * "dumplo" in the swap partition (make sure > 0). map in pages as 744 * we go. use polled I/O. 745 * 746 * XXX how to handle NON_CONTIG? 747 */ 748 } 749 750 /* 751 * xyioctl: ioctls on XY drives. based on ioctl's of other netbsd disks. 
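 * handled here: DIOCSBAD, DIOCGDINFO, DIOCGPART, DIOCSDINFO, DIOCWLABEL,
 * DIOCWDINFO and the pass-through DIOSXDCMD (see xyc_ioctlcmd() below);
 * anything else gets ENOTTY.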
752 */ 753 int 754 xyioctl(dev_t dev, u_long command, void *addr, int flag, struct lwp *l) 755 { 756 struct xy_softc *xy; 757 struct xd_iocmd *xio; 758 int error, s, unit; 759 760 unit = DISKUNIT(dev); 761 762 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == NULL) 763 return (ENXIO); 764 765 /* switch on ioctl type */ 766 767 switch (command) { 768 case DIOCSBAD: /* set bad144 info */ 769 if ((flag & FWRITE) == 0) 770 return EBADF; 771 s = splbio(); 772 memcpy(&xy->dkb, addr, sizeof(xy->dkb)); 773 splx(s); 774 return 0; 775 776 case DIOCGDINFO: /* get disk label */ 777 memcpy(addr, xy->sc_dk.dk_label, sizeof(struct disklabel)); 778 return 0; 779 780 case DIOCGPART: /* get partition info */ 781 ((struct partinfo *) addr)->disklab = xy->sc_dk.dk_label; 782 ((struct partinfo *) addr)->part = 783 &xy->sc_dk.dk_label->d_partitions[DISKPART(dev)]; 784 return 0; 785 786 case DIOCSDINFO: /* set disk label */ 787 if ((flag & FWRITE) == 0) 788 return EBADF; 789 error = setdisklabel(xy->sc_dk.dk_label, 790 (struct disklabel *) addr, /* xy->sc_dk.dk_openmask : */ 0, 791 xy->sc_dk.dk_cpulabel); 792 if (error == 0) { 793 if (xy->state == XY_DRIVE_NOLABEL) 794 xy->state = XY_DRIVE_ONLINE; 795 } 796 return error; 797 798 case DIOCWLABEL: /* change write status of disk label */ 799 if ((flag & FWRITE) == 0) 800 return EBADF; 801 if (*(int *) addr) 802 xy->flags |= XY_WLABEL; 803 else 804 xy->flags &= ~XY_WLABEL; 805 return 0; 806 807 case DIOCWDINFO: /* write disk label */ 808 if ((flag & FWRITE) == 0) 809 return EBADF; 810 error = setdisklabel(xy->sc_dk.dk_label, 811 (struct disklabel *) addr, /* xy->sc_dk.dk_openmask : */ 0, 812 xy->sc_dk.dk_cpulabel); 813 if (error == 0) { 814 if (xy->state == XY_DRIVE_NOLABEL) 815 xy->state = XY_DRIVE_ONLINE; 816 817 /* Simulate opening partition 0 so write succeeds. */ 818 xy->sc_dk.dk_openmask |= (1 << 0); 819 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART), 820 xystrategy, xy->sc_dk.dk_label, 821 xy->sc_dk.dk_cpulabel); 822 xy->sc_dk.dk_openmask = 823 xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 824 } 825 return error; 826 827 case DIOSXDCMD: 828 xio = (struct xd_iocmd *) addr; 829 if ((error = kauth_authorize_generic(l->l_cred, 830 KAUTH_GENERIC_ISSUSER, NULL)) != 0) 831 return (error); 832 return (xyc_ioctlcmd(xy, dev, xio)); 833 834 default: 835 return ENOTTY; 836 } 837 } 838 839 /* 840 * xyopen: open drive 841 */ 842 int 843 xyopen(dev_t dev, int flag, int fmt, struct lwp *l) 844 { 845 int err, unit, part, s; 846 struct xy_softc *xy; 847 848 /* first, could it be a valid target? */ 849 unit = DISKUNIT(dev); 850 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == NULL) 851 return (ENXIO); 852 part = DISKPART(dev); 853 err = 0; 854 855 /* 856 * If some other processing is doing init, sleep. 857 */ 858 s = splbio(); 859 while (xy->state == XY_DRIVE_ATTACHING) { 860 if (tsleep(&xy->state, PRIBIO, "xyopen", 0)) { 861 err = EINTR; 862 goto done; 863 } 864 } 865 /* Do we need to init the drive? */ 866 if (xy->state == XY_DRIVE_UNKNOWN) { 867 xy_init(xy); 868 wakeup(&xy->state); 869 } 870 /* Was the init successful? 
*/ 871 if (xy->state == XY_DRIVE_UNKNOWN) { 872 err = EIO; 873 goto done; 874 } 875 876 /* check for partition */ 877 if (part != RAW_PART && 878 (part >= xy->sc_dk.dk_label->d_npartitions || 879 xy->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) { 880 err = ENXIO; 881 goto done; 882 } 883 884 /* set open masks */ 885 switch (fmt) { 886 case S_IFCHR: 887 xy->sc_dk.dk_copenmask |= (1 << part); 888 break; 889 case S_IFBLK: 890 xy->sc_dk.dk_bopenmask |= (1 << part); 891 break; 892 } 893 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 894 895 done: 896 splx(s); 897 return (err); 898 } 899 900 int 901 xyread(dev_t dev, struct uio *uio, int flags) 902 { 903 904 return (physio(xystrategy, NULL, dev, B_READ, minphys, uio)); 905 } 906 907 int 908 xywrite(dev_t dev, struct uio *uio, int flags) 909 { 910 911 return (physio(xystrategy, NULL, dev, B_WRITE, minphys, uio)); 912 } 913 914 915 /* 916 * xysize: return size of a partition for a dump 917 */ 918 919 int 920 xysize(dev_t dev) 921 { 922 struct xy_softc *xysc; 923 int unit, part, size, omask; 924 925 /* valid unit? */ 926 unit = DISKUNIT(dev); 927 if (unit >= xy_cd.cd_ndevs || (xysc = xy_cd.cd_devs[unit]) == NULL) 928 return (-1); 929 930 part = DISKPART(dev); 931 omask = xysc->sc_dk.dk_openmask & (1 << part); 932 933 if (omask == 0 && xyopen(dev, 0, S_IFBLK, NULL) != 0) 934 return (-1); 935 936 /* do it */ 937 if (xysc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP) 938 size = -1; /* only give valid size for swap partitions */ 939 else 940 size = xysc->sc_dk.dk_label->d_partitions[part].p_size * 941 (xysc->sc_dk.dk_label->d_secsize / DEV_BSIZE); 942 if (omask == 0 && xyclose(dev, 0, S_IFBLK, NULL) != 0) 943 return (-1); 944 return (size); 945 } 946 947 /* 948 * xystrategy: buffering system interface to xy. 949 */ 950 void 951 xystrategy(struct buf *bp) 952 { 953 struct xy_softc *xy; 954 int s, unit; 955 struct disklabel *lp; 956 daddr_t blkno; 957 958 unit = DISKUNIT(bp->b_dev); 959 960 /* check for live device */ 961 962 if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == 0 || 963 bp->b_blkno < 0 || 964 (bp->b_bcount % xy->sc_dk.dk_label->d_secsize) != 0) { 965 bp->b_error = EINVAL; 966 goto done; 967 } 968 969 /* There should always be an open first. */ 970 if (xy->state == XY_DRIVE_UNKNOWN) { 971 bp->b_error = EIO; 972 goto done; 973 } 974 if (xy->state != XY_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) { 975 /* no I/O to unlabeled disks, unless raw partition */ 976 bp->b_error = EIO; 977 goto done; 978 } 979 /* short circuit zero length request */ 980 981 if (bp->b_bcount == 0) 982 goto done; 983 984 /* check bounds with label (disksubr.c). Determine the size of the 985 * transfer, and make sure it is within the boundaries of the 986 * partition. Adjust transfer if needed, and signal errors or early 987 * completion. */ 988 989 lp = xy->sc_dk.dk_label; 990 991 if (bounds_check_with_label(&xy->sc_dk, bp, 992 (xy->flags & XY_WLABEL) != 0) <= 0) 993 goto done; 994 995 /* 996 * Now convert the block number to absolute and put it in 997 * terms of the device's logical block size. 998 */ 999 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE); 1000 if (DISKPART(bp->b_dev) != RAW_PART) 1001 blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset; 1002 1003 bp->b_rawblkno = blkno; 1004 1005 /* 1006 * now we know we have a valid buf structure that we need to do I/O 1007 * on. 
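 *
 * example (numbers are hypothetical): assuming d_secsize == XYFM_BPS ==
 * 512, the DEV_BSIZE divisor above is 1, so a request at b_blkno 100 on
 * a partition whose p_offset is 66048 gets b_rawblkno 66148.
 * xyc_rqtopb() later breaks that absolute block into cyl/head/sect.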
1008 */ 1009 1010 s = splbio(); /* protect the queues */ 1011 1012 BUFQ_PUT(xy->xyq, bp); /* XXX disksort_cylinder */ 1013 1014 /* start 'em up */ 1015 1016 xyc_start(xy->parent, NULL); 1017 1018 /* done! */ 1019 1020 splx(s); 1021 return; 1022 1023 done: /* tells upper layers we are done with this 1024 * buf */ 1025 bp->b_resid = bp->b_bcount; 1026 biodone(bp); 1027 } 1028 /* 1029 * end of {b,c}devsw functions 1030 */ 1031 1032 /* 1033 * i n t e r r u p t f u n c t i o n 1034 * 1035 * xycintr: hardware interrupt. 1036 */ 1037 int 1038 xycintr(void *v) 1039 { 1040 struct xyc_softc *xycsc = v; 1041 1042 /* kick the event counter */ 1043 xycsc->sc_intrcnt.ev_count++; 1044 1045 /* remove as many done IOPBs as possible */ 1046 xyc_remove_iorq(xycsc); 1047 1048 /* start any iorq's already waiting */ 1049 xyc_start(xycsc, NULL); 1050 1051 return (1); 1052 } 1053 /* 1054 * end of interrupt function 1055 */ 1056 1057 /* 1058 * i n t e r n a l f u n c t i o n s 1059 */ 1060 1061 /* 1062 * xyc_rqinit: fill out the fields of an I/O request 1063 */ 1064 1065 inline void 1066 xyc_rqinit(struct xy_iorq *rq, struct xyc_softc *xyc, struct xy_softc *xy, 1067 int md, u_long blk, int cnt, void *db, struct buf *bp) 1068 { 1069 rq->xyc = xyc; 1070 rq->xy = xy; 1071 rq->ttl = XYC_MAXTTL + 10; 1072 rq->mode = md; 1073 rq->tries = rq->errno = rq->lasterror = 0; 1074 rq->blockno = blk; 1075 rq->sectcnt = cnt; 1076 rq->dbuf = rq->dbufbase = db; 1077 rq->buf = bp; 1078 } 1079 1080 /* 1081 * xyc_rqtopb: load up an IOPB based on an iorq 1082 */ 1083 1084 void 1085 xyc_rqtopb(struct xy_iorq *iorq, struct xy_iopb *iopb, int cmd, int subfun) 1086 { 1087 u_long block, dp; 1088 1089 /* normal IOPB case, standard stuff */ 1090 1091 /* chain bit handled later */ 1092 iopb->ien = (XY_STATE(iorq->mode) == XY_SUB_POLL) ? 0 : 1; 1093 iopb->com = cmd; 1094 iopb->errno = 0; 1095 iopb->errs = 0; 1096 iopb->done = 0; 1097 if (iorq->xy) { 1098 iopb->unit = iorq->xy->xy_drive; 1099 iopb->dt = iorq->xy->drive_type; 1100 } else { 1101 iopb->unit = 0; 1102 iopb->dt = 0; 1103 } 1104 block = iorq->blockno; 1105 if (iorq->xy == NULL || block == 0) { 1106 iopb->sect = iopb->head = iopb->cyl = 0; 1107 } else { 1108 iopb->sect = block % iorq->xy->nsect; 1109 block = block / iorq->xy->nsect; 1110 iopb->head = block % iorq->xy->nhead; 1111 block = block / iorq->xy->nhead; 1112 iopb->cyl = block; 1113 } 1114 iopb->scnt = iorq->sectcnt; 1115 if (iorq->dbuf == NULL) { 1116 iopb->dataa = 0; 1117 iopb->datar = 0; 1118 } else { 1119 dp = dvma_kvtopa(iorq->dbuf, iorq->xyc->bustype); 1120 iopb->dataa = (dp & 0xffff); 1121 iopb->datar = ((dp & 0xff0000) >> 16); 1122 } 1123 iopb->subfn = subfun; 1124 } 1125 1126 1127 /* 1128 * xyc_unbusy: wait for the xyc to go unbusy, or timeout. 1129 */ 1130 1131 int 1132 xyc_unbusy(struct xyc *xyc, int del) 1133 { 1134 while (del-- > 0) { 1135 if ((xyc->xyc_csr & XYC_GBSY) == 0) 1136 break; 1137 DELAY(1); 1138 } 1139 return(del == 0 ? XY_ERR_FAIL : XY_ERR_AOK); 1140 } 1141 1142 /* 1143 * xyc_cmd: front end for POLL'd and WAIT'd commands. Returns 0 or error. 1144 * note that NORM requests are handled separately. 
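 *
 * typical use (cf. xy_init() above):
 *	err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, blk, 1,
 *	    dvmabuf, fullmode);
 *	XYC_DONE(xyc, err);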
1145 */ 1146 int 1147 xyc_cmd(struct xyc_softc *xycsc, int cmd, int subfn, int unit, int block, 1148 int scnt, char *dptr, int fullmode) 1149 { 1150 struct xy_iorq *iorq = xycsc->ciorq; 1151 struct xy_iopb *iopb = xycsc->ciopb; 1152 int submode = XY_STATE(fullmode); 1153 1154 /* 1155 * is someone else using the control iopq wait for it if we can 1156 */ 1157 start: 1158 if (submode == XY_SUB_WAIT && XY_STATE(iorq->mode) != XY_SUB_FREE) { 1159 if (tsleep(iorq, PRIBIO, "xyc_cmd", 0)) 1160 return(XY_ERR_FAIL); 1161 goto start; 1162 } 1163 1164 if (XY_STATE(iorq->mode) != XY_SUB_FREE) { 1165 DELAY(1000000); /* XY_SUB_POLL: steal the iorq */ 1166 iorq->mode = XY_SUB_FREE; 1167 printf("%s: stole control iopb\n", xycsc->sc_dev.dv_xname); 1168 } 1169 1170 /* init iorq/iopb */ 1171 1172 xyc_rqinit(iorq, xycsc, 1173 (unit == XYC_NOUNIT) ? NULL : xycsc->sc_drives[unit], 1174 fullmode, block, scnt, dptr, NULL); 1175 1176 /* load IOPB from iorq */ 1177 1178 xyc_rqtopb(iorq, iopb, cmd, subfn); 1179 1180 /* submit it for processing */ 1181 1182 xyc_submit_iorq(xycsc, iorq, fullmode); /* error code will be in iorq */ 1183 1184 return(XY_ERR_AOK); 1185 } 1186 1187 /* 1188 * xyc_startbuf 1189 * start a buffer for running 1190 */ 1191 1192 int 1193 xyc_startbuf(struct xyc_softc *xycsc, struct xy_softc *xysc, struct buf *bp) 1194 { 1195 int partno; 1196 struct xy_iorq *iorq; 1197 struct xy_iopb *iopb; 1198 u_long block; 1199 void *dbuf; 1200 1201 iorq = xysc->xyrq; 1202 iopb = iorq->iopb; 1203 1204 /* get buf */ 1205 1206 if (bp == NULL) 1207 panic("xyc_startbuf null buf"); 1208 1209 partno = DISKPART(bp->b_dev); 1210 #ifdef XYC_DEBUG 1211 printf("xyc_startbuf: %s%c: %s block %d\n", xysc->sc_dev.dv_xname, 1212 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno); 1213 printf("xyc_startbuf: b_bcount %d, b_data 0x%x\n", 1214 bp->b_bcount, bp->b_data); 1215 #endif 1216 1217 /* 1218 * load request. 1219 * 1220 * also, note that there are two kinds of buf structures, those with 1221 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is 1222 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users' 1223 * buffer which has already been mapped into DVMA space. (Not on sun3) 1224 * However, if B_PHYS is not set, then the buffer is a normal system 1225 * buffer which does *not* live in DVMA space. In that case we call 1226 * dvma_mapin to map it into DVMA space so we can do the DMA to it. 1227 * 1228 * in cases where we do a dvma_mapin, note that iorq points to the buffer 1229 * as mapped into DVMA space, where as the bp->b_data points to its 1230 * non-DVMA mapping. 1231 * 1232 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped 1233 * into dvma space, only that it was remapped into the kernel. 1234 * We ALWAYS have to remap the kernel buf into DVMA space. 1235 * (It is done inexpensively, using whole segments!) 1236 */ 1237 1238 block = bp->b_rawblkno; 1239 1240 dbuf = dvma_mapin(bp->b_data, bp->b_bcount, 0); 1241 if (dbuf == NULL) { /* out of DVMA space */ 1242 printf("%s: warning: out of DVMA space\n", 1243 xycsc->sc_dev.dv_xname); 1244 return (XY_ERR_FAIL); /* XXX: need some sort of 1245 * call-back scheme here? */ 1246 } 1247 1248 /* init iorq and load iopb from it */ 1249 1250 xyc_rqinit(iorq, xycsc, xysc, XY_SUB_NORM | XY_MODE_VERBO, block, 1251 bp->b_bcount / XYFM_BPS, dbuf, bp); 1252 1253 xyc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XYCMD_RD : XYCMD_WR, 0); 1254 1255 /* Instrumentation. 
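 * (the dvma_mapin() above is undone by the matching dvma_mapout() calls
 * in xyc_remove_iorq() and xyc_reset() when the transfer completes or
 * is flushed.)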
 */
	disk_busy(&xysc->sc_dk);

	return (XY_ERR_AOK);
}


/*
 * xyc_submit_iorq: submit an iorq for processing.  returns XY_ERR_AOK
 * if ok.  if it fails it returns an error code.  type is XY_SUB_*.
 *
 * note: caller frees iorq in all cases except NORM
 *
 * return value:
 *   NORM: XY_AOK (req pending), XY_FAIL (couldn't submit request)
 *   WAIT: XY_AOK (success), <error-code> (failed)
 *   POLL: <same as WAIT>
 *   NOQ : <same as NORM>
 *
 * there are three sources for i/o requests:
 * [1] xystrategy: normal block I/O, using "struct buf" system.
 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
 * [3] open/ioctl: these are I/O requests done in the context of a process,
 *     and the process should block until they are done.
 *
 * software state is stored in the iorq structure.  each iorq has an
 * iopb structure.  the hardware understands the iopb structure.
 * every command must go through an iopb.  a 450 handles one iopb at a
 * time, whereas a 451 can take them in chains.  [the 450 claims it
 * can handle chains, but it appears to be buggy...]  iopbs are allocated
 * in DVMA space at boot time.  each disk gets one iopb, and the
 * controller gets one (for POLL and WAIT commands).  what happens if
 * the iopb is busy?  for i/o type [1], the buffers are queued at the
 * "buff" layer and picked up later by the interrupt routine.  for case
 * [2] we can only be blocked if there is a WAIT type I/O request being
 * run.  since this can only happen when we are crashing, we wait a sec
 * and then steal the IOPB.  for case [3] the process can sleep
 * on the iorq free list until some iopbs are available.
 */

int
xyc_submit_iorq(struct xyc_softc *xycsc, struct xy_iorq *iorq, int type)
{
	struct xy_iopb *iopb;
	u_long iopbaddr;

#ifdef XYC_DEBUG
	printf("xyc_submit_iorq(%s, addr=0x%x, type=%d)\n",
	    xycsc->sc_dev.dv_xname, iorq, type);
#endif

	/* first check and see if controller is busy */
	if ((xycsc->xyc->xyc_csr & XYC_GBSY) != 0) {
#ifdef XYC_DEBUG
		printf("xyc_submit_iorq: XYC not ready (BUSY)\n");
#endif
		if (type == XY_SUB_NOQ)
			return (XY_ERR_FAIL);	/* failed */
		switch (type) {
		case XY_SUB_NORM:
			return XY_ERR_AOK;	/* success */
		case XY_SUB_WAIT:
			while (iorq->iopb->done == 0) {
				(void) tsleep(iorq, PRIBIO, "xyciorq", 0);
			}
			return (iorq->errno);
		case XY_SUB_POLL:		/* steal controller */
			iopbaddr = xycsc->xyc->xyc_rsetup;	/* RESET */
			if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL)
				panic("xyc_submit_iorq: stuck xyc");
			printf("%s: stole controller\n",
			    xycsc->sc_dev.dv_xname);
			break;
		default:
			panic("xyc_submit_iorq adding");
		}
	}

	iopb = xyc_chain(xycsc, iorq);	/* build chain */
	if (iopb == NULL) {		/* nothing doing?
*/ 1335 if (type == XY_SUB_NORM || type == XY_SUB_NOQ) 1336 return(XY_ERR_AOK); 1337 panic("xyc_submit_iorq: xyc_chain failed!"); 1338 } 1339 iopbaddr = dvma_kvtopa(iopb, xycsc->bustype); 1340 1341 XYC_GO(xycsc->xyc, iopbaddr); 1342 1343 /* command now running, wrap it up */ 1344 switch (type) { 1345 case XY_SUB_NORM: 1346 case XY_SUB_NOQ: 1347 return (XY_ERR_AOK); /* success */ 1348 case XY_SUB_WAIT: 1349 while (iorq->iopb->done == 0) { 1350 (void) tsleep(iorq, PRIBIO, "xyciorq", 0); 1351 } 1352 return (iorq->errno); 1353 case XY_SUB_POLL: 1354 return (xyc_piodriver(xycsc, iorq)); 1355 default: 1356 panic("xyc_submit_iorq wrap up"); 1357 } 1358 panic("xyc_submit_iorq"); 1359 return 0; /* not reached */ 1360 } 1361 1362 1363 /* 1364 * xyc_chain: build a chain. return dvma address of first element in 1365 * the chain. iorq != NULL: means we only want that item on the chain. 1366 */ 1367 1368 struct xy_iopb * 1369 xyc_chain(struct xyc_softc *xycsc, struct xy_iorq *iorq) 1370 { 1371 int togo, chain, hand; 1372 struct xy_iopb *iopb, *prev_iopb; 1373 1374 memset(xycsc->xy_chain, 0, sizeof(xycsc->xy_chain)); 1375 1376 /* 1377 * promote control IOPB to the top 1378 */ 1379 if (iorq == NULL) { 1380 if ((XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_POLL || 1381 XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_WAIT) && 1382 xycsc->iopbase[XYC_CTLIOPB].done == 0) 1383 iorq = &xycsc->reqs[XYC_CTLIOPB]; 1384 } 1385 1386 /* 1387 * special case: if iorq != NULL then we have a POLL or WAIT request. 1388 * we let these take priority and do them first. 1389 */ 1390 if (iorq) { 1391 xycsc->xy_chain[0] = iorq; 1392 iorq->iopb->chen = 0; 1393 return(iorq->iopb); 1394 } 1395 1396 /* 1397 * NORM case: do round robin and maybe chain (if allowed and possible) 1398 */ 1399 1400 chain = 0; 1401 hand = xycsc->xy_hand; 1402 xycsc->xy_hand = (xycsc->xy_hand + 1) % XYC_MAXIOPB; 1403 1404 for (togo = XYC_MAXIOPB ; 1405 togo > 0 ; 1406 togo--, hand = (hand + 1) % XYC_MAXIOPB) 1407 { 1408 1409 if (XY_STATE(xycsc->reqs[hand].mode) != XY_SUB_NORM || 1410 xycsc->iopbase[hand].done) 1411 continue; /* not ready-for-i/o */ 1412 1413 xycsc->xy_chain[chain] = &xycsc->reqs[hand]; 1414 iopb = xycsc->xy_chain[chain]->iopb; 1415 iopb->chen = 0; 1416 if (chain != 0) { /* adding a link to a chain? */ 1417 prev_iopb = xycsc->xy_chain[chain-1]->iopb; 1418 prev_iopb->chen = 1; 1419 prev_iopb->nxtiopb = 0xffff & 1420 dvma_kvtopa(iopb, xycsc->bustype); 1421 } else { /* head of chain */ 1422 iorq = xycsc->xy_chain[chain]; 1423 } 1424 chain++; 1425 if (xycsc->no_ols) break; /* quit if chaining dis-allowed */ 1426 } 1427 return(iorq ? iorq->iopb : NULL); 1428 } 1429 1430 /* 1431 * xyc_piodriver 1432 * 1433 * programmed i/o driver. this function takes over the computer 1434 * and drains off the polled i/o request. it returns the status of the iorq 1435 * the caller is interesting in. 
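 * (this is the XY_SUB_POLL path used by autoconfiguration and the
 * as yet unimplemented crash dump code, where interrupts may not be
 * usable; see the discussion above xyc_submit_iorq().)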
1436 */ 1437 int 1438 xyc_piodriver(struct xyc_softc *xycsc, struct xy_iorq *iorq) 1439 { 1440 int nreset = 0; 1441 int retval = 0; 1442 u_long res; 1443 1444 #ifdef XYC_DEBUG 1445 printf("xyc_piodriver(%s, 0x%x)\n", xycsc->sc_dev.dv_xname, iorq); 1446 #endif 1447 1448 while (iorq->iopb->done == 0) { 1449 1450 res = xyc_unbusy(xycsc->xyc, XYC_MAXTIME); 1451 1452 /* we expect some progress soon */ 1453 if (res == XY_ERR_FAIL && nreset >= 2) { 1454 xyc_reset(xycsc, 0, XY_RSET_ALL, XY_ERR_FAIL, 0); 1455 #ifdef XYC_DEBUG 1456 printf("xyc_piodriver: timeout\n"); 1457 #endif 1458 return (XY_ERR_FAIL); 1459 } 1460 if (res == XY_ERR_FAIL) { 1461 if (xyc_reset(xycsc, 0, 1462 (nreset++ == 0) ? XY_RSET_NONE : iorq, 1463 XY_ERR_FAIL, 1464 0) == XY_ERR_FAIL) 1465 return (XY_ERR_FAIL); /* flushes all but POLL 1466 * requests, resets */ 1467 continue; 1468 } 1469 1470 xyc_remove_iorq(xycsc); /* may resubmit request */ 1471 1472 if (iorq->iopb->done == 0) 1473 xyc_start(xycsc, iorq); 1474 } 1475 1476 /* get return value */ 1477 1478 retval = iorq->errno; 1479 1480 #ifdef XYC_DEBUG 1481 printf("xyc_piodriver: done, retval = 0x%x (%s)\n", 1482 iorq->errno, xyc_e2str(iorq->errno)); 1483 #endif 1484 1485 /* start up any bufs that have queued */ 1486 1487 xyc_start(xycsc, NULL); 1488 1489 return (retval); 1490 } 1491 1492 /* 1493 * xyc_xyreset: reset one drive. NOTE: assumes xyc was just reset. 1494 * we steal iopb[XYC_CTLIOPB] for this, but we put it back when we are done. 1495 */ 1496 void 1497 xyc_xyreset(struct xyc_softc *xycsc, struct xy_softc *xysc) 1498 { 1499 struct xy_iopb tmpiopb; 1500 u_long addr; 1501 int del; 1502 memcpy(&tmpiopb, xycsc->ciopb, sizeof(tmpiopb)); 1503 xycsc->ciopb->chen = xycsc->ciopb->done = xycsc->ciopb->errs = 0; 1504 xycsc->ciopb->ien = 0; 1505 xycsc->ciopb->com = XYCMD_RST; 1506 xycsc->ciopb->unit = xysc->xy_drive; 1507 addr = dvma_kvtopa(xycsc->ciopb, xycsc->bustype); 1508 1509 XYC_GO(xycsc->xyc, addr); 1510 1511 del = XYC_RESETUSEC; 1512 while (del > 0) { 1513 if ((xycsc->xyc->xyc_csr & XYC_GBSY) == 0) break; 1514 DELAY(1); 1515 del--; 1516 } 1517 1518 if (del <= 0 || xycsc->ciopb->errs) { 1519 printf("%s: off-line: %s\n", xycsc->sc_dev.dv_xname, 1520 xyc_e2str(xycsc->ciopb->errno)); 1521 del = xycsc->xyc->xyc_rsetup; 1522 if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL) 1523 panic("xyc_reset"); 1524 } else { 1525 xycsc->xyc->xyc_csr = XYC_IPND; /* clear IPND */ 1526 } 1527 memcpy(xycsc->ciopb, &tmpiopb, sizeof(tmpiopb)); 1528 } 1529 1530 1531 /* 1532 * xyc_reset: reset everything: requests are marked as errors except 1533 * a polled request (which is resubmitted) 1534 */ 1535 int 1536 xyc_reset(struct xyc_softc *xycsc, int quiet, struct xy_iorq *blastmode, 1537 int error, struct xy_softc *xysc) 1538 { 1539 int del = 0, lcv, retval = XY_ERR_AOK; 1540 struct xy_iorq *iorq; 1541 1542 /* soft reset hardware */ 1543 1544 if (!quiet) 1545 printf("%s: soft reset\n", xycsc->sc_dev.dv_xname); 1546 del = xycsc->xyc->xyc_rsetup; 1547 del = xyc_unbusy(xycsc->xyc, XYC_RESETUSEC); 1548 if (del == XY_ERR_FAIL) { 1549 blastmode = XY_RSET_ALL; /* dead, flush all requests */ 1550 retval = XY_ERR_FAIL; 1551 } 1552 if (xysc) 1553 xyc_xyreset(xycsc, xysc); 1554 1555 /* fix queues based on "blast-mode" */ 1556 1557 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 1558 iorq = &xycsc->reqs[lcv]; 1559 1560 if (XY_STATE(iorq->mode) != XY_SUB_POLL && 1561 XY_STATE(iorq->mode) != XY_SUB_WAIT && 1562 XY_STATE(iorq->mode) != XY_SUB_NORM) 1563 /* is it active? 
*/ 1564 continue; 1565 1566 if (blastmode == XY_RSET_ALL || 1567 blastmode != iorq) { 1568 /* failed */ 1569 iorq->errno = error; 1570 xycsc->iopbase[lcv].done = xycsc->iopbase[lcv].errs = 1; 1571 switch (XY_STATE(iorq->mode)) { 1572 case XY_SUB_NORM: 1573 iorq->buf->b_error = EIO; 1574 iorq->buf->b_resid = 1575 iorq->sectcnt * XYFM_BPS; 1576 /* Sun3: map/unmap regardless of B_PHYS */ 1577 dvma_mapout(iorq->dbufbase, 1578 iorq->buf->b_bcount); 1579 (void)BUFQ_GET(iorq->xy->xyq); 1580 disk_unbusy(&iorq->xy->sc_dk, 1581 (iorq->buf->b_bcount - iorq->buf->b_resid), 1582 (iorq->buf->b_flags & B_READ)); 1583 biodone(iorq->buf); 1584 iorq->mode = XY_SUB_FREE; 1585 break; 1586 case XY_SUB_WAIT: 1587 wakeup(iorq); 1588 case XY_SUB_POLL: 1589 iorq->mode = 1590 XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1591 break; 1592 } 1593 1594 } else { 1595 1596 /* resubmit, no need to do anything here */ 1597 } 1598 } 1599 1600 /* 1601 * now, if stuff is waiting, start it. 1602 * since we just reset it should go 1603 */ 1604 xyc_start(xycsc, NULL); 1605 1606 return (retval); 1607 } 1608 1609 /* 1610 * xyc_start: start waiting buffers 1611 */ 1612 1613 void 1614 xyc_start(struct xyc_softc *xycsc, struct xy_iorq *iorq) 1615 { 1616 int lcv; 1617 struct xy_softc *xy; 1618 1619 if (iorq == NULL) { 1620 for (lcv = 0; lcv < XYC_MAXDEV ; lcv++) { 1621 if ((xy = xycsc->sc_drives[lcv]) == NULL) continue; 1622 if (BUFQ_PEEK(xy->xyq) == NULL) continue; 1623 if (xy->xyrq->mode != XY_SUB_FREE) continue; 1624 xyc_startbuf(xycsc, xy, BUFQ_PEEK(xy->xyq)); 1625 } 1626 } 1627 xyc_submit_iorq(xycsc, iorq, XY_SUB_NOQ); 1628 } 1629 1630 /* 1631 * xyc_remove_iorq: remove "done" IOPB's. 1632 */ 1633 1634 int 1635 xyc_remove_iorq(struct xyc_softc *xycsc) 1636 { 1637 int errno, rq, comm, errs; 1638 struct xyc *xyc = xycsc->xyc; 1639 u_long addr; 1640 struct xy_iopb *iopb; 1641 struct xy_iorq *iorq; 1642 struct buf *bp; 1643 1644 if (xyc->xyc_csr & XYC_DERR) { 1645 /* 1646 * DOUBLE ERROR: should never happen under normal use. This 1647 * error is so bad, you can't even tell which IOPB is bad, so 1648 * we dump them all. 1649 */ 1650 errno = XY_ERR_DERR; 1651 printf("%s: DOUBLE ERROR!\n", xycsc->sc_dev.dv_xname); 1652 if (xyc_reset(xycsc, 0, XY_RSET_ALL, errno, 0) != XY_ERR_AOK) { 1653 printf("%s: soft reset failed!\n", 1654 xycsc->sc_dev.dv_xname); 1655 panic("xyc_remove_iorq: controller DEAD"); 1656 } 1657 return (XY_ERR_AOK); 1658 } 1659 1660 /* 1661 * get iopb that is done, loop down the chain 1662 */ 1663 1664 if (xyc->xyc_csr & XYC_ERR) { 1665 xyc->xyc_csr = XYC_ERR; /* clear error condition */ 1666 } 1667 if (xyc->xyc_csr & XYC_IPND) { 1668 xyc->xyc_csr = XYC_IPND; /* clear interrupt */ 1669 } 1670 1671 for (rq = 0; rq < XYC_MAXIOPB; rq++) { 1672 iorq = xycsc->xy_chain[rq]; 1673 if (iorq == NULL) break; /* done ! 
*/ 1674 if (iorq->mode == 0 || XY_STATE(iorq->mode) == XY_SUB_DONE) 1675 continue; /* free, or done */ 1676 iopb = iorq->iopb; 1677 if (iopb->done == 0) 1678 continue; /* not done yet */ 1679 1680 comm = iopb->com; 1681 errs = iopb->errs; 1682 1683 if (errs) 1684 iorq->errno = iopb->errno; 1685 else 1686 iorq->errno = 0; 1687 1688 /* handle non-fatal errors */ 1689 1690 if (errs && 1691 xyc_error(xycsc, iorq, iopb, comm) == XY_ERR_AOK) 1692 continue; /* AOK: we resubmitted it */ 1693 1694 1695 /* this iorq is now done (hasn't been restarted or anything) */ 1696 1697 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror) 1698 xyc_perror(iorq, iopb, 0); 1699 1700 /* now, if read/write check to make sure we got all the data 1701 * we needed. (this may not be the case if we got an error in 1702 * the middle of a multisector request). */ 1703 1704 if ((iorq->mode & XY_MODE_B144) != 0 && errs == 0 && 1705 (comm == XYCMD_RD || comm == XYCMD_WR)) { 1706 /* we just successfully processed a bad144 sector 1707 * note: if we are in bad 144 mode, the pointers have 1708 * been advanced already (see above) and are pointing 1709 * at the bad144 sector. to exit bad144 mode, we 1710 * must advance the pointers 1 sector and issue a new 1711 * request if there are still sectors left to process 1712 * 1713 */ 1714 XYC_ADVANCE(iorq, 1); /* advance 1 sector */ 1715 1716 /* exit b144 mode */ 1717 iorq->mode = iorq->mode & (~XY_MODE_B144); 1718 1719 if (iorq->sectcnt) { /* more to go! */ 1720 iorq->lasterror = iorq->errno = iopb->errno = 0; 1721 iopb->errs = iopb->done = 0; 1722 iorq->tries = 0; 1723 iopb->scnt = iorq->sectcnt; 1724 iopb->cyl = iorq->blockno / 1725 iorq->xy->sectpercyl; 1726 iopb->head = 1727 (iorq->blockno / iorq->xy->nhead) % 1728 iorq->xy->nhead; 1729 iopb->sect = iorq->blockno % XYFM_BPS; 1730 addr = dvma_kvtopa(iorq->dbuf, xycsc->bustype); 1731 iopb->dataa = (addr & 0xffff); 1732 iopb->datar = ((addr & 0xff0000) >> 16); 1733 /* will resubit at end */ 1734 continue; 1735 } 1736 } 1737 /* final cleanup, totally done with this request */ 1738 1739 switch (XY_STATE(iorq->mode)) { 1740 case XY_SUB_NORM: 1741 bp = iorq->buf; 1742 if (errs) { 1743 bp->b_error = EIO; 1744 bp->b_resid = iorq->sectcnt * XYFM_BPS; 1745 } else { 1746 bp->b_resid = 0; /* done */ 1747 } 1748 /* Sun3: map/unmap regardless of B_PHYS */ 1749 dvma_mapout(iorq->dbufbase, 1750 iorq->buf->b_bcount); 1751 (void)BUFQ_GET(iorq->xy->xyq); 1752 disk_unbusy(&iorq->xy->sc_dk, 1753 (bp->b_bcount - bp->b_resid), 1754 (bp->b_flags & B_READ)); 1755 iorq->mode = XY_SUB_FREE; 1756 biodone(bp); 1757 break; 1758 case XY_SUB_WAIT: 1759 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1760 wakeup(iorq); 1761 break; 1762 case XY_SUB_POLL: 1763 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1764 break; 1765 } 1766 } 1767 1768 return (XY_ERR_AOK); 1769 } 1770 1771 /* 1772 * xyc_perror: print error. 1773 * - if still_trying is true: we got an error, retried and got a 1774 * different error. in that case lasterror is the old error, 1775 * and errno is the new one. 1776 * - if still_trying is not true, then if we ever had an error it 1777 * is in lasterror. also, if iorq->errno == 0, then we recovered 1778 * from that error (otherwise iorq->errno == iorq->lasterror). 1779 */ 1780 void 1781 xyc_perror(struct xy_iorq *iorq, struct xy_iopb *iopb, int still_trying) 1782 { 1783 1784 int error = iorq->lasterror; 1785 1786 printf("%s", (iorq->xy) ? 
iorq->xy->sc_dev.dv_xname 1787 : iorq->xyc->sc_dev.dv_xname); 1788 if (iorq->buf) 1789 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev)); 1790 if (iopb->com == XYCMD_RD || iopb->com == XYCMD_WR) 1791 printf("%s %d/%d/%d: ", 1792 (iopb->com == XYCMD_RD) ? "read" : "write", 1793 iopb->cyl, iopb->head, iopb->sect); 1794 printf("%s", xyc_e2str(error)); 1795 1796 if (still_trying) 1797 printf(" [still trying, new error=%s]", xyc_e2str(iorq->errno)); 1798 else 1799 if (iorq->errno == 0) 1800 printf(" [recovered in %d tries]", iorq->tries); 1801 1802 printf("\n"); 1803 } 1804 1805 /* 1806 * xyc_error: non-fatal error encountered... recover. 1807 * return AOK if resubmitted, return FAIL if this iopb is done 1808 */ 1809 int 1810 xyc_error(struct xyc_softc *xycsc, struct xy_iorq *iorq, struct xy_iopb *iopb, 1811 int comm) 1812 { 1813 int errno = iorq->errno; 1814 int erract = xyc_entoact(errno); 1815 int oldmode, advance, i; 1816 1817 if (erract == XY_ERA_RSET) { /* some errors require a reset */ 1818 oldmode = iorq->mode; 1819 iorq->mode = XY_SUB_DONE | (~XY_SUB_MASK & oldmode); 1820 /* make xyc_start ignore us */ 1821 xyc_reset(xycsc, 1, XY_RSET_NONE, errno, iorq->xy); 1822 iorq->mode = oldmode; 1823 } 1824 /* check for read/write to a sector in bad144 table if bad: redirect 1825 * request to bad144 area */ 1826 1827 if ((comm == XYCMD_RD || comm == XYCMD_WR) && 1828 (iorq->mode & XY_MODE_B144) == 0) { 1829 advance = iorq->sectcnt - iopb->scnt; 1830 XYC_ADVANCE(iorq, advance); 1831 if ((i = isbad(&iorq->xy->dkb, iorq->blockno / iorq->xy->sectpercyl, 1832 (iorq->blockno / iorq->xy->nsect) % iorq->xy->nhead, 1833 iorq->blockno % iorq->xy->nsect)) != -1) { 1834 iorq->mode |= XY_MODE_B144; /* enter bad144 mode & 1835 * redirect */ 1836 iopb->errno = iopb->done = iopb->errs = 0; 1837 iopb->scnt = 1; 1838 iopb->cyl = (iorq->xy->ncyl + iorq->xy->acyl) - 2; 1839 /* second to last acyl */ 1840 i = iorq->xy->sectpercyl - 1 - i; /* follow bad144 1841 * standard */ 1842 iopb->head = i / iorq->xy->nhead; 1843 iopb->sect = i % iorq->xy->nhead; 1844 /* will resubmit when we come out of remove_iorq */ 1845 return (XY_ERR_AOK); /* recovered! */ 1846 } 1847 } 1848 1849 /* 1850 * it isn't a bad144 sector, must be real error! see if we can retry 1851 * it? 1852 */ 1853 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror) 1854 xyc_perror(iorq, iopb, 1); /* inform of error state 1855 * change */ 1856 iorq->lasterror = errno; 1857 1858 if ((erract == XY_ERA_RSET || erract == XY_ERA_HARD) 1859 && iorq->tries < XYC_MAXTRIES) { /* retry? */ 1860 iorq->tries++; 1861 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0; 1862 /* will resubmit at end of remove_iorq */ 1863 return (XY_ERR_AOK); /* recovered! */ 1864 } 1865 1866 /* failed to recover from this error */ 1867 return (XY_ERR_FAIL); 1868 } 1869 1870 /* 1871 * xyc_tick: make sure xy is still alive and ticking (err, kicking). 
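 * every request is given a ttl of XYC_MAXTTL + 10 in xyc_rqinit(); this
 * timer, re-armed every XYC_TICKCNT ticks, decrements the ttl of each
 * active request and forces a controller reset if any of them reaches
 * zero.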
1872 */ 1873 void 1874 xyc_tick(void *arg) 1875 { 1876 struct xyc_softc *xycsc = arg; 1877 int lcv, s, reset = 0; 1878 1879 /* reduce ttl for each request if one goes to zero, reset xyc */ 1880 s = splbio(); 1881 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 1882 if (xycsc->reqs[lcv].mode == 0 || 1883 XY_STATE(xycsc->reqs[lcv].mode) == XY_SUB_DONE) 1884 continue; 1885 xycsc->reqs[lcv].ttl--; 1886 if (xycsc->reqs[lcv].ttl == 0) 1887 reset = 1; 1888 } 1889 if (reset) { 1890 printf("%s: watchdog timeout\n", xycsc->sc_dev.dv_xname); 1891 xyc_reset(xycsc, 0, XY_RSET_NONE, XY_ERR_FAIL, NULL); 1892 } 1893 splx(s); 1894 1895 /* until next time */ 1896 1897 callout_reset(&xycsc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xycsc); 1898 } 1899 1900 /* 1901 * xyc_ioctlcmd: this function provides a user level interface to the 1902 * controller via ioctl. this allows "format" programs to be written 1903 * in user code, and is also useful for some debugging. we return 1904 * an error code. called at user priority. 1905 * 1906 * XXX missing a few commands (see the 7053 driver for ideas) 1907 */ 1908 int 1909 xyc_ioctlcmd(struct xy_softc *xy, dev_t dev, struct xd_iocmd *xio) 1910 { 1911 int s, err, rqno; 1912 void *dvmabuf = NULL; 1913 struct xyc_softc *xycsc; 1914 1915 /* check sanity of requested command */ 1916 1917 switch (xio->cmd) { 1918 1919 case XYCMD_NOP: /* no op: everything should be zero */ 1920 if (xio->subfn || xio->dptr || xio->dlen || 1921 xio->block || xio->sectcnt) 1922 return (EINVAL); 1923 break; 1924 1925 case XYCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */ 1926 case XYCMD_WR: 1927 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS || 1928 xio->sectcnt * XYFM_BPS != xio->dlen || xio->dptr == NULL) 1929 return (EINVAL); 1930 break; 1931 1932 case XYCMD_SK: /* seek: doesn't seem useful to export this */ 1933 return (EINVAL); 1934 1935 break; 1936 1937 default: 1938 return (EINVAL);/* ??? */ 1939 } 1940 1941 /* create DVMA buffer for request if needed */ 1942 1943 if (xio->dlen) { 1944 dvmabuf = dvma_malloc(xio->dlen); 1945 if (xio->cmd == XYCMD_WR) { 1946 err = copyin(xio->dptr, dvmabuf, xio->dlen); 1947 if (err) { 1948 dvma_free(dvmabuf, xio->dlen); 1949 return (err); 1950 } 1951 } 1952 } 1953 /* do it! 
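 *
 * (for reference, an illustrative user-level invocation of DIOSXDCMD;
 * this sketch assumes "fd" is an open raw-partition descriptor, "buf"
 * a caller-supplied XYFM_BPS byte buffer, and that the caller is root:
 *	struct xd_iocmd xio;
 *	memset(&xio, 0, sizeof(xio));
 *	xio.cmd = XYCMD_RD;
 *	xio.block = 0;
 *	xio.sectcnt = 1;
 *	xio.dlen = XYFM_BPS;
 *	xio.dptr = buf;
 *	if (ioctl(fd, DIOSXDCMD, &xio) == -1)
 *		err(1, "DIOSXDCMD");
 * a format program would issue XYCMD_WR the same way.)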
*/ 1954 1955 err = 0; 1956 xycsc = xy->parent; 1957 s = splbio(); 1958 rqno = xyc_cmd(xycsc, xio->cmd, xio->subfn, xy->xy_drive, xio->block, 1959 xio->sectcnt, dvmabuf, XY_SUB_WAIT); 1960 if (rqno == XY_ERR_FAIL) { 1961 err = EIO; 1962 goto done; 1963 } 1964 xio->errno = xycsc->ciorq->errno; 1965 xio->tries = xycsc->ciorq->tries; 1966 XYC_DONE(xycsc, err); 1967 1968 if (xio->cmd == XYCMD_RD) 1969 err = copyout(dvmabuf, xio->dptr, xio->dlen); 1970 1971 done: 1972 splx(s); 1973 if (dvmabuf) 1974 dvma_free(dvmabuf, xio->dlen); 1975 return (err); 1976 } 1977 1978 /* 1979 * xyc_e2str: convert error code number into an error string 1980 */ 1981 const char * 1982 xyc_e2str(int no) 1983 { 1984 switch (no) { 1985 case XY_ERR_FAIL: 1986 return ("Software fatal error"); 1987 case XY_ERR_DERR: 1988 return ("DOUBLE ERROR"); 1989 case XY_ERR_AOK: 1990 return ("Successful completion"); 1991 case XY_ERR_IPEN: 1992 return("Interrupt pending"); 1993 case XY_ERR_BCFL: 1994 return("Busy conflict"); 1995 case XY_ERR_TIMO: 1996 return("Operation timeout"); 1997 case XY_ERR_NHDR: 1998 return("Header not found"); 1999 case XY_ERR_HARD: 2000 return("Hard ECC error"); 2001 case XY_ERR_ICYL: 2002 return("Illegal cylinder address"); 2003 case XY_ERR_ISEC: 2004 return("Illegal sector address"); 2005 case XY_ERR_SMAL: 2006 return("Last sector too small"); 2007 case XY_ERR_SACK: 2008 return("Slave ACK error (non-existent memory)"); 2009 case XY_ERR_CHER: 2010 return("Cylinder and head/header error"); 2011 case XY_ERR_SRTR: 2012 return("Auto-seek retry successful"); 2013 case XY_ERR_WPRO: 2014 return("Write-protect error"); 2015 case XY_ERR_UIMP: 2016 return("Unimplemented command"); 2017 case XY_ERR_DNRY: 2018 return("Drive not ready"); 2019 case XY_ERR_SZER: 2020 return("Sector count zero"); 2021 case XY_ERR_DFLT: 2022 return("Drive faulted"); 2023 case XY_ERR_ISSZ: 2024 return("Illegal sector size"); 2025 case XY_ERR_SLTA: 2026 return("Self test A"); 2027 case XY_ERR_SLTB: 2028 return("Self test B"); 2029 case XY_ERR_SLTC: 2030 return("Self test C"); 2031 case XY_ERR_SOFT: 2032 return("Soft ECC error"); 2033 case XY_ERR_SFOK: 2034 return("Soft ECC error recovered"); 2035 case XY_ERR_IHED: 2036 return("Illegal head"); 2037 case XY_ERR_DSEQ: 2038 return("Disk sequencer error"); 2039 case XY_ERR_SEEK: 2040 return("Seek error"); 2041 default: 2042 return ("Unknown error"); 2043 } 2044 } 2045 2046 int 2047 xyc_entoact(int errno) 2048 { 2049 switch (errno) { 2050 case XY_ERR_FAIL: case XY_ERR_DERR: case XY_ERR_IPEN: 2051 case XY_ERR_BCFL: case XY_ERR_ICYL: case XY_ERR_ISEC: 2052 case XY_ERR_UIMP: case XY_ERR_SZER: case XY_ERR_ISSZ: 2053 case XY_ERR_SLTA: case XY_ERR_SLTB: case XY_ERR_SLTC: 2054 case XY_ERR_IHED: case XY_ERR_SACK: case XY_ERR_SMAL: 2055 2056 return(XY_ERA_PROG); /* program error ! */ 2057 2058 case XY_ERR_TIMO: case XY_ERR_NHDR: case XY_ERR_HARD: 2059 case XY_ERR_DNRY: case XY_ERR_CHER: case XY_ERR_SEEK: 2060 case XY_ERR_SOFT: 2061 2062 return(XY_ERA_HARD); /* hard error, retry */ 2063 2064 case XY_ERR_DFLT: case XY_ERR_DSEQ: 2065 2066 return(XY_ERA_RSET); /* hard error reset */ 2067 2068 case XY_ERR_SRTR: case XY_ERR_SFOK: case XY_ERR_AOK: 2069 2070 return(XY_ERA_SOFT); /* an FYI error */ 2071 2072 case XY_ERR_WPRO: 2073 2074 return(XY_ERA_WPRO); /* write protect */ 2075 } 2076 2077 return(XY_ERA_PROG); /* ??? */ 2078 } 2079
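
/*
 * quick reference for the error classes returned above (summarizing
 * xyc_entoact() and the retry policy in xyc_error()):
 *	XY_ERA_PROG - programming error; the request is failed
 *	XY_ERA_HARD - hard error; retried up to XYC_MAXTRIES times
 *	XY_ERA_RSET - hard error; the drive is reset, then the request
 *		      is retried up to XYC_MAXTRIES times
 *	XY_ERA_SOFT - informational only (e.g. auto-seek retry successful,
 *		      soft ECC error recovered)
 *	XY_ERA_WPRO - write protected; the request is failed
 */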