1 /* $NetBSD: xy.c,v 1.74 2014/03/16 05:20:26 dholland Exp $ */ 2 3 /* 4 * Copyright (c) 1995 Charles D. Cranor 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * 30 * x y . c x y l o g i c s 4 5 0 / 4 5 1 s m d d r i v e r 31 * 32 * author: Chuck Cranor <chuck@netbsd> 33 * id: &Id: xy.c,v 1.1 1995/09/25 20:35:14 chuck Exp & 34 * started: 14-Sep-95 35 * references: [1] Xylogics Model 753 User's Manual 36 * part number: 166-753-001, Revision B, May 21, 1988. 37 * "Your Partner For Performance" 38 * [2] other NetBSD disk device drivers 39 * [3] Xylogics Model 450 User's Manual 40 * part number: 166-017-001, Revision B, 1983. 41 * [4] Addendum to Xylogics Model 450 Disk Controller User's 42 * Manual, Jan. 1985. 43 * [5] The 451 Controller, Rev. B3, September 2, 1986. 44 * [6] David Jones <dej@achilles.net>'s unfinished 450/451 driver 45 * 46 */ 47 48 #include <sys/cdefs.h> 49 __KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.74 2014/03/16 05:20:26 dholland Exp $"); 50 51 #undef XYC_DEBUG /* full debug */ 52 #undef XYC_DIAG /* extra sanity checks */ 53 #if defined(DIAGNOSTIC) && !defined(XYC_DIAG) 54 #define XYC_DIAG /* link in with master DIAG option */ 55 #endif 56 57 #include <sys/param.h> 58 #include <sys/proc.h> 59 #include <sys/systm.h> 60 #include <sys/kernel.h> 61 #include <sys/file.h> 62 #include <sys/stat.h> 63 #include <sys/ioctl.h> 64 #include <sys/buf.h> 65 #include <sys/bufq.h> 66 #include <sys/uio.h> 67 #include <sys/malloc.h> 68 #include <sys/device.h> 69 #include <sys/disklabel.h> 70 #include <sys/disk.h> 71 #include <sys/syslog.h> 72 #include <sys/dkbad.h> 73 #include <sys/conf.h> 74 #include <sys/kauth.h> 75 76 #include <uvm/uvm_extern.h> 77 78 #include <dev/sun/disklabel.h> 79 80 #include <machine/autoconf.h> 81 #include <machine/dvma.h> 82 83 #include <sun3/dev/xyreg.h> 84 #include <sun3/dev/xyvar.h> 85 #include <sun3/dev/xio.h> 86 87 #include "ioconf.h" 88 #include "locators.h" 89 90 /* 91 * Print a complaint when no xy children were specified 92 * in the config file. Better than a link error... 93 * 94 * XXX: Some folks say this driver should be split in two, 95 * but that seems pointless with ONLY one type of child. 96 */ 97 #include "xy.h" 98 #if NXY == 0 99 #error "xyc but no xy?" 100 #endif 101 102 /* 103 * macros 104 */ 105 106 /* 107 * XYC_GO: start iopb ADDR (DVMA addr in a u_long) on XYC 108 */ 109 #define XYC_GO(XYC, ADDR) \ 110 do { \ 111 (XYC)->xyc_addr_lo = ((ADDR) & 0xff); \ 112 (ADDR) = ((ADDR) >> 8); \ 113 (XYC)->xyc_addr_hi = ((ADDR) & 0xff); \ 114 (ADDR) = ((ADDR) >> 8); \ 115 (XYC)->xyc_reloc_lo = ((ADDR) & 0xff); \ 116 (ADDR) = ((ADDR) >> 8); \ 117 (XYC)->xyc_reloc_hi = (ADDR); \ 118 (XYC)->xyc_csr = XYC_GBSY; /* go! */ \ 119 } while (/* CONSTCOND */ 0) 120 121 /* 122 * XYC_DONE: don't need IORQ, get error code and free (done after xyc_cmd) 123 */ 124 125 #define XYC_DONE(SC,ER) \ 126 do { \ 127 if ((ER) == XY_ERR_AOK) { \ 128 (ER) = (SC)->ciorq->errno; \ 129 (SC)->ciorq->mode = XY_SUB_FREE; \ 130 wakeup((SC)->ciorq); \ 131 } \ 132 } while (/* CONSTCOND */ 0) 133 134 /* 135 * XYC_ADVANCE: advance iorq's pointers by a number of sectors 136 */ 137 138 #define XYC_ADVANCE(IORQ, N) \ 139 do { \ 140 if (N) { \ 141 (IORQ)->sectcnt -= (N); \ 142 (IORQ)->blockno += (N); \ 143 (IORQ)->dbuf += ((N) * XYFM_BPS); \ 144 } \ 145 } while (/* CONSTCOND */ 0) 146 147 /* 148 * note - addresses you can sleep on: 149 * [1] & of xy_softc's "state" (waiting for a chance to attach a drive) 150 * [2] & an iorq (waiting for an XY_SUB_WAIT iorq to finish) 151 */ 152 153 154 /* 155 * function prototypes 156 * "xyc_*" functions are internal, all others are external interfaces 157 */ 158 159 /* internals */ 160 struct xy_iopb *xyc_chain(struct xyc_softc *, struct xy_iorq *); 161 int xyc_cmd(struct xyc_softc *, int, int, int, int, int, char *, int); 162 const char *xyc_e2str(int); 163 int xyc_entoact(int); 164 int xyc_error(struct xyc_softc *, struct xy_iorq *, struct xy_iopb *, int); 165 int xyc_ioctlcmd(struct xy_softc *, dev_t dev, struct xd_iocmd *); 166 void xyc_perror(struct xy_iorq *, struct xy_iopb *, int); 167 int xyc_piodriver(struct xyc_softc *, struct xy_iorq *); 168 int xyc_remove_iorq(struct xyc_softc *); 169 int xyc_reset(struct xyc_softc *, int, struct xy_iorq *, int, 170 struct xy_softc *); 171 inline void xyc_rqinit(struct xy_iorq *, struct xyc_softc *, struct xy_softc *, 172 int, u_long, int, void *, struct buf *); 173 void xyc_rqtopb(struct xy_iorq *, struct xy_iopb *, int, int); 174 void xyc_start(struct xyc_softc *, struct xy_iorq *); 175 int xyc_startbuf(struct xyc_softc *, struct xy_softc *, struct buf *); 176 int xyc_submit_iorq(struct xyc_softc *, struct xy_iorq *, int); 177 void xyc_tick(void *); 178 int xyc_unbusy(struct xyc *, int); 179 void xyc_xyreset(struct xyc_softc *, struct xy_softc *); 180 181 /* machine interrupt hook */ 182 int xycintr(void *); 183 184 /* autoconf */ 185 static int xycmatch(device_t, cfdata_t, void *); 186 static void xycattach(device_t, device_t, void *); 187 static int xyc_print(void *, const char *); 188 189 static int xymatch(device_t, cfdata_t, void *); 190 static void xyattach(device_t, device_t, void *); 191 static void xy_init(struct xy_softc *); 192 193 static void xydummystrat(struct buf *); 194 int xygetdisklabel(struct xy_softc *, void *); 195 196 /* 197 * cfattach's: device driver interface to autoconfig 198 */ 199 200 CFATTACH_DECL_NEW(xyc, sizeof(struct xyc_softc), 201 xycmatch, xycattach, NULL, NULL); 202 203 CFATTACH_DECL_NEW(xy, sizeof(struct xy_softc), 204 xymatch, xyattach, NULL, NULL); 205 206 struct xyc_attach_args { /* this is the "aux" args to xyattach */ 207 int driveno; /* unit number */ 208 }; 209 210 dev_type_open(xyopen); 211 dev_type_close(xyclose); 212 dev_type_read(xyread); 213 dev_type_write(xywrite); 214 dev_type_ioctl(xyioctl); 215 dev_type_strategy(xystrategy); 216 dev_type_dump(xydump); 217 dev_type_size(xysize); 218 219 const struct bdevsw xy_bdevsw = { 220 .d_open = xyopen, 221 .d_close = xyclose, 222 .d_strategy = xystrategy, 223 .d_ioctl = xyioctl, 224 .d_dump = xydump, 225 .d_psize = xysize, 226 .d_flag = D_DISK 227 }; 228 229 const struct cdevsw xy_cdevsw = { 230 .d_open = xyopen, 231 .d_close = xyclose, 232 .d_read = xyread, 233 .d_write = xywrite, 234 .d_ioctl = xyioctl, 235 .d_stop = nostop, 236 .d_tty = notty, 237 .d_poll = nopoll, 238 .d_mmap = nommap, 239 .d_kqfilter = nokqfilter, 240 .d_flag = D_DISK 241 }; 242 243 /* 244 * dkdriver 245 */ 246 247 struct dkdriver xydkdriver = { xystrategy }; 248 249 /* 250 * start: disk label fix code (XXX) 251 */ 252 253 static void *xy_labeldata; 254 255 static void 256 xydummystrat(struct buf *bp) 257 { 258 259 if (bp->b_bcount != XYFM_BPS) 260 panic("%s: b_bcount", __func__); 261 memcpy(bp->b_data, xy_labeldata, XYFM_BPS); 262 bp->b_oflags |= BO_DONE; 263 bp->b_cflags &= ~BC_BUSY; 264 } 265 266 int 267 xygetdisklabel(struct xy_softc *xy, void *b) 268 { 269 const char *err; 270 struct sun_disklabel *sdl; 271 272 /* We already have the label data in `b'; setup for dummy strategy */ 273 xy_labeldata = b; 274 275 /* Required parameter for readdisklabel() */ 276 xy->sc_dk.dk_label->d_secsize = XYFM_BPS; 277 278 err = readdisklabel(MAKEDISKDEV(0, device_unit(xy->sc_dev), RAW_PART), 279 xydummystrat, xy->sc_dk.dk_label, xy->sc_dk.dk_cpulabel); 280 if (err) { 281 printf("%s: %s\n", device_xname(xy->sc_dev), err); 282 return XY_ERR_FAIL; 283 } 284 285 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */ 286 sdl = (struct sun_disklabel *)xy->sc_dk.dk_cpulabel->cd_block; 287 if (sdl->sl_magic == SUN_DKMAGIC) 288 xy->pcyl = sdl->sl_pcyl; 289 else { 290 printf("%s: WARNING: no `pcyl' in disk label.\n", 291 device_xname(xy->sc_dev)); 292 xy->pcyl = xy->sc_dk.dk_label->d_ncylinders + 293 xy->sc_dk.dk_label->d_acylinders; 294 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n", 295 device_xname(xy->sc_dev), xy->pcyl); 296 } 297 298 xy->ncyl = xy->sc_dk.dk_label->d_ncylinders; 299 xy->acyl = xy->sc_dk.dk_label->d_acylinders; 300 xy->nhead = xy->sc_dk.dk_label->d_ntracks; 301 xy->nsect = xy->sc_dk.dk_label->d_nsectors; 302 xy->sectpercyl = xy->nhead * xy->nsect; 303 xy->sc_dk.dk_label->d_secsize = XYFM_BPS; /* not handled by 304 * sun->bsd */ 305 return XY_ERR_AOK; 306 } 307 308 /* 309 * end: disk label fix code (XXX) 310 */ 311 312 /* 313 * a u t o c o n f i g f u n c t i o n s 314 */ 315 316 /* 317 * xycmatch: determine if xyc is present or not. we do a 318 * soft reset to detect the xyc. 319 */ 320 static int 321 xycmatch(device_t parent, cfdata_t cf, void *aux) 322 { 323 struct confargs *ca = aux; 324 325 /* No default VME address. */ 326 if (ca->ca_paddr == -1) 327 return 0; 328 329 /* Make sure something is there... */ 330 if (bus_peek(ca->ca_bustype, ca->ca_paddr + 5, 1) == -1) 331 return 0; 332 333 /* Default interrupt priority. */ 334 if (ca->ca_intpri == -1) 335 ca->ca_intpri = 2; 336 337 return 1; 338 } 339 340 /* 341 * xycattach: attach controller 342 */ 343 static void 344 xycattach(device_t parent, device_t self, void *aux) 345 { 346 struct xyc_softc *xyc = device_private(self); 347 struct confargs *ca = aux; 348 struct xyc_attach_args xa; 349 int lcv, err, res, pbsz; 350 void *tmp, *tmp2; 351 u_long ultmp; 352 353 /* get addressing and intr level stuff from autoconfig and load it 354 * into our xyc_softc. */ 355 356 xyc->sc_dev = self; 357 xyc->xyc = (struct xyc *)bus_mapin(ca->ca_bustype, ca->ca_paddr, 358 sizeof(struct xyc)); 359 xyc->bustype = ca->ca_bustype; 360 xyc->ipl = ca->ca_intpri; 361 xyc->vector = ca->ca_intvec; 362 xyc->no_ols = 0; /* XXX should be from config */ 363 364 for (lcv = 0; lcv < XYC_MAXDEV; lcv++) 365 xyc->sc_drives[lcv] = NULL; 366 367 /* 368 * allocate and zero buffers 369 * check boundaries of the KVA's ... all IOPBs must reside in 370 * the same 64K region. 371 */ 372 373 pbsz = XYC_MAXIOPB * sizeof(struct xy_iopb); 374 tmp = tmp2 = (struct xy_iopb *)dvma_malloc(pbsz); /* KVA */ 375 ultmp = (u_long)tmp; 376 if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) { 377 tmp = (struct xy_iopb *)dvma_malloc(pbsz); /* retry! */ 378 dvma_free(tmp2, pbsz); 379 ultmp = (u_long) tmp; 380 if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) { 381 aprint_error(": can't alloc IOPB mem in 64K\n"); 382 return; 383 } 384 } 385 memset(tmp, 0, pbsz); 386 xyc->iopbase = tmp; 387 xyc->dvmaiopb = 388 (struct xy_iopb *)dvma_kvtopa(xyc->iopbase, xyc->bustype); 389 xyc->reqs = malloc(XYC_MAXIOPB * sizeof(struct xy_iorq), 390 M_DEVBUF, M_NOWAIT | M_ZERO); 391 if (xyc->reqs == NULL) 392 panic("xyc malloc"); 393 394 /* 395 * init iorq to iopb pointers, and non-zero fields in the 396 * iopb which never change. 397 */ 398 399 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 400 xyc->xy_chain[lcv] = NULL; 401 xyc->reqs[lcv].iopb = &xyc->iopbase[lcv]; 402 xyc->iopbase[lcv].asr = 1; /* always the same */ 403 xyc->iopbase[lcv].eef = 1; /* always the same */ 404 xyc->iopbase[lcv].ecm = XY_ECM; /* always the same */ 405 xyc->iopbase[lcv].aud = 1; /* always the same */ 406 xyc->iopbase[lcv].relo = 1; /* always the same */ 407 xyc->iopbase[lcv].thro = XY_THRO;/* always the same */ 408 } 409 xyc->ciorq = &xyc->reqs[XYC_CTLIOPB]; /* short hand name */ 410 xyc->ciopb = &xyc->iopbase[XYC_CTLIOPB]; /* short hand name */ 411 xyc->xy_hand = 0; 412 413 /* read controller parameters and insure we have a 450/451 */ 414 415 err = xyc_cmd(xyc, XYCMD_ST, 0, 0, 0, 0, 0, XY_SUB_POLL); 416 res = xyc->ciopb->ctyp; 417 XYC_DONE(xyc, err); 418 if (res != XYCT_450) { 419 if (err) 420 aprint_error(": %s: ", xyc_e2str(err)); 421 aprint_error(": doesn't identify as a 450/451\n"); 422 return; 423 } 424 aprint_normal(": Xylogics 450/451"); 425 if (xyc->no_ols) 426 /* 450 doesn't overlap seek right */ 427 aprint_normal(" [OLS disabled]"); 428 aprint_normal("\n"); 429 if (err) { 430 aprint_error_dev(self, "error: %s\n", xyc_e2str(err)); 431 return; 432 } 433 if ((xyc->xyc->xyc_csr & XYC_ADRM) == 0) { 434 aprint_error_dev(self, "24 bit addressing turned off\n"); 435 printf("please set hardware jumpers JM1-JM2=in, JM3-JM4=out\n"); 436 printf("to enable 24 bit mode and this driver\n"); 437 return; 438 } 439 440 /* link in interrupt with higher level software */ 441 isr_add_vectored(xycintr, xyc, ca->ca_intpri, ca->ca_intvec); 442 evcnt_attach_dynamic(&xyc->sc_intrcnt, EVCNT_TYPE_INTR, NULL, 443 device_xname(self), "intr"); 444 445 callout_init(&xyc->sc_tick_ch, 0); 446 447 /* now we must look for disks using autoconfig */ 448 for (xa.driveno = 0; xa.driveno < XYC_MAXDEV; xa.driveno++) 449 (void)config_found(self, (void *)&xa, xyc_print); 450 451 /* start the watchdog clock */ 452 callout_reset(&xyc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xyc); 453 } 454 455 static int 456 xyc_print(void *aux, const char *name) 457 { 458 struct xyc_attach_args *xa = aux; 459 460 if (name != NULL) 461 aprint_normal("%s: ", name); 462 463 if (xa->driveno != -1) 464 aprint_normal(" drive %d", xa->driveno); 465 466 return UNCONF; 467 } 468 469 /* 470 * xymatch: probe for disk. 471 * 472 * note: we almost always say disk is present. this allows us to 473 * spin up and configure a disk after the system is booted (we can 474 * call xyattach!). Also, wire down the relationship between the 475 * xy* and xyc* devices, to simplify boot device identification. 476 */ 477 static int 478 xymatch(device_t parent, cfdata_t cf, void *aux) 479 { 480 struct xyc_attach_args *xa = aux; 481 int xy_unit; 482 483 /* Match only on the "wired-down" controller+disk. */ 484 xy_unit = device_unit(parent) * 2 + xa->driveno; 485 if (cf->cf_unit != xy_unit) 486 return 0; 487 488 return 1; 489 } 490 491 /* 492 * xyattach: attach a disk. 493 */ 494 static void 495 xyattach(device_t parent, device_t self, void *aux) 496 { 497 struct xy_softc *xy = device_private(self); 498 struct xyc_softc *xyc = device_private(parent); 499 struct xyc_attach_args *xa = aux; 500 501 xy->sc_dev = self; 502 aprint_normal("\n"); 503 504 /* 505 * Always re-initialize the disk structure. We want statistics 506 * to start with a clean slate. 507 */ 508 memset(&xy->sc_dk, 0, sizeof(xy->sc_dk)); 509 disk_init(&xy->sc_dk, device_xname(self), &xydkdriver); 510 511 xy->state = XY_DRIVE_UNKNOWN; /* to start */ 512 xy->flags = 0; 513 xy->parent = xyc; 514 515 /* init queue of waiting bufs */ 516 bufq_alloc(&xy->xyq, "disksort", BUFQ_SORT_RAWBLOCK); 517 xy->xyrq = &xyc->reqs[xa->driveno]; 518 519 xy->xy_drive = xa->driveno; 520 xyc->sc_drives[xa->driveno] = xy; 521 522 /* Do init work common to attach and open. */ 523 xy_init(xy); 524 } 525 526 /* 527 * end of autoconfig functions 528 */ 529 530 /* 531 * Initialize a disk. This can be called from both autoconf and 532 * also from xyopen/xystrategy. 533 */ 534 static void 535 xy_init(struct xy_softc *xy) 536 { 537 struct xyc_softc *xyc; 538 struct dkbad *dkb; 539 void *dvmabuf; 540 int err, spt, mb, blk, lcv, fullmode, newstate; 541 542 xyc = xy->parent; 543 xy->state = XY_DRIVE_ATTACHING; 544 newstate = XY_DRIVE_UNKNOWN; 545 fullmode = (cold) ? XY_SUB_POLL : XY_SUB_WAIT; 546 dvmabuf = dvma_malloc(XYFM_BPS); 547 548 /* first try and reset the drive */ 549 550 err = xyc_cmd(xyc, XYCMD_RST, 0, xy->xy_drive, 0, 0, 0, fullmode); 551 XYC_DONE(xyc, err); 552 if (err == XY_ERR_DNRY) { 553 printf("%s: drive %d: off-line\n", 554 device_xname(xy->sc_dev), xy->xy_drive); 555 goto done; 556 } 557 if (err) { 558 printf("%s: ERROR 0x%02x (%s)\n", 559 device_xname(xy->sc_dev), err, xyc_e2str(err)); 560 goto done; 561 } 562 printf("%s: drive %d ready", 563 device_xname(xy->sc_dev), xy->xy_drive); 564 565 /* 566 * now set drive parameters (to semi-bogus values) so we can read the 567 * disk label. 568 */ 569 xy->pcyl = xy->ncyl = 1; 570 xy->acyl = 0; 571 xy->nhead = 1; 572 xy->nsect = 1; 573 xy->sectpercyl = 1; 574 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 575 xy->dkb.bt_bad[lcv].bt_cyl = 576 xy->dkb.bt_bad[lcv].bt_trksec = 0xffff; 577 578 /* read disk label */ 579 for (xy->drive_type = 0; xy->drive_type <= XYC_MAXDT; 580 xy->drive_type++) { 581 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, 0, 1, 582 dvmabuf, fullmode); 583 XYC_DONE(xyc, err); 584 if (err == XY_ERR_AOK) 585 break; 586 } 587 588 if (err != XY_ERR_AOK) { 589 printf("%s: reading disk label failed: %s\n", 590 device_xname(xy->sc_dev), xyc_e2str(err)); 591 goto done; 592 } 593 printf("%s: drive type %d\n", 594 device_xname(xy->sc_dev), xy->drive_type); 595 596 newstate = XY_DRIVE_NOLABEL; 597 598 xy->hw_spt = spt = 0; /* XXX needed ? */ 599 /* Attach the disk: must be before getdisklabel to malloc label */ 600 disk_attach(&xy->sc_dk); 601 602 if (xygetdisklabel(xy, dvmabuf) != XY_ERR_AOK) 603 goto done; 604 605 /* inform the user of what is up */ 606 printf("%s: <%s>, pcyl %d\n", 607 device_xname(xy->sc_dev), 608 (char *)dvmabuf, xy->pcyl); 609 mb = xy->ncyl * (xy->nhead * xy->nsect) / (1048576 / XYFM_BPS); 610 printf("%s: %dMB, %d cyl, %d head, %d sec\n", 611 device_xname(xy->sc_dev), mb, xy->ncyl, xy->nhead, xy->nsect); 612 613 /* 614 * 450/451 stupidity: the drive type is encoded into the format 615 * of the disk. the drive type in the IOPB must match the drive 616 * type in the format, or you will not be able to do I/O to the 617 * disk (you get header not found errors). if you have two drives 618 * of different sizes that have the same drive type in their 619 * formatting then you are out of luck. 620 * 621 * this problem was corrected in the 753/7053. 622 */ 623 624 for (lcv = 0 ; lcv < XYC_MAXDEV ; lcv++) { 625 struct xy_softc *oxy; 626 627 oxy = xyc->sc_drives[lcv]; 628 if (oxy == NULL || oxy == xy) 629 continue; 630 if (oxy->drive_type != xy->drive_type) 631 continue; 632 if (xy->nsect != oxy->nsect || xy->pcyl != oxy->pcyl || 633 xy->nhead != oxy->nhead) { 634 printf("%s: %s and %s must be the same size!\n", 635 device_xname(xyc->sc_dev), 636 device_xname(xy->sc_dev), 637 device_xname(oxy->sc_dev)); 638 panic("xy drive size mismatch"); 639 } 640 } 641 642 643 /* now set the real drive parameters! */ 644 blk = (xy->nsect - 1) + 645 ((xy->nhead - 1) * xy->nsect) + 646 ((xy->pcyl - 1) * xy->nsect * xy->nhead); 647 err = xyc_cmd(xyc, XYCMD_SDS, 0, xy->xy_drive, blk, 0, 0, fullmode); 648 XYC_DONE(xyc, err); 649 if (err) { 650 printf("%s: write drive size failed: %s\n", 651 device_xname(xy->sc_dev), xyc_e2str(err)); 652 goto done; 653 } 654 newstate = XY_DRIVE_ONLINE; 655 656 /* 657 * read bad144 table. this table resides on the first sector of the 658 * last track of the disk (i.e. second cyl of "acyl" area). 659 */ 660 blk = (xy->ncyl + xy->acyl - 1) * (xy->nhead * xy->nsect) + 661 /* last cyl */ 662 (xy->nhead - 1) * xy->nsect; /* last head */ 663 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, blk, 1, 664 dvmabuf, fullmode); 665 XYC_DONE(xyc, err); 666 if (err) { 667 printf("%s: reading bad144 failed: %s\n", 668 device_xname(xy->sc_dev), xyc_e2str(err)); 669 goto done; 670 } 671 672 /* check dkbad for sanity */ 673 dkb = (struct dkbad *)dvmabuf; 674 for (lcv = 0; lcv < 126; lcv++) { 675 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 676 dkb->bt_bad[lcv].bt_cyl == 0) && 677 dkb->bt_bad[lcv].bt_trksec == 0xffff) 678 continue; /* blank */ 679 if (dkb->bt_bad[lcv].bt_cyl >= xy->ncyl) 680 break; 681 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xy->nhead) 682 break; 683 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xy->nsect) 684 break; 685 } 686 if (lcv != 126) { 687 printf("%s: warning: invalid bad144 sector!\n", 688 device_xname(xy->sc_dev)); 689 } else { 690 memcpy(&xy->dkb, dvmabuf, XYFM_BPS); 691 } 692 693 done: 694 xy->state = newstate; 695 dvma_free(dvmabuf, XYFM_BPS); 696 } 697 698 /* 699 * { b , c } d e v s w f u n c t i o n s 700 */ 701 702 /* 703 * xyclose: close device 704 */ 705 int 706 xyclose(dev_t dev, int flag, int fmt, struct lwp *l) 707 { 708 struct xy_softc *xy = device_lookup_private(&xy_cd, DISKUNIT(dev)); 709 int part = DISKPART(dev); 710 711 /* clear mask bits */ 712 713 switch (fmt) { 714 case S_IFCHR: 715 xy->sc_dk.dk_copenmask &= ~(1 << part); 716 break; 717 case S_IFBLK: 718 xy->sc_dk.dk_bopenmask &= ~(1 << part); 719 break; 720 } 721 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 722 723 return 0; 724 } 725 726 /* 727 * xydump: crash dump system 728 */ 729 int 730 xydump(dev_t dev, daddr_t blkno, void *va, size_t sz) 731 { 732 int unit, part; 733 struct xy_softc *xy; 734 735 unit = DISKUNIT(dev); 736 part = DISKPART(dev); 737 738 xy = device_lookup_private(&xy_cd, unit); 739 if (xy == NULL) 740 return ENXIO; 741 742 printf("%s%c: crash dump not supported (yet)\n", 743 device_xname(xy->sc_dev), 'a' + part); 744 745 return ENXIO; 746 747 /* outline: globals: "dumplo" == sector number of partition to start 748 * dump at (convert to physical sector with partition table) 749 * "dumpsize" == size of dump in clicks "physmem" == size of physical 750 * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 751 * physmem) 752 * 753 * dump a copy of physical memory to the dump device starting at sector 754 * "dumplo" in the swap partition (make sure > 0). map in pages as 755 * we go. use polled I/O. 756 * 757 * XXX how to handle NON_CONTIG? 758 */ 759 } 760 761 static enum kauth_device_req 762 xy_getkauthreq(u_char cmd) 763 { 764 enum kauth_device_req req; 765 766 switch (cmd) { 767 case XYCMD_WR: 768 case XYCMD_WTH: 769 case XYCMD_WFM: 770 case XYCMD_WRH: 771 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITE; 772 break; 773 774 case XYCMD_RD: 775 case XYCMD_RTH: 776 case XYCMD_RDH: 777 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READ; 778 break; 779 780 case XYCMD_RDS: 781 case XYCMD_MBD: 782 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READCONF; 783 break; 784 785 case XYCMD_RST: 786 case XYCMD_SDS: 787 case XYCMD_MBL: 788 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITECONF; 789 break; 790 791 case XYCMD_NOP: 792 case XYCMD_SK: 793 case XYCMD_ST: 794 case XYCMD_R: 795 default: 796 req = 0; 797 break; 798 } 799 800 return req; 801 } 802 803 /* 804 * xyioctl: ioctls on XY drives. based on ioctl's of other netbsd disks. 805 */ 806 int 807 xyioctl(dev_t dev, u_long command, void *addr, int flag, struct lwp *l) 808 { 809 struct xy_softc *xy; 810 struct xd_iocmd *xio; 811 int error, s, unit; 812 813 unit = DISKUNIT(dev); 814 815 xy = device_lookup_private(&xy_cd, unit); 816 if (xy == NULL) 817 return ENXIO; 818 819 /* switch on ioctl type */ 820 821 switch (command) { 822 case DIOCSBAD: /* set bad144 info */ 823 if ((flag & FWRITE) == 0) 824 return EBADF; 825 s = splbio(); 826 memcpy(&xy->dkb, addr, sizeof(xy->dkb)); 827 splx(s); 828 return 0; 829 830 case DIOCGDINFO: /* get disk label */ 831 memcpy(addr, xy->sc_dk.dk_label, sizeof(struct disklabel)); 832 return 0; 833 834 case DIOCGPART: /* get partition info */ 835 ((struct partinfo *)addr)->disklab = xy->sc_dk.dk_label; 836 ((struct partinfo *)addr)->part = 837 &xy->sc_dk.dk_label->d_partitions[DISKPART(dev)]; 838 return 0; 839 840 case DIOCSDINFO: /* set disk label */ 841 if ((flag & FWRITE) == 0) 842 return EBADF; 843 error = setdisklabel(xy->sc_dk.dk_label, 844 (struct disklabel *)addr, /* xy->sc_dk.dk_openmask : */ 0, 845 xy->sc_dk.dk_cpulabel); 846 if (error == 0) { 847 if (xy->state == XY_DRIVE_NOLABEL) 848 xy->state = XY_DRIVE_ONLINE; 849 } 850 return error; 851 852 case DIOCWLABEL: /* change write status of disk label */ 853 if ((flag & FWRITE) == 0) 854 return EBADF; 855 if (*(int *)addr) 856 xy->flags |= XY_WLABEL; 857 else 858 xy->flags &= ~XY_WLABEL; 859 return 0; 860 861 case DIOCWDINFO: /* write disk label */ 862 if ((flag & FWRITE) == 0) 863 return EBADF; 864 error = setdisklabel(xy->sc_dk.dk_label, 865 (struct disklabel *)addr, /* xy->sc_dk.dk_openmask : */ 0, 866 xy->sc_dk.dk_cpulabel); 867 if (error == 0) { 868 if (xy->state == XY_DRIVE_NOLABEL) 869 xy->state = XY_DRIVE_ONLINE; 870 871 /* Simulate opening partition 0 so write succeeds. */ 872 xy->sc_dk.dk_openmask |= (1 << 0); 873 error = writedisklabel(MAKEDISKDEV(major(dev), 874 DISKUNIT(dev), RAW_PART), 875 xystrategy, xy->sc_dk.dk_label, 876 xy->sc_dk.dk_cpulabel); 877 xy->sc_dk.dk_openmask = 878 xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 879 } 880 return error; 881 882 case DIOSXDCMD: { 883 enum kauth_device_req req; 884 885 xio = (struct xd_iocmd *)addr; 886 req = xy_getkauthreq(xio->cmd); 887 if ((error = kauth_authorize_device_passthru(l->l_cred, 888 dev, req, xio)) != 0) 889 return error; 890 return xyc_ioctlcmd(xy, dev, xio); 891 } 892 893 default: 894 return ENOTTY; 895 } 896 } 897 898 /* 899 * xyopen: open drive 900 */ 901 int 902 xyopen(dev_t dev, int flag, int fmt, struct lwp *l) 903 { 904 int err, unit, part, s; 905 struct xy_softc *xy; 906 907 /* first, could it be a valid target? */ 908 unit = DISKUNIT(dev); 909 xy = device_lookup_private(&xy_cd, unit); 910 if (xy == NULL) 911 return ENXIO; 912 part = DISKPART(dev); 913 err = 0; 914 915 /* 916 * If some other processing is doing init, sleep. 917 */ 918 s = splbio(); 919 while (xy->state == XY_DRIVE_ATTACHING) { 920 if (tsleep(&xy->state, PRIBIO, "xyopen", 0)) { 921 err = EINTR; 922 goto done; 923 } 924 } 925 /* Do we need to init the drive? */ 926 if (xy->state == XY_DRIVE_UNKNOWN) { 927 xy_init(xy); 928 wakeup(&xy->state); 929 } 930 /* Was the init successful? */ 931 if (xy->state == XY_DRIVE_UNKNOWN) { 932 err = EIO; 933 goto done; 934 } 935 936 /* check for partition */ 937 if (part != RAW_PART && 938 (part >= xy->sc_dk.dk_label->d_npartitions || 939 xy->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) { 940 err = ENXIO; 941 goto done; 942 } 943 944 /* set open masks */ 945 switch (fmt) { 946 case S_IFCHR: 947 xy->sc_dk.dk_copenmask |= (1 << part); 948 break; 949 case S_IFBLK: 950 xy->sc_dk.dk_bopenmask |= (1 << part); 951 break; 952 } 953 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 954 955 done: 956 splx(s); 957 return err; 958 } 959 960 int 961 xyread(dev_t dev, struct uio *uio, int flags) 962 { 963 964 return physio(xystrategy, NULL, dev, B_READ, minphys, uio); 965 } 966 967 int 968 xywrite(dev_t dev, struct uio *uio, int flags) 969 { 970 971 return physio(xystrategy, NULL, dev, B_WRITE, minphys, uio); 972 } 973 974 975 /* 976 * xysize: return size of a partition for a dump 977 */ 978 979 int 980 xysize(dev_t dev) 981 { 982 struct xy_softc *xysc; 983 int unit, part, size, omask; 984 985 /* valid unit? */ 986 unit = DISKUNIT(dev); 987 xysc = device_lookup_private(&xy_cd, unit); 988 if (xysc == NULL) 989 return -1; 990 991 part = DISKPART(dev); 992 omask = xysc->sc_dk.dk_openmask & (1 << part); 993 994 if (omask == 0 && xyopen(dev, 0, S_IFBLK, NULL) != 0) 995 return -1; 996 997 /* do it */ 998 if (xysc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP) 999 size = -1; /* only give valid size for swap partitions */ 1000 else 1001 size = xysc->sc_dk.dk_label->d_partitions[part].p_size * 1002 (xysc->sc_dk.dk_label->d_secsize / DEV_BSIZE); 1003 if (omask == 0 && xyclose(dev, 0, S_IFBLK, NULL) != 0) 1004 return -1; 1005 return size; 1006 } 1007 1008 /* 1009 * xystrategy: buffering system interface to xy. 1010 */ 1011 void 1012 xystrategy(struct buf *bp) 1013 { 1014 struct xy_softc *xy; 1015 int s, unit; 1016 struct disklabel *lp; 1017 daddr_t blkno; 1018 1019 unit = DISKUNIT(bp->b_dev); 1020 1021 /* check for live device */ 1022 1023 xy = device_lookup_private(&xy_cd, unit); 1024 if (xy == NULL || 1025 bp->b_blkno < 0 || 1026 (bp->b_bcount % xy->sc_dk.dk_label->d_secsize) != 0) { 1027 bp->b_error = EINVAL; 1028 goto done; 1029 } 1030 1031 /* There should always be an open first. */ 1032 if (xy->state == XY_DRIVE_UNKNOWN) { 1033 bp->b_error = EIO; 1034 goto done; 1035 } 1036 if (xy->state != XY_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) { 1037 /* no I/O to unlabeled disks, unless raw partition */ 1038 bp->b_error = EIO; 1039 goto done; 1040 } 1041 /* short circuit zero length request */ 1042 1043 if (bp->b_bcount == 0) 1044 goto done; 1045 1046 /* check bounds with label (disksubr.c). Determine the size of the 1047 * transfer, and make sure it is within the boundaries of the 1048 * partition. Adjust transfer if needed, and signal errors or early 1049 * completion. */ 1050 1051 lp = xy->sc_dk.dk_label; 1052 1053 if (bounds_check_with_label(&xy->sc_dk, bp, 1054 (xy->flags & XY_WLABEL) != 0) <= 0) 1055 goto done; 1056 1057 /* 1058 * Now convert the block number to absolute and put it in 1059 * terms of the device's logical block size. 1060 */ 1061 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE); 1062 if (DISKPART(bp->b_dev) != RAW_PART) 1063 blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset; 1064 1065 bp->b_rawblkno = blkno; 1066 1067 /* 1068 * now we know we have a valid buf structure that we need to do I/O 1069 * on. 1070 */ 1071 1072 s = splbio(); /* protect the queues */ 1073 1074 bufq_put(xy->xyq, bp); /* XXX disksort_cylinder */ 1075 1076 /* start 'em up */ 1077 1078 xyc_start(xy->parent, NULL); 1079 1080 /* done! */ 1081 1082 splx(s); 1083 return; 1084 1085 done: 1086 /* tells upper layers we are done with this buf */ 1087 bp->b_resid = bp->b_bcount; 1088 biodone(bp); 1089 } 1090 /* 1091 * end of {b,c}devsw functions 1092 */ 1093 1094 /* 1095 * i n t e r r u p t f u n c t i o n 1096 * 1097 * xycintr: hardware interrupt. 1098 */ 1099 int 1100 xycintr(void *v) 1101 { 1102 struct xyc_softc *xycsc = v; 1103 1104 /* kick the event counter */ 1105 xycsc->sc_intrcnt.ev_count++; 1106 1107 /* remove as many done IOPBs as possible */ 1108 xyc_remove_iorq(xycsc); 1109 1110 /* start any iorq's already waiting */ 1111 xyc_start(xycsc, NULL); 1112 1113 return 1; 1114 } 1115 /* 1116 * end of interrupt function 1117 */ 1118 1119 /* 1120 * i n t e r n a l f u n c t i o n s 1121 */ 1122 1123 /* 1124 * xyc_rqinit: fill out the fields of an I/O request 1125 */ 1126 1127 inline void 1128 xyc_rqinit(struct xy_iorq *rq, struct xyc_softc *xyc, struct xy_softc *xy, 1129 int md, u_long blk, int cnt, void *db, struct buf *bp) 1130 { 1131 1132 rq->xyc = xyc; 1133 rq->xy = xy; 1134 rq->ttl = XYC_MAXTTL + 10; 1135 rq->mode = md; 1136 rq->tries = rq->errno = rq->lasterror = 0; 1137 rq->blockno = blk; 1138 rq->sectcnt = cnt; 1139 rq->dbuf = rq->dbufbase = db; 1140 rq->buf = bp; 1141 } 1142 1143 /* 1144 * xyc_rqtopb: load up an IOPB based on an iorq 1145 */ 1146 1147 void 1148 xyc_rqtopb(struct xy_iorq *iorq, struct xy_iopb *iopb, int cmd, int subfun) 1149 { 1150 u_long block, dp; 1151 1152 /* normal IOPB case, standard stuff */ 1153 1154 /* chain bit handled later */ 1155 iopb->ien = (XY_STATE(iorq->mode) == XY_SUB_POLL) ? 0 : 1; 1156 iopb->com = cmd; 1157 iopb->errno = 0; 1158 iopb->errs = 0; 1159 iopb->done = 0; 1160 if (iorq->xy) { 1161 iopb->unit = iorq->xy->xy_drive; 1162 iopb->dt = iorq->xy->drive_type; 1163 } else { 1164 iopb->unit = 0; 1165 iopb->dt = 0; 1166 } 1167 block = iorq->blockno; 1168 if (iorq->xy == NULL || block == 0) { 1169 iopb->sect = iopb->head = iopb->cyl = 0; 1170 } else { 1171 iopb->sect = block % iorq->xy->nsect; 1172 block = block / iorq->xy->nsect; 1173 iopb->head = block % iorq->xy->nhead; 1174 block = block / iorq->xy->nhead; 1175 iopb->cyl = block; 1176 } 1177 iopb->scnt = iorq->sectcnt; 1178 if (iorq->dbuf == NULL) { 1179 iopb->dataa = 0; 1180 iopb->datar = 0; 1181 } else { 1182 dp = dvma_kvtopa(iorq->dbuf, iorq->xyc->bustype); 1183 iopb->dataa = (dp & 0xffff); 1184 iopb->datar = ((dp & 0xff0000) >> 16); 1185 } 1186 iopb->subfn = subfun; 1187 } 1188 1189 1190 /* 1191 * xyc_unbusy: wait for the xyc to go unbusy, or timeout. 1192 */ 1193 1194 int 1195 xyc_unbusy(struct xyc *xyc, int del) 1196 { 1197 1198 while (del-- > 0) { 1199 if ((xyc->xyc_csr & XYC_GBSY) == 0) 1200 break; 1201 DELAY(1); 1202 } 1203 return del == 0 ? XY_ERR_FAIL : XY_ERR_AOK; 1204 } 1205 1206 /* 1207 * xyc_cmd: front end for POLL'd and WAIT'd commands. Returns 0 or error. 1208 * note that NORM requests are handled separately. 1209 */ 1210 int 1211 xyc_cmd(struct xyc_softc *xycsc, int cmd, int subfn, int unit, int block, 1212 int scnt, char *dptr, int fullmode) 1213 { 1214 struct xy_iorq *iorq = xycsc->ciorq; 1215 struct xy_iopb *iopb = xycsc->ciopb; 1216 int submode = XY_STATE(fullmode); 1217 1218 /* 1219 * is someone else using the control iopq wait for it if we can 1220 */ 1221 start: 1222 if (submode == XY_SUB_WAIT && XY_STATE(iorq->mode) != XY_SUB_FREE) { 1223 if (tsleep(iorq, PRIBIO, "xyc_cmd", 0)) 1224 return XY_ERR_FAIL; 1225 goto start; 1226 } 1227 1228 if (XY_STATE(iorq->mode) != XY_SUB_FREE) { 1229 DELAY(1000000); /* XY_SUB_POLL: steal the iorq */ 1230 iorq->mode = XY_SUB_FREE; 1231 printf("%s: stole control iopb\n", device_xname(xycsc->sc_dev)); 1232 } 1233 1234 /* init iorq/iopb */ 1235 1236 xyc_rqinit(iorq, xycsc, 1237 (unit == XYC_NOUNIT) ? NULL : xycsc->sc_drives[unit], 1238 fullmode, block, scnt, dptr, NULL); 1239 1240 /* load IOPB from iorq */ 1241 1242 xyc_rqtopb(iorq, iopb, cmd, subfn); 1243 1244 /* submit it for processing */ 1245 1246 xyc_submit_iorq(xycsc, iorq, fullmode); /* error code will be in iorq */ 1247 1248 return XY_ERR_AOK; 1249 } 1250 1251 /* 1252 * xyc_startbuf 1253 * start a buffer for running 1254 */ 1255 1256 int 1257 xyc_startbuf(struct xyc_softc *xycsc, struct xy_softc *xysc, struct buf *bp) 1258 { 1259 struct xy_iorq *iorq; 1260 struct xy_iopb *iopb; 1261 u_long block; 1262 void *dbuf; 1263 1264 iorq = xysc->xyrq; 1265 iopb = iorq->iopb; 1266 1267 /* get buf */ 1268 1269 if (bp == NULL) 1270 panic("%s null buf", __func__); 1271 1272 #ifdef XYC_DEBUG 1273 int partno = DISKPART(bp->b_dev); 1274 printf("%s: %s%c: %s block %d\n", __func__, device_xname(xysc->sc_dev), 1275 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", 1276 (int)bp->b_blkno); 1277 printf("xyc_startbuf: b_bcount %d, b_data 0x%x\n", 1278 bp->b_bcount, bp->b_data); 1279 #endif 1280 1281 /* 1282 * load request. 1283 * 1284 * also, note that there are two kinds of buf structures, those with 1285 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is 1286 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users' 1287 * buffer which has already been mapped into DVMA space. (Not on sun3) 1288 * However, if B_PHYS is not set, then the buffer is a normal system 1289 * buffer which does *not* live in DVMA space. In that case we call 1290 * dvma_mapin to map it into DVMA space so we can do the DMA to it. 1291 * 1292 * in cases where we do a dvma_mapin, note that iorq points to the 1293 * buffer as mapped into DVMA space, where as the bp->b_data points 1294 * to its non-DVMA mapping. 1295 * 1296 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped 1297 * into dvma space, only that it was remapped into the kernel. 1298 * We ALWAYS have to remap the kernel buf into DVMA space. 1299 * (It is done inexpensively, using whole segments!) 1300 */ 1301 1302 block = bp->b_rawblkno; 1303 1304 dbuf = dvma_mapin(bp->b_data, bp->b_bcount, 0); 1305 if (dbuf == NULL) { /* out of DVMA space */ 1306 printf("%s: warning: out of DVMA space\n", 1307 device_xname(xycsc->sc_dev)); 1308 return XY_ERR_FAIL; /* XXX: need some sort of 1309 * call-back scheme here? */ 1310 } 1311 1312 /* init iorq and load iopb from it */ 1313 1314 xyc_rqinit(iorq, xycsc, xysc, XY_SUB_NORM | XY_MODE_VERBO, block, 1315 bp->b_bcount / XYFM_BPS, dbuf, bp); 1316 1317 xyc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XYCMD_RD : XYCMD_WR, 0); 1318 1319 /* Instrumentation. */ 1320 disk_busy(&xysc->sc_dk); 1321 1322 return XY_ERR_AOK; 1323 } 1324 1325 1326 /* 1327 * xyc_submit_iorq: submit an iorq for processing. returns XY_ERR_AOK 1328 * if ok. if it fail returns an error code. type is XY_SUB_*. 1329 * 1330 * note: caller frees iorq in all cases except NORM 1331 * 1332 * return value: 1333 * NORM: XY_AOK (req pending), XY_FAIL (couldn't submit request) 1334 * WAIT: XY_AOK (success), <error-code> (failed) 1335 * POLL: <same as WAIT> 1336 * NOQ : <same as NORM> 1337 * 1338 * there are three sources for i/o requests: 1339 * [1] xystrategy: normal block I/O, using "struct buf" system. 1340 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts. 1341 * [3] open/ioctl: these are I/O requests done in the context of a process, 1342 * and the process should block until they are done. 1343 * 1344 * software state is stored in the iorq structure. each iorq has an 1345 * iopb structure. the hardware understands the iopb structure. 1346 * every command must go through an iopb. a 450 handles one iopb at a 1347 * time, where as a 451 can take them in chains. [the 450 claims it 1348 * can handle chains, but is appears to be buggy...] iopb are allocated 1349 * in DVMA space at boot up time. each disk gets one iopb, and the 1350 * controller gets one (for POLL and WAIT commands). what happens if 1351 * the iopb is busy? for i/o type [1], the buffers are queued at the 1352 * "buff" layer and * picked up later by the interrupt routine. for case 1353 * [2] we can only be blocked if there is a WAIT type I/O request being 1354 * run. since this can only happen when we are crashing, we wait a sec 1355 * and then steal the IOPB. for case [3] the process can sleep 1356 * on the iorq free list until some iopbs are available. 1357 */ 1358 1359 int 1360 xyc_submit_iorq(struct xyc_softc *xycsc, struct xy_iorq *iorq, int type) 1361 { 1362 struct xy_iopb *iopb; 1363 u_long iopbaddr; 1364 1365 #ifdef XYC_DEBUG 1366 printf("%s(%s, addr=0x%x, type=%d)\n", __func__, 1367 device_xname(xycsc->sc_dev), iorq, type); 1368 #endif 1369 1370 /* first check and see if controller is busy */ 1371 if ((xycsc->xyc->xyc_csr & XYC_GBSY) != 0) { 1372 #ifdef XYC_DEBUG 1373 printf("%s: XYC not ready (BUSY)\n", __func__); 1374 #endif 1375 if (type == XY_SUB_NOQ) 1376 return XY_ERR_FAIL; /* failed */ 1377 switch (type) { 1378 case XY_SUB_NORM: 1379 return XY_ERR_AOK; /* success */ 1380 case XY_SUB_WAIT: 1381 while (iorq->iopb->done == 0) { 1382 (void)tsleep(iorq, PRIBIO, "xyciorq", 0); 1383 } 1384 return (iorq->errno); 1385 case XY_SUB_POLL: /* steal controller */ 1386 iopbaddr = xycsc->xyc->xyc_rsetup; /* RESET */ 1387 if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == 1388 XY_ERR_FAIL) 1389 panic("%s: stuck xyc", __func__); 1390 printf("%s: stole controller\n", 1391 device_xname(xycsc->sc_dev)); 1392 break; 1393 default: 1394 panic("%s adding", __func__); 1395 } 1396 } 1397 1398 iopb = xyc_chain(xycsc, iorq); /* build chain */ 1399 if (iopb == NULL) { /* nothing doing? */ 1400 if (type == XY_SUB_NORM || type == XY_SUB_NOQ) 1401 return XY_ERR_AOK; 1402 panic("xyc_submit_iorq: xyc_chain failed!"); 1403 } 1404 iopbaddr = dvma_kvtopa(iopb, xycsc->bustype); 1405 1406 XYC_GO(xycsc->xyc, iopbaddr); 1407 1408 /* command now running, wrap it up */ 1409 switch (type) { 1410 case XY_SUB_NORM: 1411 case XY_SUB_NOQ: 1412 return XY_ERR_AOK; /* success */ 1413 case XY_SUB_WAIT: 1414 while (iorq->iopb->done == 0) { 1415 (void)tsleep(iorq, PRIBIO, "xyciorq", 0); 1416 } 1417 return iorq->errno; 1418 case XY_SUB_POLL: 1419 return xyc_piodriver(xycsc, iorq); 1420 default: 1421 panic("%s wrap up", __func__); 1422 } 1423 panic("%s impossible", __func__); 1424 return 0; /* not reached */ 1425 } 1426 1427 1428 /* 1429 * xyc_chain: build a chain. return dvma address of first element in 1430 * the chain. iorq != NULL: means we only want that item on the chain. 1431 */ 1432 1433 struct xy_iopb * 1434 xyc_chain(struct xyc_softc *xycsc, struct xy_iorq *iorq) 1435 { 1436 int togo, chain, hand; 1437 struct xy_iopb *iopb, *prev_iopb; 1438 1439 memset(xycsc->xy_chain, 0, sizeof(xycsc->xy_chain)); 1440 1441 /* 1442 * promote control IOPB to the top 1443 */ 1444 if (iorq == NULL) { 1445 if ((XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_POLL || 1446 XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_WAIT) && 1447 xycsc->iopbase[XYC_CTLIOPB].done == 0) 1448 iorq = &xycsc->reqs[XYC_CTLIOPB]; 1449 } 1450 1451 /* 1452 * special case: if iorq != NULL then we have a POLL or WAIT request. 1453 * we let these take priority and do them first. 1454 */ 1455 if (iorq) { 1456 xycsc->xy_chain[0] = iorq; 1457 iorq->iopb->chen = 0; 1458 return iorq->iopb; 1459 } 1460 1461 /* 1462 * NORM case: do round robin and maybe chain (if allowed and possible) 1463 */ 1464 1465 chain = 0; 1466 hand = xycsc->xy_hand; 1467 xycsc->xy_hand = (xycsc->xy_hand + 1) % XYC_MAXIOPB; 1468 1469 for (togo = XYC_MAXIOPB ; togo > 0 ; 1470 togo--, hand = (hand + 1) % XYC_MAXIOPB) { 1471 1472 if (XY_STATE(xycsc->reqs[hand].mode) != XY_SUB_NORM || 1473 xycsc->iopbase[hand].done) 1474 continue; /* not ready-for-i/o */ 1475 1476 xycsc->xy_chain[chain] = &xycsc->reqs[hand]; 1477 iopb = xycsc->xy_chain[chain]->iopb; 1478 iopb->chen = 0; 1479 if (chain != 0) { /* adding a link to a chain? */ 1480 prev_iopb = xycsc->xy_chain[chain-1]->iopb; 1481 prev_iopb->chen = 1; 1482 prev_iopb->nxtiopb = 0xffff & 1483 dvma_kvtopa(iopb, xycsc->bustype); 1484 } else { /* head of chain */ 1485 iorq = xycsc->xy_chain[chain]; 1486 } 1487 chain++; 1488 if (xycsc->no_ols) 1489 break; /* quit if chaining dis-allowed */ 1490 } 1491 return iorq ? iorq->iopb : NULL; 1492 } 1493 1494 /* 1495 * xyc_piodriver 1496 * 1497 * programmed i/o driver. this function takes over the computer 1498 * and drains off the polled i/o request. it returns the status of the iorq 1499 * the caller is interesting in. 1500 */ 1501 int 1502 xyc_piodriver(struct xyc_softc *xycsc, struct xy_iorq *iorq) 1503 { 1504 int nreset = 0; 1505 int retval = 0; 1506 u_long res; 1507 1508 #ifdef XYC_DEBUG 1509 printf("%s(%s, 0x%x)\n", __func__, device_xname(xycsc->sc_dev), iorq); 1510 #endif 1511 1512 while (iorq->iopb->done == 0) { 1513 1514 res = xyc_unbusy(xycsc->xyc, XYC_MAXTIME); 1515 1516 /* we expect some progress soon */ 1517 if (res == XY_ERR_FAIL && nreset >= 2) { 1518 xyc_reset(xycsc, 0, XY_RSET_ALL, XY_ERR_FAIL, 0); 1519 #ifdef XYC_DEBUG 1520 printf("%s: timeout\n", __func__); 1521 #endif 1522 return XY_ERR_FAIL; 1523 } 1524 if (res == XY_ERR_FAIL) { 1525 if (xyc_reset(xycsc, 0, 1526 (nreset++ == 0) ? XY_RSET_NONE : iorq, 1527 XY_ERR_FAIL, 0) == XY_ERR_FAIL) 1528 return XY_ERR_FAIL; /* flushes all but POLL 1529 * requests, resets */ 1530 continue; 1531 } 1532 1533 xyc_remove_iorq(xycsc); /* may resubmit request */ 1534 1535 if (iorq->iopb->done == 0) 1536 xyc_start(xycsc, iorq); 1537 } 1538 1539 /* get return value */ 1540 1541 retval = iorq->errno; 1542 1543 #ifdef XYC_DEBUG 1544 printf("%s: done, retval = 0x%x (%s)\n", __func__, 1545 iorq->errno, xyc_e2str(iorq->errno)); 1546 #endif 1547 1548 /* start up any bufs that have queued */ 1549 1550 xyc_start(xycsc, NULL); 1551 1552 return retval; 1553 } 1554 1555 /* 1556 * xyc_xyreset: reset one drive. NOTE: assumes xyc was just reset. 1557 * we steal iopb[XYC_CTLIOPB] for this, but we put it back when we are done. 1558 */ 1559 void 1560 xyc_xyreset(struct xyc_softc *xycsc, struct xy_softc *xysc) 1561 { 1562 struct xy_iopb tmpiopb; 1563 u_long addr; 1564 int del; 1565 memcpy(&tmpiopb, xycsc->ciopb, sizeof(tmpiopb)); 1566 xycsc->ciopb->chen = xycsc->ciopb->done = xycsc->ciopb->errs = 0; 1567 xycsc->ciopb->ien = 0; 1568 xycsc->ciopb->com = XYCMD_RST; 1569 xycsc->ciopb->unit = xysc->xy_drive; 1570 addr = dvma_kvtopa(xycsc->ciopb, xycsc->bustype); 1571 1572 XYC_GO(xycsc->xyc, addr); 1573 1574 del = XYC_RESETUSEC; 1575 while (del > 0) { 1576 if ((xycsc->xyc->xyc_csr & XYC_GBSY) == 0) 1577 break; 1578 DELAY(1); 1579 del--; 1580 } 1581 1582 if (del <= 0 || xycsc->ciopb->errs) { 1583 printf("%s: off-line: %s\n", device_xname(xycsc->sc_dev), 1584 xyc_e2str(xycsc->ciopb->errno)); 1585 del = xycsc->xyc->xyc_rsetup; 1586 if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL) 1587 panic("%s", __func__); 1588 } else { 1589 xycsc->xyc->xyc_csr = XYC_IPND; /* clear IPND */ 1590 } 1591 memcpy(xycsc->ciopb, &tmpiopb, sizeof(tmpiopb)); 1592 } 1593 1594 1595 /* 1596 * xyc_reset: reset everything: requests are marked as errors except 1597 * a polled request (which is resubmitted) 1598 */ 1599 int 1600 xyc_reset(struct xyc_softc *xycsc, int quiet, struct xy_iorq *blastmode, 1601 int error, struct xy_softc *xysc) 1602 { 1603 int del = 0, lcv, retval = XY_ERR_AOK; 1604 struct xy_iorq *iorq; 1605 1606 /* soft reset hardware */ 1607 1608 if (quiet == 0) 1609 printf("%s: soft reset\n", device_xname(xycsc->sc_dev)); 1610 del = xycsc->xyc->xyc_rsetup; 1611 del = xyc_unbusy(xycsc->xyc, XYC_RESETUSEC); 1612 if (del == XY_ERR_FAIL) { 1613 blastmode = XY_RSET_ALL; /* dead, flush all requests */ 1614 retval = XY_ERR_FAIL; 1615 } 1616 if (xysc) 1617 xyc_xyreset(xycsc, xysc); 1618 1619 /* fix queues based on "blast-mode" */ 1620 1621 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 1622 iorq = &xycsc->reqs[lcv]; 1623 1624 if (XY_STATE(iorq->mode) != XY_SUB_POLL && 1625 XY_STATE(iorq->mode) != XY_SUB_WAIT && 1626 XY_STATE(iorq->mode) != XY_SUB_NORM) 1627 /* is it active? */ 1628 continue; 1629 1630 if (blastmode == XY_RSET_ALL || 1631 blastmode != iorq) { 1632 /* failed */ 1633 iorq->errno = error; 1634 xycsc->iopbase[lcv].done = xycsc->iopbase[lcv].errs = 1; 1635 switch (XY_STATE(iorq->mode)) { 1636 case XY_SUB_NORM: 1637 iorq->buf->b_error = EIO; 1638 iorq->buf->b_resid = iorq->sectcnt * XYFM_BPS; 1639 /* Sun3: map/unmap regardless of B_PHYS */ 1640 dvma_mapout(iorq->dbufbase, 1641 iorq->buf->b_bcount); 1642 (void)bufq_get(iorq->xy->xyq); 1643 disk_unbusy(&iorq->xy->sc_dk, 1644 (iorq->buf->b_bcount - iorq->buf->b_resid), 1645 (iorq->buf->b_flags & B_READ)); 1646 biodone(iorq->buf); 1647 iorq->mode = XY_SUB_FREE; 1648 break; 1649 case XY_SUB_WAIT: 1650 wakeup(iorq); 1651 case XY_SUB_POLL: 1652 iorq->mode = 1653 XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1654 break; 1655 } 1656 1657 } else { 1658 1659 /* resubmit, no need to do anything here */ 1660 } 1661 } 1662 1663 /* 1664 * now, if stuff is waiting, start it. 1665 * since we just reset it should go 1666 */ 1667 xyc_start(xycsc, NULL); 1668 1669 return retval; 1670 } 1671 1672 /* 1673 * xyc_start: start waiting buffers 1674 */ 1675 1676 void 1677 xyc_start(struct xyc_softc *xycsc, struct xy_iorq *iorq) 1678 { 1679 int lcv; 1680 struct xy_softc *xy; 1681 1682 if (iorq == NULL) { 1683 for (lcv = 0; lcv < XYC_MAXDEV ; lcv++) { 1684 if ((xy = xycsc->sc_drives[lcv]) == NULL) 1685 continue; 1686 if (bufq_peek(xy->xyq) == NULL) 1687 continue; 1688 if (xy->xyrq->mode != XY_SUB_FREE) 1689 continue; 1690 xyc_startbuf(xycsc, xy, bufq_peek(xy->xyq)); 1691 } 1692 } 1693 xyc_submit_iorq(xycsc, iorq, XY_SUB_NOQ); 1694 } 1695 1696 /* 1697 * xyc_remove_iorq: remove "done" IOPB's. 1698 */ 1699 1700 int 1701 xyc_remove_iorq(struct xyc_softc *xycsc) 1702 { 1703 int errno, rq, comm, errs; 1704 struct xyc *xyc = xycsc->xyc; 1705 u_long addr; 1706 struct xy_iopb *iopb; 1707 struct xy_iorq *iorq; 1708 struct buf *bp; 1709 1710 if (xyc->xyc_csr & XYC_DERR) { 1711 /* 1712 * DOUBLE ERROR: should never happen under normal use. This 1713 * error is so bad, you can't even tell which IOPB is bad, so 1714 * we dump them all. 1715 */ 1716 errno = XY_ERR_DERR; 1717 printf("%s: DOUBLE ERROR!\n", device_xname(xycsc->sc_dev)); 1718 if (xyc_reset(xycsc, 0, XY_RSET_ALL, errno, 0) != XY_ERR_AOK) { 1719 printf("%s: soft reset failed!\n", 1720 device_xname(xycsc->sc_dev)); 1721 panic("%s: controller DEAD", __func__); 1722 } 1723 return XY_ERR_AOK; 1724 } 1725 1726 /* 1727 * get iopb that is done, loop down the chain 1728 */ 1729 1730 if (xyc->xyc_csr & XYC_ERR) { 1731 xyc->xyc_csr = XYC_ERR; /* clear error condition */ 1732 } 1733 if (xyc->xyc_csr & XYC_IPND) { 1734 xyc->xyc_csr = XYC_IPND; /* clear interrupt */ 1735 } 1736 1737 for (rq = 0; rq < XYC_MAXIOPB; rq++) { 1738 iorq = xycsc->xy_chain[rq]; 1739 if (iorq == NULL) break; /* done ! */ 1740 if (iorq->mode == 0 || XY_STATE(iorq->mode) == XY_SUB_DONE) 1741 continue; /* free, or done */ 1742 iopb = iorq->iopb; 1743 if (iopb->done == 0) 1744 continue; /* not done yet */ 1745 1746 comm = iopb->com; 1747 errs = iopb->errs; 1748 1749 if (errs) 1750 iorq->errno = iopb->errno; 1751 else 1752 iorq->errno = 0; 1753 1754 /* handle non-fatal errors */ 1755 1756 if (errs && 1757 xyc_error(xycsc, iorq, iopb, comm) == XY_ERR_AOK) 1758 continue; /* AOK: we resubmitted it */ 1759 1760 1761 /* this iorq is now done (hasn't been restarted or anything) */ 1762 1763 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror) 1764 xyc_perror(iorq, iopb, 0); 1765 1766 /* now, if read/write check to make sure we got all the data 1767 * we needed. (this may not be the case if we got an error in 1768 * the middle of a multisector request). */ 1769 1770 if ((iorq->mode & XY_MODE_B144) != 0 && errs == 0 && 1771 (comm == XYCMD_RD || comm == XYCMD_WR)) { 1772 /* we just successfully processed a bad144 sector 1773 * note: if we are in bad 144 mode, the pointers have 1774 * been advanced already (see above) and are pointing 1775 * at the bad144 sector. to exit bad144 mode, we 1776 * must advance the pointers 1 sector and issue a new 1777 * request if there are still sectors left to process 1778 * 1779 */ 1780 XYC_ADVANCE(iorq, 1); /* advance 1 sector */ 1781 1782 /* exit b144 mode */ 1783 iorq->mode = iorq->mode & (~XY_MODE_B144); 1784 1785 if (iorq->sectcnt) { /* more to go! */ 1786 iorq->lasterror = iorq->errno = iopb->errno = 0; 1787 iopb->errs = iopb->done = 0; 1788 iorq->tries = 0; 1789 iopb->scnt = iorq->sectcnt; 1790 iopb->cyl = 1791 iorq->blockno / iorq->xy->sectpercyl; 1792 iopb->head = 1793 (iorq->blockno / iorq->xy->nhead) % 1794 iorq->xy->nhead; 1795 iopb->sect = iorq->blockno % XYFM_BPS; 1796 addr = dvma_kvtopa(iorq->dbuf, xycsc->bustype); 1797 iopb->dataa = (addr & 0xffff); 1798 iopb->datar = ((addr & 0xff0000) >> 16); 1799 /* will resubit at end */ 1800 continue; 1801 } 1802 } 1803 /* final cleanup, totally done with this request */ 1804 1805 switch (XY_STATE(iorq->mode)) { 1806 case XY_SUB_NORM: 1807 bp = iorq->buf; 1808 if (errs) { 1809 bp->b_error = EIO; 1810 bp->b_resid = iorq->sectcnt * XYFM_BPS; 1811 } else { 1812 bp->b_resid = 0; /* done */ 1813 } 1814 /* Sun3: map/unmap regardless of B_PHYS */ 1815 dvma_mapout(iorq->dbufbase, iorq->buf->b_bcount); 1816 (void)bufq_get(iorq->xy->xyq); 1817 disk_unbusy(&iorq->xy->sc_dk, 1818 (bp->b_bcount - bp->b_resid), 1819 (bp->b_flags & B_READ)); 1820 iorq->mode = XY_SUB_FREE; 1821 biodone(bp); 1822 break; 1823 case XY_SUB_WAIT: 1824 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1825 wakeup(iorq); 1826 break; 1827 case XY_SUB_POLL: 1828 iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE); 1829 break; 1830 } 1831 } 1832 1833 return XY_ERR_AOK; 1834 } 1835 1836 /* 1837 * xyc_perror: print error. 1838 * - if still_trying is true: we got an error, retried and got a 1839 * different error. in that case lasterror is the old error, 1840 * and errno is the new one. 1841 * - if still_trying is not true, then if we ever had an error it 1842 * is in lasterror. also, if iorq->errno == 0, then we recovered 1843 * from that error (otherwise iorq->errno == iorq->lasterror). 1844 */ 1845 void 1846 xyc_perror(struct xy_iorq *iorq, struct xy_iopb *iopb, int still_trying) 1847 { 1848 int error = iorq->lasterror; 1849 1850 printf("%s", (iorq->xy) ? device_xname(iorq->xy->sc_dev) 1851 : device_xname(iorq->xyc->sc_dev)); 1852 if (iorq->buf) 1853 printf("%c: ", 'a' + (char)DISKPART(iorq->buf->b_dev)); 1854 if (iopb->com == XYCMD_RD || iopb->com == XYCMD_WR) 1855 printf("%s %d/%d/%d: ", 1856 (iopb->com == XYCMD_RD) ? "read" : "write", 1857 iopb->cyl, iopb->head, iopb->sect); 1858 printf("%s", xyc_e2str(error)); 1859 1860 if (still_trying) 1861 printf(" [still trying, new error=%s]", xyc_e2str(iorq->errno)); 1862 else 1863 if (iorq->errno == 0) 1864 printf(" [recovered in %d tries]", iorq->tries); 1865 1866 printf("\n"); 1867 } 1868 1869 /* 1870 * xyc_error: non-fatal error encountered... recover. 1871 * return AOK if resubmitted, return FAIL if this iopb is done 1872 */ 1873 int 1874 xyc_error(struct xyc_softc *xycsc, struct xy_iorq *iorq, struct xy_iopb *iopb, 1875 int comm) 1876 { 1877 int errno = iorq->errno; 1878 int erract = xyc_entoact(errno); 1879 int oldmode, advance, i; 1880 1881 if (erract == XY_ERA_RSET) { /* some errors require a reset */ 1882 oldmode = iorq->mode; 1883 iorq->mode = XY_SUB_DONE | (~XY_SUB_MASK & oldmode); 1884 /* make xyc_start ignore us */ 1885 xyc_reset(xycsc, 1, XY_RSET_NONE, errno, iorq->xy); 1886 iorq->mode = oldmode; 1887 } 1888 /* check for read/write to a sector in bad144 table if bad: redirect 1889 * request to bad144 area */ 1890 1891 if ((comm == XYCMD_RD || comm == XYCMD_WR) && 1892 (iorq->mode & XY_MODE_B144) == 0) { 1893 advance = iorq->sectcnt - iopb->scnt; 1894 XYC_ADVANCE(iorq, advance); 1895 if ((i = isbad(&iorq->xy->dkb, 1896 iorq->blockno / iorq->xy->sectpercyl, 1897 (iorq->blockno / iorq->xy->nsect) % iorq->xy->nhead, 1898 iorq->blockno % iorq->xy->nsect)) != -1) { 1899 iorq->mode |= XY_MODE_B144; /* enter bad144 mode & 1900 * redirect */ 1901 iopb->errno = iopb->done = iopb->errs = 0; 1902 iopb->scnt = 1; 1903 iopb->cyl = (iorq->xy->ncyl + iorq->xy->acyl) - 2; 1904 /* second to last acyl */ 1905 i = iorq->xy->sectpercyl - 1 - i; /* follow bad144 1906 * standard */ 1907 iopb->head = i / iorq->xy->nhead; 1908 iopb->sect = i % iorq->xy->nhead; 1909 /* will resubmit when we come out of remove_iorq */ 1910 return XY_ERR_AOK; /* recovered! */ 1911 } 1912 } 1913 1914 /* 1915 * it isn't a bad144 sector, must be real error! see if we can retry 1916 * it? 1917 */ 1918 if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror) 1919 xyc_perror(iorq, iopb, 1); /* inform of error state 1920 * change */ 1921 iorq->lasterror = errno; 1922 1923 if ((erract == XY_ERA_RSET || erract == XY_ERA_HARD) 1924 && iorq->tries < XYC_MAXTRIES) { /* retry? */ 1925 iorq->tries++; 1926 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0; 1927 /* will resubmit at end of remove_iorq */ 1928 return XY_ERR_AOK; /* recovered! */ 1929 } 1930 1931 /* failed to recover from this error */ 1932 return XY_ERR_FAIL; 1933 } 1934 1935 /* 1936 * xyc_tick: make sure xy is still alive and ticking (err, kicking). 1937 */ 1938 void 1939 xyc_tick(void *arg) 1940 { 1941 struct xyc_softc *xycsc = arg; 1942 int lcv, s, reset = 0; 1943 1944 /* reduce ttl for each request if one goes to zero, reset xyc */ 1945 s = splbio(); 1946 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 1947 if (xycsc->reqs[lcv].mode == 0 || 1948 XY_STATE(xycsc->reqs[lcv].mode) == XY_SUB_DONE) 1949 continue; 1950 xycsc->reqs[lcv].ttl--; 1951 if (xycsc->reqs[lcv].ttl == 0) 1952 reset = 1; 1953 } 1954 if (reset) { 1955 printf("%s: watchdog timeout\n", device_xname(xycsc->sc_dev)); 1956 xyc_reset(xycsc, 0, XY_RSET_NONE, XY_ERR_FAIL, NULL); 1957 } 1958 splx(s); 1959 1960 /* until next time */ 1961 1962 callout_reset(&xycsc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xycsc); 1963 } 1964 1965 /* 1966 * xyc_ioctlcmd: this function provides a user level interface to the 1967 * controller via ioctl. this allows "format" programs to be written 1968 * in user code, and is also useful for some debugging. we return 1969 * an error code. called at user priority. 1970 * 1971 * XXX missing a few commands (see the 7053 driver for ideas) 1972 */ 1973 int 1974 xyc_ioctlcmd(struct xy_softc *xy, dev_t dev, struct xd_iocmd *xio) 1975 { 1976 int s, err, rqno; 1977 void *dvmabuf = NULL; 1978 struct xyc_softc *xycsc; 1979 1980 /* check sanity of requested command */ 1981 1982 switch (xio->cmd) { 1983 1984 case XYCMD_NOP: /* no op: everything should be zero */ 1985 if (xio->subfn || xio->dptr || xio->dlen || 1986 xio->block || xio->sectcnt) 1987 return EINVAL; 1988 break; 1989 1990 case XYCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */ 1991 case XYCMD_WR: 1992 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS || 1993 xio->sectcnt * XYFM_BPS != xio->dlen || xio->dptr == NULL) 1994 return EINVAL; 1995 break; 1996 1997 case XYCMD_SK: /* seek: doesn't seem useful to export this */ 1998 return EINVAL; 1999 break; 2000 2001 default: 2002 return EINVAL;/* ??? */ 2003 } 2004 2005 /* create DVMA buffer for request if needed */ 2006 2007 if (xio->dlen) { 2008 dvmabuf = dvma_malloc(xio->dlen); 2009 if (xio->cmd == XYCMD_WR) { 2010 err = copyin(xio->dptr, dvmabuf, xio->dlen); 2011 if (err) { 2012 dvma_free(dvmabuf, xio->dlen); 2013 return err; 2014 } 2015 } 2016 } 2017 /* do it! */ 2018 2019 err = 0; 2020 xycsc = xy->parent; 2021 s = splbio(); 2022 rqno = xyc_cmd(xycsc, xio->cmd, xio->subfn, xy->xy_drive, xio->block, 2023 xio->sectcnt, dvmabuf, XY_SUB_WAIT); 2024 if (rqno == XY_ERR_FAIL) { 2025 err = EIO; 2026 goto done; 2027 } 2028 xio->errno = xycsc->ciorq->errno; 2029 xio->tries = xycsc->ciorq->tries; 2030 XYC_DONE(xycsc, err); 2031 2032 if (xio->cmd == XYCMD_RD) 2033 err = copyout(dvmabuf, xio->dptr, xio->dlen); 2034 2035 done: 2036 splx(s); 2037 if (dvmabuf) 2038 dvma_free(dvmabuf, xio->dlen); 2039 return err; 2040 } 2041 2042 /* 2043 * xyc_e2str: convert error code number into an error string 2044 */ 2045 const char * 2046 xyc_e2str(int no) 2047 { 2048 switch (no) { 2049 case XY_ERR_FAIL: 2050 return "Software fatal error"; 2051 case XY_ERR_DERR: 2052 return "DOUBLE ERROR"; 2053 case XY_ERR_AOK: 2054 return "Successful completion"; 2055 case XY_ERR_IPEN: 2056 return "Interrupt pending"; 2057 case XY_ERR_BCFL: 2058 return "Busy conflict"; 2059 case XY_ERR_TIMO: 2060 return "Operation timeout"; 2061 case XY_ERR_NHDR: 2062 return "Header not found"; 2063 case XY_ERR_HARD: 2064 return "Hard ECC error"; 2065 case XY_ERR_ICYL: 2066 return "Illegal cylinder address"; 2067 case XY_ERR_ISEC: 2068 return "Illegal sector address"; 2069 case XY_ERR_SMAL: 2070 return "Last sector too small"; 2071 case XY_ERR_SACK: 2072 return "Slave ACK error (non-existent memory)"; 2073 case XY_ERR_CHER: 2074 return "Cylinder and head/header error"; 2075 case XY_ERR_SRTR: 2076 return "Auto-seek retry successful"; 2077 case XY_ERR_WPRO: 2078 return "Write-protect error"; 2079 case XY_ERR_UIMP: 2080 return "Unimplemented command"; 2081 case XY_ERR_DNRY: 2082 return "Drive not ready"; 2083 case XY_ERR_SZER: 2084 return "Sector count zero"; 2085 case XY_ERR_DFLT: 2086 return "Drive faulted"; 2087 case XY_ERR_ISSZ: 2088 return "Illegal sector size"; 2089 case XY_ERR_SLTA: 2090 return "Self test A"; 2091 case XY_ERR_SLTB: 2092 return "Self test B"; 2093 case XY_ERR_SLTC: 2094 return "Self test C"; 2095 case XY_ERR_SOFT: 2096 return "Soft ECC error"; 2097 case XY_ERR_SFOK: 2098 return "Soft ECC error recovered"; 2099 case XY_ERR_IHED: 2100 return "Illegal head"; 2101 case XY_ERR_DSEQ: 2102 return "Disk sequencer error"; 2103 case XY_ERR_SEEK: 2104 return "Seek error"; 2105 default: 2106 return "Unknown error"; 2107 } 2108 } 2109 2110 int 2111 xyc_entoact(int errno) 2112 { 2113 2114 switch (errno) { 2115 case XY_ERR_FAIL: 2116 case XY_ERR_DERR: 2117 case XY_ERR_IPEN: 2118 case XY_ERR_BCFL: 2119 case XY_ERR_ICYL: 2120 case XY_ERR_ISEC: 2121 case XY_ERR_UIMP: 2122 case XY_ERR_SZER: 2123 case XY_ERR_ISSZ: 2124 case XY_ERR_SLTA: 2125 case XY_ERR_SLTB: 2126 case XY_ERR_SLTC: 2127 case XY_ERR_IHED: 2128 case XY_ERR_SACK: 2129 case XY_ERR_SMAL: 2130 return XY_ERA_PROG; /* program error ! */ 2131 2132 case XY_ERR_TIMO: 2133 case XY_ERR_NHDR: 2134 case XY_ERR_HARD: 2135 case XY_ERR_DNRY: 2136 case XY_ERR_CHER: 2137 case XY_ERR_SEEK: 2138 case XY_ERR_SOFT: 2139 return XY_ERA_HARD; /* hard error, retry */ 2140 2141 case XY_ERR_DFLT: 2142 case XY_ERR_DSEQ: 2143 return XY_ERA_RSET; /* hard error reset */ 2144 2145 case XY_ERR_SRTR: 2146 case XY_ERR_SFOK: 2147 case XY_ERR_AOK: 2148 return XY_ERA_SOFT; /* an FYI error */ 2149 2150 case XY_ERR_WPRO: 2151 return XY_ERA_WPRO; /* write protect */ 2152 } 2153 2154 return XY_ERA_PROG; /* ??? */ 2155 } 2156