/*	$NetBSD: xy.c,v 1.78 2015/04/26 15:15:19 mlelstv Exp $	*/

/*
 * Copyright (c) 1995 Charles D. Cranor
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *
 * x y . c   x y l o g i c s   4 5 0 / 4 5 1   s m d   d r i v e r
 *
 * author: Chuck Cranor <chuck@netbsd>
 * id: &Id: xy.c,v 1.1 1995/09/25 20:35:14 chuck Exp &
 * started: 14-Sep-95
 * references: [1] Xylogics Model 753 User's Manual
 *                 part number: 166-753-001, Revision B, May 21, 1988.
 *                 "Your Partner For Performance"
 *             [2] other NetBSD disk device drivers
 *             [3] Xylogics Model 450 User's Manual
 *                 part number: 166-017-001, Revision B, 1983.
 *             [4] Addendum to Xylogics Model 450 Disk Controller User's
 *                 Manual, Jan. 1985.
 *             [5] The 451 Controller, Rev. B3, September 2, 1986.
 *             [6] David Jones <dej@achilles.net>'s unfinished 450/451 driver
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.78 2015/04/26 15:15:19 mlelstv Exp $");

#undef XYC_DEBUG		/* full debug */
#undef XYC_DIAG			/* extra sanity checks */
#if defined(DIAGNOSTIC) && !defined(XYC_DIAG)
#define XYC_DIAG		/* link in with master DIAG option */
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/syslog.h>
#include <sys/dkbad.h>
#include <sys/conf.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#include <dev/sun/disklabel.h>

#include <machine/autoconf.h>
#include <machine/dvma.h>

#include <sun3/dev/xyreg.h>
#include <sun3/dev/xyvar.h>
#include <sun3/dev/xio.h>

#include "ioconf.h"
#include "locators.h"

/*
 * Print a complaint when no xy children were specified
 * in the config file.  Better than a link error...
 *
 * XXX: Some folks say this driver should be split in two,
 * but that seems pointless with ONLY one type of child.
 */
#include "xy.h"
#if NXY == 0
#error "xyc but no xy?"
#endif

/*
 * macros
 */

/*
 * XYC_GO: start iopb ADDR (DVMA addr in a u_long) on XYC
 */
#define XYC_GO(XYC, ADDR)					\
do {								\
	(XYC)->xyc_addr_lo = ((ADDR) & 0xff);			\
	(ADDR) = ((ADDR) >> 8);					\
	(XYC)->xyc_addr_hi = ((ADDR) & 0xff);			\
	(ADDR) = ((ADDR) >> 8);					\
	(XYC)->xyc_reloc_lo = ((ADDR) & 0xff);			\
	(ADDR) = ((ADDR) >> 8);					\
	(XYC)->xyc_reloc_hi = (ADDR);				\
	(XYC)->xyc_csr = XYC_GBSY; /* go! */			\
} while (/* CONSTCOND */ 0)
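
/*
 * Example (illustrative address only): if ADDR holds the DVMA address
 * 0x00fe1234, XYC_GO writes 0x34 to xyc_addr_lo, 0x12 to xyc_addr_hi,
 * 0xfe to xyc_reloc_lo and 0x00 to xyc_reloc_hi, then sets XYC_GBSY in
 * the csr to start the IOPB.  Note that the macro shifts ADDR in place,
 * so the caller's copy of the address is consumed.
 */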

/*
 * XYC_DONE: don't need IORQ, get error code and free (done after xyc_cmd)
 */
#define XYC_DONE(SC,ER)						\
do {								\
	if ((ER) == XY_ERR_AOK) {				\
		(ER) = (SC)->ciorq->errno;			\
		(SC)->ciorq->mode = XY_SUB_FREE;		\
		wakeup((SC)->ciorq);				\
	}							\
} while (/* CONSTCOND */ 0)

/*
 * XYC_ADVANCE: advance iorq's pointers by a number of sectors
 */
#define XYC_ADVANCE(IORQ, N)					\
do {								\
	if (N) {						\
		(IORQ)->sectcnt -= (N);				\
		(IORQ)->blockno += (N);				\
		(IORQ)->dbuf += ((N) * XYFM_BPS);		\
	}							\
} while (/* CONSTCOND */ 0)

/*
 * note - addresses you can sleep on:
 *   [1] & of xy_softc's "state" (waiting for a chance to attach a drive)
 *   [2] & an iorq (waiting for an XY_SUB_WAIT iorq to finish)
 */


/*
 * function prototypes
 * "xyc_*" functions are internal, all others are external interfaces
 */

/* internals */
struct xy_iopb *xyc_chain(struct xyc_softc *, struct xy_iorq *);
int	xyc_cmd(struct xyc_softc *, int, int, int, int, int, char *, int);
const char *xyc_e2str(int);
int	xyc_entoact(int);
int	xyc_error(struct xyc_softc *, struct xy_iorq *, struct xy_iopb *, int);
int	xyc_ioctlcmd(struct xy_softc *, dev_t dev, struct xd_iocmd *);
void	xyc_perror(struct xy_iorq *, struct xy_iopb *, int);
int	xyc_piodriver(struct xyc_softc *, struct xy_iorq *);
int	xyc_remove_iorq(struct xyc_softc *);
int	xyc_reset(struct xyc_softc *, int, struct xy_iorq *, int,
	    struct xy_softc *);
inline void xyc_rqinit(struct xy_iorq *, struct xyc_softc *, struct xy_softc *,
	    int, u_long, int, void *, struct buf *);
void	xyc_rqtopb(struct xy_iorq *, struct xy_iopb *, int, int);
void	xyc_start(struct xyc_softc *, struct xy_iorq *);
int	xyc_startbuf(struct xyc_softc *, struct xy_softc *, struct buf *);
int	xyc_submit_iorq(struct xyc_softc *, struct xy_iorq *, int);
void	xyc_tick(void *);
int	xyc_unbusy(struct xyc *, int);
void	xyc_xyreset(struct xyc_softc *, struct xy_softc *);

/* machine interrupt hook */
int	xycintr(void *);

/* autoconf */
static int  xycmatch(device_t, cfdata_t, void *);
static void xycattach(device_t, device_t, void *);
static int  xyc_print(void *, const char *);

static int  xymatch(device_t, cfdata_t, void *);
static void xyattach(device_t, device_t, void *);
static void xy_init(struct xy_softc *);

static void xydummystrat(struct buf *);
int	xygetdisklabel(struct xy_softc *, void *);

/*
 * cfattach's: device driver interface to autoconfig
 */

CFATTACH_DECL_NEW(xyc, sizeof(struct xyc_softc),
    xycmatch, xycattach, NULL, NULL);

CFATTACH_DECL_NEW(xy, sizeof(struct xy_softc),
    xymatch, xyattach, NULL, NULL);

struct xyc_attach_args {	/* this is the "aux" args to xyattach */
	int driveno;		/* unit number */
};

dev_type_open(xyopen);
dev_type_close(xyclose);
dev_type_read(xyread);
dev_type_write(xywrite);
dev_type_ioctl(xyioctl);
dev_type_strategy(xystrategy);
dev_type_dump(xydump);
dev_type_size(xysize);

const struct bdevsw xy_bdevsw = {
	.d_open = xyopen,
	.d_close = xyclose,
	.d_strategy = xystrategy,
	.d_ioctl = xyioctl,
	.d_dump = xydump,
	.d_psize = xysize,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

const struct cdevsw xy_cdevsw = {
	.d_open = xyopen,
	.d_close = xyclose,
	.d_read = xyread,
	.d_write = xywrite,
	.d_ioctl = xyioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK
};

/*
 * dkdriver
 */

struct dkdriver xydkdriver = {
	.d_strategy = xystrategy
};

/*
 * start: disk label fix code (XXX)
 */

static void *xy_labeldata;

static void
xydummystrat(struct buf *bp)
{

	if (bp->b_bcount != XYFM_BPS)
		panic("%s: b_bcount", __func__);
	memcpy(bp->b_data, xy_labeldata, XYFM_BPS);
	bp->b_oflags |= BO_DONE;
	bp->b_cflags &= ~BC_BUSY;
}

int
xygetdisklabel(struct xy_softc *xy, void *b)
{
	const char *err;
	struct sun_disklabel *sdl;

	/* We already have the label data in `b'; setup for dummy strategy */
	xy_labeldata = b;

	/* Required parameter for readdisklabel() */
	xy->sc_dk.dk_label->d_secsize = XYFM_BPS;

	err = readdisklabel(MAKEDISKDEV(0, device_unit(xy->sc_dev), RAW_PART),
	    xydummystrat, xy->sc_dk.dk_label, xy->sc_dk.dk_cpulabel);
	if (err) {
		printf("%s: %s\n", device_xname(xy->sc_dev), err);
		return XY_ERR_FAIL;
	}

	/* Ok, we have the label; fill in `pcyl' if there's SunOS magic */
	sdl = (struct sun_disklabel *)xy->sc_dk.dk_cpulabel->cd_block;
	if (sdl->sl_magic == SUN_DKMAGIC)
		xy->pcyl = sdl->sl_pcyl;
	else {
		printf("%s: WARNING: no `pcyl' in disk label.\n",
		    device_xname(xy->sc_dev));
		xy->pcyl = xy->sc_dk.dk_label->d_ncylinders +
		    xy->sc_dk.dk_label->d_acylinders;
		printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n",
		    device_xname(xy->sc_dev), xy->pcyl);
	}

	xy->ncyl = xy->sc_dk.dk_label->d_ncylinders;
	xy->acyl = xy->sc_dk.dk_label->d_acylinders;
	xy->nhead = xy->sc_dk.dk_label->d_ntracks;
	xy->nsect = xy->sc_dk.dk_label->d_nsectors;
	xy->sectpercyl = xy->nhead * xy->nsect;
	xy->sc_dk.dk_label->d_secsize = XYFM_BPS;	/* not handled by
							 * sun->bsd */
	return XY_ERR_AOK;
}

/*
 * end: disk label fix code (XXX)
 */

/*
 * a u t o c o n f i g   f u n c t i o n s
 */

/*
 * xycmatch: determine if xyc is present or not.  we do a
 * soft reset to detect the xyc.
 */
static int
xycmatch(device_t parent, cfdata_t cf, void *aux)
{
	struct confargs *ca = aux;

	/* No default VME address. */
	if (ca->ca_paddr == -1)
		return 0;

	/* Make sure something is there... */
	if (bus_peek(ca->ca_bustype, ca->ca_paddr + 5, 1) == -1)
		return 0;

	/* Default interrupt priority. */
	if (ca->ca_intpri == -1)
		ca->ca_intpri = 2;

	return 1;
}

/*
 * xycattach: attach controller
 */
static void
xycattach(device_t parent, device_t self, void *aux)
{
	struct xyc_softc *xyc = device_private(self);
	struct confargs *ca = aux;
	struct xyc_attach_args xa;
	int lcv, err, res, pbsz;
	void *tmp, *tmp2;
	u_long ultmp;

	/* get addressing and intr level stuff from autoconfig and load it
	 * into our xyc_softc. */

	xyc->sc_dev = self;
	xyc->xyc = (struct xyc *)bus_mapin(ca->ca_bustype, ca->ca_paddr,
	    sizeof(struct xyc));
	xyc->bustype = ca->ca_bustype;
	xyc->ipl = ca->ca_intpri;
	xyc->vector = ca->ca_intvec;
	xyc->no_ols = 0;	/* XXX should be from config */

	for (lcv = 0; lcv < XYC_MAXDEV; lcv++)
		xyc->sc_drives[lcv] = NULL;

	/*
	 * allocate and zero buffers
	 * check boundaries of the KVA's ... all IOPBs must reside in
	 * the same 64K region.
	 */

	pbsz = XYC_MAXIOPB * sizeof(struct xy_iopb);
	tmp = tmp2 = (struct xy_iopb *)dvma_malloc(pbsz);	/* KVA */
	ultmp = (u_long)tmp;
	if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) {
		tmp = (struct xy_iopb *)dvma_malloc(pbsz);	/* retry! */
		dvma_free(tmp2, pbsz);
		ultmp = (u_long)tmp;
		if ((ultmp & 0xffff0000) != ((ultmp + pbsz) & 0xffff0000)) {
			aprint_error(": can't alloc IOPB mem in 64K\n");
			return;
		}
	}
	memset(tmp, 0, pbsz);
	xyc->iopbase = tmp;
	xyc->dvmaiopb =
	    (struct xy_iopb *)dvma_kvtopa(xyc->iopbase, xyc->bustype);
	xyc->reqs = malloc(XYC_MAXIOPB * sizeof(struct xy_iorq),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (xyc->reqs == NULL)
		panic("xyc malloc");
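
	/*
	 * Note on the IOPB allocation above: if the first dvma_malloc()
	 * result straddles a 64K boundary, the retry allocation is made
	 * *before* the first region is freed, so the allocator cannot hand
	 * back the same straddling region.
	 */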
401 */ 402 403 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 404 xyc->xy_chain[lcv] = NULL; 405 xyc->reqs[lcv].iopb = &xyc->iopbase[lcv]; 406 xyc->iopbase[lcv].asr = 1; /* always the same */ 407 xyc->iopbase[lcv].eef = 1; /* always the same */ 408 xyc->iopbase[lcv].ecm = XY_ECM; /* always the same */ 409 xyc->iopbase[lcv].aud = 1; /* always the same */ 410 xyc->iopbase[lcv].relo = 1; /* always the same */ 411 xyc->iopbase[lcv].thro = XY_THRO;/* always the same */ 412 } 413 xyc->ciorq = &xyc->reqs[XYC_CTLIOPB]; /* short hand name */ 414 xyc->ciopb = &xyc->iopbase[XYC_CTLIOPB]; /* short hand name */ 415 xyc->xy_hand = 0; 416 417 /* read controller parameters and insure we have a 450/451 */ 418 419 err = xyc_cmd(xyc, XYCMD_ST, 0, 0, 0, 0, 0, XY_SUB_POLL); 420 res = xyc->ciopb->ctyp; 421 XYC_DONE(xyc, err); 422 if (res != XYCT_450) { 423 if (err) 424 aprint_error(": %s: ", xyc_e2str(err)); 425 aprint_error(": doesn't identify as a 450/451\n"); 426 return; 427 } 428 aprint_normal(": Xylogics 450/451"); 429 if (xyc->no_ols) 430 /* 450 doesn't overlap seek right */ 431 aprint_normal(" [OLS disabled]"); 432 aprint_normal("\n"); 433 if (err) { 434 aprint_error_dev(self, "error: %s\n", xyc_e2str(err)); 435 return; 436 } 437 if ((xyc->xyc->xyc_csr & XYC_ADRM) == 0) { 438 aprint_error_dev(self, "24 bit addressing turned off\n"); 439 printf("please set hardware jumpers JM1-JM2=in, JM3-JM4=out\n"); 440 printf("to enable 24 bit mode and this driver\n"); 441 return; 442 } 443 444 /* link in interrupt with higher level software */ 445 isr_add_vectored(xycintr, xyc, ca->ca_intpri, ca->ca_intvec); 446 evcnt_attach_dynamic(&xyc->sc_intrcnt, EVCNT_TYPE_INTR, NULL, 447 device_xname(self), "intr"); 448 449 callout_init(&xyc->sc_tick_ch, 0); 450 451 /* now we must look for disks using autoconfig */ 452 for (xa.driveno = 0; xa.driveno < XYC_MAXDEV; xa.driveno++) 453 (void)config_found(self, (void *)&xa, xyc_print); 454 455 /* start the watchdog clock */ 456 callout_reset(&xyc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xyc); 457 } 458 459 static int 460 xyc_print(void *aux, const char *name) 461 { 462 struct xyc_attach_args *xa = aux; 463 464 if (name != NULL) 465 aprint_normal("%s: ", name); 466 467 if (xa->driveno != -1) 468 aprint_normal(" drive %d", xa->driveno); 469 470 return UNCONF; 471 } 472 473 /* 474 * xymatch: probe for disk. 475 * 476 * note: we almost always say disk is present. this allows us to 477 * spin up and configure a disk after the system is booted (we can 478 * call xyattach!). Also, wire down the relationship between the 479 * xy* and xyc* devices, to simplify boot device identification. 480 */ 481 static int 482 xymatch(device_t parent, cfdata_t cf, void *aux) 483 { 484 struct xyc_attach_args *xa = aux; 485 int xy_unit; 486 487 /* Match only on the "wired-down" controller+disk. */ 488 xy_unit = device_unit(parent) * 2 + xa->driveno; 489 if (cf->cf_unit != xy_unit) 490 return 0; 491 492 return 1; 493 } 494 495 /* 496 * xyattach: attach a disk. 497 */ 498 static void 499 xyattach(device_t parent, device_t self, void *aux) 500 { 501 struct xy_softc *xy = device_private(self); 502 struct xyc_softc *xyc = device_private(parent); 503 struct xyc_attach_args *xa = aux; 504 505 xy->sc_dev = self; 506 aprint_normal("\n"); 507 508 /* 509 * Always re-initialize the disk structure. We want statistics 510 * to start with a clean slate. 
511 */ 512 memset(&xy->sc_dk, 0, sizeof(xy->sc_dk)); 513 disk_init(&xy->sc_dk, device_xname(self), &xydkdriver); 514 515 xy->state = XY_DRIVE_UNKNOWN; /* to start */ 516 xy->flags = 0; 517 xy->parent = xyc; 518 519 /* init queue of waiting bufs */ 520 bufq_alloc(&xy->xyq, "disksort", BUFQ_SORT_RAWBLOCK); 521 xy->xyrq = &xyc->reqs[xa->driveno]; 522 523 xy->xy_drive = xa->driveno; 524 xyc->sc_drives[xa->driveno] = xy; 525 526 /* Do init work common to attach and open. */ 527 xy_init(xy); 528 } 529 530 /* 531 * end of autoconfig functions 532 */ 533 534 /* 535 * Initialize a disk. This can be called from both autoconf and 536 * also from xyopen/xystrategy. 537 */ 538 static void 539 xy_init(struct xy_softc *xy) 540 { 541 struct xyc_softc *xyc; 542 struct dkbad *dkb; 543 void *dvmabuf; 544 int err, spt, mb, blk, lcv, fullmode, newstate; 545 546 xyc = xy->parent; 547 xy->state = XY_DRIVE_ATTACHING; 548 newstate = XY_DRIVE_UNKNOWN; 549 fullmode = (cold) ? XY_SUB_POLL : XY_SUB_WAIT; 550 dvmabuf = dvma_malloc(XYFM_BPS); 551 552 /* first try and reset the drive */ 553 554 err = xyc_cmd(xyc, XYCMD_RST, 0, xy->xy_drive, 0, 0, 0, fullmode); 555 XYC_DONE(xyc, err); 556 if (err == XY_ERR_DNRY) { 557 printf("%s: drive %d: off-line\n", 558 device_xname(xy->sc_dev), xy->xy_drive); 559 goto done; 560 } 561 if (err) { 562 printf("%s: ERROR 0x%02x (%s)\n", 563 device_xname(xy->sc_dev), err, xyc_e2str(err)); 564 goto done; 565 } 566 printf("%s: drive %d ready", 567 device_xname(xy->sc_dev), xy->xy_drive); 568 569 /* 570 * now set drive parameters (to semi-bogus values) so we can read the 571 * disk label. 572 */ 573 xy->pcyl = xy->ncyl = 1; 574 xy->acyl = 0; 575 xy->nhead = 1; 576 xy->nsect = 1; 577 xy->sectpercyl = 1; 578 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 579 xy->dkb.bt_bad[lcv].bt_cyl = 580 xy->dkb.bt_bad[lcv].bt_trksec = 0xffff; 581 582 /* read disk label */ 583 for (xy->drive_type = 0; xy->drive_type <= XYC_MAXDT; 584 xy->drive_type++) { 585 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, 0, 1, 586 dvmabuf, fullmode); 587 XYC_DONE(xyc, err); 588 if (err == XY_ERR_AOK) 589 break; 590 } 591 592 if (err != XY_ERR_AOK) { 593 printf("%s: reading disk label failed: %s\n", 594 device_xname(xy->sc_dev), xyc_e2str(err)); 595 goto done; 596 } 597 printf("%s: drive type %d\n", 598 device_xname(xy->sc_dev), xy->drive_type); 599 600 newstate = XY_DRIVE_NOLABEL; 601 602 xy->hw_spt = spt = 0; /* XXX needed ? */ 603 /* Attach the disk: must be before getdisklabel to malloc label */ 604 disk_attach(&xy->sc_dk); 605 606 if (xygetdisklabel(xy, dvmabuf) != XY_ERR_AOK) 607 goto done; 608 609 /* inform the user of what is up */ 610 printf("%s: <%s>, pcyl %d\n", 611 device_xname(xy->sc_dev), 612 (char *)dvmabuf, xy->pcyl); 613 mb = xy->ncyl * (xy->nhead * xy->nsect) / (1048576 / XYFM_BPS); 614 printf("%s: %dMB, %d cyl, %d head, %d sec\n", 615 device_xname(xy->sc_dev), mb, xy->ncyl, xy->nhead, xy->nsect); 616 617 /* 618 * 450/451 stupidity: the drive type is encoded into the format 619 * of the disk. the drive type in the IOPB must match the drive 620 * type in the format, or you will not be able to do I/O to the 621 * disk (you get header not found errors). if you have two drives 622 * of different sizes that have the same drive type in their 623 * formatting then you are out of luck. 624 * 625 * this problem was corrected in the 753/7053. 
626 */ 627 628 for (lcv = 0 ; lcv < XYC_MAXDEV ; lcv++) { 629 struct xy_softc *oxy; 630 631 oxy = xyc->sc_drives[lcv]; 632 if (oxy == NULL || oxy == xy) 633 continue; 634 if (oxy->drive_type != xy->drive_type) 635 continue; 636 if (xy->nsect != oxy->nsect || xy->pcyl != oxy->pcyl || 637 xy->nhead != oxy->nhead) { 638 printf("%s: %s and %s must be the same size!\n", 639 device_xname(xyc->sc_dev), 640 device_xname(xy->sc_dev), 641 device_xname(oxy->sc_dev)); 642 panic("xy drive size mismatch"); 643 } 644 } 645 646 647 /* now set the real drive parameters! */ 648 blk = (xy->nsect - 1) + 649 ((xy->nhead - 1) * xy->nsect) + 650 ((xy->pcyl - 1) * xy->nsect * xy->nhead); 651 err = xyc_cmd(xyc, XYCMD_SDS, 0, xy->xy_drive, blk, 0, 0, fullmode); 652 XYC_DONE(xyc, err); 653 if (err) { 654 printf("%s: write drive size failed: %s\n", 655 device_xname(xy->sc_dev), xyc_e2str(err)); 656 goto done; 657 } 658 newstate = XY_DRIVE_ONLINE; 659 660 /* 661 * read bad144 table. this table resides on the first sector of the 662 * last track of the disk (i.e. second cyl of "acyl" area). 663 */ 664 blk = (xy->ncyl + xy->acyl - 1) * (xy->nhead * xy->nsect) + 665 /* last cyl */ 666 (xy->nhead - 1) * xy->nsect; /* last head */ 667 err = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, blk, 1, 668 dvmabuf, fullmode); 669 XYC_DONE(xyc, err); 670 if (err) { 671 printf("%s: reading bad144 failed: %s\n", 672 device_xname(xy->sc_dev), xyc_e2str(err)); 673 goto done; 674 } 675 676 /* check dkbad for sanity */ 677 dkb = (struct dkbad *)dvmabuf; 678 for (lcv = 0; lcv < 126; lcv++) { 679 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 680 dkb->bt_bad[lcv].bt_cyl == 0) && 681 dkb->bt_bad[lcv].bt_trksec == 0xffff) 682 continue; /* blank */ 683 if (dkb->bt_bad[lcv].bt_cyl >= xy->ncyl) 684 break; 685 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xy->nhead) 686 break; 687 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xy->nsect) 688 break; 689 } 690 if (lcv != 126) { 691 printf("%s: warning: invalid bad144 sector!\n", 692 device_xname(xy->sc_dev)); 693 } else { 694 memcpy(&xy->dkb, dvmabuf, XYFM_BPS); 695 } 696 697 done: 698 xy->state = newstate; 699 dvma_free(dvmabuf, XYFM_BPS); 700 } 701 702 /* 703 * { b , c } d e v s w f u n c t i o n s 704 */ 705 706 /* 707 * xyclose: close device 708 */ 709 int 710 xyclose(dev_t dev, int flag, int fmt, struct lwp *l) 711 { 712 struct xy_softc *xy = device_lookup_private(&xy_cd, DISKUNIT(dev)); 713 int part = DISKPART(dev); 714 715 /* clear mask bits */ 716 717 switch (fmt) { 718 case S_IFCHR: 719 xy->sc_dk.dk_copenmask &= ~(1 << part); 720 break; 721 case S_IFBLK: 722 xy->sc_dk.dk_bopenmask &= ~(1 << part); 723 break; 724 } 725 xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 726 727 return 0; 728 } 729 730 /* 731 * xydump: crash dump system 732 */ 733 int 734 xydump(dev_t dev, daddr_t blkno, void *va, size_t sz) 735 { 736 int unit, part; 737 struct xy_softc *xy; 738 739 unit = DISKUNIT(dev); 740 part = DISKPART(dev); 741 742 xy = device_lookup_private(&xy_cd, unit); 743 if (xy == NULL) 744 return ENXIO; 745 746 printf("%s%c: crash dump not supported (yet)\n", 747 device_xname(xy->sc_dev), 'a' + part); 748 749 return ENXIO; 750 751 /* outline: globals: "dumplo" == sector number of partition to start 752 * dump at (convert to physical sector with partition table) 753 * "dumpsize" == size of dump in clicks "physmem" == size of physical 754 * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 755 * physmem) 756 * 757 * dump a copy of physical memory to the dump device starting at sector 
758 * "dumplo" in the swap partition (make sure > 0). map in pages as 759 * we go. use polled I/O. 760 * 761 * XXX how to handle NON_CONTIG? 762 */ 763 } 764 765 static enum kauth_device_req 766 xy_getkauthreq(u_char cmd) 767 { 768 enum kauth_device_req req; 769 770 switch (cmd) { 771 case XYCMD_WR: 772 case XYCMD_WTH: 773 case XYCMD_WFM: 774 case XYCMD_WRH: 775 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITE; 776 break; 777 778 case XYCMD_RD: 779 case XYCMD_RTH: 780 case XYCMD_RDH: 781 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READ; 782 break; 783 784 case XYCMD_RDS: 785 case XYCMD_MBD: 786 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READCONF; 787 break; 788 789 case XYCMD_RST: 790 case XYCMD_SDS: 791 case XYCMD_MBL: 792 req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITECONF; 793 break; 794 795 case XYCMD_NOP: 796 case XYCMD_SK: 797 case XYCMD_ST: 798 case XYCMD_R: 799 default: 800 req = 0; 801 break; 802 } 803 804 return req; 805 } 806 807 /* 808 * xyioctl: ioctls on XY drives. based on ioctl's of other netbsd disks. 809 */ 810 int 811 xyioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l) 812 { 813 struct xy_softc *xy; 814 struct xd_iocmd *xio; 815 int error, s, unit; 816 817 unit = DISKUNIT(dev); 818 819 xy = device_lookup_private(&xy_cd, unit); 820 if (xy == NULL) 821 return ENXIO; 822 823 error = disk_ioctl(&xy->sc_dk, dev, cmd, addr, flag, l); 824 if (error != EPASSTHROUGH) 825 return error; 826 827 /* switch on ioctl type */ 828 829 switch (cmd) { 830 case DIOCSBAD: /* set bad144 info */ 831 if ((flag & FWRITE) == 0) 832 return EBADF; 833 s = splbio(); 834 memcpy(&xy->dkb, addr, sizeof(xy->dkb)); 835 splx(s); 836 return 0; 837 838 case DIOCSDINFO: /* set disk label */ 839 if ((flag & FWRITE) == 0) 840 return EBADF; 841 error = setdisklabel(xy->sc_dk.dk_label, 842 (struct disklabel *)addr, /* xy->sc_dk.dk_openmask : */ 0, 843 xy->sc_dk.dk_cpulabel); 844 if (error == 0) { 845 if (xy->state == XY_DRIVE_NOLABEL) 846 xy->state = XY_DRIVE_ONLINE; 847 } 848 return error; 849 850 case DIOCWLABEL: /* change write status of disk label */ 851 if ((flag & FWRITE) == 0) 852 return EBADF; 853 if (*(int *)addr) 854 xy->flags |= XY_WLABEL; 855 else 856 xy->flags &= ~XY_WLABEL; 857 return 0; 858 859 case DIOCWDINFO: /* write disk label */ 860 if ((flag & FWRITE) == 0) 861 return EBADF; 862 error = setdisklabel(xy->sc_dk.dk_label, 863 (struct disklabel *)addr, /* xy->sc_dk.dk_openmask : */ 0, 864 xy->sc_dk.dk_cpulabel); 865 if (error == 0) { 866 if (xy->state == XY_DRIVE_NOLABEL) 867 xy->state = XY_DRIVE_ONLINE; 868 869 /* Simulate opening partition 0 so write succeeds. */ 870 xy->sc_dk.dk_openmask |= (1 << 0); 871 error = writedisklabel(MAKEDISKDEV(major(dev), 872 DISKUNIT(dev), RAW_PART), 873 xystrategy, xy->sc_dk.dk_label, 874 xy->sc_dk.dk_cpulabel); 875 xy->sc_dk.dk_openmask = 876 xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask; 877 } 878 return error; 879 880 case DIOSXDCMD: { 881 enum kauth_device_req req; 882 883 xio = (struct xd_iocmd *)addr; 884 req = xy_getkauthreq(xio->cmd); 885 if ((error = kauth_authorize_device_passthru(l->l_cred, 886 dev, req, xio)) != 0) 887 return error; 888 return xyc_ioctlcmd(xy, dev, xio); 889 } 890 891 default: 892 return ENOTTY; 893 } 894 } 895 896 /* 897 * xyopen: open drive 898 */ 899 int 900 xyopen(dev_t dev, int flag, int fmt, struct lwp *l) 901 { 902 int err, unit, part, s; 903 struct xy_softc *xy; 904 905 /* first, could it be a valid target? 

/*
 * xyopen: open drive
 */
int
xyopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
	int err, unit, part, s;
	struct xy_softc *xy;

	/* first, could it be a valid target? */
	unit = DISKUNIT(dev);
	xy = device_lookup_private(&xy_cd, unit);
	if (xy == NULL)
		return ENXIO;
	part = DISKPART(dev);
	err = 0;

	/*
	 * If some other process is doing init, sleep.
	 */
	s = splbio();
	while (xy->state == XY_DRIVE_ATTACHING) {
		if (tsleep(&xy->state, PRIBIO, "xyopen", 0)) {
			err = EINTR;
			goto done;
		}
	}
	/* Do we need to init the drive? */
	if (xy->state == XY_DRIVE_UNKNOWN) {
		xy_init(xy);
		wakeup(&xy->state);
	}
	/* Was the init successful? */
	if (xy->state == XY_DRIVE_UNKNOWN) {
		err = EIO;
		goto done;
	}

	/* check for partition */
	if (part != RAW_PART &&
	    (part >= xy->sc_dk.dk_label->d_npartitions ||
	     xy->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		err = ENXIO;
		goto done;
	}

	/* set open masks */
	switch (fmt) {
	case S_IFCHR:
		xy->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		xy->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask;

done:
	splx(s);
	return err;
}

int
xyread(dev_t dev, struct uio *uio, int flags)
{

	return physio(xystrategy, NULL, dev, B_READ, minphys, uio);
}

int
xywrite(dev_t dev, struct uio *uio, int flags)
{

	return physio(xystrategy, NULL, dev, B_WRITE, minphys, uio);
}


/*
 * xysize: return size of a partition for a dump
 */

int
xysize(dev_t dev)
{
	struct xy_softc *xysc;
	int unit, part, size, omask;

	/* valid unit? */
	unit = DISKUNIT(dev);
	xysc = device_lookup_private(&xy_cd, unit);
	if (xysc == NULL)
		return -1;

	part = DISKPART(dev);
	omask = xysc->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && xyopen(dev, 0, S_IFBLK, NULL) != 0)
		return -1;

	/* do it */
	if (xysc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;	/* only give valid size for swap partitions */
	else
		size = xysc->sc_dk.dk_label->d_partitions[part].p_size *
		    (xysc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && xyclose(dev, 0, S_IFBLK, NULL) != 0)
		return -1;
	return size;
}

/*
 * xystrategy: buffering system interface to xy.
 */
void
xystrategy(struct buf *bp)
{
	struct xy_softc *xy;
	int s, unit;
	struct disklabel *lp;
	daddr_t blkno;

	unit = DISKUNIT(bp->b_dev);

	/* check for live device */

	xy = device_lookup_private(&xy_cd, unit);
	if (xy == NULL ||
	    bp->b_blkno < 0 ||
	    (bp->b_bcount % xy->sc_dk.dk_label->d_secsize) != 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* There should always be an open first. */
	if (xy->state == XY_DRIVE_UNKNOWN) {
		bp->b_error = EIO;
		goto done;
	}
	if (xy->state != XY_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) {
		/* no I/O to unlabeled disks, unless raw partition */
		bp->b_error = EIO;
		goto done;
	}
	/* short circuit zero length request */

	if (bp->b_bcount == 0)
		goto done;

	/* check bounds with label (disksubr.c).  Determine the size of the
	 * transfer, and make sure it is within the boundaries of the
	 * partition.  Adjust transfer if needed, and signal errors or early
	 * completion. */

	lp = xy->sc_dk.dk_label;

	if (bounds_check_with_label(&xy->sc_dk, bp,
	    (xy->flags & XY_WLABEL) != 0) <= 0)
		goto done;

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
	blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	if (DISKPART(bp->b_dev) != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	/*
	 * now we know we have a valid buf structure that we need to do I/O
	 * on.
	 */

	s = splbio();		/* protect the queues */

	bufq_put(xy->xyq, bp);	/* XXX disksort_cylinder */

	/* start 'em up */

	xyc_start(xy->parent, NULL);

	/* done! */

	splx(s);
	return;

done:
	/* tells upper layers we are done with this buf */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

/*
 * end of {b,c}devsw functions
 */

/*
 * i n t e r r u p t   f u n c t i o n
 *
 * xycintr: hardware interrupt.
 */
int
xycintr(void *v)
{
	struct xyc_softc *xycsc = v;

	/* kick the event counter */
	xycsc->sc_intrcnt.ev_count++;

	/* remove as many done IOPBs as possible */
	xyc_remove_iorq(xycsc);

	/* start any iorq's already waiting */
	xyc_start(xycsc, NULL);

	return 1;
}

/*
 * end of interrupt function
 */

/*
 * i n t e r n a l   f u n c t i o n s
 */

/*
 * xyc_rqinit: fill out the fields of an I/O request
 */

inline void
xyc_rqinit(struct xy_iorq *rq, struct xyc_softc *xyc, struct xy_softc *xy,
    int md, u_long blk, int cnt, void *db, struct buf *bp)
{

	rq->xyc = xyc;
	rq->xy = xy;
	rq->ttl = XYC_MAXTTL + 10;
	rq->mode = md;
	rq->tries = rq->errno = rq->lasterror = 0;
	rq->blockno = blk;
	rq->sectcnt = cnt;
	rq->dbuf = rq->dbufbase = db;
	rq->buf = bp;
}

/*
 * xyc_rqtopb: load up an IOPB based on an iorq
 */

void
xyc_rqtopb(struct xy_iorq *iorq, struct xy_iopb *iopb, int cmd, int subfun)
{
	u_long block, dp;

	/* normal IOPB case, standard stuff */

	/* chain bit handled later */
	iopb->ien = (XY_STATE(iorq->mode) == XY_SUB_POLL) ? 0 : 1;
	iopb->com = cmd;
	iopb->errno = 0;
	iopb->errs = 0;
	iopb->done = 0;
	if (iorq->xy) {
		iopb->unit = iorq->xy->xy_drive;
		iopb->dt = iorq->xy->drive_type;
	} else {
		iopb->unit = 0;
		iopb->dt = 0;
	}
	block = iorq->blockno;
	if (iorq->xy == NULL || block == 0) {
		iopb->sect = iopb->head = iopb->cyl = 0;
	} else {
		iopb->sect = block % iorq->xy->nsect;
		block = block / iorq->xy->nsect;
		iopb->head = block % iorq->xy->nhead;
		block = block / iorq->xy->nhead;
		iopb->cyl = block;
	}
	iopb->scnt = iorq->sectcnt;
	if (iorq->dbuf == NULL) {
		iopb->dataa = 0;
		iopb->datar = 0;
	} else {
		dp = dvma_kvtopa(iorq->dbuf, iorq->xyc->bustype);
		iopb->dataa = (dp & 0xffff);
		iopb->datar = ((dp & 0xff0000) >> 16);
	}
	iopb->subfn = subfun;
}


/*
 * xyc_unbusy: wait for the xyc to go unbusy, or timeout.
 */

int
xyc_unbusy(struct xyc *xyc, int del)
{

	while (del-- > 0) {
		if ((xyc->xyc_csr & XYC_GBSY) == 0)
			break;
		DELAY(1);
	}
	return del == 0 ? XY_ERR_FAIL : XY_ERR_AOK;
}
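
/*
 * Note: xyc_unbusy() burns one DELAY(1) per loop pass, so the `del'
 * argument is effectively a timeout budget in (roughly) microseconds;
 * callers pass values such as XYC_RESETUSEC and XYC_MAXTIME.
 */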

/*
 * xyc_cmd: front end for POLL'd and WAIT'd commands.  Returns 0 or error.
 * note that NORM requests are handled separately.
 */
int
xyc_cmd(struct xyc_softc *xycsc, int cmd, int subfn, int unit, int block,
    int scnt, char *dptr, int fullmode)
{
	struct xy_iorq *iorq = xycsc->ciorq;
	struct xy_iopb *iopb = xycsc->ciopb;
	int submode = XY_STATE(fullmode);

	/*
	 * is someone else using the control iorq?  wait for it if we can
	 */
start:
	if (submode == XY_SUB_WAIT && XY_STATE(iorq->mode) != XY_SUB_FREE) {
		if (tsleep(iorq, PRIBIO, "xyc_cmd", 0))
			return XY_ERR_FAIL;
		goto start;
	}

	if (XY_STATE(iorq->mode) != XY_SUB_FREE) {
		DELAY(1000000);		/* XY_SUB_POLL: steal the iorq */
		iorq->mode = XY_SUB_FREE;
		printf("%s: stole control iopb\n", device_xname(xycsc->sc_dev));
	}

	/* init iorq/iopb */

	xyc_rqinit(iorq, xycsc,
	    (unit == XYC_NOUNIT) ? NULL : xycsc->sc_drives[unit],
	    fullmode, block, scnt, dptr, NULL);

	/* load IOPB from iorq */

	xyc_rqtopb(iorq, iopb, cmd, subfn);

	/* submit it for processing */

	xyc_submit_iorq(xycsc, iorq, fullmode);	/* error code will be in iorq */

	return XY_ERR_AOK;
}

/*
 * xyc_startbuf
 * start a buffer for running
 */

int
xyc_startbuf(struct xyc_softc *xycsc, struct xy_softc *xysc, struct buf *bp)
{
	struct xy_iorq *iorq;
	struct xy_iopb *iopb;
	u_long block;
	void *dbuf;

	iorq = xysc->xyrq;
	iopb = iorq->iopb;

	/* get buf */

	if (bp == NULL)
		panic("%s null buf", __func__);

#ifdef XYC_DEBUG
	int partno = DISKPART(bp->b_dev);
	printf("%s: %s%c: %s block %d\n", __func__, device_xname(xysc->sc_dev),
	    'a' + partno, (bp->b_flags & B_READ) ? "read" : "write",
	    (int)bp->b_blkno);
	printf("xyc_startbuf: b_bcount %d, b_data 0x%x\n",
	    bp->b_bcount, bp->b_data);
#endif

	/*
	 * load request.
	 *
	 * also, note that there are two kinds of buf structures, those with
	 * B_PHYS set and those without B_PHYS.  if B_PHYS is set, then it is
	 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users'
	 * buffer which has already been mapped into DVMA space.  (Not on sun3)
	 * However, if B_PHYS is not set, then the buffer is a normal system
	 * buffer which does *not* live in DVMA space.  In that case we call
	 * dvma_mapin to map it into DVMA space so we can do the DMA to it.
	 *
	 * in cases where we do a dvma_mapin, note that iorq points to the
	 * buffer as mapped into DVMA space, whereas the bp->b_data points
	 * to its non-DVMA mapping.
	 *
	 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped
	 * into dvma space, only that it was remapped into the kernel.
	 * We ALWAYS have to remap the kernel buf into DVMA space.
	 * (It is done inexpensively, using whole segments!)
	 */

	block = bp->b_rawblkno;

	dbuf = dvma_mapin(bp->b_data, bp->b_bcount, 0);
	if (dbuf == NULL) {	/* out of DVMA space */
		printf("%s: warning: out of DVMA space\n",
		    device_xname(xycsc->sc_dev));
		return XY_ERR_FAIL;	/* XXX: need some sort of
					 * call-back scheme here? */
	}

	/* init iorq and load iopb from it */

	xyc_rqinit(iorq, xycsc, xysc, XY_SUB_NORM | XY_MODE_VERBO, block,
	    bp->b_bcount / XYFM_BPS, dbuf, bp);

	xyc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XYCMD_RD : XYCMD_WR, 0);

	/* Instrumentation. */
	disk_busy(&xysc->sc_dk);

	return XY_ERR_AOK;
}


/*
 * xyc_submit_iorq: submit an iorq for processing.  returns XY_ERR_AOK
 * if ok.  if it fails, it returns an error code.  type is XY_SUB_*.
 *
 * note: caller frees iorq in all cases except NORM
 *
 * return value:
 *   NORM: XY_AOK (req pending), XY_FAIL (couldn't submit request)
 *   WAIT: XY_AOK (success), <error-code> (failed)
 *   POLL: <same as WAIT>
 *   NOQ : <same as NORM>
 *
 * there are three sources for i/o requests:
 * [1] xystrategy: normal block I/O, using "struct buf" system.
 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
 * [3] open/ioctl: these are I/O requests done in the context of a process,
 *     and the process should block until they are done.
 *
 * software state is stored in the iorq structure.  each iorq has an
 * iopb structure.  the hardware understands the iopb structure.
 * every command must go through an iopb.  a 450 handles one iopb at a
 * time, whereas a 451 can take them in chains.  [the 450 claims it
 * can handle chains, but it appears to be buggy...]  iopb are allocated
 * in DVMA space at boot up time.  each disk gets one iopb, and the
 * controller gets one (for POLL and WAIT commands).  what happens if
 * the iopb is busy?  for i/o type [1], the buffers are queued at the
 * "buff" layer and picked up later by the interrupt routine.  for case
 * [2] we can only be blocked if there is a WAIT type I/O request being
 * run.  since this can only happen when we are crashing, we wait a sec
 * and then steal the IOPB.  for case [3] the process can sleep
 * on the iorq free list until some iopbs are available.
 */

int
xyc_submit_iorq(struct xyc_softc *xycsc, struct xy_iorq *iorq, int type)
{
	struct xy_iopb *iopb;
	u_long iopbaddr;

#ifdef XYC_DEBUG
	printf("%s(%s, addr=0x%x, type=%d)\n", __func__,
	    device_xname(xycsc->sc_dev), iorq, type);
#endif

	/* first check and see if controller is busy */
	if ((xycsc->xyc->xyc_csr & XYC_GBSY) != 0) {
#ifdef XYC_DEBUG
		printf("%s: XYC not ready (BUSY)\n", __func__);
#endif
		if (type == XY_SUB_NOQ)
			return XY_ERR_FAIL;	/* failed */
		switch (type) {
		case XY_SUB_NORM:
			return XY_ERR_AOK;	/* success */
		case XY_SUB_WAIT:
			while (iorq->iopb->done == 0) {
				(void)tsleep(iorq, PRIBIO, "xyciorq", 0);
			}
			return (iorq->errno);
		case XY_SUB_POLL:		/* steal controller */
			iopbaddr = xycsc->xyc->xyc_rsetup;	/* RESET */
			if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) ==
			    XY_ERR_FAIL)
				panic("%s: stuck xyc", __func__);
			printf("%s: stole controller\n",
			    device_xname(xycsc->sc_dev));
			break;
		default:
			panic("%s adding", __func__);
		}
	}

	iopb = xyc_chain(xycsc, iorq);	/* build chain */
	if (iopb == NULL) {		/* nothing doing? */
		if (type == XY_SUB_NORM || type == XY_SUB_NOQ)
			return XY_ERR_AOK;
		panic("xyc_submit_iorq: xyc_chain failed!");
	}
	iopbaddr = dvma_kvtopa(iopb, xycsc->bustype);

	XYC_GO(xycsc->xyc, iopbaddr);

	/* command now running, wrap it up */
	switch (type) {
	case XY_SUB_NORM:
	case XY_SUB_NOQ:
		return XY_ERR_AOK;	/* success */
	case XY_SUB_WAIT:
		while (iorq->iopb->done == 0) {
			(void)tsleep(iorq, PRIBIO, "xyciorq", 0);
		}
		return iorq->errno;
	case XY_SUB_POLL:
		return xyc_piodriver(xycsc, iorq);
	default:
		panic("%s wrap up", __func__);
	}
	panic("%s impossible", __func__);
	return 0;	/* not reached */
}


/*
 * xyc_chain: build a chain.  return dvma address of first element in
 * the chain.  iorq != NULL: means we only want that item on the chain.
 */

struct xy_iopb *
xyc_chain(struct xyc_softc *xycsc, struct xy_iorq *iorq)
{
	int togo, chain, hand;
	struct xy_iopb *iopb, *prev_iopb;

	memset(xycsc->xy_chain, 0, sizeof(xycsc->xy_chain));

	/*
	 * promote control IOPB to the top
	 */
	if (iorq == NULL) {
		if ((XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_POLL ||
		     XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_WAIT) &&
		    xycsc->iopbase[XYC_CTLIOPB].done == 0)
			iorq = &xycsc->reqs[XYC_CTLIOPB];
	}

	/*
	 * special case: if iorq != NULL then we have a POLL or WAIT request.
	 * we let these take priority and do them first.
	 */
	if (iorq) {
		xycsc->xy_chain[0] = iorq;
		iorq->iopb->chen = 0;
		return iorq->iopb;
	}

	/*
	 * NORM case: do round robin and maybe chain (if allowed and possible)
	 */

	chain = 0;
	hand = xycsc->xy_hand;
	xycsc->xy_hand = (xycsc->xy_hand + 1) % XYC_MAXIOPB;

	for (togo = XYC_MAXIOPB; togo > 0;
	    togo--, hand = (hand + 1) % XYC_MAXIOPB) {

		if (XY_STATE(xycsc->reqs[hand].mode) != XY_SUB_NORM ||
		    xycsc->iopbase[hand].done)
			continue;	/* not ready-for-i/o */

		xycsc->xy_chain[chain] = &xycsc->reqs[hand];
		iopb = xycsc->xy_chain[chain]->iopb;
		iopb->chen = 0;
		if (chain != 0) {	/* adding a link to a chain? */
			prev_iopb = xycsc->xy_chain[chain-1]->iopb;
			prev_iopb->chen = 1;
			prev_iopb->nxtiopb = 0xffff &
			    dvma_kvtopa(iopb, xycsc->bustype);
		} else {		/* head of chain */
			iorq = xycsc->xy_chain[chain];
		}
		chain++;
		if (xycsc->no_ols)
			break;	/* quit if chaining dis-allowed */
	}
	return iorq ? iorq->iopb : NULL;
}
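
/*
 * Summary of the chain built above: a POLL/WAIT request (or a pending
 * control IOPB) always goes out alone, while NORM requests are gathered
 * round-robin starting at xy_hand so no drive monopolizes the controller.
 * Each added link sets the previous IOPB's chen bit and stores the next
 * IOPB's DVMA address in nxtiopb; only the low 16 bits are kept, which is
 * presumably why xycattach() insists that all IOPBs share one 64K DVMA
 * region.  With no_ols set the loop stops after the first entry, i.e.
 * chaining is disabled.
 */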
1498 */ 1499 int 1500 xyc_piodriver(struct xyc_softc *xycsc, struct xy_iorq *iorq) 1501 { 1502 int nreset = 0; 1503 int retval = 0; 1504 u_long res; 1505 1506 #ifdef XYC_DEBUG 1507 printf("%s(%s, 0x%x)\n", __func__, device_xname(xycsc->sc_dev), iorq); 1508 #endif 1509 1510 while (iorq->iopb->done == 0) { 1511 1512 res = xyc_unbusy(xycsc->xyc, XYC_MAXTIME); 1513 1514 /* we expect some progress soon */ 1515 if (res == XY_ERR_FAIL && nreset >= 2) { 1516 xyc_reset(xycsc, 0, XY_RSET_ALL, XY_ERR_FAIL, 0); 1517 #ifdef XYC_DEBUG 1518 printf("%s: timeout\n", __func__); 1519 #endif 1520 return XY_ERR_FAIL; 1521 } 1522 if (res == XY_ERR_FAIL) { 1523 if (xyc_reset(xycsc, 0, 1524 (nreset++ == 0) ? XY_RSET_NONE : iorq, 1525 XY_ERR_FAIL, 0) == XY_ERR_FAIL) 1526 return XY_ERR_FAIL; /* flushes all but POLL 1527 * requests, resets */ 1528 continue; 1529 } 1530 1531 xyc_remove_iorq(xycsc); /* may resubmit request */ 1532 1533 if (iorq->iopb->done == 0) 1534 xyc_start(xycsc, iorq); 1535 } 1536 1537 /* get return value */ 1538 1539 retval = iorq->errno; 1540 1541 #ifdef XYC_DEBUG 1542 printf("%s: done, retval = 0x%x (%s)\n", __func__, 1543 iorq->errno, xyc_e2str(iorq->errno)); 1544 #endif 1545 1546 /* start up any bufs that have queued */ 1547 1548 xyc_start(xycsc, NULL); 1549 1550 return retval; 1551 } 1552 1553 /* 1554 * xyc_xyreset: reset one drive. NOTE: assumes xyc was just reset. 1555 * we steal iopb[XYC_CTLIOPB] for this, but we put it back when we are done. 1556 */ 1557 void 1558 xyc_xyreset(struct xyc_softc *xycsc, struct xy_softc *xysc) 1559 { 1560 struct xy_iopb tmpiopb; 1561 u_long addr; 1562 int del; 1563 memcpy(&tmpiopb, xycsc->ciopb, sizeof(tmpiopb)); 1564 xycsc->ciopb->chen = xycsc->ciopb->done = xycsc->ciopb->errs = 0; 1565 xycsc->ciopb->ien = 0; 1566 xycsc->ciopb->com = XYCMD_RST; 1567 xycsc->ciopb->unit = xysc->xy_drive; 1568 addr = dvma_kvtopa(xycsc->ciopb, xycsc->bustype); 1569 1570 XYC_GO(xycsc->xyc, addr); 1571 1572 del = XYC_RESETUSEC; 1573 while (del > 0) { 1574 if ((xycsc->xyc->xyc_csr & XYC_GBSY) == 0) 1575 break; 1576 DELAY(1); 1577 del--; 1578 } 1579 1580 if (del <= 0 || xycsc->ciopb->errs) { 1581 printf("%s: off-line: %s\n", device_xname(xycsc->sc_dev), 1582 xyc_e2str(xycsc->ciopb->errno)); 1583 del = xycsc->xyc->xyc_rsetup; 1584 if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL) 1585 panic("%s", __func__); 1586 } else { 1587 xycsc->xyc->xyc_csr = XYC_IPND; /* clear IPND */ 1588 } 1589 memcpy(xycsc->ciopb, &tmpiopb, sizeof(tmpiopb)); 1590 } 1591 1592 1593 /* 1594 * xyc_reset: reset everything: requests are marked as errors except 1595 * a polled request (which is resubmitted) 1596 */ 1597 int 1598 xyc_reset(struct xyc_softc *xycsc, int quiet, struct xy_iorq *blastmode, 1599 int error, struct xy_softc *xysc) 1600 { 1601 int del = 0, lcv, retval = XY_ERR_AOK; 1602 struct xy_iorq *iorq; 1603 1604 /* soft reset hardware */ 1605 1606 if (quiet == 0) 1607 printf("%s: soft reset\n", device_xname(xycsc->sc_dev)); 1608 del = xycsc->xyc->xyc_rsetup; 1609 del = xyc_unbusy(xycsc->xyc, XYC_RESETUSEC); 1610 if (del == XY_ERR_FAIL) { 1611 blastmode = XY_RSET_ALL; /* dead, flush all requests */ 1612 retval = XY_ERR_FAIL; 1613 } 1614 if (xysc) 1615 xyc_xyreset(xycsc, xysc); 1616 1617 /* fix queues based on "blast-mode" */ 1618 1619 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 1620 iorq = &xycsc->reqs[lcv]; 1621 1622 if (XY_STATE(iorq->mode) != XY_SUB_POLL && 1623 XY_STATE(iorq->mode) != XY_SUB_WAIT && 1624 XY_STATE(iorq->mode) != XY_SUB_NORM) 1625 /* is it active? 
			continue;

		if (blastmode == XY_RSET_ALL ||
		    blastmode != iorq) {
			/* failed */
			iorq->errno = error;
			xycsc->iopbase[lcv].done = xycsc->iopbase[lcv].errs = 1;
			switch (XY_STATE(iorq->mode)) {
			case XY_SUB_NORM:
				iorq->buf->b_error = EIO;
				iorq->buf->b_resid = iorq->sectcnt * XYFM_BPS;
				/* Sun3: map/unmap regardless of B_PHYS */
				dvma_mapout(iorq->dbufbase,
				    iorq->buf->b_bcount);
				(void)bufq_get(iorq->xy->xyq);
				disk_unbusy(&iorq->xy->sc_dk,
				    (iorq->buf->b_bcount - iorq->buf->b_resid),
				    (iorq->buf->b_flags & B_READ));
				biodone(iorq->buf);
				iorq->mode = XY_SUB_FREE;
				break;
			case XY_SUB_WAIT:
				wakeup(iorq);
			case XY_SUB_POLL:
				iorq->mode =
				    XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
				break;
			}

		} else {

			/* resubmit, no need to do anything here */
		}
	}

	/*
	 * now, if stuff is waiting, start it.
	 * since we just reset it should go
	 */
	xyc_start(xycsc, NULL);

	return retval;
}

/*
 * xyc_start: start waiting buffers
 */

void
xyc_start(struct xyc_softc *xycsc, struct xy_iorq *iorq)
{
	int lcv;
	struct xy_softc *xy;

	if (iorq == NULL) {
		for (lcv = 0; lcv < XYC_MAXDEV; lcv++) {
			if ((xy = xycsc->sc_drives[lcv]) == NULL)
				continue;
			if (bufq_peek(xy->xyq) == NULL)
				continue;
			if (xy->xyrq->mode != XY_SUB_FREE)
				continue;
			xyc_startbuf(xycsc, xy, bufq_peek(xy->xyq));
		}
	}
	xyc_submit_iorq(xycsc, iorq, XY_SUB_NOQ);
}

/*
 * xyc_remove_iorq: remove "done" IOPB's.
 */

int
xyc_remove_iorq(struct xyc_softc *xycsc)
{
	int errno, rq, comm, errs;
	struct xyc *xyc = xycsc->xyc;
	u_long addr;
	struct xy_iopb *iopb;
	struct xy_iorq *iorq;
	struct buf *bp;

	if (xyc->xyc_csr & XYC_DERR) {
		/*
		 * DOUBLE ERROR: should never happen under normal use.  This
		 * error is so bad, you can't even tell which IOPB is bad, so
		 * we dump them all.
		 */
		errno = XY_ERR_DERR;
		printf("%s: DOUBLE ERROR!\n", device_xname(xycsc->sc_dev));
		if (xyc_reset(xycsc, 0, XY_RSET_ALL, errno, 0) != XY_ERR_AOK) {
			printf("%s: soft reset failed!\n",
			    device_xname(xycsc->sc_dev));
			panic("%s: controller DEAD", __func__);
		}
		return XY_ERR_AOK;
	}

	/*
	 * get iopb that is done, loop down the chain
	 */

	if (xyc->xyc_csr & XYC_ERR) {
		xyc->xyc_csr = XYC_ERR;		/* clear error condition */
	}
	if (xyc->xyc_csr & XYC_IPND) {
		xyc->xyc_csr = XYC_IPND;	/* clear interrupt */
	}

	for (rq = 0; rq < XYC_MAXIOPB; rq++) {
		iorq = xycsc->xy_chain[rq];
		if (iorq == NULL)
			break;		/* done ! */
		if (iorq->mode == 0 || XY_STATE(iorq->mode) == XY_SUB_DONE)
			continue;	/* free, or done */
		iopb = iorq->iopb;
		if (iopb->done == 0)
			continue;	/* not done yet */

		comm = iopb->com;
		errs = iopb->errs;

		if (errs)
			iorq->errno = iopb->errno;
		else
			iorq->errno = 0;

		/* handle non-fatal errors */

		if (errs &&
		    xyc_error(xycsc, iorq, iopb, comm) == XY_ERR_AOK)
			continue;	/* AOK: we resubmitted it */


		/* this iorq is now done (hasn't been restarted or anything) */

		if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror)
			xyc_perror(iorq, iopb, 0);

		/* now, if read/write check to make sure we got all the data
		 * we needed. (this may not be the case if we got an error in
		 * the middle of a multisector request). */

		if ((iorq->mode & XY_MODE_B144) != 0 && errs == 0 &&
		    (comm == XYCMD_RD || comm == XYCMD_WR)) {
			/* we just successfully processed a bad144 sector
			 * note: if we are in bad 144 mode, the pointers have
			 * been advanced already (see above) and are pointing
			 * at the bad144 sector.  to exit bad144 mode, we
			 * must advance the pointers 1 sector and issue a new
			 * request if there are still sectors left to process
			 *
			 */
			XYC_ADVANCE(iorq, 1);	/* advance 1 sector */

			/* exit b144 mode */
			iorq->mode = iorq->mode & (~XY_MODE_B144);

			if (iorq->sectcnt) {	/* more to go! */
				iorq->lasterror = iorq->errno = iopb->errno = 0;
				iopb->errs = iopb->done = 0;
				iorq->tries = 0;
				iopb->scnt = iorq->sectcnt;
				iopb->cyl =
				    iorq->blockno / iorq->xy->sectpercyl;
				iopb->head =
				    (iorq->blockno / iorq->xy->nhead) %
				    iorq->xy->nhead;
				iopb->sect = iorq->blockno % XYFM_BPS;
				addr = dvma_kvtopa(iorq->dbuf, xycsc->bustype);
				iopb->dataa = (addr & 0xffff);
				iopb->datar = ((addr & 0xff0000) >> 16);
				/* will resubmit at end */
				continue;
			}
		}
		/* final cleanup, totally done with this request */

		switch (XY_STATE(iorq->mode)) {
		case XY_SUB_NORM:
			bp = iorq->buf;
			if (errs) {
				bp->b_error = EIO;
				bp->b_resid = iorq->sectcnt * XYFM_BPS;
			} else {
				bp->b_resid = 0;	/* done */
			}
			/* Sun3: map/unmap regardless of B_PHYS */
			dvma_mapout(iorq->dbufbase, iorq->buf->b_bcount);
			(void)bufq_get(iorq->xy->xyq);
			disk_unbusy(&iorq->xy->sc_dk,
			    (bp->b_bcount - bp->b_resid),
			    (bp->b_flags & B_READ));
			iorq->mode = XY_SUB_FREE;
			biodone(bp);
			break;
		case XY_SUB_WAIT:
			iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
			wakeup(iorq);
			break;
		case XY_SUB_POLL:
			iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
			break;
		}
	}

	return XY_ERR_AOK;
}

/*
 * xyc_perror: print error.
 * - if still_trying is true: we got an error, retried and got a
 *   different error.  in that case lasterror is the old error,
 *   and errno is the new one.
 * - if still_trying is not true, then if we ever had an error it
 *   is in lasterror.  also, if iorq->errno == 0, then we recovered
 *   from that error (otherwise iorq->errno == iorq->lasterror).
 */
void
xyc_perror(struct xy_iorq *iorq, struct xy_iopb *iopb, int still_trying)
{
	int error = iorq->lasterror;

	printf("%s", (iorq->xy) ? device_xname(iorq->xy->sc_dev)
	    : device_xname(iorq->xyc->sc_dev));
	if (iorq->buf)
		printf("%c: ", 'a' + (char)DISKPART(iorq->buf->b_dev));
	if (iopb->com == XYCMD_RD || iopb->com == XYCMD_WR)
		printf("%s %d/%d/%d: ",
		    (iopb->com == XYCMD_RD) ? "read" : "write",
		    iopb->cyl, iopb->head, iopb->sect);
	printf("%s", xyc_e2str(error));

	if (still_trying)
		printf(" [still trying, new error=%s]", xyc_e2str(iorq->errno));
	else
		if (iorq->errno == 0)
			printf(" [recovered in %d tries]", iorq->tries);

	printf("\n");
}

/*
 * xyc_error: non-fatal error encountered... recover.
 * return AOK if resubmitted, return FAIL if this iopb is done
 */
int
xyc_error(struct xyc_softc *xycsc, struct xy_iorq *iorq, struct xy_iopb *iopb,
    int comm)
{
	int errno = iorq->errno;
	int erract = xyc_entoact(errno);
	int oldmode, advance, i;

	if (erract == XY_ERA_RSET) {	/* some errors require a reset */
		oldmode = iorq->mode;
		iorq->mode = XY_SUB_DONE | (~XY_SUB_MASK & oldmode);
		/* make xyc_start ignore us */
		xyc_reset(xycsc, 1, XY_RSET_NONE, errno, iorq->xy);
		iorq->mode = oldmode;
	}
	/* check for read/write to a sector in bad144 table if bad: redirect
	 * request to bad144 area */

	if ((comm == XYCMD_RD || comm == XYCMD_WR) &&
	    (iorq->mode & XY_MODE_B144) == 0) {
		advance = iorq->sectcnt - iopb->scnt;
		XYC_ADVANCE(iorq, advance);
		if ((i = isbad(&iorq->xy->dkb,
		    iorq->blockno / iorq->xy->sectpercyl,
		    (iorq->blockno / iorq->xy->nsect) % iorq->xy->nhead,
		    iorq->blockno % iorq->xy->nsect)) != -1) {
			iorq->mode |= XY_MODE_B144;	/* enter bad144 mode &
							 * redirect */
			iopb->errno = iopb->done = iopb->errs = 0;
			iopb->scnt = 1;
			iopb->cyl = (iorq->xy->ncyl + iorq->xy->acyl) - 2;
			/* second to last acyl */
			i = iorq->xy->sectpercyl - 1 - i;	/* follow bad144
								 * standard */
			iopb->head = i / iorq->xy->nhead;
			iopb->sect = i % iorq->xy->nhead;
			/* will resubmit when we come out of remove_iorq */
			return XY_ERR_AOK;	/* recovered! */
		}
	}

	/*
	 * it isn't a bad144 sector, must be real error! see if we can retry
	 * it?
	 */
	if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror)
		xyc_perror(iorq, iopb, 1);	/* inform of error state
						 * change */
	iorq->lasterror = errno;

	if ((erract == XY_ERA_RSET || erract == XY_ERA_HARD) &&
	    iorq->tries < XYC_MAXTRIES) {	/* retry? */
		iorq->tries++;
		iorq->errno = iopb->errno = iopb->done = iopb->errs = 0;
		/* will resubmit at end of remove_iorq */
		return XY_ERR_AOK;	/* recovered! */
	}

	/* failed to recover from this error */
	return XY_ERR_FAIL;
}
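
/*
 * The bad144 redirection above follows the bad144 convention: replacement
 * sectors come from the end of the second-to-last alternate cylinder,
 * counting backwards, so table entry i maps to sector (sectpercyl - 1 - i)
 * of that cylinder.  The table itself is read by xy_init() from the first
 * sector of the disk's last track.
 */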
1935 */ 1936 void 1937 xyc_tick(void *arg) 1938 { 1939 struct xyc_softc *xycsc = arg; 1940 int lcv, s, reset = 0; 1941 1942 /* reduce ttl for each request if one goes to zero, reset xyc */ 1943 s = splbio(); 1944 for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) { 1945 if (xycsc->reqs[lcv].mode == 0 || 1946 XY_STATE(xycsc->reqs[lcv].mode) == XY_SUB_DONE) 1947 continue; 1948 xycsc->reqs[lcv].ttl--; 1949 if (xycsc->reqs[lcv].ttl == 0) 1950 reset = 1; 1951 } 1952 if (reset) { 1953 printf("%s: watchdog timeout\n", device_xname(xycsc->sc_dev)); 1954 xyc_reset(xycsc, 0, XY_RSET_NONE, XY_ERR_FAIL, NULL); 1955 } 1956 splx(s); 1957 1958 /* until next time */ 1959 1960 callout_reset(&xycsc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xycsc); 1961 } 1962 1963 /* 1964 * xyc_ioctlcmd: this function provides a user level interface to the 1965 * controller via ioctl. this allows "format" programs to be written 1966 * in user code, and is also useful for some debugging. we return 1967 * an error code. called at user priority. 1968 * 1969 * XXX missing a few commands (see the 7053 driver for ideas) 1970 */ 1971 int 1972 xyc_ioctlcmd(struct xy_softc *xy, dev_t dev, struct xd_iocmd *xio) 1973 { 1974 int s, err, rqno; 1975 void *dvmabuf = NULL; 1976 struct xyc_softc *xycsc; 1977 1978 /* check sanity of requested command */ 1979 1980 switch (xio->cmd) { 1981 1982 case XYCMD_NOP: /* no op: everything should be zero */ 1983 if (xio->subfn || xio->dptr || xio->dlen || 1984 xio->block || xio->sectcnt) 1985 return EINVAL; 1986 break; 1987 1988 case XYCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */ 1989 case XYCMD_WR: 1990 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS || 1991 xio->sectcnt * XYFM_BPS != xio->dlen || xio->dptr == NULL) 1992 return EINVAL; 1993 break; 1994 1995 case XYCMD_SK: /* seek: doesn't seem useful to export this */ 1996 return EINVAL; 1997 break; 1998 1999 default: 2000 return EINVAL;/* ??? */ 2001 } 2002 2003 /* create DVMA buffer for request if needed */ 2004 2005 if (xio->dlen) { 2006 dvmabuf = dvma_malloc(xio->dlen); 2007 if (xio->cmd == XYCMD_WR) { 2008 err = copyin(xio->dptr, dvmabuf, xio->dlen); 2009 if (err) { 2010 dvma_free(dvmabuf, xio->dlen); 2011 return err; 2012 } 2013 } 2014 } 2015 /* do it! 
	/* do it! */

	err = 0;
	xycsc = xy->parent;
	s = splbio();
	rqno = xyc_cmd(xycsc, xio->cmd, xio->subfn, xy->xy_drive, xio->block,
	    xio->sectcnt, dvmabuf, XY_SUB_WAIT);
	if (rqno == XY_ERR_FAIL) {
		err = EIO;
		goto done;
	}
	xio->errno = xycsc->ciorq->errno;
	xio->tries = xycsc->ciorq->tries;
	XYC_DONE(xycsc, err);

	if (xio->cmd == XYCMD_RD)
		err = copyout(dvmabuf, xio->dptr, xio->dlen);

done:
	splx(s);
	if (dvmabuf)
		dvma_free(dvmabuf, xio->dlen);
	return err;
}

/*
 * xyc_e2str: convert error code number into an error string
 */
const char *
xyc_e2str(int no)
{
	switch (no) {
	case XY_ERR_FAIL:
		return "Software fatal error";
	case XY_ERR_DERR:
		return "DOUBLE ERROR";
	case XY_ERR_AOK:
		return "Successful completion";
	case XY_ERR_IPEN:
		return "Interrupt pending";
	case XY_ERR_BCFL:
		return "Busy conflict";
	case XY_ERR_TIMO:
		return "Operation timeout";
	case XY_ERR_NHDR:
		return "Header not found";
	case XY_ERR_HARD:
		return "Hard ECC error";
	case XY_ERR_ICYL:
		return "Illegal cylinder address";
	case XY_ERR_ISEC:
		return "Illegal sector address";
	case XY_ERR_SMAL:
		return "Last sector too small";
	case XY_ERR_SACK:
		return "Slave ACK error (non-existent memory)";
	case XY_ERR_CHER:
		return "Cylinder and head/header error";
	case XY_ERR_SRTR:
		return "Auto-seek retry successful";
	case XY_ERR_WPRO:
		return "Write-protect error";
	case XY_ERR_UIMP:
		return "Unimplemented command";
	case XY_ERR_DNRY:
		return "Drive not ready";
	case XY_ERR_SZER:
		return "Sector count zero";
	case XY_ERR_DFLT:
		return "Drive faulted";
	case XY_ERR_ISSZ:
		return "Illegal sector size";
	case XY_ERR_SLTA:
		return "Self test A";
	case XY_ERR_SLTB:
		return "Self test B";
	case XY_ERR_SLTC:
		return "Self test C";
	case XY_ERR_SOFT:
		return "Soft ECC error";
	case XY_ERR_SFOK:
		return "Soft ECC error recovered";
	case XY_ERR_IHED:
		return "Illegal head";
	case XY_ERR_DSEQ:
		return "Disk sequencer error";
	case XY_ERR_SEEK:
		return "Seek error";
	default:
		return "Unknown error";
	}
}

int
xyc_entoact(int errno)
{

	switch (errno) {
	case XY_ERR_FAIL:
	case XY_ERR_DERR:
	case XY_ERR_IPEN:
	case XY_ERR_BCFL:
	case XY_ERR_ICYL:
	case XY_ERR_ISEC:
	case XY_ERR_UIMP:
	case XY_ERR_SZER:
	case XY_ERR_ISSZ:
	case XY_ERR_SLTA:
	case XY_ERR_SLTB:
	case XY_ERR_SLTC:
	case XY_ERR_IHED:
	case XY_ERR_SACK:
	case XY_ERR_SMAL:
		return XY_ERA_PROG;	/* program error ! */

	case XY_ERR_TIMO:
	case XY_ERR_NHDR:
	case XY_ERR_HARD:
	case XY_ERR_DNRY:
	case XY_ERR_CHER:
	case XY_ERR_SEEK:
	case XY_ERR_SOFT:
		return XY_ERA_HARD;	/* hard error, retry */

	case XY_ERR_DFLT:
	case XY_ERR_DSEQ:
		return XY_ERA_RSET;	/* hard error reset */

	case XY_ERR_SRTR:
	case XY_ERR_SFOK:
	case XY_ERR_AOK:
		return XY_ERA_SOFT;	/* an FYI error */

	case XY_ERR_WPRO:
		return XY_ERA_WPRO;	/* write protect */
	}

	return XY_ERA_PROG;	/* ??? */
}