/*	$OpenBSD: gdt_common.c,v 1.37 2006/11/28 23:59:45 dlg Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2003 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver would not have been written if it was not for the hardware
 * donations from both ICP-Vortex and Öko.neT.  I want to thank them for
 * their support.
 */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <uvm/uvm_extern.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <dev/biovar.h>
#include <dev/ic/gdtreg.h>
#include <dev/ic/gdtvar.h>

#include "bio.h"

#ifdef GDT_DEBUG
int gdt_maxcmds = GDT_MAXCMDS;
#undef GDT_MAXCMDS
#define GDT_MAXCMDS gdt_maxcmds
#endif

#define GDT_DRIVER_VERSION 1
#define GDT_DRIVER_SUBVERSION 2

int	gdt_async_event(struct gdt_softc *, int);
void	gdt_chain(struct gdt_softc *);
void	gdt_clear_events(struct gdt_softc *);
void	gdt_copy_internal_data(struct scsi_xfer *, u_int8_t *, size_t);
struct scsi_xfer *gdt_dequeue(struct gdt_softc *);
void	gdt_enqueue(struct gdt_softc *, struct scsi_xfer *, int);
void	gdt_enqueue_ccb(struct gdt_softc *, struct gdt_ccb *);
void	gdt_eval_mapping(u_int32_t, int *, int *, int *);
int	gdt_exec_ccb(struct gdt_ccb *);
void	gdt_free_ccb(struct gdt_softc *, struct gdt_ccb *);
struct gdt_ccb *gdt_get_ccb(struct gdt_softc *, int);
void	gdt_internal_cache_cmd(struct scsi_xfer *);
int	gdt_internal_cmd(struct gdt_softc *, u_int8_t, u_int16_t,
    u_int32_t, u_int32_t, u_int32_t);
#if NBIO > 0
int	gdt_ioctl(struct device *, u_long, caddr_t);
int	gdt_ioctl_inq(struct gdt_softc *, struct bioc_inq *);
int	gdt_ioctl_vol(struct gdt_softc *, struct bioc_vol *);
int	gdt_ioctl_disk(struct gdt_softc *, struct bioc_disk *);
int	gdt_ioctl_alarm(struct gdt_softc *, struct bioc_alarm *);
int	gdt_ioctl_setstate(struct gdt_softc *, struct bioc_setstate *);
#endif /* NBIO > 0 */
int	gdt_raw_scsi_cmd(struct scsi_xfer *);
int	gdt_scsi_cmd(struct scsi_xfer *);
void	gdt_start_ccbs(struct gdt_softc *);
int	gdt_sync_event(struct gdt_softc *, int, u_int8_t,
    struct scsi_xfer *);
void	gdt_timeout(void *);
int	gdt_wait(struct gdt_softc *, struct gdt_ccb *, int);
void	gdt_watchdog(void *);

struct cfdriver gdt_cd = {
	NULL, "gdt", DV_DULL
};

struct scsi_adapter gdt_switch = {
	gdt_scsi_cmd, gdtminphys, 0, 0,
};

struct scsi_adapter gdt_raw_switch = {
	gdt_raw_scsi_cmd, gdtminphys, 0, 0,
};

struct scsi_device gdt_dev = {
	NULL, NULL, NULL, NULL
};

int gdt_cnt = 0;
u_int8_t gdt_polling;
u_int8_t gdt_from_wait;
struct gdt_softc *gdt_wait_gdt;
int gdt_wait_index;
#ifdef GDT_DEBUG
int gdt_debug = GDT_DEBUG;
#endif

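/*
 * Attach the controller: set up the scratch DMA area and the ccb pool,
 * initialize the screen, cache and raw SCSI services, detect the busses
 * and cache (host) drives, and attach one scsibus for the cache drives
 * plus one per physical bus.
 */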
int
gdt_attach(gdt)
	struct gdt_softc *gdt;
{
	struct scsibus_attach_args saa;
	u_int16_t cdev_cnt;
	int i, id, drv_cyls, drv_hds, drv_secs, error, nsegs;

	gdt_polling = 1;
	gdt_from_wait = 0;

	if (bus_dmamem_alloc(gdt->sc_dmat, GDT_SCRATCH_SZ, PAGE_SIZE, 0,
	    &gdt->sc_scratch_seg, 1, &nsegs, BUS_DMA_NOWAIT))
		panic("%s: bus_dmamem_alloc failed", DEVNAME(gdt));
	if (bus_dmamem_map(gdt->sc_dmat, &gdt->sc_scratch_seg, 1,
	    GDT_SCRATCH_SZ, &gdt->sc_scratch, BUS_DMA_NOWAIT))
		panic("%s: bus_dmamem_map failed", DEVNAME(gdt));

	gdt_clear_events(gdt);

	TAILQ_INIT(&gdt->sc_free_ccb);
	TAILQ_INIT(&gdt->sc_ccbq);
	TAILQ_INIT(&gdt->sc_ucmdq);
	LIST_INIT(&gdt->sc_queue);

	/* Initialize the ccbs */
	for (i = 0; i < GDT_MAXCMDS; i++) {
		gdt->sc_ccbs[i].gc_cmd_index = i + 2;
		error = bus_dmamap_create(gdt->sc_dmat,
		    (GDT_MAXOFFSETS - 1) << PGSHIFT, GDT_MAXOFFSETS,
		    (GDT_MAXOFFSETS - 1) << PGSHIFT, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &gdt->sc_ccbs[i].gc_dmamap_xfer);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)",
			    DEVNAME(gdt), error);
			return (1);
		}
		(void)gdt_ccb_set_cmd(gdt->sc_ccbs + i, GDT_GCF_UNUSED);
		TAILQ_INSERT_TAIL(&gdt->sc_free_ccb, &gdt->sc_ccbs[i],
		    gc_chain);
	}

	/* Fill in the prototype scsi_link. */
	gdt->sc_link.adapter_softc = gdt;
	gdt->sc_link.adapter = &gdt_switch;
	gdt->sc_link.device = &gdt_dev;
	/* openings will be filled in later. */
	gdt->sc_link.adapter_buswidth =
	    (gdt->sc_class & GDT_FC) ? GDT_MAXID : GDT_MAX_HDRIVES;
	gdt->sc_link.adapter_target = gdt->sc_link.adapter_buswidth;

	if (!gdt_internal_cmd(gdt, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) {
		printf("screen service initialization error %d\n",
		    gdt->sc_status);
		return (1);
	}

	if (!gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0,
	    0)) {
		printf("cache service initialization error %d\n",
		    gdt->sc_status);
		return (1);
	}

	cdev_cnt = (u_int16_t)gdt->sc_info;

	/* Detect number of busses */
	gdt_enc32(gdt->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
	gdt->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
	gdt->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
	gdt->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
	gdt_enc32(gdt->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
	if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL,
	    GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
	    GDT_IOC_HDR_SZ + GDT_RAWIOC_SZ)) {
		gdt->sc_bus_cnt = gdt->sc_scratch[GDT_IOC_CHAN_COUNT];
		for (i = 0; i < gdt->sc_bus_cnt; i++) {
			id = gdt->sc_scratch[GDT_IOC_HDR_SZ +
			    i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
			gdt->sc_bus_id[id] = id < GDT_MAXBUS ? id : 0xff;
		}

	} else {
		/* New method failed, use fallback. */
		gdt_enc32(gdt->sc_scratch + GDT_GETCH_CHANNEL_NO, i);
		for (i = 0; i < GDT_MAXBUS; i++) {
			if (!gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL,
			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
			    GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
			    GDT_GETCH_SZ)) {
				if (i == 0) {
					printf("cannot get channel count, "
					    "error %d\n", gdt->sc_status);
					return (1);
				}
				break;
			}
			gdt->sc_bus_id[i] =
			    (gdt->sc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID) ?
			    gdt->sc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
		}
		gdt->sc_bus_cnt = i;
	}

	/* Read cache configuration */
	if (!gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL, GDT_CACHE_INFO,
	    GDT_INVALID_CHANNEL, GDT_CINFO_SZ)) {
		printf("cannot get cache info, error %d\n", gdt->sc_status);
		return (1);
	}
	gdt->sc_cpar.cp_version =
	    gdt_dec32(gdt->sc_scratch + GDT_CPAR_VERSION);
	gdt->sc_cpar.cp_state = gdt_dec16(gdt->sc_scratch + GDT_CPAR_STATE);
	gdt->sc_cpar.cp_strategy =
	    gdt_dec16(gdt->sc_scratch + GDT_CPAR_STRATEGY);
	gdt->sc_cpar.cp_write_back =
	    gdt_dec16(gdt->sc_scratch + GDT_CPAR_WRITE_BACK);
	gdt->sc_cpar.cp_block_size =
	    gdt_dec16(gdt->sc_scratch + GDT_CPAR_BLOCK_SIZE);

	/* Read board information and features */
	gdt->sc_more_proc = 0;
	if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL, GDT_BOARD_INFO,
	    GDT_INVALID_CHANNEL, GDT_BINFO_SZ)) {
		/* XXX A lot of these assignments can probably go later */
		gdt->sc_binfo.bi_ser_no =
		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_SER_NO);
		bcopy(gdt->sc_scratch + GDT_BINFO_OEM_ID,
		    gdt->sc_binfo.bi_oem_id, sizeof gdt->sc_binfo.bi_oem_id);
		gdt->sc_binfo.bi_ep_flags =
		    gdt_dec16(gdt->sc_scratch + GDT_BINFO_EP_FLAGS);
		gdt->sc_binfo.bi_proc_id =
		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_PROC_ID);
		gdt->sc_binfo.bi_memsize =
		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_MEMSIZE);
		gdt->sc_binfo.bi_mem_banks =
		    gdt->sc_scratch[GDT_BINFO_MEM_BANKS];
		gdt->sc_binfo.bi_chan_type =
		    gdt->sc_scratch[GDT_BINFO_CHAN_TYPE];
		gdt->sc_binfo.bi_chan_count =
		    gdt->sc_scratch[GDT_BINFO_CHAN_COUNT];
		gdt->sc_binfo.bi_rdongle_pres =
		    gdt->sc_scratch[GDT_BINFO_RDONGLE_PRES];
		gdt->sc_binfo.bi_epr_fw_ver =
		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_EPR_FW_VER);
		gdt->sc_binfo.bi_upd_fw_ver =
		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_UPD_FW_VER);
		gdt->sc_binfo.bi_upd_revision =
		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_UPD_REVISION);
		bcopy(gdt->sc_scratch + GDT_BINFO_TYPE_STRING,
		    gdt->sc_binfo.bi_type_string,
		    sizeof gdt->sc_binfo.bi_type_string);
		bcopy(gdt->sc_scratch + GDT_BINFO_RAID_STRING,
		    gdt->sc_binfo.bi_raid_string,
		    sizeof gdt->sc_binfo.bi_raid_string);
		gdt->sc_binfo.bi_update_pres =
		    gdt->sc_scratch[GDT_BINFO_UPDATE_PRES];
		gdt->sc_binfo.bi_xor_pres =
		    gdt->sc_scratch[GDT_BINFO_XOR_PRES];
		gdt->sc_binfo.bi_prom_type =
		    gdt->sc_scratch[GDT_BINFO_PROM_TYPE];
		gdt->sc_binfo.bi_prom_count =
		    gdt->sc_scratch[GDT_BINFO_PROM_COUNT];
		gdt->sc_binfo.bi_dup_pres =
		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_DUP_PRES);
		gdt->sc_binfo.bi_chan_pres =
		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_CHAN_PRES);
		gdt->sc_binfo.bi_mem_pres =
		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_MEM_PRES);
		gdt->sc_binfo.bi_ft_bus_system =
		    gdt->sc_scratch[GDT_BINFO_FT_BUS_SYSTEM];
		gdt->sc_binfo.bi_subtype_valid =
		    gdt->sc_scratch[GDT_BINFO_SUBTYPE_VALID];
		gdt->sc_binfo.bi_board_subtype =
		    gdt->sc_scratch[GDT_BINFO_BOARD_SUBTYPE];
		gdt->sc_binfo.bi_rampar_pres =
		    gdt->sc_scratch[GDT_BINFO_RAMPAR_PRES];

		if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL,
		    GDT_BOARD_FEATURES, GDT_INVALID_CHANNEL, GDT_BFEAT_SZ)) {
			gdt->sc_bfeat.bf_chaining =
			    gdt->sc_scratch[GDT_BFEAT_CHAINING];
			gdt->sc_bfeat.bf_striping =
			    gdt->sc_scratch[GDT_BFEAT_STRIPING];
			gdt->sc_bfeat.bf_mirroring =
			    gdt->sc_scratch[GDT_BFEAT_MIRRORING];
			gdt->sc_bfeat.bf_raid =
			    gdt->sc_scratch[GDT_BFEAT_RAID];
			gdt->sc_more_proc = 1;
		}
	} else {
		/* XXX Not implemented yet */
	}

	/* Read more information */
	if (gdt->sc_more_proc) {
		int bus, j;
		/* physical drives, channel addresses */
		/* step 1: get magical bus number from firmware */
		gdt_enc32(gdt->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
		gdt->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
		gdt->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
		gdt->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
		gdt_enc32(gdt->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
		if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL,
		    GDT_IOCHAN_DESC, GDT_INVALID_CHANNEL,
		    GDT_IOC_HDR_SZ + GDT_IOC_SZ * GDT_MAXBUS)) {
			GDT_DPRINTF(GDT_D_INFO, ("method 1\n"));
			for (bus = 0; bus < gdt->sc_bus_cnt; bus++) {
				gdt->sc_raw[bus].ra_address =
				    gdt_dec32(gdt->sc_scratch +
				    GDT_IOC_HDR_SZ +
				    GDT_IOC_SZ * bus +
				    GDT_IOC_ADDRESS);
				gdt->sc_raw[bus].ra_local_no =
				    gdt_dec8(gdt->sc_scratch +
				    GDT_IOC_HDR_SZ +
				    GDT_IOC_SZ * bus +
				    GDT_IOC_LOCAL_NO);
				GDT_DPRINTF(GDT_D_INFO, (
				    "bus: %d address: %x local: %x\n",
				    bus,
				    gdt->sc_raw[bus].ra_address,
				    gdt->sc_raw[bus].ra_local_no));
			}
		} else {
			GDT_DPRINTF(GDT_D_INFO, ("method 2\n"));
			for (bus = 0; bus < gdt->sc_bus_cnt; bus++) {
				gdt->sc_raw[bus].ra_address = GDT_IO_CHANNEL;
				gdt->sc_raw[bus].ra_local_no = bus;
				GDT_DPRINTF(GDT_D_INFO, (
				    "bus: %d address: %x local: %x\n",
				    bus,
				    gdt->sc_raw[bus].ra_address,
				    gdt->sc_raw[bus].ra_local_no));
			}
		}
		/* step 2: use magical bus number to get nr of phys disks */
		for (bus = 0; bus < gdt->sc_bus_cnt; bus++) {
			gdt_enc32(gdt->sc_scratch + GDT_GETCH_CHANNEL_NO,
			    gdt->sc_raw[bus].ra_local_no);
			if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL,
			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
			    gdt->sc_raw[bus].ra_address | GDT_INVALID_CHANNEL,
			    GDT_GETCH_SZ)) {
				gdt->sc_raw[bus].ra_phys_cnt =
				    gdt_dec32(gdt->sc_scratch +
				    GDT_GETCH_DRIVE_CNT);
				GDT_DPRINTF(GDT_D_INFO, ("chan: %d disks: %d\n",
				    bus, gdt->sc_raw[bus].ra_phys_cnt));
			}

			/* step 3: get scsi disk nr */
			if (gdt->sc_raw[bus].ra_phys_cnt > 0) {
				gdt_enc32(gdt->sc_scratch +
				    GDT_GETSCSI_CHAN,
				    gdt->sc_raw[bus].ra_local_no);
				gdt_enc32(gdt->sc_scratch +
				    GDT_GETSCSI_CNT,
				    gdt->sc_raw[bus].ra_phys_cnt);
				if (gdt_internal_cmd(gdt, GDT_CACHESERVICE,
				    GDT_IOCTL,
				    GDT_SCSI_DR_LIST | GDT_L_CTRL_PATTERN,
				    gdt->sc_raw[bus].ra_address |
				    GDT_INVALID_CHANNEL,
				    GDT_GETSCSI_SZ))
					for (j = 0;
					    j < gdt->sc_raw[bus].ra_phys_cnt;
					    j++) {
						gdt->sc_raw[bus].ra_id_list[j] =
						    gdt_dec32(gdt->sc_scratch +
						    GDT_GETSCSI_LIST +
						    GDT_GETSCSI_LIST_SZ * j);
						GDT_DPRINTF(GDT_D_INFO,
						    (" diskid: %d\n",
						    gdt->sc_raw[bus].ra_id_list[j]));
					}
				else
					gdt->sc_raw[bus].ra_phys_cnt = 0;
			}
			/* add found disks to grand total */
			gdt->sc_total_disks += gdt->sc_raw[bus].ra_phys_cnt;
		}
	} /* if (gdt->sc_more_proc) */

	if (!gdt_internal_cmd(gdt, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) {
		printf("raw service initialization error %d\n",
		    gdt->sc_status);
		return (1);
	}

	/* Set/get features raw service (scatter/gather) */
	gdt->sc_raw_feat = 0;
	if (gdt_internal_cmd(gdt, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
	    GDT_SCATTER_GATHER, 0, 0))
		if (gdt_internal_cmd(gdt, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0,
		    0, 0))
			gdt->sc_raw_feat = gdt->sc_info;

	/* Set/get features cache service (scatter/gather) */
	gdt->sc_cache_feat = 0;
	if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_SET_FEAT, 0,
	    GDT_SCATTER_GATHER, 0))
		if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0,
		    0))
			gdt->sc_cache_feat = gdt->sc_info;

	/* XXX Linux reserve drives here, potentially */

	gdt->sc_ndevs = 0;
	/* Scan for cache devices */
	for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++)
		if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_INFO, i, 0,
		    0)) {
			gdt->sc_hdr[i].hd_present = 1;
			gdt->sc_hdr[i].hd_size = gdt->sc_info;

			if (gdt->sc_hdr[i].hd_size > 0)
				gdt->sc_ndevs++;

			/*
			 * Evaluate mapping (sectors per head, heads per cyl)
			 */
			gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
			if (gdt->sc_info2 == 0)
				gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
				    &drv_cyls, &drv_hds, &drv_secs);
			else {
				drv_hds = gdt->sc_info2 & 0xff;
				drv_secs = (gdt->sc_info2 >> 8) & 0xff;
				drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
				    drv_secs;
			}
			gdt->sc_hdr[i].hd_heads = drv_hds;
			gdt->sc_hdr[i].hd_secs = drv_secs;
			/* Round the size */
			gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;

			if (gdt_internal_cmd(gdt, GDT_CACHESERVICE,
			    GDT_DEVTYPE, i, 0, 0))
				gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
		}

	if (gdt->sc_ndevs == 0)
		gdt->sc_link.openings = 0;
	else
		gdt->sc_link.openings = (GDT_MAXCMDS - GDT_CMD_RESERVE) /
		    gdt->sc_ndevs;

	printf("dpmem %llx %d-bus %d cache device%s\n",
	    (long long)gdt->sc_dpmembase,
	    gdt->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? "" : "s");
	printf("%s: ver %x, cache %s, strategy %d, writeback %s, blksz %d\n",
	    DEVNAME(gdt), gdt->sc_cpar.cp_version,
	    gdt->sc_cpar.cp_state ? "on" : "off", gdt->sc_cpar.cp_strategy,
	    gdt->sc_cpar.cp_write_back ? "on" : "off",
	    gdt->sc_cpar.cp_block_size);
#if 1
	printf("%s: raw feat %x cache feat %x\n", DEVNAME(gdt),
	    gdt->sc_raw_feat, gdt->sc_cache_feat);
#endif

#if NBIO > 0
	if (bio_register(&gdt->sc_dev, gdt_ioctl) != 0)
		panic("%s: controller registration failed", DEVNAME(gdt));
#endif
	gdt_cnt++;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &gdt->sc_link;

	config_found(&gdt->sc_dev, &saa, scsiprint);

	gdt->sc_raw_link = malloc(gdt->sc_bus_cnt * sizeof (struct scsi_link),
	    M_DEVBUF, M_NOWAIT);
	if (gdt->sc_raw_link == NULL)
		panic("gdt_attach");
	bzero(gdt->sc_raw_link, gdt->sc_bus_cnt * sizeof (struct scsi_link));

	for (i = 0; i < gdt->sc_bus_cnt; i++) {
		/* Fill in the prototype scsi_link. */
		gdt->sc_raw_link[i].adapter_softc = gdt;
		gdt->sc_raw_link[i].adapter = &gdt_raw_switch;
		gdt->sc_raw_link[i].adapter_target = 7;
		gdt->sc_raw_link[i].device = &gdt_dev;
		gdt->sc_raw_link[i].openings = 4;	/* XXX a guess */
		gdt->sc_raw_link[i].adapter_buswidth =
		    (gdt->sc_class & GDT_FC) ? GDT_MAXID : 16;	/* XXX */

		bzero(&saa, sizeof(saa));
		saa.saa_sc_link = &gdt->sc_raw_link[i];

		config_found(&gdt->sc_dev, &saa, scsiprint);
	}

	gdt_polling = 0;
	return (0);
}

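/*
 * Translate a drive size in sectors into a fake CHS geometry: try
 * 64 heads / 32 sectors first, then 127 / 63, and fall back to the
 * "big" geometry if the cylinder count still exceeds GDT_MAXCYLS.
 */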
void
gdt_eval_mapping(size, cyls, heads, secs)
	u_int32_t size;
	int *cyls, *heads, *secs;
{
	*cyls = size / GDT_HEADS / GDT_SECS;
	if (*cyls < GDT_MAXCYLS) {
		*heads = GDT_HEADS;
		*secs = GDT_SECS;
	} else {
		/* Too high for 64 * 32 */
		*cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
		if (*cyls < GDT_MAXCYLS) {
			*heads = GDT_MEDHEADS;
			*secs = GDT_MEDSECS;
		} else {
			/* Too high for 127 * 63 */
			*cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
			*heads = GDT_BIGHEADS;
			*secs = GDT_BIGSECS;
		}
	}
}

/*
 * Insert a command into the driver queue, either at the front or at the tail.
 * It's ok to overload the freelist link as these structures are never on
 * the freelist at this time.
 */
void
gdt_enqueue(gdt, xs, infront)
	struct gdt_softc *gdt;
	struct scsi_xfer *xs;
	int infront;
{
	if (infront || LIST_FIRST(&gdt->sc_queue) == NULL) {
		if (LIST_FIRST(&gdt->sc_queue) == NULL)
			gdt->sc_queuelast = xs;
		LIST_INSERT_HEAD(&gdt->sc_queue, xs, free_list);
		return;
	}
	LIST_INSERT_AFTER(gdt->sc_queuelast, xs, free_list);
	gdt->sc_queuelast = xs;
}

/*
 * Pull a command off the front of the driver queue.
 */
struct scsi_xfer *
gdt_dequeue(gdt)
	struct gdt_softc *gdt;
{
	struct scsi_xfer *xs;

	xs = LIST_FIRST(&gdt->sc_queue);
	if (xs == NULL)
		return (NULL);
	LIST_REMOVE(xs, free_list);

	if (LIST_FIRST(&gdt->sc_queue) == NULL)
		gdt->sc_queuelast = NULL;

	return (xs);
}

/*
 * Start a SCSI operation on a cache device.
 * XXX Polled operation is not yet complete.  What kind of locking do we need?
 */
int
gdt_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct gdt_softc *gdt = link->adapter_softc;
	u_int8_t target = link->target;
	struct gdt_ccb *ccb;
#if 0
	struct gdt_ucmd *ucmd;
#endif
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	bus_dmamap_t xfer;
	int error, retval = SUCCESSFULLY_QUEUED;
	gdt_lock_t lock;

	GDT_DPRINTF(GDT_D_CMD, ("gdt_scsi_cmd "));

	xs->error = XS_NOERROR;

	if (target >= GDT_MAX_HDRIVES || !gdt->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		/*
		 * XXX Should be XS_SENSE but that would require setting up a
		 * faked sense too.
		 */
		xs->error = XS_DRIVER_STUFFUP;
		xs->flags |= ITSDONE;
		scsi_done(xs);
		return (COMPLETE);
	}

	lock = GDT_LOCK_GDT(gdt);

	/* Don't double enqueue if we came from gdt_chain. */
	if (xs != LIST_FIRST(&gdt->sc_queue))
		gdt_enqueue(gdt, xs, 0);

	while ((xs = gdt_dequeue(gdt)) != NULL) {
		xs->error = XS_NOERROR;
		ccb = NULL;
		link = xs->sc_link;
		target = link->target;

		if (!gdt_polling && !(xs->flags & SCSI_POLL) &&
		    gdt->sc_test_busy(gdt)) {
			/*
			 * Put it back in front.  XXX Should we instead
			 * set xs->error to XS_BUSY?
			 */
			gdt_enqueue(gdt, xs, 1);
			break;
		}

		switch (xs->cmd->opcode) {
		case TEST_UNIT_READY:
		case REQUEST_SENSE:
		case INQUIRY:
		case MODE_SENSE:
		case START_STOP:
		case READ_CAPACITY:
#if 0
		case VERIFY:
#endif
			gdt_internal_cache_cmd(xs);
			xs->flags |= ITSDONE;
			scsi_done(xs);
			goto ready;

		case PREVENT_ALLOW:
			GDT_DPRINTF(GDT_D_CMD, ("PREVENT/ALLOW "));
			/* XXX Not yet implemented */
			xs->error = XS_NOERROR;
			xs->flags |= ITSDONE;
			scsi_done(xs);
			goto ready;

		default:
			GDT_DPRINTF(GDT_D_CMD,
			    ("unknown opc %d ", xs->cmd->opcode));
			/* XXX Not yet implemented */
			xs->error = XS_DRIVER_STUFFUP;
			xs->flags |= ITSDONE;
			scsi_done(xs);
			goto ready;

		case READ_COMMAND:
		case READ_BIG:
		case WRITE_COMMAND:
		case WRITE_BIG:
		case SYNCHRONIZE_CACHE:
			/*
			 * A new command chain, start from the beginning.
			 */
			gdt->sc_cmd_off = 0;

			if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
				/* A read or write operation. */
				if (xs->cmdlen == 6) {
					rw = (struct scsi_rw *)xs->cmd;
					blockno = _3btol(rw->addr) &
					    (SRW_TOPADDR << 16 | 0xffff);
					blockcnt =
					    rw->length ? rw->length : 0x100;
				} else {
					rwb = (struct scsi_rw_big *)xs->cmd;
					blockno = _4btol(rwb->addr);
					blockcnt = _2btol(rwb->length);
				}
				if (blockno >= gdt->sc_hdr[target].hd_size ||
				    blockno + blockcnt >
				    gdt->sc_hdr[target].hd_size) {
					printf(
					    "%s: out of bounds %u-%u >= %u\n",
					    DEVNAME(gdt), blockno,
					    blockcnt,
					    gdt->sc_hdr[target].hd_size);
					/*
					 * XXX Should be XS_SENSE but that
					 * would require setting up a faked
					 * sense too.
					 */
					xs->error = XS_DRIVER_STUFFUP;
					xs->flags |= ITSDONE;
					scsi_done(xs);
					goto ready;
				}
			}

			ccb = gdt_get_ccb(gdt, xs->flags);
			/*
			 * We are out of commands, try again in a little while.
			 */
			if (ccb == NULL) {
				GDT_UNLOCK_GDT(gdt, lock);
				return (TRY_AGAIN_LATER);
			}

			ccb->gc_blockno = blockno;
			ccb->gc_blockcnt = blockcnt;
			ccb->gc_xs = xs;
			ccb->gc_timeout = xs->timeout;
			ccb->gc_service = GDT_CACHESERVICE;
			gdt_ccb_set_cmd(ccb, GDT_GCF_SCSI);

			if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
				xfer = ccb->gc_dmamap_xfer;
				error = bus_dmamap_load(gdt->sc_dmat, xfer,
				    xs->data, xs->datalen, NULL,
				    (xs->flags & SCSI_NOSLEEP) ?
				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
				if (error) {
					printf("%s: gdt_scsi_cmd: ",
					    DEVNAME(gdt));
					if (error == EFBIG)
						printf(
						    "more than %d dma segs\n",
						    GDT_MAXOFFSETS);
					else
						printf("error %d "
						    "loading dma map\n",
						    error);

					gdt_free_ccb(gdt, ccb);
					xs->error = XS_DRIVER_STUFFUP;
					xs->flags |= ITSDONE;
					scsi_done(xs);
					goto ready;
				}
				bus_dmamap_sync(gdt->sc_dmat, xfer, 0,
				    xfer->dm_mapsize,
				    (xs->flags & SCSI_DATA_IN) ?
				    BUS_DMASYNC_PREREAD :
				    BUS_DMASYNC_PREWRITE);
			}

			gdt_enqueue_ccb(gdt, ccb);
			/* XXX what if enqueue did not start a transfer? */
			if (gdt_polling || (xs->flags & SCSI_POLL)) {
				if (!gdt_wait(gdt, ccb, ccb->gc_timeout)) {
					GDT_UNLOCK_GDT(gdt, lock);
					printf("%s: command %d timed out\n",
					    DEVNAME(gdt),
					    ccb->gc_cmd_index);
					return (TRY_AGAIN_LATER);
				}
				xs->flags |= ITSDONE;
				scsi_done(xs);
			}
		}

 ready:
		/*
		 * Don't process the queue if we are polling.
		 */
		if (xs->flags & SCSI_POLL) {
			retval = COMPLETE;
			break;
		}
	}

	GDT_UNLOCK_GDT(gdt, lock);
	return (retval);
}

/* XXX Currently only for cacheservice, returns 0 if busy */
int
gdt_exec_ccb(ccb)
	struct gdt_ccb *ccb;
{
	struct scsi_xfer *xs = ccb->gc_xs;
	struct scsi_link *link = xs->sc_link;
	struct gdt_softc *gdt = link->adapter_softc;
	u_int8_t target = link->target;
	u_int32_t sg_canz;
	bus_dmamap_t xfer;
	int i;
#if 1 /* XXX */
	static int __level = 0;

	if (__level++ > 0)
		panic("level > 0");
#endif
	GDT_DPRINTF(GDT_D_CMD, ("gdt_exec_ccb(%p, %p) ", xs, ccb));

	gdt->sc_cmd_cnt = 0;

	/*
	 * XXX Yeah I know it's an always-true condition, but that may change
	 * later.
	 */
	if (gdt->sc_cmd_cnt == 0)
		gdt->sc_set_sema0(gdt);

	gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX, ccb->gc_cmd_index);
	gdt_enc32(gdt->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
	gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
	    target);

	switch (xs->cmd->opcode) {
	case PREVENT_ALLOW:
	case SYNCHRONIZE_CACHE:
		if (xs->cmd->opcode == PREVENT_ALLOW) {
			/* XXX PREVENT_ALLOW support goes here */
		} else {
			GDT_DPRINTF(GDT_D_CMD,
			    ("SYNCHRONIZE CACHE tgt %d ", target));
			gdt->sc_cmd[GDT_CMD_OPCODE] = GDT_FLUSH;
		}
		gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
		    1);
		sg_canz = 0;
		break;

	case WRITE_COMMAND:
	case WRITE_BIG:
		/* XXX WRITE_THR could be supported too */
		gdt->sc_cmd[GDT_CMD_OPCODE] = GDT_WRITE;
		break;

	case READ_COMMAND:
	case READ_BIG:
		gdt->sc_cmd[GDT_CMD_OPCODE] = GDT_READ;
		break;
	}

	if (xs->cmd->opcode != PREVENT_ALLOW &&
	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
		gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
		    ccb->gc_blockno);
		gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
		    ccb->gc_blockcnt);

		xfer = ccb->gc_dmamap_xfer;
		if (gdt->sc_cache_feat & GDT_SCATTER_GATHER) {
			gdt_enc32(
			    gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
			    0xffffffff);
			for (i = 0; i < xfer->dm_nsegs; i++) {
				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
				    GDT_SG_PTR,
				    xfer->dm_segs[i].ds_addr);
				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
				    GDT_SG_LEN,
				    xfer->dm_segs[i].ds_len);
				GDT_DPRINTF(GDT_D_IO,
				    ("#%d va %p pa %p len %x\n", i, buf,
				    xfer->dm_segs[i].ds_addr,
				    xfer->dm_segs[i].ds_len));
			}
			sg_canz = xfer->dm_nsegs;
			gdt_enc32(
			    gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
			    sg_canz * GDT_SG_SZ + GDT_SG_LEN, 0);
		} else {
			/* XXX Hardly correct */
			gdt_enc32(
			    gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
			    xfer->dm_segs[0].ds_addr);
			sg_canz = 0;
		}
	}
	gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, sg_canz);

	gdt->sc_cmd_len =
	    roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + sg_canz * GDT_SG_SZ,
	    sizeof (u_int32_t));

	if (gdt->sc_cmd_cnt > 0 &&
	    gdt->sc_cmd_off + gdt->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
	    gdt->sc_ic_all_size) {
		printf("%s: DPMEM overflow\n", DEVNAME(gdt));
		gdt_free_ccb(gdt, ccb);
		xs->error = XS_BUSY;
#if 1 /* XXX */
		__level--;
#endif
		return (0);
	}

	gdt->sc_copy_cmd(gdt, ccb);
	gdt->sc_release_event(gdt, ccb);

	xs->error = XS_NOERROR;
	xs->resid = 0;
#if 1 /* XXX */
	__level--;
#endif
	return (1);
}

void
gdt_copy_internal_data(xs, data, size)
	struct scsi_xfer *xs;
	u_int8_t *data;
	size_t size;
{
	size_t copy_cnt;

	GDT_DPRINTF(GDT_D_MISC, ("gdt_copy_internal_data "));

	if (!xs->datalen)
		printf("uio move not yet supported\n");
	else {
		copy_cnt = MIN(size, xs->datalen);
		bcopy(data, xs->data, copy_cnt);
	}
}

/* Emulated SCSI operation on cache device */
void
gdt_internal_cache_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct gdt_softc *gdt = link->adapter_softc;
	struct scsi_inquiry_data inq;
	struct scsi_sense_data sd;
	struct scsi_read_cap_data rcd;
	u_int8_t target = link->target;

	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd "));

	switch (xs->cmd->opcode) {
	case TEST_UNIT_READY:
	case START_STOP:
#if 0
	case VERIFY:
#endif
		GDT_DPRINTF(GDT_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
		    target));
		break;

	case REQUEST_SENSE:
		GDT_DPRINTF(GDT_D_CMD, ("REQUEST SENSE tgt %d ", target));
		bzero(&sd, sizeof sd);
		sd.error_code = 0x70;
		sd.segment = 0;
		sd.flags = SKEY_NO_SENSE;
		gdt_enc32(sd.info, 0);
		sd.extra_len = 0;
		gdt_copy_internal_data(xs, (u_int8_t *)&sd, sizeof sd);
		break;

	case INQUIRY:
		GDT_DPRINTF(GDT_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
		    gdt->sc_hdr[target].hd_devtype));
		bzero(&inq, sizeof inq);
		inq.device =
		    (gdt->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
		inq.dev_qual2 =
		    (gdt->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		strlcpy(inq.vendor, "ICP ", sizeof inq.vendor);
		snprintf(inq.product, sizeof inq.product, "Host drive #%02d",
		    target);
		strlcpy(inq.revision, " ", sizeof inq.revision);
		gdt_copy_internal_data(xs, (u_int8_t *)&inq, sizeof inq);
		break;

	case READ_CAPACITY:
		GDT_DPRINTF(GDT_D_CMD, ("READ CAPACITY tgt %d ", target));
		bzero(&rcd, sizeof rcd);
		_lto4b(gdt->sc_hdr[target].hd_size - 1, rcd.addr);
		_lto4b(GDT_SECTOR_SIZE, rcd.length);
		gdt_copy_internal_data(xs, (u_int8_t *)&rcd, sizeof rcd);
		break;

	default:
		GDT_DPRINTF(GDT_D_CMD, ("unsupported scsi command %#x tgt %d ",
		    xs->cmd->opcode, target));
		xs->error = XS_DRIVER_STUFFUP;
		return;
	}

	xs->error = XS_NOERROR;
}

/* Start a raw SCSI operation */
int
gdt_raw_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct gdt_softc *gdt = link->adapter_softc;
	struct gdt_ccb *ccb;
	int s;

	GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_scsi_cmd "));

	s = GDT_LOCK_GDT(gdt);

	if (xs->cmdlen > 12 /* XXX create #define */) {
		GDT_DPRINTF(GDT_D_CMD, ("CDB too big %p ", xs));
		bzero(&xs->sense, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20; /* illcmd, 0x24 illfield */
		xs->error = XS_SENSE;
		scsi_done(xs);
		GDT_UNLOCK_GDT(gdt, s);
		return (COMPLETE);
	}

	if ((ccb = gdt_get_ccb(gdt, xs->flags)) == NULL) {
		GDT_DPRINTF(GDT_D_CMD, ("no ccb available for %p ", xs));
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		GDT_UNLOCK_GDT(gdt, s);
		return (COMPLETE);
	}

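	/*
	 * XXX The raw command is never actually sent to the controller;
	 * fail the transfer and release the ccb.
	 */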
	xs->error = XS_DRIVER_STUFFUP;
	xs->flags |= ITSDONE;
	scsi_done(xs);
	gdt_free_ccb(gdt, ccb);

	GDT_UNLOCK_GDT(gdt, s);

	return (COMPLETE);
}

void
gdt_clear_events(gdt)
	struct gdt_softc *gdt;
{
	GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events(%p) ", gdt));

	/* XXX To be implemented */
}

int
gdt_async_event(gdt, service)
	struct gdt_softc *gdt;
	int service;
{
	GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d) ", gdt, service));

	if (service == GDT_SCREENSERVICE) {
		/* XXX To be implemented */
	} else {
		/* XXX To be implemented */
	}

	return (0);
}

int
gdt_sync_event(gdt, service, index, xs)
	struct gdt_softc *gdt;
	int service;
	u_int8_t index;
	struct scsi_xfer *xs;
{
	GDT_DPRINTF(GDT_D_INTR,
	    ("gdt_sync_event(%p, %d, %d, %p) ", gdt, service, index, xs));

	if (service == GDT_SCREENSERVICE) {
		GDT_DPRINTF(GDT_D_INTR, ("service == GDT_SCREENSERVICE "));
		/* XXX To be implemented */
		return (0);
	} else {
		switch (gdt->sc_status) {
		case GDT_S_OK:
			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_OK "));
			/* XXX To be implemented */
			break;
		case GDT_S_BSY:
			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_BSY "));
			/* XXX To be implemented */
			return (2);
		default:
			GDT_DPRINTF(GDT_D_INTR, ("sc_status is %d ",
			    gdt->sc_status));
			/* XXX To be implemented */
			return (0);
		}
	}

	return (1);
}

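/*
 * Interrupt handler: fetch the controller status, complete the ccb it
 * refers to (syncing and unloading its DMA map), let gdt_sync_event
 * decide whether the transfer is done or must be requeued, and finally
 * kick the queue of waiting commands via gdt_chain().
 */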
int
gdt_intr(arg)
	void *arg;
{
	struct gdt_softc *gdt = arg;
	struct gdt_intr_ctx ctx;
	int chain = 1;
	int sync_val = 0;
	struct scsi_xfer *xs;
	int prev_cmd;
	struct gdt_ccb *ccb;
	gdt_lock_t lock;

	GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p) ", gdt));

	/* If polling and we were not called from gdt_wait, just return */
	if (gdt_polling && !gdt_from_wait)
		return (0);

	if (!gdt_polling)
		lock = GDT_LOCK_GDT(gdt);

	ctx.istatus = gdt->sc_get_status(gdt);
	if (!ctx.istatus) {
		if (!gdt_polling)
			GDT_UNLOCK_GDT(gdt, lock);
		gdt->sc_status = GDT_S_NO_STATUS;
		return (0);
	}

	gdt_wait_index = 0;
	ctx.service = ctx.info2 = 0;

	gdt->sc_intr(gdt, &ctx);

	gdt->sc_status = ctx.cmd_status;
	gdt->sc_info = ctx.info;
	gdt->sc_info2 = ctx.info2;

	if (gdt_from_wait) {
		gdt_wait_gdt = gdt;
		gdt_wait_index = ctx.istatus;
	}

	switch (ctx.istatus) {
	case GDT_ASYNCINDEX:
		gdt_async_event(gdt, ctx.service);
		goto finish;

	case GDT_SPEZINDEX:
		printf("%s: uninitialized or unknown service (%d %d)\n",
		    DEVNAME(gdt), ctx.info, ctx.info2);
		chain = 0;
		goto finish;
	}

	ccb = &gdt->sc_ccbs[ctx.istatus - 2];
	xs = ccb->gc_xs;
	if (!gdt_polling)
		timeout_del(&xs->stimeout);
	ctx.service = ccb->gc_service;
	prev_cmd = ccb->gc_flags & GDT_GCF_CMD_MASK;
	if (xs && xs->cmd->opcode != PREVENT_ALLOW &&
	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
		bus_dmamap_sync(gdt->sc_dmat, ccb->gc_dmamap_xfer, 0,
		    ccb->gc_dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(gdt->sc_dmat, ccb->gc_dmamap_xfer);
	}
	gdt_free_ccb(gdt, ccb);
	switch (prev_cmd) {
	case GDT_GCF_UNUSED:
		/* XXX Not yet implemented */
		chain = 0;
		goto finish;
	case GDT_GCF_INTERNAL:
		chain = 0;
		goto finish;
	}

	sync_val = gdt_sync_event(gdt, ctx.service, ctx.istatus, xs);

 finish:
	if (!gdt_polling)
		GDT_UNLOCK_GDT(gdt, lock);

	switch (sync_val) {
	case 1:
		xs->flags |= ITSDONE;
		scsi_done(xs);
		break;

	case 2:
		gdt_enqueue(gdt, xs, 0);
	}

	if (chain)
		gdt_chain(gdt);
	return (1);
}

void
gdtminphys(bp)
	struct buf *bp;
{
	GDT_DPRINTF(GDT_D_MISC, ("gdtminphys(0x%x) ", bp));

	/* As this is way more than MAXPHYS it's really not necessary. */
	if ((GDT_MAXOFFSETS - 1) * PAGE_SIZE < MAXPHYS &&
	    bp->b_bcount > ((GDT_MAXOFFSETS - 1) * PAGE_SIZE))
		bp->b_bcount = ((GDT_MAXOFFSETS - 1) * PAGE_SIZE);

	minphys(bp);
}

int
gdt_wait(gdt, ccb, timeout)
	struct gdt_softc *gdt;
	struct gdt_ccb *ccb;
	int timeout;
{
	int rv = 0;

	GDT_DPRINTF(GDT_D_MISC,
	    ("gdt_wait(%p, %p, %d) ", gdt, ccb, timeout));

	gdt_from_wait = 1;
	do {
		if (gdt_intr(gdt) && gdt == gdt_wait_gdt &&
		    ccb->gc_cmd_index == gdt_wait_index) {
			rv = 1;
			break;
		}
		DELAY(1);
	} while (--timeout);
	gdt_from_wait = 0;

	while (gdt->sc_test_busy(gdt))
		DELAY(0);	/* XXX correct? */

	return (rv);
}

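/*
 * Issue a driver-internal command in polled mode: build the request for
 * the given service, hand it to the controller and wait for completion,
 * retrying up to GDT_RETRIES times while the controller reports busy.
 */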
int
gdt_internal_cmd(gdt, service, opcode, arg1, arg2, arg3)
	struct gdt_softc *gdt;
	u_int8_t service;
	u_int16_t opcode;
	u_int32_t arg1, arg2, arg3;
{
	int retries;
	struct gdt_ccb *ccb;

	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d) ",
	    gdt, service, opcode, arg1, arg2, arg3));

	bzero(gdt->sc_cmd, GDT_CMD_SZ);

	for (retries = GDT_RETRIES; ; ) {
		ccb = gdt_get_ccb(gdt, SCSI_NOSLEEP);
		if (ccb == NULL) {
			printf("%s: no free command index found\n",
			    DEVNAME(gdt));
			return (0);
		}
		ccb->gc_service = service;
		gdt_ccb_set_cmd(ccb, GDT_GCF_INTERNAL);

		gdt->sc_set_sema0(gdt);
		gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
		    ccb->gc_cmd_index);
		gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
		gdt_enc32(gdt->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);

		switch (service) {
		case GDT_CACHESERVICE:
			if (opcode == GDT_IOCTL) {
				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
				    GDT_IOCTL_SUBFUNC, arg1);
				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
				    GDT_IOCTL_CHANNEL, arg2);
				gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
				    GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
				    GDT_IOCTL_P_PARAM,
				    gdt->sc_scratch_seg.ds_addr);
			} else {
				gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
				    GDT_CACHE_DEVICENO, (u_int16_t)arg1);
				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
				    GDT_CACHE_BLOCKNO, arg2);
			}
			break;

		case GDT_SCSIRAWSERVICE:
			gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
			    GDT_RAW_DIRECTION, arg1);
			gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
			    (u_int8_t)arg2;
			gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
			    (u_int8_t)arg3;
			gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
			    (u_int8_t)(arg3 >> 8);
		}

		gdt->sc_cmd_len = GDT_CMD_SZ;
		gdt->sc_cmd_off = 0;
		gdt->sc_cmd_cnt = 0;
		gdt->sc_copy_cmd(gdt, ccb);
		gdt->sc_release_event(gdt, ccb);
		DELAY(20);
		if (!gdt_wait(gdt, ccb, GDT_POLL_TIMEOUT))
			return (0);
		if (gdt->sc_status != GDT_S_BSY || --retries == 0)
			break;
		DELAY(1);
	}
	return (gdt->sc_status == GDT_S_OK);
}

struct gdt_ccb *
gdt_get_ccb(gdt, flags)
	struct gdt_softc *gdt;
	int flags;
{
	struct gdt_ccb *ccb;
	gdt_lock_t lock;

	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p, 0x%x) ", gdt, flags));

	lock = GDT_LOCK_GDT(gdt);

	for (;;) {
		ccb = TAILQ_FIRST(&gdt->sc_free_ccb);
		if (ccb != NULL)
			break;
		if (flags & SCSI_NOSLEEP)
			goto bail_out;
		tsleep(&gdt->sc_free_ccb, PRIBIO, "gdt_ccb", 0);
	}

	TAILQ_REMOVE(&gdt->sc_free_ccb, ccb, gc_chain);

 bail_out:
	GDT_UNLOCK_GDT(gdt, lock);
	return (ccb);
}

void
gdt_free_ccb(gdt, ccb)
	struct gdt_softc *gdt;
	struct gdt_ccb *ccb;
{
	gdt_lock_t lock;

	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p) ", gdt, ccb));

	lock = GDT_LOCK_GDT(gdt);

	TAILQ_INSERT_HEAD(&gdt->sc_free_ccb, ccb, gc_chain);

	/* If the free list was empty, wake up potential waiters. */
	if (TAILQ_NEXT(ccb, gc_chain) == NULL)
		wakeup(&gdt->sc_free_ccb);

	GDT_UNLOCK_GDT(gdt, lock);
}

void
gdt_enqueue_ccb(gdt, ccb)
	struct gdt_softc *gdt;
	struct gdt_ccb *ccb;
{
	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_enqueue_ccb(%p, %p) ", gdt, ccb));

	timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
	TAILQ_INSERT_TAIL(&gdt->sc_ccbq, ccb, gc_chain);
	gdt_start_ccbs(gdt);
}

void
gdt_start_ccbs(gdt)
	struct gdt_softc *gdt;
{
	struct gdt_ccb *ccb;
	struct scsi_xfer *xs;

	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_start_ccbs(%p) ", gdt));

	while ((ccb = TAILQ_FIRST(&gdt->sc_ccbq)) != NULL) {

		xs = ccb->gc_xs;
		if (ccb->gc_flags & GDT_GCF_WATCHDOG)
			timeout_del(&xs->stimeout);

		if (gdt_exec_ccb(ccb) == 0) {
			ccb->gc_flags |= GDT_GCF_WATCHDOG;
			timeout_set(&ccb->gc_xs->stimeout, gdt_watchdog, ccb);
			timeout_add(&xs->stimeout,
			    (GDT_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&gdt->sc_ccbq, ccb, gc_chain);

		if ((xs->flags & SCSI_POLL) == 0) {
			timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
			timeout_add(&xs->stimeout,
			    (ccb->gc_timeout * hz) / 1000);
		}
	}
}

void
gdt_chain(gdt)
	struct gdt_softc *gdt;
{
	GDT_DPRINTF(GDT_D_INTR, ("gdt_chain(%p) ", gdt));

	if (LIST_FIRST(&gdt->sc_queue))
		gdt_scsi_cmd(LIST_FIRST(&gdt->sc_queue));
}

void
gdt_timeout(arg)
	void *arg;
{
	struct gdt_ccb *ccb = arg;
	struct scsi_link *link = ccb->gc_xs->sc_link;
	struct gdt_softc *gdt = link->adapter_softc;
	gdt_lock_t lock;

	sc_print_addr(link);
	printf("timed out\n");

	/* XXX Test for multiple timeouts */

	ccb->gc_xs->error = XS_TIMEOUT;
	lock = GDT_LOCK_GDT(gdt);
	gdt_enqueue_ccb(gdt, ccb);
	GDT_UNLOCK_GDT(gdt, lock);
}

void
gdt_watchdog(arg)
	void *arg;
{
	struct gdt_ccb *ccb = arg;
	struct scsi_link *link = ccb->gc_xs->sc_link;
	struct gdt_softc *gdt = link->adapter_softc;
	gdt_lock_t lock;

	lock = GDT_LOCK_GDT(gdt);
	ccb->gc_flags &= ~GDT_GCF_WATCHDOG;
	gdt_start_ccbs(gdt);
	GDT_UNLOCK_GDT(gdt, lock);
}

#if NBIO > 0
int
gdt_ioctl(struct device *dev, u_long cmd, caddr_t addr)
{
	struct gdt_softc *sc = (struct gdt_softc *)dev;
	int error = 0;

	GDT_DPRINTF(GDT_D_IOCTL, ("%s: ioctl ", DEVNAME(sc)));

	switch (cmd) {
	case BIOCINQ:
		GDT_DPRINTF(GDT_D_IOCTL, ("inq "));
		error = gdt_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		GDT_DPRINTF(GDT_D_IOCTL, ("vol "));
		error = gdt_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		GDT_DPRINTF(GDT_D_IOCTL, ("disk "));
		error = gdt_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		GDT_DPRINTF(GDT_D_IOCTL, ("alarm "));
		error = gdt_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCSETSTATE:
		GDT_DPRINTF(GDT_D_IOCTL, ("setstate "));
		error = gdt_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	default:
		GDT_DPRINTF(GDT_D_IOCTL, (" invalid ioctl\n"));
		error = EINVAL;
	}

	return (error);
}

int
gdt_ioctl_inq(struct gdt_softc *sc, struct bioc_inq *bi)
{
	bi->bi_novol = sc->sc_ndevs;
	bi->bi_nodisk = sc->sc_total_disks;

	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));

	return (0);
}

int
gdt_ioctl_vol(struct gdt_softc *sc, struct bioc_vol *bv)
{
	return (1); /* XXX not yet */
}

int
gdt_ioctl_disk(struct gdt_softc *sc, struct bioc_disk *bd)
{
	return (1); /* XXX not yet */
}

int
gdt_ioctl_alarm(struct gdt_softc *sc, struct bioc_alarm *ba)
{
	return (1); /* XXX not yet */
}

int
gdt_ioctl_setstate(struct gdt_softc *sc, struct bioc_setstate *bs)
{
	return (1); /* XXX not yet */
}

#if 0
int
gdt_ioctl(dev, cmd, addr)
	struct device *dev;
	u_long cmd;
	caddr_t addr;
{
	int error = 0;
	struct gdt_dummy *dummy;

	switch (cmd) {
	case GDT_IOCTL_DUMMY:
		dummy = (struct gdt_dummy *)addr;
		printf("%s: GDT_IOCTL_DUMMY %d\n", dev->dv_xname, dummy->x++);
		break;

	case GDT_IOCTL_GENERAL: {
		gdt_ucmd_t *ucmd;
		struct gdt_softc *gdt = (struct gdt_softc *)dev;
		gdt_lock_t lock;

		ucmd = (gdt_ucmd_t *)addr;
		lock = GDT_LOCK_GDT(gdt);
		TAILQ_INSERT_TAIL(&gdt->sc_ucmdq, ucmd, links);
		ucmd->complete_flag = FALSE;
		GDT_UNLOCK_GDT(gdt, lock);
		gdt_chain(gdt);
		if (!ucmd->complete_flag)
			(void)tsleep((void *)ucmd, PCATCH | PRIBIO, "gdtucw",
			    0);
		break;
	}

	case GDT_IOCTL_DRVERS:
		((gdt_drvers_t *)addr)->vers =
		    (GDT_DRIVER_VERSION << 8) | GDT_DRIVER_SUBVERSION;
		break;

	case GDT_IOCTL_CTRCNT:
		((gdt_ctrcnt_t *)addr)->cnt = gdt_cnt;
		break;

#ifdef notyet
	case GDT_IOCTL_CTRTYPE: {
		gdt_ctrt_t *p;
		struct gdt_softc *gdt = (struct gdt_softc *)dev;

		p = (gdt_ctrt_t *)addr;
		p->oem_id = 0x8000;
		p->type = 0xfd;
		p->info = (gdt->sc_bus << 8) | (gdt->sc_slot << 3);
		p->ext_type = 0x6000 | gdt->sc_subdevice;
		p->device_id = gdt->sc_device;
		p->sub_device_id = gdt->sc_subdevice;
		break;
	}
#endif

	case GDT_IOCTL_OSVERS: {
		gdt_osv_t *p;

		p = (gdt_osv_t *)addr;
		p->oscode = 10;
		p->version = osrelease[0] - '0';
		if (osrelease[1] == '.')
			p->subversion = osrelease[2] - '0';
		else
			p->subversion = 0;
		if (osrelease[3] == '.')
			p->revision = osrelease[4] - '0';
		else
			p->revision = 0;
		strlcpy(p->name, ostype, sizeof p->name);
		break;
	}

#ifdef notyet
	case GDT_IOCTL_EVENT: {
		gdt_event_t *p;
		gdt_lock_t lock;

		p = (gdt_event_t *)addr;
		if (p->erase == 0xff) {
			if (p->dvr.event_source == GDT_ES_TEST)
				p->dvr.event_data.size =
				    sizeof(p->dvr.event_data.eu.test);
			else if (p->dvr.event_source == GDT_ES_DRIVER)
				p->dvr.event_data.size =
				    sizeof(p->dvr.event_data.eu.driver);
			else if (p->dvr.event_source == GDT_ES_SYNC)
				p->dvr.event_data.size =
				    sizeof(p->dvr.event_data.eu.sync);
			else
				p->dvr.event_data.size =
				    sizeof(p->dvr.event_data.eu.async);
			lock = GDT_LOCK_GDT(gdt);
			gdt_store_event(p->dvr.event_source, p->dvr.event_idx,
			    &p->dvr.event_data);
			GDT_UNLOCK_GDT(gdt, lock);
		} else if (p->erase == 0xfe) {
			lock = GDT_LOCK_GDT(gdt);
			gdt_clear_events();
			GDT_UNLOCK_GDT(gdt, lock);
		} else if (p->erase == 0) {
			p->handle = gdt_read_event(p->handle, &p->dvr);
		} else {
			gdt_readapp_event((u_int8_t)p->erase, &p->dvr);
		}
		break;
	}
#endif

	case GDT_IOCTL_STATIST:
#if 0
		bcopy(&gdt_stat, (gdt_statist_t *)addr, sizeof gdt_stat);
#else
		error = EOPNOTSUPP;
#endif
		break;

	default:
		error = EINVAL;
	}
	return (error);
}
#endif /* 0 */
#endif /* NBIO > 0 */