1*85Scth /* 2*85Scth * CDDL HEADER START 3*85Scth * 4*85Scth * The contents of this file are subject to the terms of the 5*85Scth * Common Development and Distribution License, Version 1.0 only 6*85Scth * (the "License"). You may not use this file except in compliance 7*85Scth * with the License. 8*85Scth * 9*85Scth * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10*85Scth * or http://www.opensolaris.org/os/licensing. 11*85Scth * See the License for the specific language governing permissions 12*85Scth * and limitations under the License. 13*85Scth * 14*85Scth * When distributing Covered Code, include this CDDL HEADER in each 15*85Scth * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16*85Scth * If applicable, add the following below this CDDL HEADER, with the 17*85Scth * fields enclosed by brackets "[]" replaced with your own identifying 18*85Scth * information: Portions Copyright [yyyy] [name of copyright owner] 19*85Scth * 20*85Scth * CDDL HEADER END 21*85Scth */ 22*85Scth /* 23*85Scth * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24*85Scth * Use is subject to license terms. 25*85Scth */ 26*85Scth 27*85Scth #pragma ident "%Z%%M% %I% %E% SMI" 28*85Scth 29*85Scth /* 30*85Scth * SCSA HBA nexus driver that emulates an HBA connected to SCSI target 31*85Scth * devices (large disks). 
 */

#ifdef DEBUG
#define	EMUL64DEBUG
#endif

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/cpuvar.h>
#include <sys/dklabel.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

/*
 * When non-zero (the default), packet completion is dispatched to a
 * per-HBA taskq; set to zero for debugging to complete packets inline
 * in emul64_scsi_start().
 */
int emul64_usetaskq	= 1;	/* set to zero for debugging */
int emul64debug = 0;
#ifdef EMUL64DEBUG
static int emul64_cdb_debug	= 0;
#include <sys/debug.h>
#endif

/*
 * cb_ops function prototypes
 */
static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp);

/*
 * dev_ops functions prototypes
 */
static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd);
static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_pkt_comp(void *);
static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset(struct scsi_address *ap, int level);
static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void emul64_scsi_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg);

/*
 * internal functions
 */
static void emul64_i_initcap(struct emul64 *emul64);

static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...);
static int emul64_get_tgtrange(struct emul64 *,
    intptr_t,
    emul64_tgt_t **,
    emul64_tgt_range_t *);
static int emul64_write_off(struct emul64 *,
    emul64_tgt_t *,
    emul64_tgt_range_t *);
static int emul64_write_on(struct emul64 *,
    emul64_tgt_t *,
    emul64_tgt_range_t *);
static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *);
static void emul64_nowrite_free(emul64_nowrite_t *);
static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *,
    diskaddr_t start_block,
    size_t blkcnt,
    emul64_rng_overlap_t *overlapp,
    emul64_nowrite_t ***prevp);

/* Target lookup by (target, lun); defined in a sibling file of this driver. */
extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);

#ifdef EMUL64DEBUG
static void emul64_debug_dump_cdb(struct scsi_address *ap,
    struct scsi_pkt *pkt);
#endif


#ifdef _DDICT
/*
 * Stubs and fallback definitions used only when compiling under the DDI
 * compliance tool (_DDICT), which lacks some kernel facilities.
 */
static int ddi_in_panic(void);
static int ddi_in_panic() { return (0); }
#ifndef SCSI_CAP_RESET_NOTIFICATION
#define	SCSI_CAP_RESET_NOTIFICATION	14
#endif
#ifndef SCSI_RESET_NOTIFY
#define	SCSI_RESET_NOTIFY	0x01
#endif
#ifndef SCSI_RESET_CANCEL
#define	SCSI_RESET_CANCEL	0x02
#endif
#endif

/*
 * Tunables:
 *
 * emul64_max_task
 *	The taskq facility is used to queue up SCSI start requests on a per
 *	controller basis.  If the maximum number of queued tasks is hit,
 *	taskq_ent_alloc() delays for a second, which adversely impacts our
 *	performance.  This value establishes the maximum number of task
 *	queue entries when taskq_create is called.
 *
 * emul64_task_nthreads
 *	Specifies the number of threads that should be used to process a
 *	controller's task queue.  Our init function sets this to the number
 *	of CPUs on the system, but this can be overridden in emul64.conf.
 */
int emul64_max_task = 16;
int emul64_task_nthreads = 1;

/*
 * Local static data
 */
/* Opaque handle for per-instance soft state (struct emul64). */
static void *emul64_state = NULL;
/*
 * Character/block operations.
 */
static struct cb_ops emul64_cbops = {
	scsi_hba_open,		/* cb_open */
	scsi_hba_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	emul64_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
	D_MP | D_64BIT | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * autoconfiguration routines.
 */

static struct dev_ops emul64_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	emul64_info,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	emul64_attach,		/* attach */
	emul64_detach,		/* detach */
	nodev,			/* reset */
	&emul64_cbops,		/* char/block ops */
	NULL			/* bus ops */
};

char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
	&mod_driverops,			/* module type - driver */
	"emul64 SCSI Host Bus Adapter",	/* module name */
	&emul64_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev - must be MODREV_1 */
	&modldrv,		/* ml_linkage */
	NULL			/* end of driver linkage */
};

/*
 * Loadable-module entry point: initialize soft state, register with the
 * SCSA framework, size the completion taskq thread pool, and install.
 * Each step is unwound in reverse order if a later step fails.
 */
int
_init(void)
{
	int	ret;

	ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64),
	    EMUL64_INITIAL_SOFT_SPACE);
	if (ret != 0)
		return (ret);

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&emul64_state);
		return (ret);
	}

	/* Set the number of task threads to the number of CPUs */
	if (boot_max_ncpus == -1) {
		emul64_task_nthreads = max_ncpus;
	} else {
		emul64_task_nthreads = boot_max_ncpus;
	}

	emul64_bsd_init();

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		/* Installation failed: tear down everything set up above. */
		emul64_bsd_fini();
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&emul64_state);
	}

	return (ret);
}

/*
 * Loadable-module removal: refuse if the module is busy, otherwise
 * release resources in the reverse order of _init().
 */
int
_fini(void)
{
	int	ret;

	if ((ret = mod_remove(&modlinkage)) != 0)
		return (ret);

	emul64_bsd_fini();

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini(&emul64_state);

	return (ret);
}

/* Report module information to modinfo(1M). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
/*
 * Given the device number return the devinfo pointer
 * from the scsi_device structure.
 */
/*ARGSUSED*/
static int
emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct emul64	*foo;
	int		instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		foo = ddi_get_soft_state(emul64_state, instance);
		if (foo != NULL)
			*result = (void *)foo->emul64_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Attach an instance of an emul64 host adapter.  Allocate data structures,
 * initialize the emul64 and we're on the air.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure all partially-acquired
 * resources (soft state, tran structure, mutexes) are released via the
 * 'fail' label.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		mutex_initted = 0;
	struct emul64	*emul64;
	int		instance;
	scsi_hba_tran_t	*tran = NULL;
	ddi_dma_attr_t	tmp_dma_attr;

	emul64_bsd_get_props(dip);

	/*
	 * There is no real DMA engine; the attributes are zeroed and only
	 * minxfer/burstsizes are filled in below for scsi_hba_attach_setup.
	 */
	bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		/* Nothing to restore; just verify the instance is attached. */
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		emul64 = TRAN2EMUL64(tran);	/* value unused past this check */

		return (DDI_SUCCESS);

	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state",
		    instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == (struct emul64 *)NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}


	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
		goto fail;
	}

	emul64->emul64_tran = tran;
	emul64->emul64_dip = dip;

	/* Wire the SCSA transport entry points to this driver. */
	tran->tran_hba_private = emul64;
	tran->tran_tgt_private = NULL;
	tran->tran_tgt_init = emul64_tran_tgt_init;
	tran->tran_tgt_probe = scsi_hba_probe;
	tran->tran_tgt_free = NULL;

	tran->tran_start = emul64_scsi_start;
	tran->tran_abort = emul64_scsi_abort;
	tran->tran_reset = emul64_scsi_reset;
	tran->tran_getcap = emul64_scsi_getcap;
	tran->tran_setcap = emul64_scsi_setcap;
	tran->tran_init_pkt = emul64_scsi_init_pkt;
	tran->tran_destroy_pkt = emul64_scsi_destroy_pkt;
	tran->tran_dmafree = emul64_scsi_dmafree;
	tran->tran_sync_pkt = emul64_scsi_sync_pkt;
	tran->tran_reset_notify = emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
	    0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);


	/* mutexes to protect the emul64 request and response queue */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);

	mutex_initted = 1;

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);


	ddi_report_dev(dip);
	/* Completion taskq; sized by the tunables declared above. */
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}

/*
 * Detach an instance: destroy the taskq, unregister from SCSA, free the
 * transport structure, mutexes, and soft state.  DDI_SUSPEND is a no-op.
 */
/*ARGSUSED*/
static int
emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct emul64	*emul64;
	scsi_hba_tran_t	*tran;
	int		instance = ddi_get_instance(dip);


	/* get transport structure pointer from the dip */
	if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) {
		return (DDI_FAILURE);
	}

	/* get soft state from transport structure */
	emul64 = TRAN2EMUL64(tran);

	if (!emul64) {
		return (DDI_FAILURE);
	}

	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);

	switch (cmd) {
	case DDI_DETACH:
		EMUL64_MUTEX_ENTER(emul64);

		taskq_destroy(emul64->emul64_taskq);
		(void) scsi_hba_detach(dip);

		scsi_hba_tran_free(emul64->emul64_tran);


		EMUL64_MUTEX_EXIT(emul64);

		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));


		EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
		ddi_soft_state_free(emul64_state, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
462*85Scth { 463*85Scth struct emul64 *emul64; 464*85Scth scsi_hba_tran_t *tran; 465*85Scth int instance = ddi_get_instance(dip); 466*85Scth 467*85Scth 468*85Scth /* get transport structure pointer from the dip */ 469*85Scth if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) { 470*85Scth return (DDI_FAILURE); 471*85Scth } 472*85Scth 473*85Scth /* get soft state from transport structure */ 474*85Scth emul64 = TRAN2EMUL64(tran); 475*85Scth 476*85Scth if (!emul64) { 477*85Scth return (DDI_FAILURE); 478*85Scth } 479*85Scth 480*85Scth EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd); 481*85Scth 482*85Scth switch (cmd) { 483*85Scth case DDI_DETACH: 484*85Scth EMUL64_MUTEX_ENTER(emul64); 485*85Scth 486*85Scth taskq_destroy(emul64->emul64_taskq); 487*85Scth (void) scsi_hba_detach(dip); 488*85Scth 489*85Scth scsi_hba_tran_free(emul64->emul64_tran); 490*85Scth 491*85Scth 492*85Scth EMUL64_MUTEX_EXIT(emul64); 493*85Scth 494*85Scth mutex_destroy(EMUL64_REQ_MUTEX(emul64)); 495*85Scth mutex_destroy(EMUL64_RESP_MUTEX(emul64)); 496*85Scth 497*85Scth 498*85Scth EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done"); 499*85Scth ddi_soft_state_free(emul64_state, instance); 500*85Scth 501*85Scth return (DDI_SUCCESS); 502*85Scth 503*85Scth case DDI_SUSPEND: 504*85Scth return (DDI_SUCCESS); 505*85Scth 506*85Scth default: 507*85Scth return (DDI_FAILURE); 508*85Scth } 509*85Scth } 510*85Scth 511*85Scth /* 512*85Scth * Function name : emul64_tran_tgt_init 513*85Scth * 514*85Scth * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise 515*85Scth * 516*85Scth */ 517*85Scth /*ARGSUSED*/ 518*85Scth static int 519*85Scth emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 520*85Scth scsi_hba_tran_t *tran, struct scsi_device *sd) 521*85Scth { 522*85Scth struct emul64 *emul64; 523*85Scth emul64_tgt_t *tgt; 524*85Scth char **geo_vidpid = NULL; 525*85Scth char *geo, *vidpid; 526*85Scth uint32_t *geoip = NULL; 527*85Scth uint_t length; 
528*85Scth uint_t length2; 529*85Scth lldaddr_t sector_count; 530*85Scth char prop_name[15]; 531*85Scth int ret = DDI_FAILURE; 532*85Scth 533*85Scth emul64 = TRAN2EMUL64(tran); 534*85Scth EMUL64_MUTEX_ENTER(emul64); 535*85Scth 536*85Scth /* 537*85Scth * We get called for each target driver.conf node, multiple 538*85Scth * nodes may map to the same tgt,lun (sd.conf, st.conf, etc). 539*85Scth * Check to see if transport to tgt,lun already established. 540*85Scth */ 541*85Scth tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun); 542*85Scth if (tgt) { 543*85Scth ret = DDI_SUCCESS; 544*85Scth goto out; 545*85Scth } 546*85Scth 547*85Scth /* see if we have driver.conf specified device for this target,lun */ 548*85Scth (void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d", 549*85Scth sd->sd_address.a_target, sd->sd_address.a_lun); 550*85Scth if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip, 551*85Scth DDI_PROP_DONTPASS, prop_name, 552*85Scth &geo_vidpid, &length) != DDI_PROP_SUCCESS) 553*85Scth goto out; 554*85Scth if (length < 2) { 555*85Scth cmn_err(CE_WARN, "emul64: %s property does not have 2 " 556*85Scth "elements", prop_name); 557*85Scth goto out; 558*85Scth } 559*85Scth 560*85Scth /* pick geometry name and vidpid string from string array */ 561*85Scth geo = *geo_vidpid; 562*85Scth vidpid = *(geo_vidpid + 1); 563*85Scth 564*85Scth /* lookup geometry property integer array */ 565*85Scth if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS, 566*85Scth geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) { 567*85Scth cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo); 568*85Scth goto out; 569*85Scth } 570*85Scth if (length2 < 6) { 571*85Scth cmn_err(CE_WARN, "emul64: property %s does not have 6 " 572*85Scth "elements", *geo_vidpid); 573*85Scth goto out; 574*85Scth } 575*85Scth 576*85Scth /* allocate and initialize tgt structure for tgt,lun */ 577*85Scth tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP); 578*85Scth 
rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL); 579*85Scth mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL); 580*85Scth 581*85Scth /* create avl for data block storage */ 582*85Scth avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare, 583*85Scth sizeof (blklist_t), offsetof(blklist_t, bl_node)); 584*85Scth 585*85Scth /* save scsi_address and vidpid */ 586*85Scth bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address)); 587*85Scth (void) strncpy(tgt->emul64_tgt_inq, vidpid, 588*85Scth sizeof (emul64->emul64_tgt->emul64_tgt_inq)); 589*85Scth 590*85Scth /* 591*85Scth * The high order 4 bytes of the sector count always come first in 592*85Scth * emul64.conf. They are followed by the low order 4 bytes. Not 593*85Scth * all CPU types want them in this order, but laddr_t takes care of 594*85Scth * this for us. We then pick up geometry (ncyl X nheads X nsect). 595*85Scth */ 596*85Scth sector_count._p._u = *(geoip + 0); 597*85Scth sector_count._p._l = *(geoip + 1); 598*85Scth /* 599*85Scth * On 32-bit platforms, fix block size if it's greater than the 600*85Scth * allowable maximum. 
601*85Scth */ 602*85Scth #if !defined(_LP64) 603*85Scth if (sector_count._f > DK_MAX_BLOCKS) 604*85Scth sector_count._f = DK_MAX_BLOCKS; 605*85Scth #endif 606*85Scth tgt->emul64_tgt_sectors = sector_count._f; 607*85Scth tgt->emul64_tgt_dtype = *(geoip + 2); 608*85Scth tgt->emul64_tgt_ncyls = *(geoip + 3); 609*85Scth tgt->emul64_tgt_nheads = *(geoip + 4); 610*85Scth tgt->emul64_tgt_nsect = *(geoip + 5); 611*85Scth 612*85Scth /* insert target structure into list */ 613*85Scth tgt->emul64_tgt_next = emul64->emul64_tgt; 614*85Scth emul64->emul64_tgt = tgt; 615*85Scth ret = DDI_SUCCESS; 616*85Scth 617*85Scth out: EMUL64_MUTEX_EXIT(emul64); 618*85Scth if (geoip) 619*85Scth ddi_prop_free(geoip); 620*85Scth if (geo_vidpid) 621*85Scth ddi_prop_free(geo_vidpid); 622*85Scth return (ret); 623*85Scth } 624*85Scth 625*85Scth /* 626*85Scth * Function name : emul64_i_initcap 627*85Scth * 628*85Scth * Return Values : NONE 629*85Scth * Description : Initializes the default target capabilities and 630*85Scth * Sync Rates. 631*85Scth * 632*85Scth * Context : Called from the user thread through attach. 633*85Scth * 634*85Scth */ 635*85Scth static void 636*85Scth emul64_i_initcap(struct emul64 *emul64) 637*85Scth { 638*85Scth uint16_t cap, synch; 639*85Scth int i; 640*85Scth 641*85Scth cap = 0; 642*85Scth synch = 0; 643*85Scth for (i = 0; i < NTARGETS_WIDE; i++) { 644*85Scth emul64->emul64_cap[i] = cap; 645*85Scth emul64->emul64_synch[i] = synch; 646*85Scth } 647*85Scth EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap); 648*85Scth } 649*85Scth 650*85Scth /* 651*85Scth * Function name : emul64_scsi_getcap() 652*85Scth * 653*85Scth * Return Values : current value of capability, if defined 654*85Scth * -1 if capability is not defined 655*85Scth * Description : returns current capability value 656*85Scth * 657*85Scth * Context : Can be called from different kernel process threads. 658*85Scth * Can be called by interrupt thread. 
659*85Scth */ 660*85Scth static int 661*85Scth emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom) 662*85Scth { 663*85Scth struct emul64 *emul64 = ADDR2EMUL64(ap); 664*85Scth int rval = 0; 665*85Scth 666*85Scth /* 667*85Scth * We don't allow inquiring about capabilities for other targets 668*85Scth */ 669*85Scth if (cap == NULL || whom == 0) { 670*85Scth return (-1); 671*85Scth } 672*85Scth 673*85Scth EMUL64_MUTEX_ENTER(emul64); 674*85Scth 675*85Scth switch (scsi_hba_lookup_capstr(cap)) { 676*85Scth case SCSI_CAP_DMA_MAX: 677*85Scth rval = 1 << 24; /* Limit to 16MB max transfer */ 678*85Scth break; 679*85Scth case SCSI_CAP_MSG_OUT: 680*85Scth rval = 1; 681*85Scth break; 682*85Scth case SCSI_CAP_DISCONNECT: 683*85Scth rval = 1; 684*85Scth break; 685*85Scth case SCSI_CAP_SYNCHRONOUS: 686*85Scth rval = 1; 687*85Scth break; 688*85Scth case SCSI_CAP_WIDE_XFER: 689*85Scth rval = 1; 690*85Scth break; 691*85Scth case SCSI_CAP_TAGGED_QING: 692*85Scth rval = 1; 693*85Scth break; 694*85Scth case SCSI_CAP_UNTAGGED_QING: 695*85Scth rval = 1; 696*85Scth break; 697*85Scth case SCSI_CAP_PARITY: 698*85Scth rval = 1; 699*85Scth break; 700*85Scth case SCSI_CAP_INITIATOR_ID: 701*85Scth rval = emul64->emul64_initiator_id; 702*85Scth break; 703*85Scth case SCSI_CAP_ARQ: 704*85Scth rval = 1; 705*85Scth break; 706*85Scth case SCSI_CAP_LINKED_CMDS: 707*85Scth break; 708*85Scth case SCSI_CAP_RESET_NOTIFICATION: 709*85Scth rval = 1; 710*85Scth break; 711*85Scth 712*85Scth default: 713*85Scth rval = -1; 714*85Scth break; 715*85Scth } 716*85Scth 717*85Scth EMUL64_MUTEX_EXIT(emul64); 718*85Scth 719*85Scth return (rval); 720*85Scth } 721*85Scth 722*85Scth /* 723*85Scth * Function name : emul64_scsi_setcap() 724*85Scth * 725*85Scth * Return Values : 1 - capability exists and can be set to new value 726*85Scth * 0 - capability could not be set to new value 727*85Scth * -1 - no such capability 728*85Scth * 729*85Scth * Description : sets a capability for a target 730*85Scth * 
731*85Scth * Context : Can be called from different kernel process threads. 732*85Scth * Can be called by interrupt thread. 733*85Scth */ 734*85Scth static int 735*85Scth emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom) 736*85Scth { 737*85Scth struct emul64 *emul64 = ADDR2EMUL64(ap); 738*85Scth int rval = 0; 739*85Scth 740*85Scth /* 741*85Scth * We don't allow setting capabilities for other targets 742*85Scth */ 743*85Scth if (cap == NULL || whom == 0) { 744*85Scth return (-1); 745*85Scth } 746*85Scth 747*85Scth EMUL64_MUTEX_ENTER(emul64); 748*85Scth 749*85Scth switch (scsi_hba_lookup_capstr(cap)) { 750*85Scth case SCSI_CAP_DMA_MAX: 751*85Scth case SCSI_CAP_MSG_OUT: 752*85Scth case SCSI_CAP_PARITY: 753*85Scth case SCSI_CAP_UNTAGGED_QING: 754*85Scth case SCSI_CAP_LINKED_CMDS: 755*85Scth case SCSI_CAP_RESET_NOTIFICATION: 756*85Scth /* 757*85Scth * None of these are settable via 758*85Scth * the capability interface. 759*85Scth */ 760*85Scth break; 761*85Scth case SCSI_CAP_DISCONNECT: 762*85Scth rval = 1; 763*85Scth break; 764*85Scth case SCSI_CAP_SYNCHRONOUS: 765*85Scth rval = 1; 766*85Scth break; 767*85Scth case SCSI_CAP_TAGGED_QING: 768*85Scth rval = 1; 769*85Scth break; 770*85Scth case SCSI_CAP_WIDE_XFER: 771*85Scth rval = 1; 772*85Scth break; 773*85Scth case SCSI_CAP_INITIATOR_ID: 774*85Scth rval = -1; 775*85Scth break; 776*85Scth case SCSI_CAP_ARQ: 777*85Scth rval = 1; 778*85Scth break; 779*85Scth case SCSI_CAP_TOTAL_SECTORS: 780*85Scth emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value; 781*85Scth rval = TRUE; 782*85Scth break; 783*85Scth case SCSI_CAP_SECTOR_SIZE: 784*85Scth rval = TRUE; 785*85Scth break; 786*85Scth default: 787*85Scth rval = -1; 788*85Scth break; 789*85Scth } 790*85Scth 791*85Scth 792*85Scth EMUL64_MUTEX_EXIT(emul64); 793*85Scth 794*85Scth return (rval); 795*85Scth } 796*85Scth 797*85Scth /* 798*85Scth * Function name : emul64_scsi_init_pkt 799*85Scth * 800*85Scth * Return Values : pointer to scsi_pkt, or 
/*
 * Function name : emul64_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_init_pkt(9F).
 *		   Refer to tran_init_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/* ARGSUSED */
static struct scsi_pkt *
emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
	int flags, int (*callback)(), caddr_t arg)
{
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	struct emul64_cmd	*sp;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

	/*
	 * First step of emul64_scsi_init_pkt:  pkt allocation
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
		    statuslen,
		    tgtlen, sizeof (struct emul64_cmd), callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
			    "scsi_hba_pkt_alloc failed");
			return (NULL);
		}

		sp = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		sp->cmd_pkt		= pkt;
		sp->cmd_flags		= 0;
		sp->cmd_scblen		= statuslen;
		sp->cmd_cdblen		= cmdlen;
		sp->cmd_emul64		= emul64;
		pkt->pkt_address	= *ap;
		pkt->pkt_comp		= (void (*)())NULL;
		pkt->pkt_flags		= 0;
		pkt->pkt_time		= 0;
		pkt->pkt_resid		= 0;
		pkt->pkt_statistics	= 0;
		pkt->pkt_reason		= 0;

	} else {
		/* Reused packet: the private area was set up previously. */
		sp = PKT2CMD(pkt);
	}

	/*
	 * Second step of emul64_scsi_init_pkt:  dma allocation/move
	 *
	 * There is no real DMA engine; the buffer is simply mapped into
	 * kernel address space and its address/length recorded so that
	 * packet completion can copy data directly.
	 */
	if (bp && bp->b_bcount != 0) {
		/* Direction flag: B_READ means device-to-memory. */
		if (bp->b_flags & B_READ) {
			sp->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			sp->cmd_flags |= CFLAG_DMASEND;
		}
		bp_mapin(bp);
		sp->cmd_addr = (unsigned char *) bp->b_un.b_addr;
		sp->cmd_count = bp->b_bcount;
		pkt->pkt_resid = 0;
	}

	return (pkt);
}


/*
 * Function name : emul64_scsi_destroy_pkt
 *
 * Return Values : none
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_destroy_pkt(9F).
 *		   Refer to tran_destroy_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static void
emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);

	/*
	 * emul64_scsi_dmafree inline to make things faster
	 */
	if (sp->cmd_flags & CFLAG_DMAVALID) {
		/*
		 * Free the mapping.  (No real DMA resources to release;
		 * only the flag is cleared.)
		 */
		sp->cmd_flags &= ~CFLAG_DMAVALID;
	}

	/*
	 * Free the pkt
	 */
	scsi_hba_pkt_free(ap, pkt);
}


/*
 * Function name : emul64_scsi_dmafree()
 *
 * Return Values : none
 * Description	 : free dvma resources.  No-op: the emulator never
 *		   allocates DVMA resources.
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * Function name : emul64_scsi_sync_pkt()
 *
 * Return Values : none
 * Description	 : sync dma.  No-op: there is no DMA to synchronize.
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}
/*
 * routine for reset notification setup, to register or cancel.
 *
 * SCSI_RESET_NOTIFY adds an entry for this target to the notify list
 * (if none exists); SCSI_RESET_CANCEL removes an existing entry.
 * Returns DDI_SUCCESS if the requested change was made, DDI_FAILURE
 * otherwise.  The list is protected by the request mutex.
 */
static int
emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
	void (*callback)(caddr_t), caddr_t arg)
{
	struct emul64				*emul64 = ADDR2EMUL64(ap);
	struct emul64_reset_notify_entry	*p, *beforep;
	int					rval = DDI_FAILURE;

	mutex_enter(EMUL64_REQ_MUTEX(emul64));

	p = emul64->emul64_reset_notify_listf;
	beforep = NULL;

	/* Linear scan for an existing entry for this target. */
	while (p) {
		if (p->ap == ap)
			break;	/* An entry exists for this target */
		beforep = p;
		p = p->next;
	}

	if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) {
		/* Unlink and free the entry found above. */
		if (beforep == NULL) {
			emul64->emul64_reset_notify_listf = p->next;
		} else {
			beforep->next = p->next;
		}
		kmem_free((caddr_t)p,
		    sizeof (struct emul64_reset_notify_entry));
		rval = DDI_SUCCESS;

	} else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) {
		/* Register a new notification at the head of the list. */
		p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
		    KM_SLEEP);
		p->ap = ap;
		p->callback = callback;
		p->arg = arg;
		p->next = emul64->emul64_reset_notify_listf;
		emul64->emul64_reset_notify_listf = p;
		rval = DDI_SUCCESS;
	}

	mutex_exit(EMUL64_REQ_MUTEX(emul64));

	return (rval);
}

/*
 * Function name : emul64_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- emul64 has been shutdown
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to emul64
 *
 * Description	 : init pkt, start the request
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	int			rval	= TRAN_ACCEPT;
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	clock_t			cur_lbolt;
	taskqid_t		dispatched;

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);

	pkt->pkt_reason = CMD_CMPLT;

#ifdef	EMUL64DEBUG
	if (emul64_cdb_debug) {
		emul64_debug_dump_cdb(ap, pkt);
	}
#endif	/* EMUL64DEBUG */

	/*
	 * calculate deadline from pkt_time
	 * Instead of multiplying by 100 (ie. HZ), we multiply by 128 so
	 * we can shift and at the same time have a 28% grace period
	 * we ignore the rare case of pkt_time == 0 and deal with it
	 * in emul64_i_watch()
	 */
	cur_lbolt = ddi_get_lbolt();
	sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);

	if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
		/* Complete inline (debug mode or polled request). */
		emul64_pkt_comp((caddr_t)pkt);
	} else {
		dispatched = NULL;
		if (emul64_collect_stats) {
			/*
			 * If we are collecting statistics, call
			 * taskq_dispatch in no sleep mode, so that we can
			 * detect if we are exceeding the queue length that
			 * was established in the call to taskq_create in
			 * emul64_attach.  If the no sleep call fails
			 * (returns NULL), the task will be dispatched in
			 * sleep mode below.
			 */
			dispatched = taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp,
			    (void *)pkt, TQ_NOSLEEP);
			if (dispatched == NULL) {
				/* Queue was full.  dispatch failed. */
				mutex_enter(&emul64_stats_mutex);
				emul64_taskq_max++;
				mutex_exit(&emul64_stats_mutex);
			}
		}
		if (dispatched == NULL) {
			(void) taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
		}
	}

/* NOTE(review): this label is never the target of a goto in this function. */
done:
	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	return (rval);
}

/*
 * Build a CHECK CONDITION response with auto-request-sense data in the
 * packet's scsi_arq_status: status byte, pkt state flags, and extended
 * sense (key/asc/ascq) are all filled in for the caller.
 */
void
emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
{
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;

	/* got check, no data transferred and ARQ done */
	arq->sts_status.sts_chk = 1;
	pkt->pkt_state |= STATE_ARQ_DONE;
	pkt->pkt_state &= ~STATE_XFERRED_DATA;

	/* for ARQ */
	arq->sts_rqpkt_reason = CMD_CMPLT;
	arq->sts_rqpkt_resid = 0;
	arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
	arq->sts_sensedata.es_valid = 1;
	arq->sts_sensedata.es_class = 0x7;
	arq->sts_sensedata.es_key = key;
	arq->sts_sensedata.es_add_code = asc;
	arq->sts_sensedata.es_qual_code = ascq;
}

/* SCSI command emulation entry points implemented in the BSD-layer file. */
int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct
scsi_pkt *); 1094*85Scth int bsd_scsi_format(struct scsi_pkt *); 1095*85Scth int bsd_scsi_io(struct scsi_pkt *); 1096*85Scth int bsd_scsi_log_sense(struct scsi_pkt *); 1097*85Scth int bsd_scsi_mode_sense(struct scsi_pkt *); 1098*85Scth int bsd_scsi_mode_select(struct scsi_pkt *); 1099*85Scth int bsd_scsi_read_capacity(struct scsi_pkt *); 1100*85Scth int bsd_scsi_read_capacity_16(struct scsi_pkt *); 1101*85Scth int bsd_scsi_reserve(struct scsi_pkt *); 1102*85Scth int bsd_scsi_format(struct scsi_pkt *); 1103*85Scth int bsd_scsi_release(struct scsi_pkt *); 1104*85Scth int bsd_scsi_read_defect_list(struct scsi_pkt *); 1105*85Scth int bsd_scsi_reassign_block(struct scsi_pkt *); 1106*85Scth int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *); 1107*85Scth 1108*85Scth static void 1109*85Scth emul64_handle_cmd(struct scsi_pkt *pkt) 1110*85Scth { 1111*85Scth switch (pkt->pkt_cdbp[0]) { 1112*85Scth case SCMD_START_STOP: 1113*85Scth (void) bsd_scsi_start_stop_unit(pkt); 1114*85Scth break; 1115*85Scth case SCMD_TEST_UNIT_READY: 1116*85Scth (void) bsd_scsi_test_unit_ready(pkt); 1117*85Scth break; 1118*85Scth case SCMD_REQUEST_SENSE: 1119*85Scth (void) bsd_scsi_request_sense(pkt); 1120*85Scth break; 1121*85Scth case SCMD_INQUIRY: 1122*85Scth (void) bsd_scsi_inquiry(pkt); 1123*85Scth break; 1124*85Scth case SCMD_FORMAT: 1125*85Scth (void) bsd_scsi_format(pkt); 1126*85Scth break; 1127*85Scth case SCMD_READ: 1128*85Scth case SCMD_WRITE: 1129*85Scth case SCMD_READ_G1: 1130*85Scth case SCMD_WRITE_G1: 1131*85Scth case SCMD_READ_G4: 1132*85Scth case SCMD_WRITE_G4: 1133*85Scth (void) bsd_scsi_io(pkt); 1134*85Scth break; 1135*85Scth case SCMD_LOG_SENSE_G1: 1136*85Scth (void) bsd_scsi_log_sense(pkt); 1137*85Scth break; 1138*85Scth case SCMD_MODE_SENSE: 1139*85Scth case SCMD_MODE_SENSE_G1: 1140*85Scth (void) bsd_scsi_mode_sense(pkt); 1141*85Scth break; 1142*85Scth case SCMD_MODE_SELECT: 1143*85Scth case SCMD_MODE_SELECT_G1: 1144*85Scth (void) bsd_scsi_mode_select(pkt); 1145*85Scth break; 
1146*85Scth case SCMD_READ_CAPACITY: 1147*85Scth (void) bsd_scsi_read_capacity(pkt); 1148*85Scth break; 1149*85Scth case SCMD_SVC_ACTION_IN_G4: 1150*85Scth if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) { 1151*85Scth (void) bsd_scsi_read_capacity_16(pkt); 1152*85Scth } else { 1153*85Scth cmn_err(CE_WARN, "emul64: unrecognized G4 service " 1154*85Scth "action 0x%x", pkt->pkt_cdbp[1]); 1155*85Scth } 1156*85Scth break; 1157*85Scth case SCMD_RESERVE: 1158*85Scth case SCMD_RESERVE_G1: 1159*85Scth (void) bsd_scsi_reserve(pkt); 1160*85Scth break; 1161*85Scth case SCMD_RELEASE: 1162*85Scth case SCMD_RELEASE_G1: 1163*85Scth (void) bsd_scsi_release(pkt); 1164*85Scth break; 1165*85Scth case SCMD_REASSIGN_BLOCK: 1166*85Scth (void) bsd_scsi_reassign_block(pkt); 1167*85Scth break; 1168*85Scth case SCMD_READ_DEFECT_LIST: 1169*85Scth (void) bsd_scsi_read_defect_list(pkt); 1170*85Scth break; 1171*85Scth case SCMD_PRIN: 1172*85Scth case SCMD_PROUT: 1173*85Scth case SCMD_REPORT_LUNS: 1174*85Scth /* ASC 0x24 INVALID FIELD IN CDB */ 1175*85Scth emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0); 1176*85Scth break; 1177*85Scth default: 1178*85Scth cmn_err(CE_WARN, "emul64: unrecognized " 1179*85Scth "SCSI cmd 0x%x", pkt->pkt_cdbp[0]); 1180*85Scth emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0); 1181*85Scth break; 1182*85Scth case SCMD_GET_CONFIGURATION: 1183*85Scth case 0x35: /* SCMD_SYNCHRONIZE_CACHE */ 1184*85Scth /* Don't complain */ 1185*85Scth break; 1186*85Scth } 1187*85Scth } 1188*85Scth 1189*85Scth static void 1190*85Scth emul64_pkt_comp(void * arg) 1191*85Scth { 1192*85Scth struct scsi_pkt *pkt = (struct scsi_pkt *)arg; 1193*85Scth struct emul64_cmd *sp = PKT2CMD(pkt); 1194*85Scth emul64_tgt_t *tgt; 1195*85Scth 1196*85Scth EMUL64_MUTEX_ENTER(sp->cmd_emul64); 1197*85Scth tgt = find_tgt(sp->cmd_emul64, 1198*85Scth pkt->pkt_address.a_target, pkt->pkt_address.a_lun); 1199*85Scth EMUL64_MUTEX_EXIT(sp->cmd_emul64); 1200*85Scth if (!tgt) { 1201*85Scth pkt->pkt_reason 
= CMD_TIMEOUT; 1202*85Scth pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD; 1203*85Scth pkt->pkt_statistics = STAT_TIMEOUT; 1204*85Scth } else { 1205*85Scth pkt->pkt_reason = CMD_CMPLT; 1206*85Scth *pkt->pkt_scbp = STATUS_GOOD; 1207*85Scth pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET | 1208*85Scth STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS; 1209*85Scth pkt->pkt_statistics = 0; 1210*85Scth emul64_handle_cmd(pkt); 1211*85Scth } 1212*85Scth (*pkt->pkt_comp)(pkt); 1213*85Scth } 1214*85Scth 1215*85Scth /* ARGSUSED */ 1216*85Scth static int 1217*85Scth emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 1218*85Scth { 1219*85Scth return (1); 1220*85Scth } 1221*85Scth 1222*85Scth /* ARGSUSED */ 1223*85Scth static int 1224*85Scth emul64_scsi_reset(struct scsi_address *ap, int level) 1225*85Scth { 1226*85Scth return (1); 1227*85Scth } 1228*85Scth 1229*85Scth static int 1230*85Scth emul64_get_tgtrange(struct emul64 *emul64, 1231*85Scth intptr_t arg, 1232*85Scth emul64_tgt_t **tgtp, 1233*85Scth emul64_tgt_range_t *tgtr) 1234*85Scth { 1235*85Scth if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) { 1236*85Scth cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n"); 1237*85Scth return (EFAULT); 1238*85Scth } 1239*85Scth EMUL64_MUTEX_ENTER(emul64); 1240*85Scth *tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun); 1241*85Scth EMUL64_MUTEX_EXIT(emul64); 1242*85Scth if (*tgtp == NULL) { 1243*85Scth cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d", 1244*85Scth tgtr->emul64_target, tgtr->emul64_lun, 1245*85Scth ddi_get_instance(emul64->emul64_dip)); 1246*85Scth return (ENXIO); 1247*85Scth } 1248*85Scth return (0); 1249*85Scth } 1250*85Scth 1251*85Scth static int 1252*85Scth emul64_ioctl(dev_t dev, 1253*85Scth int cmd, 1254*85Scth intptr_t arg, 1255*85Scth int mode, 1256*85Scth cred_t *credp, 1257*85Scth int *rvalp) 1258*85Scth { 1259*85Scth struct emul64 *emul64; 1260*85Scth int instance; 1261*85Scth int rv = 0; 1262*85Scth 
emul64_tgt_range_t tgtr; 1263*85Scth emul64_tgt_t *tgt; 1264*85Scth 1265*85Scth instance = MINOR2INST(getminor(dev)); 1266*85Scth emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance); 1267*85Scth if (emul64 == NULL) { 1268*85Scth cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n", 1269*85Scth getminor(dev)); 1270*85Scth return (ENXIO); 1271*85Scth } 1272*85Scth 1273*85Scth switch (cmd) { 1274*85Scth case EMUL64_WRITE_OFF: 1275*85Scth rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr); 1276*85Scth if (rv == 0) { 1277*85Scth rv = emul64_write_off(emul64, tgt, &tgtr); 1278*85Scth } 1279*85Scth break; 1280*85Scth case EMUL64_WRITE_ON: 1281*85Scth rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr); 1282*85Scth if (rv == 0) { 1283*85Scth rv = emul64_write_on(emul64, tgt, &tgtr); 1284*85Scth } 1285*85Scth break; 1286*85Scth case EMUL64_ZERO_RANGE: 1287*85Scth rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr); 1288*85Scth if (rv == 0) { 1289*85Scth mutex_enter(&tgt->emul64_tgt_blk_lock); 1290*85Scth rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange); 1291*85Scth mutex_exit(&tgt->emul64_tgt_blk_lock); 1292*85Scth } 1293*85Scth break; 1294*85Scth default: 1295*85Scth rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp); 1296*85Scth break; 1297*85Scth } 1298*85Scth return (rv); 1299*85Scth } 1300*85Scth 1301*85Scth /* ARGSUSED */ 1302*85Scth static int 1303*85Scth emul64_write_off(struct emul64 *emul64, 1304*85Scth emul64_tgt_t *tgt, 1305*85Scth emul64_tgt_range_t *tgtr) 1306*85Scth { 1307*85Scth size_t blkcnt = tgtr->emul64_blkrange.emul64_blkcnt; 1308*85Scth emul64_nowrite_t *cur; 1309*85Scth emul64_nowrite_t *nowrite; 1310*85Scth emul64_rng_overlap_t overlap = O_NONE; 1311*85Scth emul64_nowrite_t **prev = NULL; 1312*85Scth diskaddr_t sb = tgtr->emul64_blkrange.emul64_sb; 1313*85Scth 1314*85Scth nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange); 1315*85Scth 1316*85Scth /* Find spot in list */ 1317*85Scth rw_enter(&tgt->emul64_tgt_nw_lock, 
RW_WRITER); 1318*85Scth cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev); 1319*85Scth if (overlap == O_NONE) { 1320*85Scth /* Insert into list */ 1321*85Scth *prev = nowrite; 1322*85Scth nowrite->emul64_nwnext = cur; 1323*85Scth } 1324*85Scth rw_exit(&tgt->emul64_tgt_nw_lock); 1325*85Scth if (overlap == O_NONE) { 1326*85Scth if (emul64_collect_stats) { 1327*85Scth mutex_enter(&emul64_stats_mutex); 1328*85Scth emul64_nowrite_count++; 1329*85Scth mutex_exit(&emul64_stats_mutex); 1330*85Scth } 1331*85Scth } else { 1332*85Scth cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%" 1333*85Scth PRIx64 "overlaps 0x%llx,0x%" PRIx64 "\n", 1334*85Scth nowrite->emul64_blocked.emul64_sb, 1335*85Scth nowrite->emul64_blocked.emul64_blkcnt, 1336*85Scth cur->emul64_blocked.emul64_sb, 1337*85Scth cur->emul64_blocked.emul64_blkcnt); 1338*85Scth emul64_nowrite_free(nowrite); 1339*85Scth return (EINVAL); 1340*85Scth } 1341*85Scth return (0); 1342*85Scth } 1343*85Scth 1344*85Scth /* ARGSUSED */ 1345*85Scth static int 1346*85Scth emul64_write_on(struct emul64 *emul64, 1347*85Scth emul64_tgt_t *tgt, 1348*85Scth emul64_tgt_range_t *tgtr) 1349*85Scth { 1350*85Scth size_t blkcnt = tgtr->emul64_blkrange.emul64_blkcnt; 1351*85Scth emul64_nowrite_t *cur; 1352*85Scth emul64_rng_overlap_t overlap = O_NONE; 1353*85Scth emul64_nowrite_t **prev = NULL; 1354*85Scth int rv = 0; 1355*85Scth diskaddr_t sb = tgtr->emul64_blkrange.emul64_sb; 1356*85Scth 1357*85Scth /* Find spot in list */ 1358*85Scth rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER); 1359*85Scth cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev); 1360*85Scth if (overlap == O_SAME) { 1361*85Scth /* Remove from list */ 1362*85Scth *prev = cur->emul64_nwnext; 1363*85Scth } 1364*85Scth rw_exit(&tgt->emul64_tgt_nw_lock); 1365*85Scth 1366*85Scth switch (overlap) { 1367*85Scth case O_NONE: 1368*85Scth cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx " 1369*85Scth "range not found\n", sb, blkcnt); 1370*85Scth rv = ENXIO; 
		break;
	case O_SAME:
		/* Exact match: drop the stats count and free the entry. */
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count--;
			mutex_exit(&emul64_stats_mutex);
		}
		emul64_nowrite_free(cur);
		break;
	case O_OVERLAP:
	case O_SUBSET:
		/* Partial overlap: refuse rather than split the range. */
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "overlaps 0x%llx,0x%" PRIx64 "\n",
		    sb, blkcnt, cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		rv = EINVAL;
		break;
	}
	return (rv);
}

/*
 * Walk a target's nowrite list looking for an entry that overlaps
 * [sb, sb + blkcnt).  On return, *overlap holds the kind of overlap
 * found (O_NONE if the list was exhausted) and *prevp points at the
 * link that references the returned entry, for later insert/remove.
 * Returns the overlapping entry, or NULL if none.  Callers hold
 * emul64_tgt_nw_lock (see emul64_write_off/emul64_write_on).
 */
static emul64_nowrite_t *
emul64_find_nowrite(emul64_tgt_t *tgt,
    diskaddr_t sb,
    size_t blkcnt,
    emul64_rng_overlap_t *overlap,
    emul64_nowrite_t ***prevp)
{
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	**prev;

	/* Find spot in list */
	*overlap = O_NONE;
	prev = &tgt->emul64_tgt_nowrite;
	cur = tgt->emul64_tgt_nowrite;
	while (cur != NULL) {
		*overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
		if (*overlap != O_NONE)
			break;
		prev = &cur->emul64_nwnext;
		cur = cur->emul64_nwnext;
	}

	*prevp = prev;
	return (cur);
}

/*
 * Allocate a nowrite list entry covering *range (sleeps for memory).
 * The caller releases it with emul64_nowrite_free().
 */
static emul64_nowrite_t *
emul64_nowrite_alloc(emul64_range_t *range)
{
	emul64_nowrite_t	*nw;

	nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
	bcopy((void *) range,
	    (void *) &nw->emul64_blocked,
	    sizeof (nw->emul64_blocked));
	return (nw);
}

/* Free a nowrite list entry allocated by emul64_nowrite_alloc(). */
static void
emul64_nowrite_free(emul64_nowrite_t *nw)
{
	kmem_free((void *) nw, sizeof
(*nw));
}

/*
 * Classify how the range *rng relates to the range [sb, sb + cnt):
 *	O_NONE	  - the two ranges do not intersect
 *	O_SAME	  - identical start and count
 *	O_SUBSET  - [sb, sb + cnt) lies entirely within *rng
 *	O_OVERLAP - any other (partial) intersection
 */
emul64_rng_overlap_t
emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
{

	if (rng->emul64_sb >= sb + cnt)
		return (O_NONE);
	if (rng->emul64_sb + rng->emul64_blkcnt <= sb)
		return (O_NONE);
	if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt))
		return (O_SAME);
	if ((sb >= rng->emul64_sb) &&
	    ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) {
		return (O_SUBSET);
	}
	return (O_OVERLAP);
}

#include <sys/varargs.h>

/*
 * Error logging, printing, and debug print routines
 */

/*
 * Format a message into a local buffer (truncated at 256 bytes by
 * vsnprintf) and pass it to scsi_log() at the given severity level.
 */
/*VARARGS3*/
static void
emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...)
{
	char	buf[256];
	va_list	ap;

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	scsi_log(emul64 ?
emul64->emul64_dip : NULL, 1471*85Scth "emul64", level, "%s\n", buf); 1472*85Scth } 1473*85Scth 1474*85Scth 1475*85Scth #ifdef EMUL64DEBUG 1476*85Scth 1477*85Scth static void 1478*85Scth emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt) 1479*85Scth { 1480*85Scth static char hex[] = "0123456789abcdef"; 1481*85Scth struct emul64 *emul64 = ADDR2EMUL64(ap); 1482*85Scth struct emul64_cmd *sp = PKT2CMD(pkt); 1483*85Scth uint8_t *cdb = pkt->pkt_cdbp; 1484*85Scth char buf [256]; 1485*85Scth char *p; 1486*85Scth int i; 1487*85Scth 1488*85Scth (void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ", 1489*85Scth ddi_get_instance(emul64->emul64_dip), 1490*85Scth ap->a_target, ap->a_lun); 1491*85Scth 1492*85Scth p = buf + strlen(buf); 1493*85Scth 1494*85Scth *p++ = '['; 1495*85Scth for (i = 0; i < sp->cmd_cdblen; i++, cdb++) { 1496*85Scth if (i != 0) 1497*85Scth *p++ = ' '; 1498*85Scth *p++ = hex[(*cdb >> 4) & 0x0f]; 1499*85Scth *p++ = hex[*cdb & 0x0f]; 1500*85Scth } 1501*85Scth *p++ = ']'; 1502*85Scth *p++ = '\n'; 1503*85Scth *p = 0; 1504*85Scth 1505*85Scth cmn_err(CE_CONT, buf); 1506*85Scth } 1507*85Scth #endif /* EMUL64DEBUG */ 1508