/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"

/*
 * SCSA HBA nexus driver that emulates an HBA connected to SCSI target
 * devices (large disks).
3285Scth */ 3385Scth 3485Scth #ifdef DEBUG 3585Scth #define EMUL64DEBUG 3685Scth #endif 3785Scth 3885Scth #include <sys/scsi/scsi.h> 3985Scth #include <sys/ddi.h> 4085Scth #include <sys/sunddi.h> 4185Scth #include <sys/taskq.h> 4285Scth #include <sys/disp.h> 4385Scth #include <sys/types.h> 4485Scth #include <sys/buf.h> 4585Scth #include <sys/cpuvar.h> 4685Scth #include <sys/dklabel.h> 4785Scth 4885Scth #include <sys/emul64.h> 4985Scth #include <sys/emul64cmd.h> 5085Scth #include <sys/emul64var.h> 5185Scth 5285Scth int emul64_usetaskq = 1; /* set to zero for debugging */ 5385Scth int emul64debug = 0; 5485Scth #ifdef EMUL64DEBUG 5585Scth static int emul64_cdb_debug = 0; 5685Scth #include <sys/debug.h> 5785Scth #endif 5885Scth 5985Scth /* 6085Scth * cb_ops function prototypes 6185Scth */ 6285Scth static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode, 6385Scth cred_t *credp, int *rvalp); 6485Scth 6585Scth /* 6685Scth * dev_ops functions prototypes 6785Scth */ 6885Scth static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 6985Scth void *arg, void **result); 7085Scth static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 7185Scth static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 7285Scth 7385Scth /* 7485Scth * Function prototypes 7585Scth * 7685Scth * SCSA functions exported by means of the transport table 7785Scth */ 7885Scth static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 7985Scth scsi_hba_tran_t *tran, struct scsi_device *sd); 8085Scth static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt); 8185Scth static void emul64_pkt_comp(void *); 8285Scth static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt); 8385Scth static int emul64_scsi_reset(struct scsi_address *ap, int level); 8485Scth static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom); 8585Scth static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, 8685Scth int 
whom); 8785Scth static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap, 8885Scth struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen, 8985Scth int tgtlen, int flags, int (*callback)(), caddr_t arg); 9085Scth static void emul64_scsi_destroy_pkt(struct scsi_address *ap, 9185Scth struct scsi_pkt *pkt); 9285Scth static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt); 9385Scth static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt); 9485Scth static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag, 9585Scth void (*callback)(caddr_t), caddr_t arg); 9685Scth 9785Scth /* 9885Scth * internal functions 9985Scth */ 10085Scth static void emul64_i_initcap(struct emul64 *emul64); 10185Scth 10285Scth static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...); 10385Scth static int emul64_get_tgtrange(struct emul64 *, 10485Scth intptr_t, 10585Scth emul64_tgt_t **, 10685Scth emul64_tgt_range_t *); 10785Scth static int emul64_write_off(struct emul64 *, 10885Scth emul64_tgt_t *, 10985Scth emul64_tgt_range_t *); 11085Scth static int emul64_write_on(struct emul64 *, 11185Scth emul64_tgt_t *, 11285Scth emul64_tgt_range_t *); 11385Scth static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *); 11485Scth static void emul64_nowrite_free(emul64_nowrite_t *); 11585Scth static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *, 11685Scth diskaddr_t start_block, 11785Scth size_t blkcnt, 11885Scth emul64_rng_overlap_t *overlapp, 11985Scth emul64_nowrite_t ***prevp); 12085Scth 12185Scth extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t); 12285Scth 12385Scth #ifdef EMUL64DEBUG 12485Scth static void emul64_debug_dump_cdb(struct scsi_address *ap, 12585Scth struct scsi_pkt *pkt); 12685Scth #endif 12785Scth 12885Scth 12985Scth #ifdef _DDICT 13085Scth static int ddi_in_panic(void); 13185Scth static int ddi_in_panic() { return (0); } 13285Scth #ifndef SCSI_CAP_RESET_NOTIFICATION 
13385Scth #define SCSI_CAP_RESET_NOTIFICATION 14 13485Scth #endif 13585Scth #ifndef SCSI_RESET_NOTIFY 13685Scth #define SCSI_RESET_NOTIFY 0x01 13785Scth #endif 13885Scth #ifndef SCSI_RESET_CANCEL 13985Scth #define SCSI_RESET_CANCEL 0x02 14085Scth #endif 14185Scth #endif 14285Scth 14385Scth /* 14485Scth * Tunables: 14585Scth * 14685Scth * emul64_max_task 14785Scth * The taskq facility is used to queue up SCSI start requests on a per 14885Scth * controller basis. If the maximum number of queued tasks is hit, 14985Scth * taskq_ent_alloc() delays for a second, which adversely impacts our 15085Scth * performance. This value establishes the maximum number of task 15185Scth * queue entries when taskq_create is called. 15285Scth * 15385Scth * emul64_task_nthreads 15485Scth * Specifies the number of threads that should be used to process a 15585Scth * controller's task queue. Our init function sets this to the number 15685Scth * of CPUs on the system, but this can be overridden in emul64.conf. 15785Scth */ 15885Scth int emul64_max_task = 16; 15985Scth int emul64_task_nthreads = 1; 16085Scth 16185Scth /* 16285Scth * Local static data 16385Scth */ 16485Scth static void *emul64_state = NULL; 16585Scth 16685Scth /* 16785Scth * Character/block operations. 
16885Scth */ 16985Scth static struct cb_ops emul64_cbops = { 17085Scth scsi_hba_open, /* cb_open */ 17185Scth scsi_hba_close, /* cb_close */ 17285Scth nodev, /* cb_strategy */ 17385Scth nodev, /* cb_print */ 17485Scth nodev, /* cb_dump */ 17585Scth nodev, /* cb_read */ 17685Scth nodev, /* cb_write */ 17785Scth emul64_ioctl, /* cb_ioctl */ 17885Scth nodev, /* cb_devmap */ 17985Scth nodev, /* cb_mmap */ 18085Scth nodev, /* cb_segmap */ 18185Scth nochpoll, /* cb_chpoll */ 18285Scth ddi_prop_op, /* cb_prop_op */ 18385Scth NULL, /* cb_str */ 18485Scth D_MP | D_64BIT | D_HOTPLUG, /* cb_flag */ 18585Scth CB_REV, /* cb_rev */ 18685Scth nodev, /* cb_aread */ 18785Scth nodev /* cb_awrite */ 18885Scth }; 18985Scth 19085Scth /* 19185Scth * autoconfiguration routines. 19285Scth */ 19385Scth 19485Scth static struct dev_ops emul64_ops = { 19585Scth DEVO_REV, /* rev, */ 19685Scth 0, /* refcnt */ 19785Scth emul64_info, /* getinfo */ 19885Scth nulldev, /* identify */ 19985Scth nulldev, /* probe */ 20085Scth emul64_attach, /* attach */ 20185Scth emul64_detach, /* detach */ 20285Scth nodev, /* reset */ 20385Scth &emul64_cbops, /* char/block ops */ 20485Scth NULL /* bus ops */ 20585Scth }; 20685Scth 20785Scth char _depends_on[] = "misc/scsi"; 20885Scth 20985Scth static struct modldrv modldrv = { 21085Scth &mod_driverops, /* module type - driver */ 21185Scth "emul64 SCSI Host Bus Adapter", /* module name */ 21285Scth &emul64_ops, /* driver ops */ 21385Scth }; 21485Scth 21585Scth static struct modlinkage modlinkage = { 21685Scth MODREV_1, /* ml_rev - must be MODREV_1 */ 21785Scth &modldrv, /* ml_linkage */ 21885Scth NULL /* end of driver linkage */ 21985Scth }; 22085Scth 22185Scth int 22285Scth _init(void) 22385Scth { 22485Scth int ret; 22585Scth 22685Scth ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64), 22785Scth EMUL64_INITIAL_SOFT_SPACE); 22885Scth if (ret != 0) 22985Scth return (ret); 23085Scth 23185Scth if ((ret = scsi_hba_init(&modlinkage)) != 0) { 23285Scth 
ddi_soft_state_fini(&emul64_state); 23385Scth return (ret); 23485Scth } 23585Scth 23685Scth /* Set the number of task threads to the number of CPUs */ 23785Scth if (boot_max_ncpus == -1) { 23885Scth emul64_task_nthreads = max_ncpus; 23985Scth } else { 24085Scth emul64_task_nthreads = boot_max_ncpus; 24185Scth } 24285Scth 24385Scth emul64_bsd_init(); 24485Scth 24585Scth ret = mod_install(&modlinkage); 24685Scth if (ret != 0) { 24785Scth emul64_bsd_fini(); 24885Scth scsi_hba_fini(&modlinkage); 24985Scth ddi_soft_state_fini(&emul64_state); 25085Scth } 25185Scth 25285Scth return (ret); 25385Scth } 25485Scth 25585Scth int 25685Scth _fini(void) 25785Scth { 25885Scth int ret; 25985Scth 26085Scth if ((ret = mod_remove(&modlinkage)) != 0) 26185Scth return (ret); 26285Scth 26385Scth emul64_bsd_fini(); 26485Scth 26585Scth scsi_hba_fini(&modlinkage); 26685Scth 26785Scth ddi_soft_state_fini(&emul64_state); 26885Scth 26985Scth return (ret); 27085Scth } 27185Scth 27285Scth int 27385Scth _info(struct modinfo *modinfop) 27485Scth { 27585Scth return (mod_info(&modlinkage, modinfop)); 27685Scth } 27785Scth 27885Scth /* 27985Scth * Given the device number return the devinfo pointer 28085Scth * from the scsi_device structure. 
28185Scth */ 28285Scth /*ARGSUSED*/ 28385Scth static int 28485Scth emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 28585Scth { 28685Scth struct emul64 *foo; 28785Scth int instance = getminor((dev_t)arg); 28885Scth 28985Scth switch (cmd) { 29085Scth case DDI_INFO_DEVT2DEVINFO: 29185Scth foo = ddi_get_soft_state(emul64_state, instance); 29285Scth if (foo != NULL) 29385Scth *result = (void *)foo->emul64_dip; 29485Scth else { 29585Scth *result = NULL; 29685Scth return (DDI_FAILURE); 29785Scth } 29885Scth break; 29985Scth 30085Scth case DDI_INFO_DEVT2INSTANCE: 30185Scth *result = (void *)(uintptr_t)instance; 30285Scth break; 30385Scth 30485Scth default: 30585Scth return (DDI_FAILURE); 30685Scth } 30785Scth 30885Scth return (DDI_SUCCESS); 30985Scth } 31085Scth 31185Scth /* 31285Scth * Attach an instance of an emul64 host adapter. Allocate data structures, 31385Scth * initialize the emul64 and we're on the air. 31485Scth */ 31585Scth /*ARGSUSED*/ 31685Scth static int 31785Scth emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 31885Scth { 31985Scth int mutex_initted = 0; 32085Scth struct emul64 *emul64; 32185Scth int instance; 32285Scth scsi_hba_tran_t *tran = NULL; 32385Scth ddi_dma_attr_t tmp_dma_attr; 32485Scth 32585Scth emul64_bsd_get_props(dip); 32685Scth 32785Scth bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr)); 32885Scth instance = ddi_get_instance(dip); 32985Scth 33085Scth switch (cmd) { 33185Scth case DDI_ATTACH: 33285Scth break; 33385Scth 33485Scth case DDI_RESUME: 33585Scth tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip); 33685Scth if (!tran) { 33785Scth return (DDI_FAILURE); 33885Scth } 33985Scth emul64 = TRAN2EMUL64(tran); 34085Scth 34185Scth return (DDI_SUCCESS); 34285Scth 34385Scth default: 34485Scth emul64_i_log(NULL, CE_WARN, 34585Scth "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance); 34685Scth return (DDI_FAILURE); 34785Scth } 34885Scth 34985Scth /* 35085Scth * Allocate emul64 data structure. 
35185Scth */ 35285Scth if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) { 35385Scth emul64_i_log(NULL, CE_WARN, 35485Scth "emul64%d: Failed to alloc soft state", 35585Scth instance); 35685Scth return (DDI_FAILURE); 35785Scth } 35885Scth 35985Scth emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance); 36085Scth if (emul64 == (struct emul64 *)NULL) { 36185Scth emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state", 36285Scth instance); 36385Scth ddi_soft_state_free(emul64_state, instance); 36485Scth return (DDI_FAILURE); 36585Scth } 36685Scth 36785Scth 36885Scth /* 36985Scth * Allocate a transport structure 37085Scth */ 37185Scth tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP); 37285Scth if (tran == NULL) { 37385Scth cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n"); 37485Scth goto fail; 37585Scth } 37685Scth 37785Scth emul64->emul64_tran = tran; 37885Scth emul64->emul64_dip = dip; 37985Scth 38085Scth tran->tran_hba_private = emul64; 38185Scth tran->tran_tgt_private = NULL; 38285Scth tran->tran_tgt_init = emul64_tran_tgt_init; 38385Scth tran->tran_tgt_probe = scsi_hba_probe; 38485Scth tran->tran_tgt_free = NULL; 38585Scth 38685Scth tran->tran_start = emul64_scsi_start; 38785Scth tran->tran_abort = emul64_scsi_abort; 38885Scth tran->tran_reset = emul64_scsi_reset; 38985Scth tran->tran_getcap = emul64_scsi_getcap; 39085Scth tran->tran_setcap = emul64_scsi_setcap; 39185Scth tran->tran_init_pkt = emul64_scsi_init_pkt; 39285Scth tran->tran_destroy_pkt = emul64_scsi_destroy_pkt; 39385Scth tran->tran_dmafree = emul64_scsi_dmafree; 39485Scth tran->tran_sync_pkt = emul64_scsi_sync_pkt; 39585Scth tran->tran_reset_notify = emul64_scsi_reset_notify; 39685Scth 39785Scth tmp_dma_attr.dma_attr_minxfer = 0x1; 39885Scth tmp_dma_attr.dma_attr_burstsizes = 0x7f; 39985Scth 40085Scth /* 40185Scth * Attach this instance of the hba 40285Scth */ 40385Scth if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran, 40485Scth 0) != DDI_SUCCESS) { 40585Scth 
cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n"); 40685Scth goto fail; 40785Scth } 40885Scth 40985Scth emul64->emul64_initiator_id = 2; 41085Scth 41185Scth /* 41285Scth * Look up the scsi-options property 41385Scth */ 41485Scth emul64->emul64_scsi_options = 41585Scth ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options", 41685Scth EMUL64_DEFAULT_SCSI_OPTIONS); 41785Scth EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x", 41885Scth emul64->emul64_scsi_options); 41985Scth 42085Scth 42185Scth /* mutexes to protect the emul64 request and response queue */ 42285Scth mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER, 42385Scth emul64->emul64_iblock); 42485Scth mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER, 42585Scth emul64->emul64_iblock); 42685Scth 42785Scth mutex_initted = 1; 42885Scth 42985Scth EMUL64_MUTEX_ENTER(emul64); 43085Scth 43185Scth /* 43285Scth * Initialize the default Target Capabilities and Sync Rates 43385Scth */ 43485Scth emul64_i_initcap(emul64); 43585Scth 43685Scth EMUL64_MUTEX_EXIT(emul64); 43785Scth 43885Scth 43985Scth ddi_report_dev(dip); 44085Scth emul64->emul64_taskq = taskq_create("emul64_comp", 44185Scth emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0); 44285Scth 44385Scth return (DDI_SUCCESS); 44485Scth 44585Scth fail: 44685Scth emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance); 44785Scth 44885Scth if (mutex_initted) { 44985Scth mutex_destroy(EMUL64_REQ_MUTEX(emul64)); 45085Scth mutex_destroy(EMUL64_RESP_MUTEX(emul64)); 45185Scth } 45285Scth if (tran) { 45385Scth scsi_hba_tran_free(tran); 45485Scth } 45585Scth ddi_soft_state_free(emul64_state, instance); 45685Scth return (DDI_FAILURE); 45785Scth } 45885Scth 45985Scth /*ARGSUSED*/ 46085Scth static int 46185Scth emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 46285Scth { 46385Scth struct emul64 *emul64; 46485Scth scsi_hba_tran_t *tran; 46585Scth int instance = ddi_get_instance(dip); 46685Scth 46785Scth 46885Scth /* get transport 
structure pointer from the dip */ 46985Scth if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) { 47085Scth return (DDI_FAILURE); 47185Scth } 47285Scth 47385Scth /* get soft state from transport structure */ 47485Scth emul64 = TRAN2EMUL64(tran); 47585Scth 47685Scth if (!emul64) { 47785Scth return (DDI_FAILURE); 47885Scth } 47985Scth 48085Scth EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd); 48185Scth 48285Scth switch (cmd) { 48385Scth case DDI_DETACH: 48485Scth EMUL64_MUTEX_ENTER(emul64); 48585Scth 48685Scth taskq_destroy(emul64->emul64_taskq); 48785Scth (void) scsi_hba_detach(dip); 48885Scth 48985Scth scsi_hba_tran_free(emul64->emul64_tran); 49085Scth 49185Scth 49285Scth EMUL64_MUTEX_EXIT(emul64); 49385Scth 49485Scth mutex_destroy(EMUL64_REQ_MUTEX(emul64)); 49585Scth mutex_destroy(EMUL64_RESP_MUTEX(emul64)); 49685Scth 49785Scth 49885Scth EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done"); 49985Scth ddi_soft_state_free(emul64_state, instance); 50085Scth 50185Scth return (DDI_SUCCESS); 50285Scth 50385Scth case DDI_SUSPEND: 50485Scth return (DDI_SUCCESS); 50585Scth 50685Scth default: 50785Scth return (DDI_FAILURE); 50885Scth } 50985Scth } 51085Scth 51185Scth /* 51285Scth * Function name : emul64_tran_tgt_init 51385Scth * 51485Scth * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise 51585Scth * 51685Scth */ 51785Scth /*ARGSUSED*/ 51885Scth static int 51985Scth emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 52085Scth scsi_hba_tran_t *tran, struct scsi_device *sd) 52185Scth { 52285Scth struct emul64 *emul64; 52385Scth emul64_tgt_t *tgt; 52485Scth char **geo_vidpid = NULL; 52585Scth char *geo, *vidpid; 52685Scth uint32_t *geoip = NULL; 52785Scth uint_t length; 52885Scth uint_t length2; 52985Scth lldaddr_t sector_count; 53085Scth char prop_name[15]; 53185Scth int ret = DDI_FAILURE; 53285Scth 53385Scth emul64 = TRAN2EMUL64(tran); 53485Scth EMUL64_MUTEX_ENTER(emul64); 53585Scth 53685Scth /* 53785Scth * We 
get called for each target driver.conf node, multiple 53885Scth * nodes may map to the same tgt,lun (sd.conf, st.conf, etc). 53985Scth * Check to see if transport to tgt,lun already established. 54085Scth */ 54185Scth tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun); 54285Scth if (tgt) { 54385Scth ret = DDI_SUCCESS; 54485Scth goto out; 54585Scth } 54685Scth 54785Scth /* see if we have driver.conf specified device for this target,lun */ 54885Scth (void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d", 54985Scth sd->sd_address.a_target, sd->sd_address.a_lun); 55085Scth if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip, 55185Scth DDI_PROP_DONTPASS, prop_name, 55285Scth &geo_vidpid, &length) != DDI_PROP_SUCCESS) 55385Scth goto out; 55485Scth if (length < 2) { 55585Scth cmn_err(CE_WARN, "emul64: %s property does not have 2 " 55685Scth "elements", prop_name); 55785Scth goto out; 55885Scth } 55985Scth 56085Scth /* pick geometry name and vidpid string from string array */ 56185Scth geo = *geo_vidpid; 56285Scth vidpid = *(geo_vidpid + 1); 56385Scth 56485Scth /* lookup geometry property integer array */ 56585Scth if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS, 56685Scth geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) { 56785Scth cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo); 56885Scth goto out; 56985Scth } 57085Scth if (length2 < 6) { 57185Scth cmn_err(CE_WARN, "emul64: property %s does not have 6 " 57285Scth "elements", *geo_vidpid); 57385Scth goto out; 57485Scth } 57585Scth 57685Scth /* allocate and initialize tgt structure for tgt,lun */ 57785Scth tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP); 57885Scth rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL); 57985Scth mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL); 58085Scth 58185Scth /* create avl for data block storage */ 58285Scth avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare, 58385Scth sizeof (blklist_t), 
offsetof(blklist_t, bl_node)); 58485Scth 58585Scth /* save scsi_address and vidpid */ 58685Scth bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address)); 58785Scth (void) strncpy(tgt->emul64_tgt_inq, vidpid, 58885Scth sizeof (emul64->emul64_tgt->emul64_tgt_inq)); 58985Scth 59085Scth /* 59185Scth * The high order 4 bytes of the sector count always come first in 59285Scth * emul64.conf. They are followed by the low order 4 bytes. Not 59385Scth * all CPU types want them in this order, but laddr_t takes care of 59485Scth * this for us. We then pick up geometry (ncyl X nheads X nsect). 59585Scth */ 59685Scth sector_count._p._u = *(geoip + 0); 59785Scth sector_count._p._l = *(geoip + 1); 59885Scth /* 59985Scth * On 32-bit platforms, fix block size if it's greater than the 60085Scth * allowable maximum. 60185Scth */ 60285Scth #if !defined(_LP64) 60385Scth if (sector_count._f > DK_MAX_BLOCKS) 60485Scth sector_count._f = DK_MAX_BLOCKS; 60585Scth #endif 60685Scth tgt->emul64_tgt_sectors = sector_count._f; 60785Scth tgt->emul64_tgt_dtype = *(geoip + 2); 60885Scth tgt->emul64_tgt_ncyls = *(geoip + 3); 60985Scth tgt->emul64_tgt_nheads = *(geoip + 4); 61085Scth tgt->emul64_tgt_nsect = *(geoip + 5); 61185Scth 61285Scth /* insert target structure into list */ 61385Scth tgt->emul64_tgt_next = emul64->emul64_tgt; 61485Scth emul64->emul64_tgt = tgt; 61585Scth ret = DDI_SUCCESS; 61685Scth 61785Scth out: EMUL64_MUTEX_EXIT(emul64); 61885Scth if (geoip) 61985Scth ddi_prop_free(geoip); 62085Scth if (geo_vidpid) 62185Scth ddi_prop_free(geo_vidpid); 62285Scth return (ret); 62385Scth } 62485Scth 62585Scth /* 62685Scth * Function name : emul64_i_initcap 62785Scth * 62885Scth * Return Values : NONE 62985Scth * Description : Initializes the default target capabilities and 63085Scth * Sync Rates. 63185Scth * 63285Scth * Context : Called from the user thread through attach. 
63385Scth * 63485Scth */ 63585Scth static void 63685Scth emul64_i_initcap(struct emul64 *emul64) 63785Scth { 63885Scth uint16_t cap, synch; 63985Scth int i; 64085Scth 64185Scth cap = 0; 64285Scth synch = 0; 64385Scth for (i = 0; i < NTARGETS_WIDE; i++) { 64485Scth emul64->emul64_cap[i] = cap; 64585Scth emul64->emul64_synch[i] = synch; 64685Scth } 64785Scth EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap); 64885Scth } 64985Scth 65085Scth /* 65185Scth * Function name : emul64_scsi_getcap() 65285Scth * 65385Scth * Return Values : current value of capability, if defined 65485Scth * -1 if capability is not defined 65585Scth * Description : returns current capability value 65685Scth * 65785Scth * Context : Can be called from different kernel process threads. 65885Scth * Can be called by interrupt thread. 65985Scth */ 66085Scth static int 66185Scth emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom) 66285Scth { 66385Scth struct emul64 *emul64 = ADDR2EMUL64(ap); 66485Scth int rval = 0; 66585Scth 66685Scth /* 66785Scth * We don't allow inquiring about capabilities for other targets 66885Scth */ 66985Scth if (cap == NULL || whom == 0) { 67085Scth return (-1); 67185Scth } 67285Scth 67385Scth EMUL64_MUTEX_ENTER(emul64); 67485Scth 67585Scth switch (scsi_hba_lookup_capstr(cap)) { 67685Scth case SCSI_CAP_DMA_MAX: 67785Scth rval = 1 << 24; /* Limit to 16MB max transfer */ 67885Scth break; 67985Scth case SCSI_CAP_MSG_OUT: 68085Scth rval = 1; 68185Scth break; 68285Scth case SCSI_CAP_DISCONNECT: 68385Scth rval = 1; 68485Scth break; 68585Scth case SCSI_CAP_SYNCHRONOUS: 68685Scth rval = 1; 68785Scth break; 68885Scth case SCSI_CAP_WIDE_XFER: 68985Scth rval = 1; 69085Scth break; 69185Scth case SCSI_CAP_TAGGED_QING: 69285Scth rval = 1; 69385Scth break; 69485Scth case SCSI_CAP_UNTAGGED_QING: 69585Scth rval = 1; 69685Scth break; 69785Scth case SCSI_CAP_PARITY: 69885Scth rval = 1; 69985Scth break; 70085Scth case SCSI_CAP_INITIATOR_ID: 70185Scth rval = 
emul64->emul64_initiator_id; 70285Scth break; 70385Scth case SCSI_CAP_ARQ: 70485Scth rval = 1; 70585Scth break; 70685Scth case SCSI_CAP_LINKED_CMDS: 70785Scth break; 70885Scth case SCSI_CAP_RESET_NOTIFICATION: 70985Scth rval = 1; 71085Scth break; 71185Scth 71285Scth default: 71385Scth rval = -1; 71485Scth break; 71585Scth } 71685Scth 71785Scth EMUL64_MUTEX_EXIT(emul64); 71885Scth 71985Scth return (rval); 72085Scth } 72185Scth 72285Scth /* 72385Scth * Function name : emul64_scsi_setcap() 72485Scth * 72585Scth * Return Values : 1 - capability exists and can be set to new value 72685Scth * 0 - capability could not be set to new value 72785Scth * -1 - no such capability 72885Scth * 72985Scth * Description : sets a capability for a target 73085Scth * 73185Scth * Context : Can be called from different kernel process threads. 73285Scth * Can be called by interrupt thread. 73385Scth */ 73485Scth static int 73585Scth emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom) 73685Scth { 73785Scth struct emul64 *emul64 = ADDR2EMUL64(ap); 73885Scth int rval = 0; 73985Scth 74085Scth /* 74185Scth * We don't allow setting capabilities for other targets 74285Scth */ 74385Scth if (cap == NULL || whom == 0) { 74485Scth return (-1); 74585Scth } 74685Scth 74785Scth EMUL64_MUTEX_ENTER(emul64); 74885Scth 74985Scth switch (scsi_hba_lookup_capstr(cap)) { 75085Scth case SCSI_CAP_DMA_MAX: 75185Scth case SCSI_CAP_MSG_OUT: 75285Scth case SCSI_CAP_PARITY: 75385Scth case SCSI_CAP_UNTAGGED_QING: 75485Scth case SCSI_CAP_LINKED_CMDS: 75585Scth case SCSI_CAP_RESET_NOTIFICATION: 75685Scth /* 75785Scth * None of these are settable via 75885Scth * the capability interface. 
75985Scth */ 76085Scth break; 76185Scth case SCSI_CAP_DISCONNECT: 76285Scth rval = 1; 76385Scth break; 76485Scth case SCSI_CAP_SYNCHRONOUS: 76585Scth rval = 1; 76685Scth break; 76785Scth case SCSI_CAP_TAGGED_QING: 76885Scth rval = 1; 76985Scth break; 77085Scth case SCSI_CAP_WIDE_XFER: 77185Scth rval = 1; 77285Scth break; 77385Scth case SCSI_CAP_INITIATOR_ID: 77485Scth rval = -1; 77585Scth break; 77685Scth case SCSI_CAP_ARQ: 77785Scth rval = 1; 77885Scth break; 77985Scth case SCSI_CAP_TOTAL_SECTORS: 78085Scth emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value; 78185Scth rval = TRUE; 78285Scth break; 78385Scth case SCSI_CAP_SECTOR_SIZE: 78485Scth rval = TRUE; 78585Scth break; 78685Scth default: 78785Scth rval = -1; 78885Scth break; 78985Scth } 79085Scth 79185Scth 79285Scth EMUL64_MUTEX_EXIT(emul64); 79385Scth 79485Scth return (rval); 79585Scth } 79685Scth 79785Scth /* 79885Scth * Function name : emul64_scsi_init_pkt 79985Scth * 80085Scth * Return Values : pointer to scsi_pkt, or NULL 80185Scth * Description : Called by kernel on behalf of a target driver 80285Scth * calling scsi_init_pkt(9F). 80385Scth * Refer to tran_init_pkt(9E) man page 80485Scth * 80585Scth * Context : Can be called from different kernel process threads. 80685Scth * Can be called by interrupt thread. 
80785Scth */ 80885Scth /* ARGSUSED */ 80985Scth static struct scsi_pkt * 81085Scth emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, 81185Scth struct buf *bp, int cmdlen, int statuslen, int tgtlen, 81285Scth int flags, int (*callback)(), caddr_t arg) 81385Scth { 81485Scth struct emul64 *emul64 = ADDR2EMUL64(ap); 81585Scth struct emul64_cmd *sp; 81685Scth 81785Scth ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC); 81885Scth 81985Scth /* 82085Scth * First step of emul64_scsi_init_pkt: pkt allocation 82185Scth */ 82285Scth if (pkt == NULL) { 82385Scth pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen, 82485Scth statuslen, 82585Scth tgtlen, sizeof (struct emul64_cmd), callback, arg); 82685Scth if (pkt == NULL) { 82785Scth cmn_err(CE_WARN, "emul64_scsi_init_pkt: " 82885Scth "scsi_hba_pkt_alloc failed"); 82985Scth return (NULL); 83085Scth } 83185Scth 83285Scth sp = PKT2CMD(pkt); 83385Scth 83485Scth /* 83585Scth * Initialize the new pkt - we redundantly initialize 83685Scth * all the fields for illustrative purposes. 
83785Scth */ 83885Scth sp->cmd_pkt = pkt; 83985Scth sp->cmd_flags = 0; 84085Scth sp->cmd_scblen = statuslen; 84185Scth sp->cmd_cdblen = cmdlen; 84285Scth sp->cmd_emul64 = emul64; 84385Scth pkt->pkt_address = *ap; 84485Scth pkt->pkt_comp = (void (*)())NULL; 84585Scth pkt->pkt_flags = 0; 84685Scth pkt->pkt_time = 0; 84785Scth pkt->pkt_resid = 0; 84885Scth pkt->pkt_statistics = 0; 84985Scth pkt->pkt_reason = 0; 85085Scth 85185Scth } else { 85285Scth sp = PKT2CMD(pkt); 85385Scth } 85485Scth 85585Scth /* 85685Scth * Second step of emul64_scsi_init_pkt: dma allocation/move 85785Scth */ 85885Scth if (bp && bp->b_bcount != 0) { 85985Scth if (bp->b_flags & B_READ) { 86085Scth sp->cmd_flags &= ~CFLAG_DMASEND; 86185Scth } else { 86285Scth sp->cmd_flags |= CFLAG_DMASEND; 86385Scth } 86485Scth bp_mapin(bp); 86585Scth sp->cmd_addr = (unsigned char *) bp->b_un.b_addr; 86685Scth sp->cmd_count = bp->b_bcount; 86785Scth pkt->pkt_resid = 0; 86885Scth } 86985Scth 87085Scth return (pkt); 87185Scth } 87285Scth 87385Scth 87485Scth /* 87585Scth * Function name : emul64_scsi_destroy_pkt 87685Scth * 87785Scth * Return Values : none 87885Scth * Description : Called by kernel on behalf of a target driver 87985Scth * calling scsi_destroy_pkt(9F). 88085Scth * Refer to tran_destroy_pkt(9E) man page 88185Scth * 88285Scth * Context : Can be called from different kernel process threads. 88385Scth * Can be called by interrupt thread. 88485Scth */ 88585Scth static void 88685Scth emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 88785Scth { 88885Scth struct emul64_cmd *sp = PKT2CMD(pkt); 88985Scth 89085Scth /* 89185Scth * emul64_scsi_dmafree inline to make things faster 89285Scth */ 89385Scth if (sp->cmd_flags & CFLAG_DMAVALID) { 89485Scth /* 89585Scth * Free the mapping. 
89685Scth */ 89785Scth sp->cmd_flags &= ~CFLAG_DMAVALID; 89885Scth } 89985Scth 90085Scth /* 90185Scth * Free the pkt 90285Scth */ 90385Scth scsi_hba_pkt_free(ap, pkt); 90485Scth } 90585Scth 90685Scth 90785Scth /* 90885Scth * Function name : emul64_scsi_dmafree() 90985Scth * 91085Scth * Return Values : none 91185Scth * Description : free dvma resources 91285Scth * 91385Scth * Context : Can be called from different kernel process threads. 91485Scth * Can be called by interrupt thread. 91585Scth */ 91685Scth /*ARGSUSED*/ 91785Scth static void 91885Scth emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 91985Scth { 92085Scth } 92185Scth 92285Scth /* 92385Scth * Function name : emul64_scsi_sync_pkt() 92485Scth * 92585Scth * Return Values : none 92685Scth * Description : sync dma 92785Scth * 92885Scth * Context : Can be called from different kernel process threads. 92985Scth * Can be called by interrupt thread. 93085Scth */ 93185Scth /*ARGSUSED*/ 93285Scth static void 93385Scth emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 93485Scth { 93585Scth } 93685Scth 93785Scth /* 93885Scth * routine for reset notification setup, to register or cancel. 
93985Scth */ 94085Scth static int 94185Scth emul64_scsi_reset_notify(struct scsi_address *ap, int flag, 94285Scth void (*callback)(caddr_t), caddr_t arg) 94385Scth { 94485Scth struct emul64 *emul64 = ADDR2EMUL64(ap); 94585Scth struct emul64_reset_notify_entry *p, *beforep; 94685Scth int rval = DDI_FAILURE; 94785Scth 94885Scth mutex_enter(EMUL64_REQ_MUTEX(emul64)); 94985Scth 95085Scth p = emul64->emul64_reset_notify_listf; 95185Scth beforep = NULL; 95285Scth 95385Scth while (p) { 95485Scth if (p->ap == ap) 95585Scth break; /* An entry exists for this target */ 95685Scth beforep = p; 95785Scth p = p->next; 95885Scth } 95985Scth 96085Scth if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) { 96185Scth if (beforep == NULL) { 96285Scth emul64->emul64_reset_notify_listf = p->next; 96385Scth } else { 96485Scth beforep->next = p->next; 96585Scth } 96685Scth kmem_free((caddr_t)p, 96785Scth sizeof (struct emul64_reset_notify_entry)); 96885Scth rval = DDI_SUCCESS; 96985Scth 97085Scth } else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) { 97185Scth p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry), 97285Scth KM_SLEEP); 97385Scth p->ap = ap; 97485Scth p->callback = callback; 97585Scth p->arg = arg; 97685Scth p->next = emul64->emul64_reset_notify_listf; 97785Scth emul64->emul64_reset_notify_listf = p; 97885Scth rval = DDI_SUCCESS; 97985Scth } 98085Scth 98185Scth mutex_exit(EMUL64_REQ_MUTEX(emul64)); 98285Scth 98385Scth return (rval); 98485Scth } 98585Scth 98685Scth /* 98785Scth * Function name : emul64_scsi_start() 98885Scth * 98985Scth * Return Values : TRAN_FATAL_ERROR - emul64 has been shutdown 99085Scth * TRAN_BUSY - request queue is full 99185Scth * TRAN_ACCEPT - pkt has been submitted to emul64 99285Scth * 99385Scth * Description : init pkt, start the request 99485Scth * 99585Scth * Context : Can be called from different kernel process threads. 99685Scth * Can be called by interrupt thread. 
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			rval = TRAN_ACCEPT;
	struct emul64		*emul64 = ADDR2EMUL64(ap);
	clock_t			cur_lbolt;
	taskqid_t		dispatched;

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	/* NOTE(review): "%x" with a pointer arg; "%p" would be correct */
	EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);

	pkt->pkt_reason = CMD_CMPLT;

#ifdef	EMUL64DEBUG
	if (emul64_cdb_debug) {
		emul64_debug_dump_cdb(ap, pkt);
	}
#endif	/* EMUL64DEBUG */

	/*
	 * calculate deadline from pkt_time
	 * Instead of multiplying by 100 (ie. HZ), we multiply by 128 so
	 * we can shift and at the same time have a 28% grace period
	 * we ignore the rare case of pkt_time == 0 and deal with it
	 * in emul64_i_watch()
	 */
	cur_lbolt = ddi_get_lbolt();
	sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);

	/*
	 * Complete synchronously when the taskq is disabled (debugging)
	 * or the caller requested polled completion (FLAG_NOINTR);
	 * otherwise hand the packet to the taskq.
	 */
	if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
		emul64_pkt_comp((caddr_t)pkt);
	} else {
		dispatched = NULL;
		if (emul64_collect_stats) {
			/*
			 * If we are collecting statistics, call
			 * taskq_dispatch in no sleep mode, so that we can
			 * detect if we are exceeding the queue length that
			 * was established in the call to taskq_create in
			 * emul64_attach.  If the no sleep call fails
			 * (returns NULL), the task will be dispatched in
			 * sleep mode below.
			 */
			dispatched = taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp,
			    (void *)pkt, TQ_NOSLEEP);
			if (dispatched == NULL) {
				/* Queue was full.  dispatch failed. */
				mutex_enter(&emul64_stats_mutex);
				emul64_taskq_max++;
				mutex_exit(&emul64_stats_mutex);
			}
		}
		if (dispatched == NULL) {
			(void) taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
		}
	}

/* NOTE(review): unused label (no goto); rval is always TRAN_ACCEPT here */
done:
	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	return (rval);
}

/*
 * Mark the packet as CHECK CONDITION with auto-request-sense (ARQ) data
 * already present in pkt_scbp, using the supplied sense key / additional
 * sense code (asc) / qualifier (ascq).  Assumes pkt_scbp is large enough
 * to hold a struct scsi_arq_status (set up at init_pkt time).
 */
void
emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
{
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;

	/* got check, no data transferred and ARQ done */
	arq->sts_status.sts_chk = 1;
	pkt->pkt_state |= STATE_ARQ_DONE;
	pkt->pkt_state &= ~STATE_XFERRED_DATA;

	/* for ARQ */
	arq->sts_rqpkt_reason = CMD_CMPLT;
	arq->sts_rqpkt_resid = 0;
	arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
	arq->sts_sensedata.es_valid = 1;
	arq->sts_sensedata.es_class = 0x7;
	arq->sts_sensedata.es_key = key;
	arq->sts_sensedata.es_add_code = asc;
	arq->sts_sensedata.es_qual_code = ascq;
}

/*
 * If error injection is armed for the addressed target (via the
 * EMUL64_ERROR_INJECT ioctl), overwrite the packet's scsi status,
 * pkt_state, pkt_reason and ARQ sense data with the armed values.
 * Returns the target's current error-injection state; ERR_INJ_DISABLE
 * when no target is found or injection is off.
 */
ushort_t
emul64_error_inject(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;
	uint_t			max_sense_len;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	/*
	 * If there is no target, skip the error injection and
	 * let the packet be handled normally.  This would normally
	 * never happen since a_target and a_lun are setup in
	 * emul64_scsi_init_pkt.
	 */
	if (tgt == NULL) {
		return (ERR_INJ_DISABLE);
	}

	if (tgt->emul64_einj_state != ERR_INJ_DISABLE) {
		arq->sts_status = tgt->emul64_einj_scsi_status;
		pkt->pkt_state = tgt->emul64_einj_pkt_state;
		pkt->pkt_reason = tgt->emul64_einj_pkt_reason;

		/*
		 * Calculate available sense buffer length.  We could just
		 * assume sizeof(struct scsi_extended_sense) but hopefully
		 * that limitation will go away soon.
		 */
		max_sense_len = sp->cmd_scblen -
		    (sizeof (struct scsi_arq_status) -
		    sizeof (struct scsi_extended_sense));
		/* Clamp to the amount of sense data actually armed */
		if (max_sense_len > tgt->emul64_einj_sense_length) {
			max_sense_len = tgt->emul64_einj_sense_length;
		}

		/* for ARQ */
		arq->sts_rqpkt_reason = CMD_CMPLT;
		arq->sts_rqpkt_resid = 0;
		arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Copy sense data */
		if (tgt->emul64_einj_sense_data != 0) {
			bcopy(tgt->emul64_einj_sense_data,
			    (uint8_t *)&arq->sts_sensedata,
			    max_sense_len);
		}
	}

	/* Return current error injection state */
	return (tgt->emul64_einj_state);
}

/*
 * Service the EMUL64_ERROR_INJECT ioctl: copy an emul64_error_inj_data
 * request (optionally followed by raw sense bytes) in from userland at
 * "arg" and arm or disarm error injection on the addressed target.
 * Returns 0 on success, or EINVAL / EFAULT / ENODEV.
 */
int
emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
{
	emul64_tgt_t		*tgt;
	struct emul64_error_inj_data error_inj_req;

	/* Check args */
	if (arg == NULL) {
		return (EINVAL);
	}

	if (ddi_copyin((void *)arg, &error_inj_req,
	    sizeof (error_inj_req), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed\n");
		return (EFAULT);
	}

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, error_inj_req.eccd_target,
	    error_inj_req.eccd_lun);
	EMUL64_MUTEX_EXIT(emul64);

	/* Make sure device exists */
	if (tgt == NULL) {
		return (ENODEV);
	}

	/* Free old sense buffer if we have one */
	if (tgt->emul64_einj_sense_data != NULL) {
		ASSERT(tgt->emul64_einj_sense_length != 0);
		kmem_free(tgt->emul64_einj_sense_data,
		    tgt->emul64_einj_sense_length);
		tgt->emul64_einj_sense_data = NULL;
		tgt->emul64_einj_sense_length = 0;
	}

	/*
	 * Now handle error injection request.  If error injection
	 * is requested we will return the sense data provided for
	 * any I/O to this target until told to stop.
	 */
	tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
	tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
	tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
	tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
	tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;
	switch (error_inj_req.eccd_inj_state) {
	case ERR_INJ_ENABLE:
	case ERR_INJ_ENABLE_NODATA:
		if (error_inj_req.eccd_sns_dlen) {
			tgt->emul64_einj_sense_data =
			    kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
			/* Copy sense data */
			/*
			 * NOTE(review): on copyin failure we return EFAULT
			 * with einj_state still enabled and the sense buffer
			 * only partially initialized; disarming injection and
			 * freeing the buffer here would be safer — confirm.
			 */
			if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
			    tgt->emul64_einj_sense_data,
			    error_inj_req.eccd_sns_dlen, 0) != 0) {
				cmn_err(CE_WARN,
				    "emul64: sense data copy in failed\n");
				return (EFAULT);
			}
		}
		break;
	case ERR_INJ_DISABLE:
	default:
		break;
	}

	return (0);
}

/*
 * Emulated SCSI command handlers; presumably implemented in a sibling
 * "bsd" source file of this driver (not visible here).
 * NOTE(review): bsd_scsi_format is declared twice below.
 */
int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_io(struct scsi_pkt *);
int bsd_scsi_log_sense(struct scsi_pkt *);
int bsd_scsi_mode_sense(struct scsi_pkt *);
int bsd_scsi_mode_select(struct scsi_pkt *);
int bsd_scsi_read_capacity(struct scsi_pkt *);
int bsd_scsi_read_capacity_16(struct scsi_pkt *);
int bsd_scsi_reserve(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_release(struct scsi_pkt *);
int bsd_scsi_read_defect_list(struct scsi_pkt *);
int bsd_scsi_reassign_block(struct scsi_pkt *);
int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *);

/*
 * Decode the CDB opcode and dispatch the packet to the matching
 * emulated command handler, honoring any armed error injection first.
 */
static void
emul64_handle_cmd(struct scsi_pkt *pkt)
{
	if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA) {
		/*
		 * If error injection is configured to return with
		 * no data return now without handling the command.
		 * This is how normal check conditions work.
		 *
		 * If the error injection state is ERR_INJ_ENABLE
		 * (or if error injection is disabled) continue and
		 * handle the command.  This would be used for
		 * KEY_RECOVERABLE_ERROR type conditions.
		 */
		return;
	}

	switch (pkt->pkt_cdbp[0]) {
	case SCMD_START_STOP:
		(void) bsd_scsi_start_stop_unit(pkt);
		break;
	case SCMD_TEST_UNIT_READY:
		(void) bsd_scsi_test_unit_ready(pkt);
		break;
	case SCMD_REQUEST_SENSE:
		(void) bsd_scsi_request_sense(pkt);
		break;
	case SCMD_INQUIRY:
		(void) bsd_scsi_inquiry(pkt);
		break;
	case SCMD_FORMAT:
		(void) bsd_scsi_format(pkt);
		break;
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
		(void) bsd_scsi_io(pkt);
		break;
	case SCMD_LOG_SENSE_G1:
		(void) bsd_scsi_log_sense(pkt);
		break;
	case SCMD_MODE_SENSE:
	case SCMD_MODE_SENSE_G1:
		(void) bsd_scsi_mode_sense(pkt);
		break;
	case SCMD_MODE_SELECT:
	case SCMD_MODE_SELECT_G1:
		(void) bsd_scsi_mode_select(pkt);
		break;
	case SCMD_READ_CAPACITY:
		(void) bsd_scsi_read_capacity(pkt);
		break;
	case SCMD_SVC_ACTION_IN_G4:
		/* Only the READ CAPACITY(16) service action is emulated */
		if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) {
			(void) bsd_scsi_read_capacity_16(pkt);
		} else {
			cmn_err(CE_WARN, "emul64: unrecognized G4 service "
			    "action 0x%x", pkt->pkt_cdbp[1]);
		}
		break;
	case SCMD_RESERVE:
	case SCMD_RESERVE_G1:
		(void) bsd_scsi_reserve(pkt);
		break;
	case SCMD_RELEASE:
	case SCMD_RELEASE_G1:
		(void) bsd_scsi_release(pkt);
		break;
	case SCMD_REASSIGN_BLOCK:
		(void) bsd_scsi_reassign_block(pkt);
		break;
	case SCMD_READ_DEFECT_LIST:
		(void) bsd_scsi_read_defect_list(pkt);
		break;
	case SCMD_PRIN:
	case SCMD_PROUT:
	case SCMD_REPORT_LUNS:
		/* ASC 0x24 INVALID FIELD IN CDB */
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	default:
		cmn_err(CE_WARN, "emul64: unrecognized "
		    "SCSI cmd 0x%x", pkt->pkt_cdbp[0]);
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	/*
	 * NOTE(review): case labels after "default" are legal C but
	 * unconventional; consider moving these above the default case.
	 */
	case SCMD_GET_CONFIGURATION:
	case 0x35:		/* SCMD_SYNCHRONIZE_CACHE */
		/* Don't complain */
		break;
	}
}

/*
 * Packet completion routine (run from the taskq, or synchronously for
 * FLAG_NOINTR / debug mode).  Fakes a successful transport to a known
 * target and emulates the command, or fakes a selection timeout when
 * the target does not exist, then invokes the packet's completion
 * callback.
 */
static void
emul64_pkt_comp(void * arg)
{
	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (!tgt) {
		/* Unknown target: report a command timeout */
		pkt->pkt_reason = CMD_TIMEOUT;
		pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD;
		pkt->pkt_statistics = STAT_TIMEOUT;
	} else {
		/* Pretend the transport fully succeeded, then emulate */
		pkt->pkt_reason = CMD_CMPLT;
		*pkt->pkt_scbp = STATUS_GOOD;
		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
		pkt->pkt_statistics = 0;
		emul64_handle_cmd(pkt);
	}
	(*pkt->pkt_comp)(pkt);
}

/* Abort always "succeeds" in this emulated HBA */
/* ARGSUSED */
static int
emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	return (1);
}

/* Reset always "succeeds" in this emulated HBA */
/* ARGSUSED */
static int
emul64_scsi_reset(struct scsi_address *ap, int level)
{
	return (1);
}

/*
 * Copy an emul64_tgt_range_t in from userland ("arg") and look up the
 * target it addresses.  Returns 0 with *tgtp set, EFAULT on copyin
 * failure, or ENXIO when the target/lun does not exist.
 */
static int
emul64_get_tgtrange(struct emul64 *emul64,
		    intptr_t arg,
		    emul64_tgt_t **tgtp,
		    emul64_tgt_range_t *tgtr)
{
	if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
		return (EFAULT);
	}
	EMUL64_MUTEX_ENTER(emul64);
	*tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (*tgtp == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
		    tgtr->emul64_target, tgtr->emul64_lun,
		    ddi_get_instance(emul64->emul64_dip));
		return (ENXIO);
	}
	return (0);
}

/*
 * cb_ops ioctl entry point.  Handles the emul64-private write-protect,
 * zero-range and error-injection requests; everything else is passed
 * through to the SCSA framework via scsi_hba_ioctl().
 */
static int
emul64_ioctl(dev_t dev,
	int cmd,
	intptr_t arg,
	int mode,
	cred_t *credp,
	int *rvalp)
{
	struct emul64		*emul64;
	int			instance;
	int			rv = 0;
	emul64_tgt_range_t	tgtr;
	emul64_tgt_t		*tgt;

	instance = MINOR2INST(getminor(dev));
	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n",
		    getminor(dev));
		return (ENXIO);
	}

	switch (cmd) {
	case EMUL64_WRITE_OFF:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_off(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_WRITE_ON:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_on(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_ZERO_RANGE:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			/* Free the backing blocks under the block lock */
			mutex_enter(&tgt->emul64_tgt_blk_lock);
			rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange);
			mutex_exit(&tgt->emul64_tgt_blk_lock);
		}
		break;
	case EMUL64_ERROR_INJECT:
		rv = emul64_error_inject_req(emul64, arg);
		break;
	default:
		rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp);
		break;
	}
	return (rv);
}

/*
 * EMUL64_WRITE_OFF: make the given block range write-protected by
 * inserting a "nowrite" entry for it.  Fails with EINVAL if the range
 * overlaps an existing nowrite entry.
 */
/* ARGSUSED */
static int
emul64_write_off(struct emul64 *emul64,
	emul64_tgt_t *tgt,
	emul64_tgt_range_t *tgtr)
{
	size_t			blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	*nowrite;
	emul64_rng_overlap_t	overlap = O_NONE;
	emul64_nowrite_t	**prev = NULL;
	diskaddr_t		sb = tgtr->emul64_blkrange.emul64_sb;

	nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange);

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_NONE) {
		/* Insert into list */
		*prev = nowrite;
		nowrite->emul64_nwnext = cur;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);
	if (overlap == O_NONE) {
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count++;
			mutex_exit(&emul64_stats_mutex);
		}
	} else {
		/*
		 * NOTE(review): format string is missing a space before
		 * "overlaps", so the two numbers run together in the log.
		 */
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%"
		    PRIx64 "overlaps 0x%llx,0x%" PRIx64 "\n",
		    nowrite->emul64_blocked.emul64_sb,
		    nowrite->emul64_blocked.emul64_blkcnt,
		    cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		emul64_nowrite_free(nowrite);
		return (EINVAL);
	}
	return (0);
}

/*
 * EMUL64_WRITE_ON: re-enable writes on a block range by removing the
 * exactly-matching nowrite entry.  Returns ENXIO when no entry matches
 * and EINVAL when the range only partially overlaps an entry.
 */
/* ARGSUSED */
static int
emul64_write_on(struct emul64 *emul64,
	emul64_tgt_t *tgt,
	emul64_tgt_range_t *tgtr)
{
	size_t			blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t	*cur;
	emul64_rng_overlap_t	overlap = O_NONE;
	emul64_nowrite_t	**prev = NULL;
	int			rv = 0;
	diskaddr_t		sb = tgtr->emul64_blkrange.emul64_sb;

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_SAME) {
		/* Remove from list */
		*prev = cur->emul64_nwnext;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

	switch (overlap) {
	case O_NONE:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "range not found\n", sb, blkcnt);
		rv = ENXIO;
		break;
	case O_SAME:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count--;
			mutex_exit(&emul64_stats_mutex);
		}
		emul64_nowrite_free(cur);
		break;
	case O_OVERLAP:
	case O_SUBSET:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "overlaps 0x%llx,0x%" PRIx64 "\n",
		    sb, blkcnt, cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		rv = EINVAL;
		break;
	}
	return (rv);
}

/*
 * Walk the target's nowrite list looking for an entry that overlaps
 * [sb, sb+blkcnt).  Sets *overlap to the overlap classification,
 * *prevp to the link that points at the returned entry (for unlinking
 * or insertion), and returns the entry found (or NULL).
 * Caller must hold emul64_tgt_nw_lock.
 */
static emul64_nowrite_t *
emul64_find_nowrite(emul64_tgt_t *tgt,
		    diskaddr_t sb,
		    size_t blkcnt,
		    emul64_rng_overlap_t *overlap,
		    emul64_nowrite_t ***prevp)
{
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	**prev;

	/* Find spot in list */
	*overlap = O_NONE;
	prev = &tgt->emul64_tgt_nowrite;
	cur = tgt->emul64_tgt_nowrite;
	while (cur != NULL) {
		*overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
		if (*overlap != O_NONE)
			break;
		prev = &cur->emul64_nwnext;
		cur = cur->emul64_nwnext;
	}

	*prevp = prev;
	return (cur);
}

/* Allocate a nowrite entry covering *range; freed by emul64_nowrite_free */
static emul64_nowrite_t *
emul64_nowrite_alloc(emul64_range_t *range)
{
	emul64_nowrite_t	*nw;

	nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
	bcopy((void *) range,
	    (void *) &nw->emul64_blocked,
	    sizeof (nw->emul64_blocked));
	return (nw);
}

/* Release a nowrite entry allocated by emul64_nowrite_alloc */
static void
emul64_nowrite_free(emul64_nowrite_t *nw)
{
	kmem_free((void *) nw, sizeof (*nw));
}

/*
 * Classify how the block range [sb, sb+cnt) relates to *rng:
 * O_NONE (disjoint), O_SAME (identical), O_SUBSET (wholly inside rng)
 * or O_OVERLAP (partial overlap).
 */
emul64_rng_overlap_t
emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
{

	if (rng->emul64_sb >= sb + cnt)
		return (O_NONE);
	if (rng->emul64_sb + rng->emul64_blkcnt <= sb)
		return (O_NONE);
	if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt))
		return (O_SAME);
	if ((sb >= rng->emul64_sb) &&
	    ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) {
		return (O_SUBSET);
	}
	return (O_OVERLAP);
}

#include <sys/varargs.h>

/*
 * Error logging, printing, and debug print routines
 */

/*VARARGS3*/
static void
emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...)
{
	char	buf[256];
	va_list	ap;

	/* Format into a bounded local buffer, then hand off to scsi_log */
	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	/* A NULL emul64 is tolerated (e.g. very early in attach) */
	scsi_log(emul64 ? emul64->emul64_dip : NULL,
	    "emul64", level, "%s\n", buf);
}


#ifdef EMUL64DEBUG

/*
 * Debug helper: print "emul64<inst>: <tgt,lun> [xx xx ...]" with the
 * packet's CDB bytes rendered in hex.
 */
static void
emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	static char	hex[]	= "0123456789abcdef";
	struct emul64	*emul64 = ADDR2EMUL64(ap);
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	uint8_t		*cdb = pkt->pkt_cdbp;
	char		buf [256];
	char		*p;
	int		i;

	(void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ",
	    ddi_get_instance(emul64->emul64_dip),
	    ap->a_target, ap->a_lun);

	p = buf + strlen(buf);

	/* Append the CDB as space-separated hex byte pairs */
	*p++ = '[';
	for (i = 0; i < sp->cmd_cdblen; i++, cdb++) {
		if (i != 0)
			*p++ = ' ';
		*p++ = hex[(*cdb >> 4) & 0x0f];
		*p++ = hex[*cdb & 0x0f];
	}
	*p++ = ']';
	*p++ = '\n';
	*p = 0;

	/*
	 * NOTE(review): buf is passed as the format string; safe here only
	 * because its content contains no '%'.  cmn_err(CE_CONT, "%s", buf)
	 * would be the robust form.  Also assumes cmd_cdblen is small
	 * enough that 3*len + prefix fits in 256 bytes — confirm.
	 */
	cmn_err(CE_CONT, buf);
}
#endif	/* EMUL64DEBUG */