/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * pseudo scsi disk driver
 */

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/types.h>
#include <sys/buf.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>

/*
 * Mode sense/select page control
 */
#define MODE_SENSE_PC_CURRENT		0
#define MODE_SENSE_PC_CHANGEABLE	1
#define MODE_SENSE_PC_DEFAULT		2
#define MODE_SENSE_PC_SAVED		3

/*
 * Byte conversion macros
 */
#if defined(_BIG_ENDIAN)
#define ushort_to_scsi_ushort(n)	(n)
#define uint32_to_scsi_uint32(n)	(n)
#define uint64_to_scsi_uint64(n)	(n)
#elif defined(_LITTLE_ENDIAN)

#define ushort_to_scsi_ushort(n)		\
	((((n) & 0x00ff) << 8) |		\
	(((n) & 0xff00) >> 8))

#define uint32_to_scsi_uint32(n)		\
	((((n) & 0x000000ff) << 24) |		\
	(((n) & 0x0000ff00) << 8) |		\
	(((n) & 0x00ff0000) >> 8) |		\
	(((n) & 0xff000000) >> 24))

#define uint64_to_scsi_uint64(n)		\
	((((n) & 0x00000000000000ff) << 56) |	\
	(((n) & 0x000000000000ff00) << 40) |	\
	(((n) & 0x0000000000ff0000) << 24) |	\
	(((n) & 0x00000000ff000000) << 8) |	\
	(((n) & 0x000000ff00000000) >> 8) |	\
	(((n) & 0x0000ff0000000000) >> 24) |	\
	(((n) & 0x00ff000000000000) >> 40) |	\
	(((n) & 0xff00000000000000) >> 56))

#else
#error "no _BIG_ENDIAN or _LITTLE_ENDIAN"
#endif

#define uint_to_byte0(n)	((n) & 0xff)
#define uint_to_byte1(n)	(((n) >> 8) & 0xff)
#define uint_to_byte2(n)	(((n) >> 16) & 0xff)
#define uint_to_byte3(n)	(((n) >> 24) & 0xff)
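/*
 * For example, on a little-endian host the conversion macros above swap
 * host byte order into SCSI (big-endian) byte order:
 *
 *	ushort_to_scsi_ushort(0x1234)     == 0x3412
 *	uint32_to_scsi_uint32(0x12345678) == 0x78563412
 *
 * On a big-endian host they are identity mappings.
 */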
/*
 * struct prop_map
 *
 * This structure maps a property name to the place to store its value.
 */
struct prop_map {
	char	*pm_name;	/* Name of the property. */
	int	*pm_value;	/* Place to store the value. */
};

static int emul64_debug_blklist = 0;

/*
 * Some interesting statistics.  These are protected by the
 * emul64_stats_mutex.  It would be nice to have an ioctl to print them out,
 * but we don't have the development time for that now.  You can at least
 * look at them with adb.
 */

int emul64_collect_stats = 1;		/* Collect stats if non-zero */
kmutex_t emul64_stats_mutex;		/* Protect these variables */
long emul64_nowrite_count = 0;		/* # active nowrite ranges */
static uint64_t emul64_skipped_io = 0;	/* Skipped I/O operations, because of */
					/* EMUL64_WRITE_OFF. */
static uint64_t emul64_skipped_blk = 0;	/* Skipped blocks because of */
					/* EMUL64_WRITE_OFF. */
static uint64_t emul64_io_ops = 0;	/* Total number of I/O operations, */
					/* including skipped and actual. */
static uint64_t emul64_io_blocks = 0;	/* Total number of blocks involved */
					/* in I/O operations. */
static uint64_t emul64_nonzero = 0;	/* Number of non-zero data blocks */
					/* currently held in memory */
static uint64_t emul64_max_list_length = 0;	/* Maximum size of a linked */
						/* list of non-zero blocks. */
uint64_t emul64_taskq_max = 0;		/* emul64_scsi_start uses the taskq */
					/* mechanism to dispatch work.  If */
					/* the number of entries in the */
					/* queue exceeds the maximum for the */
					/* queue, a 1 second delay is */
					/* encountered in taskq_ent_alloc. */
					/* This counter counts the number of */
					/* times that this happens. */

/*
 * Since emul64 does no physical I/O, operations that would normally be I/O
 * intensive become CPU bound.  An example of this is RAID 5
 * initialization.  When the kernel becomes CPU bound, it looks as if the
 * machine is hung.
 *
 * To avoid this problem, we provide a function, emul64_yield_check, that
 * does a delay from time to time to yield up the CPU.  The following
 * variables are tunables for this algorithm.
 *
 *	emul64_num_delay_called	Number of times we called delay.  This is
 *				not really a tunable.  Rather it is a
 *				counter that provides useful information
 *				for adjusting the tunables.
 *	emul64_yield_length	Number of microseconds to yield the CPU.
 *	emul64_yield_period	Number of I/O operations between yields.
 *	emul64_yield_enable	emul64 will yield the CPU, only if this
 *				variable contains a non-zero value.  This
 *				allows the yield functionality to be turned
 *				off for experimentation purposes.
 *
 * The value of 1000 for emul64_yield_period has been determined by
 * experience with running the tests.
 */
static uint64_t		emul64_num_delay_called = 0;
static int		emul64_yield_length = 1000;
static int		emul64_yield_period = 1000;
static int		emul64_yield_enable = 1;
static kmutex_t		emul64_yield_mutex;
static kcondvar_t	emul64_yield_cv;

/*
 * This array establishes a set of tunable variables that can be set by
 * defining properties in the emul64.conf file.
 */
struct prop_map emul64_properties[] = {
	"emul64_collect_stats",		&emul64_collect_stats,
	"emul64_yield_length",		&emul64_yield_length,
	"emul64_yield_period",		&emul64_yield_period,
	"emul64_yield_enable",		&emul64_yield_enable,
	"emul64_max_task",		&emul64_max_task,
	"emul64_task_nthreads",		&emul64_task_nthreads
};
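/*
 * For example (illustrative only), adding a line such as
 *
 *	emul64_yield_period=2000;
 *
 * to emul64.conf would override the default yield period.  Any of the
 * property names listed in emul64_properties may be set the same way;
 * emul64_bsd_get_props() looks each one up as an integer property.
 */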
static unsigned char *emul64_zeros = NULL;	/* Block of 0s for comparison */

extern void emul64_check_cond(struct scsi_pkt *pkt, uchar_t key,
				uchar_t asc, uchar_t ascq);

/* ncyl=250000 acyl=2 nhead=24 nsect=357 */
uint_t dkg_rpm = 3600;

static int bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *);
static int bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_format(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_cache(struct scsi_pkt *);
static int bsd_readblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
			int, unsigned char *);
static int bsd_writeblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
			int, unsigned char *);
emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);
static blklist_t *bsd_findblk(emul64_tgt_t *, diskaddr_t, avl_index_t *);
static void bsd_allocblk(emul64_tgt_t *, diskaddr_t, caddr_t, avl_index_t);
static void bsd_freeblk(emul64_tgt_t *, blklist_t *);
static void emul64_yield_check();
static emul64_rng_overlap_t bsd_tgt_overlap(emul64_tgt_t *, diskaddr_t, int);

char *emul64_name = "emul64";


/*
 * Initialize globals in this file.
 */
void
emul64_bsd_init()
{
	emul64_zeros = (unsigned char *) kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	mutex_init(&emul64_stats_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&emul64_yield_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&emul64_yield_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Clean up globals in this file.
 */
void
emul64_bsd_fini()
{
	cv_destroy(&emul64_yield_cv);
	mutex_destroy(&emul64_yield_mutex);
	mutex_destroy(&emul64_stats_mutex);
	if (emul64_zeros != NULL) {
		kmem_free(emul64_zeros, DEV_BSIZE);
		emul64_zeros = NULL;
	}
}

/*
 * Attempt to get the values of the properties that are specified in the
 * emul64_properties array.  If a property exists, copy its value to the
 * specified location.  All the properties have been assigned default
 * values in this driver, so if we cannot get a property, that is not a
 * problem.
 */
void
emul64_bsd_get_props(dev_info_t *dip)
{
	uint_t		count;
	uint_t		i;
	struct prop_map	*pmp;
	int		*properties;

	for (pmp = emul64_properties, i = 0;
	    i < sizeof (emul64_properties) / sizeof (struct prop_map);
	    i++, pmp++) {
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, pmp->pm_name, &properties,
		    &count) == DDI_PROP_SUCCESS) {
			if (count >= 1) {
				*pmp->pm_value = *properties;
			}
			ddi_prop_free((void *) properties);
		}
	}
}

int
emul64_bsd_blkcompare(const void *a1, const void *b1)
{
	blklist_t	*a = (blklist_t *)a1;
	blklist_t	*b = (blklist_t *)b1;

	if (a->bl_blkno < b->bl_blkno)
		return (-1);
	if (a->bl_blkno == b->bl_blkno)
		return (0);
	return (1);
}

/* ARGSUSED 0 */
int
bsd_scsi_start_stop_unit(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_test_unit_ready(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_request_sense(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_inq_page0(struct scsi_pkt *pkt, uchar_t pqdtype)
{
	struct emul64_cmd *sp = PKT2CMD(pkt);

	if (sp->cmd_count < 6) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page0: size %d required\n",
		    emul64_name, 6);
		return (EIO);
	}

	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
	sp->cmd_addr[1] = 0;		/* page code */
	sp->cmd_addr[2] = 0;		/* reserved */
	sp->cmd_addr[3] = 6 - 3;	/* length */
	sp->cmd_addr[4] = 0;		/* 1st page */
	sp->cmd_addr[5] = 0x83;		/* 2nd page */

	pkt->pkt_resid = sp->cmd_count - 6;
	return (0);
}

int
bsd_scsi_inq_page83(struct scsi_pkt *pkt, uchar_t pqdtype)
{
	struct emul64		*emul64 = PKT2EMUL64(pkt);
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			instance = ddi_get_instance(emul64->emul64_dip);

	if (sp->cmd_count < 22) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page83: size %d required\n",
		    emul64_name, 22);
		return (EIO);
	}

	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
	sp->cmd_addr[1] = 0x83;		/* page code */
	sp->cmd_addr[2] = 0;		/* reserved */
	sp->cmd_addr[3] = (22 - 8) + 4;	/* length */

	sp->cmd_addr[4] = 1;		/* code set - binary */
	sp->cmd_addr[5] = 3;		/* association and device ID type 3 */
	sp->cmd_addr[6] = 0;		/* reserved */
	sp->cmd_addr[7] = 22 - 8;	/* ID length */

	sp->cmd_addr[8] = 0xde;		/* @8: identifier, byte 0 */
	sp->cmd_addr[9] = 0xca;
	sp->cmd_addr[10] = 0xde;
	sp->cmd_addr[11] = 0x80;

	sp->cmd_addr[12] = 0xba;
	sp->cmd_addr[13] = 0xbe;
	sp->cmd_addr[14] = 0xab;
	sp->cmd_addr[15] = 0xba;
			/* @22: */

	/*
	 * Instances seem to be assigned sequentially, so it is unlikely
	 * that we will have more than 65535 of them.
	 */
	sp->cmd_addr[16] = uint_to_byte1(instance);
	sp->cmd_addr[17] = uint_to_byte0(instance);
	sp->cmd_addr[18] = uint_to_byte1(TGT(sp));
	sp->cmd_addr[19] = uint_to_byte0(TGT(sp));
	sp->cmd_addr[20] = uint_to_byte1(LUN(sp));
	sp->cmd_addr[21] = uint_to_byte0(LUN(sp));

	pkt->pkt_resid = sp->cmd_count - 22;
	return (0);
}

int
bsd_scsi_inquiry(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	emul64_tgt_t		*tgt;
	uchar_t			pqdtype;
	struct scsi_inquiry	inq;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	if (sp->cmd_count < sizeof (inq)) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inquiry: size %d required\n",
		    emul64_name, (int)sizeof (inq));
		return (EIO);
	}

	if (cdb->cdb_opaque[1] & 0xfc) {
		cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: 0x%x",
		    emul64_name, cdb->cdb_opaque[1]);
		emul64_check_cond(pkt, 0x5, 0x24, 0x0); /* inv. fld in cdb */
		return (0);
	}

	pqdtype = tgt->emul64_tgt_dtype;
	if (cdb->cdb_opaque[1] & 0x1) {
		switch (cdb->cdb_opaque[2]) {
		case 0x00:
			return (bsd_scsi_inq_page0(pkt, pqdtype));
		case 0x83:
			return (bsd_scsi_inq_page83(pkt, pqdtype));
		default:
			cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: "
			    "unsupported 0x%x",
			    emul64_name, cdb->cdb_opaque[2]);
			return (0);
		}
	}

	/* set up the inquiry data we return */
	(void) bzero((void *)&inq, sizeof (inq));

	inq.inq_dtype = pqdtype;
	inq.inq_ansi = 2;
	inq.inq_rdf = 2;
	inq.inq_len = sizeof (inq) - 4;
	inq.inq_wbus16 = 1;
	inq.inq_cmdque = 1;

	(void) bcopy(tgt->emul64_tgt_inq, inq.inq_vid,
	    sizeof (tgt->emul64_tgt_inq));
	(void) bcopy("1", inq.inq_revision, 2);
	(void) bcopy((void *)&inq, sp->cmd_addr, sizeof (inq));

	pkt->pkt_resid = sp->cmd_count - sizeof (inq);
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_format(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_io(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	diskaddr_t		lblkno;
	int			nblks;

	switch (cdb->scc_cmd) {
	case SCMD_READ:
		lblkno = (uint32_t)GETG0ADDR(cdb);
		nblks = GETG0COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target,
		    pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g0 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE:
		lblkno = (uint32_t)GETG0ADDR(cdb);
		nblks = GETG0COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target,
		    pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g0 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_READ_G1:
		lblkno = (uint32_t)GETG1ADDR(cdb);
		nblks = GETG1COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target,
		    pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g1 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE_G1:
		lblkno = (uint32_t)GETG1ADDR(cdb);
		nblks = GETG1COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target,
		    pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g1 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_READ_G4:
		lblkno = GETG4ADDR(cdb);
		lblkno <<= 32;
		lblkno |= (uint32_t)GETG4ADDRTL(cdb);
		nblks = GETG4COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target,
		    pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g4 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE_G4:
		lblkno = GETG4ADDR(cdb);
		lblkno <<= 32;
		lblkno |= (uint32_t)GETG4ADDRTL(cdb);
		nblks = GETG4COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target,
		    pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g4 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	default:
		cmn_err(CE_WARN, "%s: bsd_scsi_io: unhandled I/O: 0x%x",
		    emul64_name, cdb->scc_cmd);
		break;
	}

	if (pkt->pkt_resid != 0)
		cmn_err(CE_WARN, "%s: bsd_scsi_io: "
		    "pkt_resid: 0x%lx, lblkno %lld, nblks %d",
		    emul64_name, pkt->pkt_resid, lblkno, nblks);

	return (0);
}

int
bsd_scsi_log_sense(struct scsi_pkt *pkt)
{
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			page_code;

	if (sp->cmd_count < 9) {
		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense size %d required\n",
		    emul64_name, 9);
		return (EIO);
	}

	page_code = cdb->cdb_opaque[2] & 0x3f;
	if (page_code) {
		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense: "
		    "page 0x%x not supported\n", emul64_name, page_code);
		emul64_check_cond(pkt, 0x5, 0x24, 0x0); /* inv. fld in cdb */
		return (0);
	}

	sp->cmd_addr[0] = 0;		/* page code */
	sp->cmd_addr[1] = 0;		/* reserved */
	sp->cmd_addr[2] = 0;		/* MSB of page length */
	sp->cmd_addr[3] = 8 - 3;	/* LSB of page length */

	sp->cmd_addr[4] = 0;		/* MSB of parameter code */
	sp->cmd_addr[5] = 0;		/* LSB of parameter code */
	sp->cmd_addr[6] = 0;		/* parameter control byte */
	sp->cmd_addr[7] = 4 - 3;	/* parameter length */
	sp->cmd_addr[8] = 0x0;		/* parameter value */

	pkt->pkt_resid = sp->cmd_count - 9;
	return (0);
}

int
bsd_scsi_mode_sense(struct scsi_pkt *pkt)
{
	union scsi_cdb	*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	int		page_control;
	int		page_code;
	int		rval = 0;

	switch (cdb->scc_cmd) {
	case SCMD_MODE_SENSE:
		page_code = cdb->cdb_opaque[2] & 0x3f;
		page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
			    "page=0x%x control=0x%x nbytes=%d\n",
			    emul64_name, page_code, page_control,
			    GETG0COUNT(cdb));
		}
		break;
	case SCMD_MODE_SENSE_G1:
		page_code = cdb->cdb_opaque[2] & 0x3f;
		page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
			    "page=0x%x control=0x%x nbytes=%d\n",
			    emul64_name, page_code, page_control,
			    GETG1COUNT(cdb));
		}
		break;
	default:
		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
		    "cmd 0x%x not supported\n", emul64_name, cdb->scc_cmd);
		return (EIO);
	}

	switch (page_code) {
	case DAD_MODE_GEOMETRY:
		rval = bsd_mode_sense_dad_mode_geometry(pkt);
		break;
	case DAD_MODE_ERR_RECOV:
		rval = bsd_mode_sense_dad_mode_err_recov(pkt);
		break;
	case MODEPAGE_DISCO_RECO:
		rval = bsd_mode_sense_modepage_disco_reco(pkt);
		break;
	case DAD_MODE_FORMAT:
		rval = bsd_mode_sense_dad_mode_format(pkt);
		break;
	case DAD_MODE_CACHE:
		rval = bsd_mode_sense_dad_mode_cache(pkt);
		break;
	default:
		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
		    "page 0x%x not supported\n", emul64_name, page_code);
		rval = EIO;
		break;
	}

	return (rval);
}


static int
bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	emul64_tgt_t		*tgt;
	int			page_control;
	struct mode_header	header;
	struct mode_geometry	page4;
	int			ncyl;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}
	if (sp->cmd_count < (sizeof (header) + sizeof (page4))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page4)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page4, sizeof (page4));

	header.length = sizeof (header) + sizeof (page4) - 1;
	header.bdesc_length = 0;

	page4.mode_page.code = DAD_MODE_GEOMETRY;
	page4.mode_page.ps = 1;
	page4.mode_page.length = sizeof (page4) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
		tgt = find_tgt(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
		ncyl = tgt->emul64_tgt_ncyls;
		page4.cyl_ub = uint_to_byte2(ncyl);
		page4.cyl_mb = uint_to_byte1(ncyl);
		page4.cyl_lb = uint_to_byte0(ncyl);
		page4.heads = uint_to_byte0(tgt->emul64_tgt_nheads);
		page4.rpm = ushort_to_scsi_ushort(dkg_rpm);
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		page4.cyl_ub = 0xff;
		page4.cyl_mb = 0xff;
		page4.cyl_lb = 0xff;
		page4.heads = 0xff;
		page4.rpm = 0xffff;
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page4, addr + sizeof (header), sizeof (page4));

	pkt->pkt_resid = sp->cmd_count - sizeof (page4) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_err_recov	page1;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page1))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page1)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page1, sizeof (page1));

	header.length = sizeof (header) + sizeof (page1) - 1;
	header.bdesc_length = 0;

	page1.mode_page.code = DAD_MODE_ERR_RECOV;
	page1.mode_page.ps = 1;
	page1.mode_page.length = sizeof (page1) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page1, addr + sizeof (header), sizeof (page1));

	pkt->pkt_resid = sp->cmd_count - sizeof (page1) - sizeof (header);
	rval = 0;

	return (rval);
}
static int
bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	int			rval = 0;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_disco_reco	page2;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page2))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page2)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page2, sizeof (page2));

	header.length = sizeof (header) + sizeof (page2) - 1;
	header.bdesc_length = 0;

	page2.mode_page.code = MODEPAGE_DISCO_RECO;
	page2.mode_page.ps = 1;
	page2.mode_page.length = sizeof (page2) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page2, addr + sizeof (header), sizeof (page2));

	pkt->pkt_resid = sp->cmd_count - sizeof (page2) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_dad_mode_format(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	emul64_tgt_t		*tgt;
	int			page_control;
	struct mode_header	header;
	struct mode_format	page3;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page3))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page3)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page3, sizeof (page3));

	header.length = sizeof (header) + sizeof (page3) - 1;
	header.bdesc_length = 0;

	page3.mode_page.code = DAD_MODE_FORMAT;
	page3.mode_page.ps = 1;
	page3.mode_page.length = sizeof (page3) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		page3.data_bytes_sect = ushort_to_scsi_ushort(DEV_BSIZE);
		page3.interleave = ushort_to_scsi_ushort(1);
		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
		tgt = find_tgt(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
		page3.sect_track = ushort_to_scsi_ushort(tgt->emul64_tgt_nsect);
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page3, addr + sizeof (header), sizeof (page3));

	pkt->pkt_resid = sp->cmd_count - sizeof (page3) - sizeof (header);
	rval = 0;

	return (rval);
}

static int
bsd_mode_sense_dad_mode_cache(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_cache	page8;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page8))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page8)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page8, sizeof (page8));

	header.length = sizeof (header) + sizeof (page8) - 1;
	header.bdesc_length = 0;

	page8.mode_page.code = DAD_MODE_CACHE;
	page8.mode_page.ps = 1;
	page8.mode_page.length = sizeof (page8) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page8, addr + sizeof (header), sizeof (page8));

	pkt->pkt_resid = sp->cmd_count - sizeof (page8) - sizeof (header);
	rval = 0;

	return (rval);
}

/* ARGSUSED 0 */
int
bsd_scsi_mode_select(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_read_capacity_8(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_capacity	cap;
	int			rval = 0;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (tgt->emul64_tgt_sectors > 0xffffffff)
		cap.capacity = 0xffffffff;
	else
		cap.capacity =
		    uint32_to_scsi_uint32(tgt->emul64_tgt_sectors);
	cap.lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);

	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity);

	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
	    sizeof (struct scsi_capacity));
	return (rval);
}
int
bsd_scsi_read_capacity_16(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_capacity_16	cap;
	int			rval = 0;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	cap.sc_capacity = uint64_to_scsi_uint64(tgt->emul64_tgt_sectors);
	cap.sc_lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);
	cap.sc_rto_en = 0;
	cap.sc_prot_en = 0;
	cap.sc_rsvd0 = 0;
	bzero(&cap.sc_rsvd1[0], sizeof (cap.sc_rsvd1));

	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity_16);

	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
	    sizeof (struct scsi_capacity_16));
	return (rval);
}

int
bsd_scsi_read_capacity(struct scsi_pkt *pkt)
{
	return (bsd_scsi_read_capacity_8(pkt));
}


/* ARGSUSED 0 */
int
bsd_scsi_reserve(struct scsi_pkt *pkt)
{
	return (0);
}

/* ARGSUSED 0 */
int
bsd_scsi_release(struct scsi_pkt *pkt)
{
	return (0);
}


int
bsd_scsi_read_defect_list(struct scsi_pkt *pkt)
{
	pkt->pkt_resid = 0;
	return (0);
}


/* ARGSUSED 0 */
int
bsd_scsi_reassign_block(struct scsi_pkt *pkt)
{
	return (0);
}


static int
bsd_readblks(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun,
    diskaddr_t blkno, int nblks, unsigned char *bufaddr)
{
	emul64_tgt_t		*tgt;
	blklist_t		*blk;
	emul64_rng_overlap_t	overlap;
	int			i = 0;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_readblks: "
		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
		    emul64_name, a_target, a_lun, blkno, blkno, nblks);
	}

	emul64_yield_check();

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, a_target, a_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (tgt == NULL) {
		cmn_err(CE_WARN, "%s: bsd_readblks: no target for %d,%d\n",
		    emul64_name, a_target, a_lun);
		goto unlocked_out;
	}

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_io_ops++;
		emul64_io_blocks += nblks;
		mutex_exit(&emul64_stats_mutex);
	}
	mutex_enter(&tgt->emul64_tgt_blk_lock);

	/*
	 * Keep the ioctls from changing the nowrite list for the duration
	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
	 * results from our call to bsd_tgt_overlap from changing while we
	 * do the I/O.
	 */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);

	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	switch (overlap) {
	case O_SAME:
	case O_SUBSET:
	case O_OVERLAP:
		cmn_err(CE_WARN, "%s: bsd_readblks: "
		    "read to blocked area %lld,%d\n",
		    emul64_name, blkno, nblks);
		rw_exit(&tgt->emul64_tgt_nw_lock);
		goto errout;
	case O_NONE:
		break;
	}
	for (i = 0; i < nblks; i++) {
		if (emul64_debug_blklist)
			cmn_err(CE_CONT, "%s: bsd_readblks: "
			    "%d of %d: blkno %lld\n",
			    emul64_name, i+1, nblks, blkno);
		if (blkno > tgt->emul64_tgt_sectors)
			break;
		blk = bsd_findblk(tgt, blkno, NULL);
		if (blk) {
			(void) bcopy(blk->bl_data, bufaddr, DEV_BSIZE);
		} else {
			(void) bzero(bufaddr, DEV_BSIZE);
		}
		blkno++;
		bufaddr += DEV_BSIZE;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

errout:
	mutex_exit(&tgt->emul64_tgt_blk_lock);

unlocked_out:
	return ((nblks - i) * DEV_BSIZE);
}


static int
bsd_writeblks(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun,
    diskaddr_t blkno, int nblks, unsigned char *bufaddr)
{
	emul64_tgt_t		*tgt;
	blklist_t		*blk;
	emul64_rng_overlap_t	overlap;
	avl_index_t		where;
	int			i = 0;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_writeblks: "
		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
		    emul64_name, a_target, a_lun, blkno, blkno, nblks);
	}

	emul64_yield_check();

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, a_target, a_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (tgt == NULL) {
		cmn_err(CE_WARN, "%s: bsd_writeblks: no target for %d,%d\n",
		    emul64_name, a_target, a_lun);
		goto unlocked_out;
	}

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_io_ops++;
		emul64_io_blocks += nblks;
		mutex_exit(&emul64_stats_mutex);
	}
	mutex_enter(&tgt->emul64_tgt_blk_lock);

	/*
	 * Keep the ioctls from changing the nowrite list for the duration
	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
	 * results from our call to bsd_tgt_overlap from changing while we
	 * do the I/O.
	 */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);
	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	switch (overlap) {
	case O_SAME:
	case O_SUBSET:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_skipped_io++;
			emul64_skipped_blk += nblks;
			mutex_exit(&emul64_stats_mutex);
		}
		rw_exit(&tgt->emul64_tgt_nw_lock);
		mutex_exit(&tgt->emul64_tgt_blk_lock);
		return (0);
	case O_OVERLAP:
	case O_NONE:
		break;
	}
	for (i = 0; i < nblks; i++) {
		if ((overlap == O_NONE) ||
		    (bsd_tgt_overlap(tgt, blkno, 1) == O_NONE)) {
			/*
			 * If there was no overlap for the entire I/O range
			 * or if there is no overlap for this particular
			 * block, then we need to do the write.
			 */
			if (emul64_debug_blklist)
				cmn_err(CE_CONT, "%s: bsd_writeblks: "
				    "%d of %d: blkno %lld\n",
				    emul64_name, i+1, nblks, blkno);
			if (blkno > tgt->emul64_tgt_sectors) {
				cmn_err(CE_WARN, "%s: bsd_writeblks: "
				    "blkno %lld, tgt_sectors %lld\n",
				    emul64_name, blkno,
				    tgt->emul64_tgt_sectors);
				break;
			}

			blk = bsd_findblk(tgt, blkno, &where);
			if (bcmp(bufaddr, emul64_zeros, DEV_BSIZE) == 0) {
				if (blk) {
					bsd_freeblk(tgt, blk);
				}
			} else {
				if (blk) {
					(void) bcopy(bufaddr, blk->bl_data,
					    DEV_BSIZE);
				} else {
					bsd_allocblk(tgt, blkno,
					    (caddr_t)bufaddr, where);
				}
			}
		}
		blkno++;
		bufaddr += DEV_BSIZE;
	}

	/*
	 * Now that we're done with our I/O, allow the ioctls to change the
	 * nowrite list.
	 */
	rw_exit(&tgt->emul64_tgt_nw_lock);

errout:
	mutex_exit(&tgt->emul64_tgt_blk_lock);

unlocked_out:
	return ((nblks - i) * DEV_BSIZE);
}

emul64_tgt_t *
find_tgt(struct emul64 *emul64, ushort_t a_target, ushort_t a_lun)
{
	emul64_tgt_t	*tgt;

	tgt = emul64->emul64_tgt;
	while (tgt) {
		if (tgt->emul64_tgt_saddr.a_target == a_target &&
		    tgt->emul64_tgt_saddr.a_lun == a_lun) {
			break;
		}
		tgt = tgt->emul64_tgt_next;
	}
	return (tgt);
}
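/*
 * Block bookkeeping helpers.  Only non-zero blocks are stored: each target
 * keeps them in an AVL tree (emul64_tgt_data) keyed by block number via
 * emul64_bsd_blkcompare().  A block that is not found in the tree reads
 * back as zeros, and a block that is rewritten with all zeros is freed
 * again, which is what keeps this pseudo disk sparse.
 */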
/*
 * Free all blocks that are part of the specified range.
 */
int
bsd_freeblkrange(emul64_tgt_t *tgt, emul64_range_t *range)
{
	blklist_t	*blk;
	blklist_t	*nextblk;

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));
	for (blk = (blklist_t *)avl_first(&tgt->emul64_tgt_data);
	    blk != NULL;
	    blk = nextblk) {
		/*
		 * We need to get the next block pointer now, because blk
		 * will be freed inside the if statement.
		 */
		nextblk = AVL_NEXT(&tgt->emul64_tgt_data, blk);

		if (emul64_overlap(range, blk->bl_blkno, (size_t)1) != O_NONE) {
			bsd_freeblk(tgt, blk);
		}
	}
	return (0);
}

static blklist_t *
bsd_findblk(emul64_tgt_t *tgt, diskaddr_t blkno, avl_index_t *where)
{
	blklist_t	*blk;
	blklist_t	search;

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	search.bl_blkno = blkno;
	blk = (blklist_t *)avl_find(&tgt->emul64_tgt_data, &search, where);
	return (blk);
}


static void
bsd_allocblk(emul64_tgt_t *tgt,
    diskaddr_t blkno,
    caddr_t data,
    avl_index_t where)
{
	blklist_t	*blk;

	if (emul64_debug_blklist)
		cmn_err(CE_CONT, "%s: bsd_allocblk: %llu\n",
		    emul64_name, blkno);

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	blk = (blklist_t *)kmem_zalloc(sizeof (blklist_t), KM_SLEEP);
	blk->bl_data = (uchar_t *)kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	blk->bl_blkno = blkno;
	(void) bcopy(data, blk->bl_data, DEV_BSIZE);
	avl_insert(&tgt->emul64_tgt_data, (void *) blk, where);

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_nonzero++;
		tgt->emul64_list_length++;
		if (tgt->emul64_list_length > emul64_max_list_length) {
			emul64_max_list_length = tgt->emul64_list_length;
		}
		mutex_exit(&emul64_stats_mutex);
	}
}

static void
bsd_freeblk(emul64_tgt_t *tgt, blklist_t *blk)
{
	if (emul64_debug_blklist)
		cmn_err(CE_CONT, "%s: bsd_freeblk: <%d,%d> blk=%lld\n",
		    emul64_name, tgt->emul64_tgt_saddr.a_target,
		    tgt->emul64_tgt_saddr.a_lun, blk->bl_blkno);

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	avl_remove(&tgt->emul64_tgt_data, (void *) blk);
	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_nonzero--;
		tgt->emul64_list_length--;
		mutex_exit(&emul64_stats_mutex);
	}
	kmem_free(blk->bl_data, DEV_BSIZE);
	kmem_free(blk, sizeof (blklist_t));
}

/*
 * Look for overlap between a nowrite range and a block range.
 *
 * NOTE:  Callers of this function must hold the tgt->emul64_tgt_nw_lock
 *	  lock.  For the purposes of this function, a reader lock is
 *	  sufficient.
 */
static emul64_rng_overlap_t
bsd_tgt_overlap(emul64_tgt_t *tgt, diskaddr_t blkno, int count)
{
	emul64_nowrite_t	*nw;
	emul64_rng_overlap_t	rv = O_NONE;

	for (nw = tgt->emul64_tgt_nowrite;
	    (nw != NULL) && (rv == O_NONE);
	    nw = nw->emul64_nwnext) {
		rv = emul64_overlap(&nw->emul64_blocked, blkno, (size_t)count);
	}
	return (rv);
}

/*
 * Operations that do a lot of I/O, such as RAID 5 initializations, result
 * in a CPU bound kernel when the device is an emul64 device.  This makes
 * the machine look hung.  To avoid this problem, give up the CPU from time
 * to time.
 */
static void
emul64_yield_check()
{
	static uint_t	emul64_io_count = 0;	/* # I/Os since last wait */
	static uint_t	emul64_waiting = FALSE;	/* TRUE -> a thread is in */
						/* cv_timedwait. */
	clock_t		ticks;

	if (emul64_yield_enable == 0)
		return;

	mutex_enter(&emul64_yield_mutex);

	if (emul64_waiting == TRUE) {
		/*
		 * Another thread has already started the timer.  We'll
		 * just wait here until their time expires, and they
		 * broadcast to us.  When they do that, we'll return and
		 * let our caller do more I/O.
		 */
		cv_wait(&emul64_yield_cv, &emul64_yield_mutex);
	} else if (emul64_io_count++ > emul64_yield_period) {
		/*
		 * Set emul64_waiting to let other threads know that we
		 * have started the timer.
		 */
		emul64_waiting = TRUE;
		emul64_num_delay_called++;
		ticks = drv_usectohz(emul64_yield_length);
		if (ticks == 0)
			ticks = 1;
		(void) cv_timedwait(&emul64_yield_cv,
		    &emul64_yield_mutex, ddi_get_lbolt() + ticks);
		emul64_io_count = 0;
		emul64_waiting = FALSE;

		/* Broadcast in case others are waiting. */
		cv_broadcast(&emul64_yield_cv);
	}

	mutex_exit(&emul64_yield_mutex);
}