/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Multiplexed I/O SCSI vHCI implementation
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/scsi/impl/services.h>
#include <sys/sunmdi.h>
#include <sys/mdi_impldefs.h>
#include <sys/scsi/adapters/scsi_vhci.h>
#include <sys/disp.h>
#include <sys/byteorder.h>

extern uintptr_t scsi_callback_id;
extern ddi_dma_attr_t scsi_alloc_attr;

#ifdef DEBUG
int vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
#endif

/* retry for the vhci_do_prout command when a not ready is returned */
int vhci_prout_not_ready_retry = 180;

/*
 * These values are defined to support the internal retry of
 * SCSI packets for better sense code handling.
 */
#define	VHCI_CMD_CMPLT	0
#define	VHCI_CMD_RETRY	1
#define	VHCI_CMD_ERROR	-1

#define	PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
#define	VHCI_SCSI_PERR		0x47
#define	VHCI_PGR_ILLEGALOP	-2
#define	VHCI_NUM_UPDATE_TASKQ	8
/* changed to 132 to accommodate HDS */

/*
 * Version Macros
 */
#define	VHCI_NAME_VERSION	"SCSI VHCI Driver"
char		vhci_version_name[] = VHCI_NAME_VERSION;

int		vhci_first_time = 0;
clock_t		vhci_to_ticks = 0;
int		vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
kcondvar_t	vhci_cv;
kmutex_t	vhci_global_mutex;
void		*vhci_softstate = NULL;		/* for soft state */

/*
 * Flag to delay the retry of the reserve command
 */
int		vhci_reserve_delay = 100000;
static int	vhci_path_quiesce_timeout = 60;
static uchar_t	zero_key[MHIOC_RESV_KEY_SIZE];

/* uscsi delay for a TRAN_BUSY */
static int vhci_uscsi_delay = 100000;
static int vhci_uscsi_retry_count = 180;
/* uscsi_restart_sense timeout id in case it needs to get canceled */
static timeout_id_t vhci_restart_timeid = 0;

static int	vhci_bus_config_debug = 0;

/*
 * Bidirectional map of 'target-port' to port id <pid> for support of
 * iostat(1M) '-Xx' and '-Yx' output.
 */
static kmutex_t		vhci_targetmap_mutex;
static uint_t		vhci_targetmap_pid = 1;
static mod_hash_t	*vhci_targetmap_bypid;	/* <pid> -> 'target-port' */
static mod_hash_t	*vhci_targetmap_byport;	/* 'target-port' -> <pid> */
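
/*
 * Illustrative sketch (not part of the driver) of how the two hashes above
 * cooperate: a new 'target-port' string is assigned the next <pid> and
 * entered in both directions, so either side can be looked up later.
 *
 *	mod_hash_val_t	hv;
 *	uint_t		pid;
 *
 *	mutex_enter(&vhci_targetmap_mutex);
 *	if (mod_hash_find(vhci_targetmap_byport,
 *	    (mod_hash_key_t)tgt_port, &hv) != 0) {
 *		pid = vhci_targetmap_pid++;
 *		(void) mod_hash_insert(vhci_targetmap_byport,
 *		    (mod_hash_key_t)tgt_port,
 *		    (mod_hash_val_t)(intptr_t)pid);
 *		(void) mod_hash_insert(vhci_targetmap_bypid,
 *		    (mod_hash_key_t)(uintptr_t)pid,
 *		    (mod_hash_val_t)tgt_port);
 *	}
 *	mutex_exit(&vhci_targetmap_mutex);
 */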

/*
 * functions exported by scsi_vhci struct cb_ops
 */
static int vhci_open(dev_t *, int, int, cred_t *);
static int vhci_close(dev_t, int, int, cred_t *);
static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * functions exported by scsi_vhci struct dev_ops
 */
static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * functions exported by scsi_vhci scsi_hba_tran_t transport table
 */
static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
    struct scsi_device *);
static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset(struct scsi_address *, int);
static int vhci_scsi_reset_target(struct scsi_address *, int level,
    uint8_t select_path);
static int vhci_scsi_reset_bus(struct scsi_address *);
static int vhci_scsi_getcap(struct scsi_address *, char *, int);
static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip);
static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t),
    caddr_t);
static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int vhci_scsi_get_name(struct scsi_device *, char *, int);
static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
    void *, void *);
static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *, dev_info_t **);
static int vhci_scsi_bus_unconfig(dev_info_t *, uint_t, ddi_bus_config_op_t,
    void *);
static struct scsi_failover_ops *vhci_dev_fo(dev_info_t *, struct scsi_device *,
    void **, char **);

/*
 * functions registered with the mpxio framework via mdi_vhci_ops_t
 */
static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
    mdi_pathinfo_state_t, uint32_t, int);
static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
static int vhci_failover(dev_info_t *, dev_info_t *, int);
static void vhci_client_attached(dev_info_t *);
static int vhci_is_dev_supported(dev_info_t *, dev_info_t *, void *);

static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
    int, caddr_t);
static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
    uint_t, sv_iocdata_t *, int, caddr_t);
static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
    sv_iocdata_t *, int, caddr_t);
static int vhci_handle_ext_fo(struct scsi_pkt *, int);
static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
static int vhci_quiesce_lun(struct scsi_vhci_lun *);
static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
static void vhci_dispatch_scsi_start(void *);
static void vhci_efo_done(void *);
static void vhci_initiate_auto_failback(void *);
static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
    struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
    scsi_vhci_lun_t *, char *, char *);

static char *vhci_devnm_to_guid(char *);
static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
    int, int (*func)(caddr_t));
static void vhci_intr(struct scsi_pkt *);
static int vhci_do_prout(scsi_vhci_priv_t *);
static void vhci_run_cmd(void *);
static int vhci_do_prin(struct vhci_pkt **);
static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
static void vhci_lun_free(struct scsi_vhci_lun *dvlp, struct scsi_device *sd);
static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
    uint8_t, uint8_t);
void vhci_update_pathstates(void *);

#ifdef DEBUG
static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
static void vhci_print_cdb(dev_info_t *dip, uint_t level,
    char *title, uchar_t *cdb);
static void vhci_clean_print(dev_info_t *dev, uint_t level,
    char *title, uchar_t *data, int len);
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
static void vhci_uscsi_iodone(struct scsi_pkt *pkt);
static void vhci_invalidate_mpapi_lu(struct scsi_vhci *, scsi_vhci_lun_t *);

/*
 * MP-API related functions
 */
extern int vhci_mpapi_init(struct scsi_vhci *);
extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
extern void vhci_update_mpapi_data(struct scsi_vhci *,
    scsi_vhci_lun_t *, mdi_pathinfo_t *);
extern void *vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
    uint8_t, void *);
extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
    scsi_vhci_lun_t *);

#define	VHCI_DMA_MAX_XFER_CAP	INT_MAX

#define	VHCI_MAX_PGR_RETRIES	3

/*
 * Macros for the device-type mpxio options
 */
#define	LOAD_BALANCE_OPTIONS		"load-balance-options"
#define	LOGICAL_BLOCK_REGION_SIZE	"region-size"
#define	MPXIO_OPTIONS_LIST		"device-type-mpxio-options-list"
#define	DEVICE_TYPE_STR			"device-type"
#define	isdigit(ch)			((ch) >= '0' && (ch) <= '9')
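
/*
 * For reference, a hypothetical scsi_vhci.conf fragment that uses the
 * property names above (the vid/pid and option strings are illustrative):
 *
 *	device-type-mpxio-options-list =
 *	    "device-type=VENDOR  PRODUCT", "load-balance-options=lb-opts";
 *	lb-opts = "load-balance=logical-block", "region-size=18";
 */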

static struct cb_ops vhci_cb_ops = {
	vhci_open,			/* open */
	vhci_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	vhci_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops vhci_ops = {
	DEVO_REV,
	0,
	vhci_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	vhci_attach,		/* attach and detach are mandatory */
	vhci_detach,
	nodev,			/* reset */
	&vhci_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,
	vhci_version_name,	/* module name */
	&vhci_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static mdi_vhci_ops_t vhci_opinfo = {
	MDI_VHCI_OPS_REV,
	vhci_pathinfo_init,		/* Pathinfo node init callback */
	vhci_pathinfo_uninit,		/* Pathinfo uninit callback */
	vhci_pathinfo_state_change,	/* Pathinfo node state change */
	vhci_failover,			/* failover callback */
	vhci_client_attached,		/* client attached callback */
	vhci_is_dev_supported		/* is device supported by mdi */
};

/*
 * The scsi_failover table defines an ordered set of 'fops' modules supported
 * by scsi_vhci. Currently, initialize this table from the 'ddi-forceload'
 * property specified in scsi_vhci.conf.
 */
static struct scsi_failover {
	ddi_modhandle_t			sf_mod;
	struct scsi_failover_ops	*sf_sfo;
} *scsi_failover_table;
static uint_t	scsi_nfailover;
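
/*
 * A representative 'ddi-forceload' entry in scsi_vhci.conf looks roughly
 * like the following (the module paths shown are illustrative):
 *
 *	ddi-forceload =
 *	    "misc/scsi_vhci/scsi_vhci_f_asym_sun",
 *	    "misc/scsi_vhci/scsi_vhci_f_sym",
 *	    "misc/scsi_vhci/scsi_vhci_f_tpgs";
 */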

int
_init(void)
{
	int	rval;

	/*
	 * Initialize soft state here so that ddi_soft_state_zalloc()
	 * can be done before registering with the transport.
	 */
	if ((rval = ddi_soft_state_init(&vhci_softstate,
	    sizeof (struct scsi_vhci), 1)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init:soft state init failed\n"));
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!_init: scsi hba init failed\n"));
		ddi_soft_state_fini(&vhci_softstate);
		return (rval);
	}

	mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
	vhci_targetmap_byport = mod_hash_create_strhash(
	    "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
	vhci_targetmap_bypid = mod_hash_create_idhash(
	    "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);

	if ((rval = mod_install(&modlinkage)) != 0) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
		if (vhci_targetmap_bypid)
			mod_hash_destroy_idhash(vhci_targetmap_bypid);
		if (vhci_targetmap_byport)
			mod_hash_destroy_strhash(vhci_targetmap_byport);
		mutex_destroy(&vhci_targetmap_mutex);
		cv_destroy(&vhci_cv);
		mutex_destroy(&vhci_global_mutex);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&vhci_softstate);
	}
	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
		return (rval);
	}

	if (vhci_targetmap_bypid)
		mod_hash_destroy_idhash(vhci_targetmap_bypid);
	if (vhci_targetmap_byport)
		mod_hash_destroy_strhash(vhci_targetmap_byport);
	mutex_destroy(&vhci_targetmap_mutex);
	cv_destroy(&vhci_cv);
	mutex_destroy(&vhci_global_mutex);
	scsi_hba_fini(&modlinkage);
	ddi_soft_state_fini(&vhci_softstate);

	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Lookup scsi_failover by "short name" of failover module.
 */
struct scsi_failover_ops *
vhci_failover_ops_by_name(char *name)
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo == NULL)
			continue;
		if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
			return (sf->sf_sfo);
	}
	return (NULL);
}
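
/*
 * Typical (hypothetical) caller pattern for the lookup above, using one
 * of the well-known module names checked in vhci_failover_modopen():
 *
 *	struct scsi_failover_ops *sfo;
 *
 *	if ((sfo = vhci_failover_ops_by_name(SFO_NAME_SYM)) != NULL)
 *		... use the sfo_* entry points ...
 */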

/*
 * Load all scsi_failover_ops 'fops' modules.
 */
static void
vhci_failover_modopen(struct scsi_vhci *vhci)
{
	char			**module;
	int			i;
	struct scsi_failover	*sf;
	char			**dt;
	int			e;

	if (scsi_failover_table)
		return;

	/* Get the list of modules from scsi_vhci.conf */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
	    vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
	    &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf is missing 'ddi-forceload'");
		return;
	}
	if (scsi_nfailover == 0) {
		cmn_err(CE_WARN, "scsi_vhci: "
		    "scsi_vhci.conf has empty 'ddi-forceload'");
		ddi_prop_free(module);
		return;
	}

	/* allocate failover table based on number of modules */
	scsi_failover_table = (struct scsi_failover *)
	    kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
	    KM_SLEEP);

	/* loop over modules specified in scsi_vhci.conf and open each one */
	for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
		if (module[i] == NULL)
			continue;

		sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
		if (sf->sf_mod == NULL) {
			/*
			 * A module returns EEXIST if other software is
			 * supporting the intended function: for example
			 * the scsi_vhci_f_sum_emc module returns EEXIST
			 * from _init if EMC powerpath software is installed.
			 */
			if (e != EEXIST)
				cmn_err(CE_WARN, "scsi_vhci: unable to open "
				    "module '%s', error %d", module[i], e);
			continue;
		}
		sf->sf_sfo = ddi_modsym(sf->sf_mod,
		    "scsi_vhci_failover_ops", &e);
		if (sf->sf_sfo == NULL) {
			cmn_err(CE_WARN, "scsi_vhci: "
			    "unable to import 'scsi_failover_ops' from '%s', "
			    "error %d", module[i], e);
			(void) ddi_modclose(sf->sf_mod);
			sf->sf_mod = NULL;
			continue;
		}

		/* register vid/pid of devices supported with mpapi */
		for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
			vhci_mpapi_add_dev_prod(vhci, *dt);
		sf++;
	}

	/* verify that at least the "well-known" modules were there */
	if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");
	if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
		cmn_err(CE_WARN, "scsi_vhci: well-known module \""
		    SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
		    "'ddi-forceload'");

	/* call sfo_init for modules that need it */
	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if (sf->sf_sfo && sf->sf_sfo->sfo_init)
			sf->sf_sfo->sfo_init();
	}

	ddi_prop_free(module);
}

/*
 * unload all loaded scsi_failover_ops modules
 */
static void
vhci_failover_modclose()
{
	struct scsi_failover	*sf;

	for (sf = scsi_failover_table; sf->sf_mod; sf++) {
		if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
			continue;
		(void) ddi_modclose(sf->sf_mod);
		sf->sf_mod = NULL;
		sf->sf_sfo = NULL;
	}

	if (scsi_failover_table && scsi_nfailover)
		kmem_free(scsi_failover_table,
		    sizeof (struct scsi_failover) * (scsi_nfailover + 1));
	scsi_failover_table = NULL;
	scsi_nfailover = 0;
}

/* ARGSUSED */
static int
vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
		mutex_exit(&vhci->vhci_mutex);
		vhci_log(CE_NOTE, vhci->vhci_dip,
		    "!vhci%d: Already open\n", getminor(*devp));
		return (EBUSY);
	}

	vhci->vhci_state |= VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);
	return (0);
}


/* ARGSUSED */
static int
vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	struct scsi_vhci	*vhci;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
	if (vhci == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
		return (ENXIO);
	}

	mutex_enter(&vhci->vhci_mutex);
	vhci->vhci_state &= ~VHCI_STATE_OPEN;
	mutex_exit(&vhci->vhci_mutex);

	return (0);
}

/* ARGSUSED */
static int
vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval)
{
	if (IS_DEVCTL(cmd)) {
		return (vhci_devctl(dev, cmd, data, mode, credp, rval));
	} else if (cmd == MP_CMD) {
		return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
	} else {
		return (vhci_ctl(dev, cmd, data, mode, credp, rval));
	}
}
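
/*
 * ioctls arrive on the "devctl" minor node created in vhci_attach()
 * (userland tools such as mpathadm are expected to open, e.g.,
 * /devices/scsi_vhci:devctl); DEVCTL ioctls, MP-API ioctls, and
 * scsi_vhci private ioctls are fanned out by vhci_ioctl() above.
 */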

/*
 * attach the module
 */
static int
vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			rval = DDI_FAILURE;
	int			scsi_hba_attached = 0;
	int			vhci_attached = 0;
	int			mutex_initted = 0;
	int			instance;
	struct scsi_vhci	*vhci;
	scsi_hba_tran_t		*tran;
	char			cache_name_buf[64];
	char			*data;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
		    "implemented\n"));
		return (rval);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_attach: unknown ddi command\n"));
		return (rval);
	}

	/*
	 * Allocate vhci data structure.
	 */
	if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "soft state alloc failed\n"));
		return (DDI_FAILURE);
	}

	if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "bad soft state\n"));
		ddi_soft_state_free(vhci_softstate, instance);
		return (DDI_FAILURE);
	}

	/* Allocate packet cache */
	(void) snprintf(cache_name_buf, sizeof (cache_name_buf),
	    "vhci%d_cache", instance);

	mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	ASSERT(tran != NULL);

	vhci->vhci_tran		= tran;
	vhci->vhci_dip		= dip;
	vhci->vhci_instance	= instance;

	tran->tran_hba_private	= vhci;
	tran->tran_tgt_init	= vhci_scsi_tgt_init;
	tran->tran_tgt_probe	= NULL;
	tran->tran_tgt_free	= vhci_scsi_tgt_free;

	tran->tran_start	= vhci_scsi_start;
	tran->tran_abort	= vhci_scsi_abort;
	tran->tran_reset	= vhci_scsi_reset;
	tran->tran_getcap	= vhci_scsi_getcap;
	tran->tran_setcap	= vhci_scsi_setcap;
	tran->tran_init_pkt	= vhci_scsi_init_pkt;
	tran->tran_destroy_pkt	= vhci_scsi_destroy_pkt;
	tran->tran_dmafree	= vhci_scsi_dmafree;
	tran->tran_sync_pkt	= vhci_scsi_sync_pkt;
	tran->tran_reset_notify = vhci_scsi_reset_notify;

	tran->tran_get_bus_addr	= vhci_scsi_get_bus_addr;
	tran->tran_get_name	= vhci_scsi_get_name;
	tran->tran_bus_reset	= NULL;
	tran->tran_quiesce	= NULL;
	tran->tran_unquiesce	= NULL;

	/*
	 * register event notification routines with scsa
	 */
	tran->tran_get_eventcookie = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_remove_eventcall = NULL;
	tran->tran_post_event = NULL;

	tran->tran_bus_power = vhci_scsi_bus_power;

	tran->tran_bus_config = vhci_scsi_bus_config;
	tran->tran_bus_unconfig = vhci_scsi_bus_unconfig;

	/*
	 * Attach this instance with the mpxio framework
	 */
	if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
	    != MDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "mdi_vhci_register failed\n"));
		goto attach_fail;
	}
	vhci_attached++;

	/*
	 * Attach this instance of the hba.
	 *
	 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
	 * driver, it has nothing to do with DMA. However, when calling
	 * scsi_hba_attach_setup() we need to pass something valid in the
	 * dma attributes parameter, so we just use scsi_alloc_attr.
	 * SCSA itself appears to care only about the dma_attr_minxfer and
	 * dma_attr_burstsizes fields of the dma attributes structure, and
	 * expects those fields to be non-zero.
	 */
	if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
	    SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    "hba attach failed\n"));
		goto attach_fail;
	}
	scsi_hba_attached++;

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
		VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
		    " ddi_create_minor_node failed\n"));
		goto attach_fail;
	}

	/*
	 * Set pm-want-child-notification property for
	 * power management of the phci and client
	 */
	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d fail to create pm-want-child-notification? prop",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto attach_fail;
	}

	vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
	vhci->vhci_update_pathstates_taskq =
	    taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
	    MINCLSYSPRI, 1, 4, 0);
	ASSERT(vhci->vhci_taskq);
	ASSERT(vhci->vhci_update_pathstates_taskq);

	/*
	 * Set appropriate configuration flags based on options set in
	 * conf file.
	 */
	vhci->vhci_conf_flags = 0;
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
	    "auto-failback", &data) == DDI_SUCCESS) {
		if (strcmp(data, "enable") == 0)
			vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
		ddi_prop_free(data);
	}

	if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
		vhci_log(CE_NOTE, dip, "!Auto-failback capability "
		    "disabled through scsi_vhci.conf file.");

	/*
	 * Allocate an mpapi private structure
	 */
	vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
	if (vhci_mpapi_init(vhci) != 0) {
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
		    "vhci_mpapi_init() failed"));
	}

	vhci_failover_modopen(vhci);		/* load failover modules */

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

attach_fail:
	if (vhci_attached)
		(void) mdi_vhci_unregister(dip, 0);

	if (scsi_hba_attached)
		(void) scsi_hba_detach(dip);

	if (vhci->vhci_tran)
		scsi_hba_tran_free(vhci->vhci_tran);

	if (mutex_initted) {
		mutex_destroy(&vhci->vhci_mutex);
	}

	ddi_soft_state_free(vhci_softstate, instance);
	return (DDI_FAILURE);
}


/*ARGSUSED*/
static int
vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int			instance = ddi_get_instance(dip);
	scsi_hba_tran_t		*tran;
	struct scsi_vhci	*vhci;

	VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	vhci = TRAN2HBAPRIVATE(tran);
	if (!vhci) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not "
		    "yet implemented\n"));
		return (DDI_FAILURE);

	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_detach: unknown ddi command\n"));
		return (DDI_FAILURE);
	}

	(void) mdi_vhci_unregister(dip, 0);
	(void) scsi_hba_detach(dip);
	scsi_hba_tran_free(tran);

	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d unable to remove prop pm-want-child-notification?",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
	if (vhci_restart_timeid != 0) {
		(void) untimeout(vhci_restart_timeid);
	}
	vhci_restart_timeid = 0;

	mutex_destroy(&vhci->vhci_mutex);
	vhci->vhci_dip = NULL;
	vhci->vhci_tran = NULL;
	taskq_destroy(vhci->vhci_taskq);
	taskq_destroy(vhci->vhci_update_pathstates_taskq);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(vhci_softstate, instance);

	vhci_failover_modclose();		/* unload failover modules */
	return (DDI_SUCCESS);
}

/*
 * vhci_getinfo()
 * Given the device number, return the devinfo pointer or the
 * instance number.
 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
 */

/*ARGSUSED*/
static int
vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct scsi_vhci	*vhcip;
	int			instance = MINOR2INST(getminor((dev_t)arg));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		vhcip = ddi_get_soft_state(vhci_softstate, instance);
		if (vhcip != NULL)
			*result = vhcip->vhci_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	char			*guid;
	scsi_vhci_lun_t		*vlun;
	struct scsi_vhci	*vhci;
	clock_t			from_ticks;
	mdi_pathinfo_t		*pip;
	int			rval;

	ASSERT(hba_dip != NULL);
	ASSERT(tgt_dip != NULL);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
	    MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
		/*
		 * This must be the .conf node without GUID property.
		 * The node under fp already inserts a delay, so we
		 * just return from here. We rely on this delay to have
		 * all dips be posted to the ndi hotplug thread's newdev
		 * list. This is necessary for the deferred attach
		 * mechanism to work and opens() done soon after boot to
		 * succeed.
		 */
		VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
		    "property failed"));
		return (DDI_NOT_WELL_FORMED);
	}

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		/*
		 * This must be the .conf node with the GUID property. We don't
		 * merge property by ndi_merge_node() here because the
		 * devi_addr_buf of the .conf node is always "" according to
		 * the implementation of vhci_scsi_get_name_bus_addr().
		 */
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}

	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
	ASSERT(vhci != NULL);

	VHCI_DEBUG(4, (CE_NOTE, hba_dip,
	    "!tgt_init: called for %s (instance %d)\n",
	    ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));

	vlun = vhci_lun_lookup(tgt_dip);

	mutex_enter(&vhci_global_mutex);

	from_ticks = ddi_get_lbolt();
	if (vhci_to_ticks == 0) {
		vhci_to_ticks = from_ticks +
		    drv_usectohz(vhci_init_wait_timeout);
	}

#if DEBUG
	if (vlun) {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
		    "from_ticks %lx to_ticks %lx",
		    guid, (void *)vlun, from_ticks, vhci_to_ticks));
	} else {
		VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
		    "vhci_scsi_tgt_init: guid %s : vlun not found "
		    "from_ticks %lx to_ticks %lx", guid, from_ticks,
		    vhci_to_ticks));
	}
#endif

	rval = mdi_select_path(tgt_dip, NULL,
	    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_rele_path(pip);
	}

	/*
	 * Wait for the following conditions :
	 *	1. no vlun available yet
	 *	2. no path established
	 *	3. timer did not expire
	 */
	while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
	    (rval != MDI_SUCCESS)) {
		if (vlun && vlun->svl_not_supported) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
			    "vlun 0x%p lun guid %s not supported!",
			    (void *)vlun, guid));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
			vhci_first_time = 1;
		}
		if (vhci_first_time == 1) {
			VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
			    "no wait for %s. from_tick %lx, to_tick %lx",
			    guid, from_ticks, vhci_to_ticks));
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}

		if (cv_timedwait(&vhci_cv,
		    &vhci_global_mutex, vhci_to_ticks) == -1) {
			/* Timed out */
#ifdef DEBUG
			if (vlun == NULL) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: no vlun for %s!", guid));
			} else if (mdi_client_get_path_count(tgt_dip) == 0) {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path count is "
				    "zero for %s!", guid));
			} else {
				VHCI_DEBUG(1, (CE_WARN, hba_dip,
				    "tgt_init: client path not "
				    "available yet for %s!", guid));
			}
#endif /* DEBUG */
			mutex_exit(&vhci_global_mutex);
			ddi_prop_free(guid);
			return (DDI_NOT_WELL_FORMED);
		}
		vlun = vhci_lun_lookup(tgt_dip);
		rval = mdi_select_path(tgt_dip, NULL,
		    (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
		    NULL, &pip);
		if (rval == MDI_SUCCESS) {
			mdi_rele_path(pip);
		}
		from_ticks = ddi_get_lbolt();
	}
	mutex_exit(&vhci_global_mutex);

	ASSERT(vlun != NULL);
	ddi_prop_free(guid);

	scsi_device_hba_private_set(sd, vlun);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct scsi_vhci_lun *dvlp;

	ASSERT(mdi_client_get_path_count(tgt_dip) <= 0);
	dvlp = (struct scsi_vhci_lun *)scsi_device_hba_private_get(sd);
	ASSERT(dvlp != NULL);

	vhci_lun_free(dvlp, sd);
}

/*
 * a PGR register command has started; copy the info we need
 */
int
vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
{
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	void			*addr;

	if (!vpkt->vpkt_tgt_init_bp)
		return (TRAN_BADPKT);

	addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
	    (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
	if (addr == NULL)
		return (TRAN_BUSY);

	mutex_enter(&vlun->svl_mutex);

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");

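	/*
	 * Note: the bcopy below deliberately stops short of the last two
	 * MHIOC_RESV_KEY_SIZE fields of vhci_prout_t (the keys the driver
	 * itself maintains), so only the caller-supplied parameter list is
	 * refreshed; this reading is inferred from the sizeof arithmetic.
	 */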
	bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
	    (2 * MHIOC_RESV_KEY_SIZE * sizeof (char)));
	bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));

	vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");

	vlun->svl_time = pkt->pkt_time;
	vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
	vlun->svl_first_path = vpkt->vpkt_path;
	mutex_exit(&vlun->svl_mutex);
	return (0);
}

/*
 * Function name : vhci_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- vhci has been shutdown
 *					  or other fatal failure
 *					  preventing packet transportation
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to phci
 *					  (or is held in the waitQ)
 * Description	 : Implements SCSA's tran_start() entry point for
 *		   packet transport
 *
 */
static int
vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	int			rval = TRAN_ACCEPT;
	int			instance, held;
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	struct vhci_pkt		*vpkt = TGTPKT2VHCIPKT(pkt);
	int			flags = 0;
	scsi_vhci_priv_t	*svp, *svp_resrv;
	dev_info_t		*cdip;
	client_lb_t		lbp;
	int			restore_lbp = 0;
	/* set if pkt is SCSI-II RESERVE cmd */
	int			pkt_reserve_cmd = 0;
	int			reserve_failed = 0;
	int			resrv_instance = 0;
	mdi_pathinfo_t		*pip;
	struct scsi_pkt		*rel_pkt;

	ASSERT(vhci != NULL);
	ASSERT(vpkt != NULL);
	ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
	cdip = ADDR2DIP(ap);

	/*
	 * Block IOs if LUN is held or QUIESCED for IOs.
	 */
	if ((VHCI_LUN_IS_HELD(vlun)) ||
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		return (TRAN_BUSY);
	}

	/*
	 * vhci_lun needs to be quiesced before a SCSI-II RESERVE command
	 * can be issued. This may require a cv_timedwait, which is
	 * dangerous to perform in an interrupt context. So if this
	 * is a RESERVE command, a taskq is dispatched to service it.
	 * That taskq will call vhci_scsi_start() again, this time
	 * guaranteed not to be in an interrupt context.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
	    (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
		if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
			if (taskq_dispatch(vhci->vhci_taskq,
			    vhci_dispatch_scsi_start, (void *) vpkt,
			    KM_NOSLEEP)) {
				return (TRAN_ACCEPT);
			} else {
				return (TRAN_BUSY);
			}
		}

		/*
		 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
		 * get serviced for a lun.
		 */
		VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
		if (!held) {
			return (TRAN_BUSY);
		} else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
		    VLUN_QUIESCED_FLG) {
			VHCI_RELEASE_LUN(vlun);
			return (TRAN_BUSY);
		}

		/*
		 * To ensure that no IOs occur for this LUN for the duration
		 * of this pkt, set the VLUN_QUIESCED_FLG.
		 * In case this routine needs to exit on error, make sure
		 * that this flag is cleared.
		 */
		vlun->svl_flags |= VLUN_QUIESCED_FLG;
		pkt_reserve_cmd = 1;

		/*
		 * if this is a SCSI-II RESERVE command, set load balancing
		 * policy to be ALTERNATE PATH to ensure that all subsequent
		 * IOs are routed on the same path. This is because if
		 * commands are routed across multiple paths then IOs on
		 * paths other than the one on which the RESERVE was executed
		 * will get a RESERVATION CONFLICT.
		 */
		lbp = mdi_get_lb_policy(cdip);
		if (lbp != LOAD_BALANCE_NONE) {
			if (vhci_quiesce_lun(vlun) != 1) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			vlun->svl_lb_policy_save = lbp;
			if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
			    MDI_SUCCESS) {
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				VHCI_RELEASE_LUN(vlun);
				return (TRAN_FATAL_ERROR);
			}
			restore_lbp = 1;
		}

		VHCI_DEBUG(2, (CE_NOTE, vhci->vhci_dip,
		    "!vhci_scsi_start: sending SCSI-2 RESERVE, vlun 0x%p, "
		    "svl_resrv_pip 0x%p, svl_flags: %x, lb_policy %x",
		    (void *)vlun, (void *)vlun->svl_resrv_pip, vlun->svl_flags,
		    mdi_get_lb_policy(cdip)));

		/*
		 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h.
		 * To narrow the window in which a reserve command may be
		 * sent down an inactive path, the path states first need to
		 * be updated. Before calling vhci_update_pathstates, reset
		 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
		 * for this lun; this prevents an unnecessary reset from
		 * being sent out. Also remember the currently reserved path,
		 * in case the new reservation goes to another path.
		 */
		if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
			resrv_instance = mdi_pi_get_path_instance(
			    vlun->svl_resrv_pip);
		}
		vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
		vhci_update_pathstates((void *)vlun);
	}

	instance = ddi_get_instance(vhci->vhci_dip);

	/*
	 * If the command is PRIN with action of zero, then the cmd
	 * is reading PR keys which requires filtering on completion.
	 * Data cache sync must be guaranteed.
	 */
	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
	    (vpkt->vpkt_org_vpkt == NULL)) {
		vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
	}

	/*
	 * Do not defer bind for PKT_DMA_PARTIAL
	 */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {

		/* This is a non pkt_dma_partial case */
		if ((rval = vhci_bind_transport(
		    ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
		    != TRAN_ACCEPT) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci%d %x: failed to bind transport: "
			    "vlun 0x%p pkt_reserved %x restore_lbp %x,"
			    "lbp %x", instance, rval, (void *)vlun,
			    pkt_reserve_cmd, restore_lbp, lbp));
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
			if (pkt_reserve_cmd)
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			return (rval);
		}
		VHCI_DEBUG(8, (CE_NOTE, NULL,
		    "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
	}
	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	ASSERT(vpkt->vpkt_path != NULL);

	/*
	 * This is the chance to adjust the pHCI's pkt and other information
	 * from target driver's pkt.
	 */
	VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
	    (void *)vpkt));
	vhci_update_pHCI_pkt(vpkt, pkt);

	if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
		if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
			VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind: reserve flag set for vlun 0x%p, but, "
			    "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
			    (void *)vlun, (void *)vpkt->vpkt_path,
			    (void *)vlun->svl_resrv_pip,
			    mdi_get_lb_policy(cdip)));
			reserve_failed = 1;
		}
	}

	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
	if (svp == NULL || reserve_failed) {
		if (pkt_reserve_cmd) {
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_bind returned null svp vlun 0x%p",
			    (void *)vlun));
			vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
			if (restore_lbp)
				(void) mdi_set_lb_policy(cdip, lbp);
		}
pkt_cleanup:
		if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
			scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
			vpkt->vpkt_hba_pkt = NULL;
			if (vpkt->vpkt_path) {
				mdi_rele_path(vpkt->vpkt_path);
				vpkt->vpkt_path = NULL;
			}
		}
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
			sema_v(&vlun->svl_pgr_sema);
		}
		return (TRAN_BUSY);
	}

	if ((resrv_instance != 0) && (resrv_instance !=
	    mdi_pi_get_path_instance(vpkt->vpkt_path))) {
		/*
		 * This is an attempt to reserve vpkt->vpkt_path. But the
		 * previously reserved path referred to by resrv_instance
		 * might still be reserved. Hence we will send a release
		 * command there in order to avoid a reservation conflict.
		 */
		VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, "!vhci_scsi_start: "
		    "conflicting reservation on another path, vlun 0x%p, "
		    "reserved instance %d, new instance: %d, pip: 0x%p",
		    (void *)vlun, resrv_instance,
		    mdi_pi_get_path_instance(vpkt->vpkt_path),
		    (void *)vpkt->vpkt_path));

		/*
		 * In rare cases, the path referred to by resrv_instance could
		 * disappear in the meantime. Calling mdi_select_path() below
		 * is an attempt to find out if the path still exists. It also
		 * ensures that the path will be held when the release is sent.
		 */
		rval = mdi_select_path(cdip, NULL, MDI_SELECT_PATH_INSTANCE,
		    (void *)(intptr_t)resrv_instance, &pip);

		if ((rval == MDI_SUCCESS) && (pip != NULL)) {
			svp_resrv = (scsi_vhci_priv_t *)
			    mdi_pi_get_vhci_private(pip);
			rel_pkt = scsi_init_pkt(&svp_resrv->svp_psd->sd_address,
			    NULL, NULL, CDB_GROUP0,
			    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC,
			    NULL);

			if (rel_pkt == NULL) {
				char	*p_path;

				/*
				 * This is very unlikely.
				 * scsi_init_pkt(SLEEP_FUNC) does not fail
				 * because of resources. But in theory it
				 * could fail for some other reason. There is
				 * no easy way to recover, though. Log a
				 * warning and return.
				 */
				p_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
				vhci_log(CE_WARN, vhci->vhci_dip, "!Sending "
				    "RELEASE(6) to %s failed, a potential "
				    "reservation conflict ahead.",
				    ddi_pathname(mdi_pi_get_phci(pip), p_path));
				kmem_free(p_path, MAXPATHLEN);

				if (restore_lbp)
					(void) mdi_set_lb_policy(cdip, lbp);

				/* no need to check pkt_reserve_cmd here */
				vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
				return (TRAN_FATAL_ERROR);
			}

			rel_pkt->pkt_cdbp[0] = SCMD_RELEASE;
			rel_pkt->pkt_time = 60;

			/*
			 * Ignore the return value. If it fails, most
			 * likely the path is no longer reserved anyway.
			 */
			(void) vhci_do_scsi_cmd(rel_pkt);
			VHCI_DEBUG(1, (CE_NOTE, NULL,
			    "!vhci_scsi_start: path 0x%p, issued SCSI-2"
			    " RELEASE\n", (void *)pip));
			scsi_destroy_pkt(rel_pkt);
			mdi_rele_path(pip);
		}
	}

	VHCI_INCR_PATH_CMDCOUNT(svp);

	/*
	 * Ensure that no other IOs raced ahead while a RESERVE cmd was
	 * QUIESCING the same lun.
	 */
	if ((!pkt_reserve_cmd) &&
	    ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
		VHCI_DECR_PATH_CMDCOUNT(svp);
		goto pkt_cleanup;
	}

	if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
	    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
		/*
		 * currently this thread only handles running PGR
		 * commands, so don't bother creating it unless
		 * something interesting is going to happen (like
		 * either a PGR out, or a PGR in with enough space
		 * to hold the keys that are getting returned)
		 */
		mutex_enter(&vlun->svl_mutex);
		if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
		    (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
			vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
			    1, MINCLSYSPRI, 1, 4, 0);
			vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
		}
		mutex_exit(&vlun->svl_mutex);
		if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
		    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
		    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
			if ((rval = vhci_pgr_register_start(vlun, pkt)) != 0) {
				/* an error */
				sema_v(&vlun->svl_pgr_sema);
				return (rval);
			}
		}
	}

	/*
	 * SCSI-II RESERVE cmd is not expected in polled mode.
	 * If this changes it needs to be handled for the polled scenario.
	 */
	flags = vpkt->vpkt_hba_pkt->pkt_flags;

	/*
	 * Set the path_instance *before* sending the scsi_pkt down the path
	 * to mpxio's pHCI so that additional path abstractions at a pHCI
	 * level (like maybe iSCSI at some point in the future) can update
	 * the path_instance.
	 */
	if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
		vpkt->vpkt_hba_pkt->pkt_path_instance =
		    mdi_pi_get_path_instance(vpkt->vpkt_path);

	rval = scsi_transport(vpkt->vpkt_hba_pkt);
	if (rval == TRAN_ACCEPT) {
		if (flags & FLAG_NOINTR) {
			struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
			struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;

			ASSERT(tpkt != NULL);
			*(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
			tpkt->pkt_resid = pkt->pkt_resid;
			tpkt->pkt_state = pkt->pkt_state;
			tpkt->pkt_statistics = pkt->pkt_statistics;
			tpkt->pkt_reason = pkt->pkt_reason;

			if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
				    vpkt->vpkt_tgt_init_scblen);
			}

			VHCI_DECR_PATH_CMDCOUNT(svp);
			if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
				scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
				vpkt->vpkt_hba_pkt = NULL;
				if (vpkt->vpkt_path) {
					mdi_rele_path(vpkt->vpkt_path);
					vpkt->vpkt_path = NULL;
				}
			}
			/*
			 * This path will not automatically retry pkts
			 * internally, therefore, vpkt_org_vpkt should
			 * never be set.
			 */
			ASSERT(vpkt->vpkt_org_vpkt == NULL);
			scsi_hba_pkt_comp(tpkt);
		}
		return (rval);
	} else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
		/* the command exited with bad status */
		sema_v(&vlun->svl_pgr_sema);
	} else if (pkt_reserve_cmd) {
		VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
		    "!vhci_scsi_start: reserve failed vlun 0x%p",
		    (void *)vlun));
		vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
		if (restore_lbp)
			(void) mdi_set_lb_policy(cdip, lbp);
	}

	ASSERT(vpkt->vpkt_hba_pkt != NULL);
	VHCI_DECR_PATH_CMDCOUNT(svp);

	/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
	if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
		scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
		vpkt->vpkt_hba_pkt = NULL;
		if (vpkt->vpkt_path) {
			MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
			mdi_rele_path(vpkt->vpkt_path);
			vpkt->vpkt_path = NULL;
		}
	}
	return (TRAN_BUSY);
}

/*
 * Function name : vhci_scsi_reset()
 *
 * Return Values : 0 - reset failed
 *		   1 - reset succeeded
 */

/* ARGSUSED */
static int
vhci_scsi_reset(struct scsi_address *ap, int level)
{
	int rval = 0;

	cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
	if ((level == RESET_TARGET) || (level == RESET_LUN)) {
		return (vhci_scsi_reset_target(ap, level, TRUE));
	} else if (level == RESET_ALL) {
		return (vhci_scsi_reset_bus(ap));
	}

	return (rval);
}

/*
 * vhci_recovery_reset:
 *	Issues reset to the device
 * Input:
 *	vlun - vhci lun pointer of the device
 *	ap - address of the device
 *	select_path:
 *		If select_path is FALSE, then the address specified in ap is
 *		the path on which reset will be issued.
 *		If select_path is TRUE, then the path is obtained by calling
 *		mdi_select_path.
 *
 *	recovery_depth:
 *		Caller can specify the level of reset.
 *		VHCI_DEPTH_LUN -
 *			Issues LUN RESET if the device supports lun reset.
 *		VHCI_DEPTH_TARGET -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET
 *		VHCI_DEPTH_ALL -
 *			If Lun Reset fails or the device does not support
 *			Lun Reset, issues TARGET RESET.
 *			If TARGET RESET does not succeed, issues Bus Reset.
 */

static int
vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
    uint8_t select_path, uint8_t recovery_depth)
{
	int	ret = 0;

	ASSERT(ap != NULL);

	if (vlun && vlun->svl_support_lun_reset == 1) {
		ret = vhci_scsi_reset_target(ap, RESET_LUN,
		    select_path);
	}

	recovery_depth--;

	if ((ret == 0) && recovery_depth) {
		ret = vhci_scsi_reset_target(ap, RESET_TARGET,
		    select_path);
		recovery_depth--;
	}

	if ((ret == 0) && recovery_depth) {
		(void) scsi_reset(ap, RESET_ALL);
	}

	return (ret);
}
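
/*
 * Illustrative call (an example of how recovery code in this driver is
 * expected to escalate from LUN to target reset; the arguments shown
 * here are hypothetical):
 *
 *	(void) vhci_recovery_reset(vlun, &pkt->pkt_address,
 *	    TRUE, VHCI_DEPTH_TARGET);
 */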

/*
 * Note: The scsi_address passed to this routine could be the scsi_address
 * for the virtual device or the physical device. No assumptions should be
 * made in this routine about the contents of the ap structure.
 * Further, note that the child dip would be the dip of the ssd node regardless
 * of the scsi_address passed in.
 */
static int
vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
{
	dev_info_t		*vdip, *cdip;
	mdi_pathinfo_t		*pip = NULL;
	mdi_pathinfo_t		*npip = NULL;
	int			rval = -1;
	scsi_vhci_priv_t	*svp = NULL;
	struct scsi_address	*pap = NULL;
	scsi_hba_tran_t		*hba = NULL;
	int			sps;
	struct scsi_vhci	*vhci = NULL;

	if (select_path != TRUE) {
		ASSERT(ap != NULL);
		if (level == RESET_LUN) {
			hba = ap->a_hba_tran;
			ASSERT(hba != NULL);
			return (hba->tran_reset(ap, RESET_LUN));
		}
		return (scsi_reset(ap, level));
	}

	cdip = ADDR2DIP(ap);
	ASSERT(cdip != NULL);
	vdip = ddi_get_parent(cdip);
	ASSERT(vdip != NULL);
	vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
	ASSERT(vhci != NULL);

	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "Unable to get a path, dip 0x%p", (void *)cdip));
		return (0);
	}
again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (0);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (0);
	}

	pap = &svp->svp_psd->sd_address;
	hba = pap->a_hba_tran;

	ASSERT(pap != NULL);
	ASSERT(hba != NULL);

	if (hba->tran_reset != NULL) {
		if (hba->tran_reset(pap, level) == 0) {
			vhci_log(CE_WARN, vdip, "!%s%d: "
			    "path %s, reset %d failed",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    mdi_pi_spathname(pip), level);

			/*
			 * Select next path and issue the reset, repeat
			 * until all paths are exhausted
			 */
			sps = mdi_select_path(cdip, NULL,
			    MDI_SELECT_ONLINE_PATH, pip, &npip);
			if ((sps != MDI_SUCCESS) || (npip == NULL)) {
				mdi_rele_path(pip);
				return (0);
			}
			mdi_rele_path(pip);
			pip = npip;
			goto again;
		}
		mdi_rele_path(pip);
		mutex_enter(&vhci->vhci_mutex);
		scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
		    &vhci->vhci_reset_notify_listf);
		mutex_exit(&vhci->vhci_mutex);
		VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
		    "reset %d sent down pip:%p for cdip:%p\n", level,
		    (void *)pip, (void *)cdip));
		return (1);
	}
	mdi_rele_path(pip);
	return (0);
}


/* ARGSUSED */
static int
vhci_scsi_reset_bus(struct scsi_address *ap)
{
	return (1);
}
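
/*
 * Returning 1 unconditionally above means a RESET_ALL request is treated
 * as a successful no-op at the vHCI level; resets are only meaningful on
 * a per-target or per-LUN basis across the underlying pHCIs.
 */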

/*
 * called by vhci_getcap and vhci_setcap to get and set (respectively)
 * SCSI capabilities
 */
/* ARGSUSED */
static int
vhci_commoncap(struct scsi_address *ap, char *cap,
    int val, int tgtonly, int doset)
{
	struct scsi_vhci	*vhci = ADDR2VHCI(ap);
	struct scsi_vhci_lun	*vlun = ADDR2VLUN(ap);
	int			cidx;
	int			rval = 0;

	if (cap == (char *)0) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: invalid arg"));
		return (rval);
	}

	if (vlun == NULL) {
		VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
		    "!vhci_commoncap: vlun is null"));
		return (rval);
	}

	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
		return (UNDEFINED);
	}

	/*
	 * Process setcap request.
	 */
	if (doset) {
		/*
		 * At present, we can only set binary (0/1) values
		 */
		switch (cidx) {
		case SCSI_CAP_ARQ:
			if (val == 0) {
				rval = 0;
			} else {
				rval = 1;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			if (tgtonly == 0) {
				VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
				    "scsi_vhci_setcap: "
				    "Returning error since whom = 0"));
				rval = -1;
				break;
			}
			/*
			 * Set the capability accordingly.
			 */
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_support_lun_reset = val;
			rval = val;
			mutex_exit(&vlun->svl_mutex);
			break;

		case SCSI_CAP_SECTOR_SIZE:
			mutex_enter(&vlun->svl_mutex);
			vlun->svl_sector_size = val;
			vlun->svl_setcap_done = 1;
			mutex_exit(&vlun->svl_mutex);
			(void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);

			/* Always return success */
			rval = 1;
			break;

		default:
			VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
			    "!vhci_setcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
		    "!set cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d\n",
		    cap, val, tgtonly, doset, rval));

	} else {
		/*
		 * Process getcap request.
		 */
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			/*
			 * For X86 this capability is caught in scsi_ifgetcap().
			 * XXX Should this be getting the value from the pHCI?
			 */
			rval = (int)VHCI_DMA_MAX_XFER_CAP;
			break;

		case SCSI_CAP_INITIATOR_ID:
			rval = 0x00;
			break;

		case SCSI_CAP_ARQ:
		case SCSI_CAP_RESET_NOTIFICATION:
		case SCSI_CAP_TAGGED_QING:
			rval = 1;
			break;

		case SCSI_CAP_SCSI_VERSION:
			rval = 3;
			break;

		case SCSI_CAP_INTERCONNECT_TYPE:
			rval = INTERCONNECT_FABRIC;
			break;

1830 case SCSI_CAP_LUN_RESET:
1831 /*
1832 * scsi_vhci will always return success for LUN reset.
1833 			 * When a request to reset the LUN arrives through
1834 			 * the scsi_reset entry point, an attempt is made
1835 			 * to reset the LUN through all the possible
1836 			 * paths.
1837 */
1838 mutex_enter(&vlun->svl_mutex);
1839 rval = vlun->svl_support_lun_reset;
1840 mutex_exit(&vlun->svl_mutex);
1841 VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
1842 "scsi_vhci_getcap:"
1843 "Getting the Lun reset capability %d", rval));
1844 break;
1845
1846 case SCSI_CAP_SECTOR_SIZE:
1847 mutex_enter(&vlun->svl_mutex);
1848 rval = vlun->svl_sector_size;
1849 mutex_exit(&vlun->svl_mutex);
1850 break;
1851
1852 case SCSI_CAP_CDB_LEN:
1853 rval = VHCI_SCSI_CDB_SIZE;
1854 break;
1855
1856 case SCSI_CAP_DMA_MAX_ARCH:
1857 /*
1858 * For X86 this capability is caught in scsi_ifgetcap().
1859 * XXX Should this be getting the value from the pHCI?
1860 */
1861 rval = 0;
1862 break;
1863
1864 default:
1865 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1866 "!vhci_getcap: unsupported %d", cidx));
1867 rval = UNDEFINED;
1868 break;
1869 }
1870
1871 VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1872 "!get cap: cap=%s, val/tgtonly/doset/rval = "
1873 "0x%x/0x%x/0x%x/%d\n",
1874 cap, val, tgtonly, doset, rval));
1875 }
1876 return (rval);
1877 }
1878
1879
1880 /*
1881 * Function name : vhci_scsi_getcap()
1882 *
1883 */
1884 static int
1885 vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
1886 {
1887 return (vhci_commoncap(ap, cap, 0, whom, 0));
1888 }
1889
1890 static int
1891 vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1892 {
1893 return (vhci_commoncap(ap, cap, value, whom, 1));
1894 }
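/*
 * Illustrative sketch (not part of the driver): target drivers reach
 * vhci_commoncap() through the generic scsi_ifgetcap(9F) and
 * scsi_ifsetcap(9F) interfaces, e.g.:
 *
 *	if (scsi_ifgetcap(&sd->sd_address, "lun-reset", 1) == 1)
 *		(void) scsi_ifsetcap(&sd->sd_address, "lun-reset", 1, 1);
 *
 * The final argument ("whom") arrives here as tgtonly; note that
 * vhci_commoncap() rejects SCSI_CAP_LUN_RESET setcap requests when
 * whom == 0.
 */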
1895
1896 /*
1897 * Function name : vhci_scsi_abort()
1898 */
1899 /* ARGSUSED */
1900 static int
1901 vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1902 {
1903 return (0);
1904 }
1905
1906 /*
1907 * Function name : vhci_scsi_init_pkt
1908 *
1909 * Return Values : pointer to scsi_pkt, or NULL
1910 */
1911 /* ARGSUSED */
1912 static struct scsi_pkt *
1913 vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1914 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1915 int flags, int (*callback)(caddr_t), caddr_t arg)
1916 {
1917 struct scsi_vhci *vhci = ADDR2VHCI(ap);
1918 struct vhci_pkt *vpkt;
1919 int rval;
1920 int newpkt = 0;
1921 struct scsi_pkt *pktp;
1922
1923
1924 if (pkt == NULL) {
1925 if (cmdlen > VHCI_SCSI_CDB_SIZE) {
1926 if ((cmdlen != VHCI_SCSI_OSD_CDB_SIZE) ||
1927 ((flags & VHCI_SCSI_OSD_PKT_FLAGS) !=
1928 VHCI_SCSI_OSD_PKT_FLAGS)) {
1929 VHCI_DEBUG(1, (CE_NOTE, NULL,
1930 "!init pkt: cdb size not supported\n"));
1931 return (NULL);
1932 }
1933 }
1934
1935 pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
1936 ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
1937 arg);
1938
1939 if (pktp == NULL) {
1940 return (NULL);
1941 }
1942
1943 /* Get the vhci's private structure */
1944 vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
1945 ASSERT(vpkt);
1946
1947 /* Save the target driver's packet */
1948 vpkt->vpkt_tgt_pkt = pktp;
1949
1950 /*
1951 * Save pkt_tgt_init_pkt fields if deferred binding
1952 * is needed or for other purposes.
1953 */
1954 vpkt->vpkt_tgt_init_pkt_flags = flags;
1955 vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0;
1956 vpkt->vpkt_state = VHCI_PKT_IDLE;
1957 vpkt->vpkt_tgt_init_cdblen = cmdlen;
1958 vpkt->vpkt_tgt_init_scblen = statuslen;
1959 newpkt = 1;
1960 } else { /* pkt not NULL */
1961 vpkt = pkt->pkt_ha_private;
1962 }
1963
1964 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt "
1965 "vpkt %p flags %x\n", (void *)vpkt, flags));
1966
1967 /* Clear any stale error flags */
1968 if (bp) {
1969 bioerror(bp, 0);
1970 }
1971
1972 vpkt->vpkt_tgt_init_bp = bp;
1973
1974 if (flags & PKT_DMA_PARTIAL) {
1975
1976 /*
1977 * Immediate binding is needed.
1978 		 * The target driver may not set this flag in the next
1979 		 * invocation; vhci has to remember that it was set during
1980 		 * the first invocation of vhci_scsi_init_pkt.
1981 */
1982 vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL;
1983 }
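/*
 * Illustrative sketch (not part of the driver): with PKT_DMA_PARTIAL,
 * a target driver typically walks a large transfer one DMA window at
 * a time by re-invoking scsi_init_pkt(9F) on the same packet:
 *
 *	pkt = scsi_init_pkt(ap, NULL, bp, cmdlen, statuslen, 0,
 *	    PKT_DMA_PARTIAL, SLEEP_FUNC, NULL);
 *	...transport; then, on partial completion...
 *	pkt = scsi_init_pkt(ap, pkt, bp, 0, 0, 0, 0, SLEEP_FUNC, NULL);
 *
 * Later invocations may omit PKT_DMA_PARTIAL, which is why the flag
 * is latched into vpkt_flags as CFLAG_DMA_PARTIAL above.
 */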
1984
1985 if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) {
1986
1987 /*
1988 * Re-initialize some of the target driver packet state
1989 * information.
1990 */
1991 vpkt->vpkt_tgt_pkt->pkt_state = 0;
1992 vpkt->vpkt_tgt_pkt->pkt_statistics = 0;
1993 vpkt->vpkt_tgt_pkt->pkt_reason = 0;
1994
1995 /*
1996 		 * Bind vpkt->vpkt_path for this I/O at init time.
1997 		 * If an I/O error happens later, the target driver will
1998 		 * clear this vpkt->vpkt_path binding before re-initializing
1999 		 * the I/O.
1999 */
2000 VHCI_DEBUG(8, (CE_NOTE, NULL,
2001 "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
2002 (void *)vpkt, newpkt));
2003 if (pkt && vpkt->vpkt_hba_pkt) {
2004 VHCI_DEBUG(4, (CE_NOTE, NULL,
2005 "v_s_i_p calling update_pHCI_pkt resid %ld\n",
2006 pkt->pkt_resid));
2007 vhci_update_pHCI_pkt(vpkt, pkt);
2008 }
2009 if (callback == SLEEP_FUNC) {
2010 rval = vhci_bind_transport(
2011 ap, vpkt, flags, callback);
2012 } else {
2013 rval = vhci_bind_transport(
2014 ap, vpkt, flags, NULL_FUNC);
2015 }
2016 VHCI_DEBUG(8, (CE_NOTE, NULL,
2017 "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
2018 (void *)vpkt, rval));
2019 if (bp) {
2020 if (rval == TRAN_FATAL_ERROR) {
2021 /*
2022 * No paths available. Could not bind
2023 * any pHCI. Setting EFAULT as a way
2024 * to indicate no DMA is mapped.
2025 */
2026 bioerror(bp, EFAULT);
2027 } else {
2028 /*
2029 * Do not indicate any pHCI errors to
2030 * target driver otherwise.
2031 */
2032 bioerror(bp, 0);
2033 }
2034 }
2035 if (rval != TRAN_ACCEPT) {
2036 VHCI_DEBUG(8, (CE_NOTE, NULL,
2037 "vhci_scsi_init_pkt: "
2038 "v_b_t failed 0x%p newpkt %x\n",
2039 (void *)vpkt, newpkt));
2040 if (newpkt) {
2041 scsi_hba_pkt_free(ap,
2042 vpkt->vpkt_tgt_pkt);
2043 }
2044 return (NULL);
2045 }
2046 ASSERT(vpkt->vpkt_hba_pkt != NULL);
2047 ASSERT(vpkt->vpkt_path != NULL);
2048
2049 /* Update the resid for the target driver */
2050 vpkt->vpkt_tgt_pkt->pkt_resid =
2051 vpkt->vpkt_hba_pkt->pkt_resid;
2052 }
2053
2054 return (vpkt->vpkt_tgt_pkt);
2055 }
2056
2057 /*
2058 * Function name : vhci_scsi_destroy_pkt
2059 *
2060 * Return Values : none
2061 */
2062 static void
2063 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2064 {
2065 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2066
2067 VHCI_DEBUG(8, (CE_NOTE, NULL,
2068 "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));
2069
2070 vpkt->vpkt_tgt_init_pkt_flags = 0;
2071 if (vpkt->vpkt_hba_pkt) {
2072 scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2073 vpkt->vpkt_hba_pkt = NULL;
2074 }
2075 if (vpkt->vpkt_path) {
2076 mdi_rele_path(vpkt->vpkt_path);
2077 vpkt->vpkt_path = NULL;
2078 }
2079
2080 ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
2081 scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
2082 }
2083
2084 /*
2085 * Function name : vhci_scsi_dmafree()
2086 *
2087 * Return Values : none
2088 */
2089 /*ARGSUSED*/
2090 static void
2091 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2092 {
2093 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2094
2095 VHCI_DEBUG(6, (CE_NOTE, NULL,
2096 "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));
2097
2098 ASSERT(vpkt != NULL);
2099 if (vpkt->vpkt_hba_pkt) {
2100 scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2101 vpkt->vpkt_hba_pkt = NULL;
2102 }
2103 if (vpkt->vpkt_path) {
2104 mdi_rele_path(vpkt->vpkt_path);
2105 vpkt->vpkt_path = NULL;
2106 }
2107 }
2108
2109 /*
2110 * Function name : vhci_scsi_sync_pkt()
2111 *
2112 * Return Values : none
2113 */
2114 /*ARGSUSED*/
2115 static void
2116 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2117 {
2118 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2119
2120 ASSERT(vpkt != NULL);
2121 if (vpkt->vpkt_hba_pkt) {
2122 scsi_sync_pkt(vpkt->vpkt_hba_pkt);
2123 }
2124 }
2125
2126 /*
2127 * routine for reset notification setup, to register or cancel.
2128 */
2129 static int
2130 vhci_scsi_reset_notify(struct scsi_address *ap, int flag,
2131 void (*callback)(caddr_t), caddr_t arg)
2132 {
2133 struct scsi_vhci *vhci = ADDR2VHCI(ap);
2134 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
2135 &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf));
2136 }
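/*
 * Illustrative sketch (not part of the driver): a target driver
 * registers for, or cancels, reset notification through
 * scsi_reset_notify(9F), which lands in the routine above:
 *
 *	(void) scsi_reset_notify(&sd->sd_address, SCSI_RESET_NOTIFY,
 *	    my_reset_cb, (caddr_t)my_state);
 *	...
 *	(void) scsi_reset_notify(&sd->sd_address, SCSI_RESET_CANCEL,
 *	    my_reset_cb, (caddr_t)my_state);
 *
 * (my_reset_cb and my_state are hypothetical names.) The registered
 * callbacks are fired from vhci_scsi_reset_target() via
 * scsi_hba_reset_notify_callback() after a successful reset.
 */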
2137
2138 static int
2139 vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
2140 char *name, int len, int bus_addr)
2141 {
2142 dev_info_t *cdip;
2143 char *guid;
2144 scsi_vhci_lun_t *vlun;
2145
2146 ASSERT(sd != NULL);
2147 ASSERT(name != NULL);
2148
2149 *name = 0;
2150 cdip = sd->sd_dev;
2151
2152 ASSERT(cdip != NULL);
2153
2154 if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS)
2155 return (1);
2156
2157 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
2158 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS)
2159 return (1);
2160
2161 /*
2162 * Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>".
2163 * <guid> bus_addr argument == 0
2164 * <bus_addr> bus_addr argument != 0
2165  * Since the <guid> is already part of the unit-address, we just
2166  * report the failover module name in <bus_addr> to keep output shorter.
2167 */
2168 vlun = ADDR2VLUN(&sd->sd_address);
2169 if (bus_addr == 0) {
2170 /* report the guid: */
2171 (void) snprintf(name, len, "g%s", guid);
2172 } else if (vlun && vlun->svl_fops_name) {
2173 /* report the name of the failover module */
2174 (void) snprintf(name, len, "%s", vlun->svl_fops_name);
2175 }
2176
2177 ddi_prop_free(guid);
2178 return (1);
2179 }
2180
2181 static int
2182 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
2183 {
2184 return (vhci_scsi_get_name_bus_addr(sd, name, len, 1));
2185 }
2186
2187 static int
2188 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len)
2189 {
2190 return (vhci_scsi_get_name_bus_addr(sd, name, len, 0));
2191 }
2192
2193 /*
2194 * Return a pointer to the guid part of the devnm.
2195 * devnm format is "nodename@busaddr", busaddr format is "gGUID".
2196 */
2197 static char *
2198 vhci_devnm_to_guid(char *devnm)
2199 {
2200 char *cp = devnm;
2201
2202 if (devnm == NULL)
2203 return (NULL);
2204
2205 while (*cp != '\0' && *cp != '@')
2206 cp++;
2207 if (*cp == '@' && *(cp + 1) == 'g')
2208 return (cp + 2);
2209 return (NULL);
2210 }
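/*
 * Illustrative example (hypothetical GUID): given the devnm
 * "ssd@g600a0b800011feaa", vhci_devnm_to_guid() returns a pointer to
 * "600a0b800011feaa", the text following the "@g" separator. A devnm
 * without a "g"-prefixed busaddr yields NULL.
 */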
2211
2212 static int
2213 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags,
2214 int (*func)(caddr_t))
2215 {
2216 struct scsi_vhci *vhci = ADDR2VHCI(ap);
2217 dev_info_t *cdip = ADDR2DIP(ap);
2218 mdi_pathinfo_t *pip = NULL;
2219 mdi_pathinfo_t *npip = NULL;
2220 scsi_vhci_priv_t *svp = NULL;
2221 struct scsi_device *psd = NULL;
2222 struct scsi_address *address = NULL;
2223 struct scsi_pkt *pkt = NULL;
2224 int rval = -1;
2225 int pgr_sema_held = 0;
2226 int held;
2227 int mps_flag = MDI_SELECT_ONLINE_PATH;
2228 struct scsi_vhci_lun *vlun;
2229 time_t tnow;
2230 int path_instance = 0;
2231
2232 vlun = ADDR2VLUN(ap);
2233 ASSERT(vlun != 0);
2234
2235 if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) &&
2236 (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2237 VHCI_PROUT_REGISTER) ||
2238 ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2239 VHCI_PROUT_R_AND_IGNORE))) {
2240 if (!sema_tryp(&vlun->svl_pgr_sema))
2241 return (TRAN_BUSY);
2242 pgr_sema_held = 1;
2243 if (vlun->svl_first_path != NULL) {
2244 rval = mdi_select_path(cdip, NULL,
2245 MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2246 NULL, &pip);
2247 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2248 VHCI_DEBUG(4, (CE_NOTE, NULL,
2249 "vhci_bind_transport: path select fail\n"));
2250 } else {
2251 npip = pip;
2252 do {
2253 if (npip == vlun->svl_first_path) {
2254 VHCI_DEBUG(4, (CE_NOTE, NULL,
2255 "vhci_bind_transport: "
2256 "valid first path 0x%p\n",
2257 (void *)
2258 vlun->svl_first_path));
2259 pip = vlun->svl_first_path;
2260 goto bind_path;
2261 }
2262 pip = npip;
2263 rval = mdi_select_path(cdip, NULL,
2264 MDI_SELECT_ONLINE_PATH |
2265 MDI_SELECT_STANDBY_PATH,
2266 pip, &npip);
2267 mdi_rele_path(pip);
2268 } while ((rval == MDI_SUCCESS) &&
2269 (npip != NULL));
2270 }
2271 }
2272
2273 if (vlun->svl_first_path) {
2274 VHCI_DEBUG(4, (CE_NOTE, NULL,
2275 "vhci_bind_transport: invalid first path 0x%p\n",
2276 (void *)vlun->svl_first_path));
2277 vlun->svl_first_path = NULL;
2278 }
2279 } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
2280 if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
2281 if (!sema_tryp(&vlun->svl_pgr_sema))
2282 return (TRAN_BUSY);
2283 }
2284 pgr_sema_held = 1;
2285 }
2286
2287 /*
2288 * If the path is already bound for PKT_PARTIAL_DMA case,
2289 * try to use the same path.
2290 */
2291 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2292 VHCI_DEBUG(4, (CE_NOTE, NULL,
2293 "vhci_bind_transport: PKT_PARTIAL_DMA "
2294 "vpkt 0x%p, path 0x%p\n",
2295 (void *)vpkt, (void *)vpkt->vpkt_path));
2296 pip = vpkt->vpkt_path;
2297 goto bind_path;
2298 }
2299
2300 /*
2301 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set
2302 * indicates that mdi_select_path should be called to select a
2303 * specific instance.
2304 *
2305 * NB: Condition pkt_path_instance reference on proper allocation.
2306 */
2307 if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) &&
2308 scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) {
2309 path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
2310 }
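/*
 * Illustrative sketch (not part of the driver): a target driver that
 * wants a command issued down a specific path (e.g. one previously
 * reported back in pkt_path_instance by vhci_intr()) would set
 *
 *	pkt->pkt_flags |= FLAG_PKT_PATH_INSTANCE;
 *	pkt->pkt_path_instance = instance;
 *
 * which makes the mdi_select_path() call below use
 * MDI_SELECT_PATH_INSTANCE instead of normal load balancing.
 */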
2311
2312 /*
2313 * If reservation is active bind the transport directly to the pip
2314 * with the reservation.
2315 */
2316 if (vpkt->vpkt_hba_pkt == NULL) {
2317 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2318 if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2319 pip = vlun->svl_resrv_pip;
2320 mdi_hold_path(pip);
2321 vlun->svl_waiting_for_activepath = 0;
2322 rval = MDI_SUCCESS;
2323 goto bind_path;
2324 } else {
2325 if (pgr_sema_held) {
2326 sema_v(&vlun->svl_pgr_sema);
2327 }
2328 return (TRAN_BUSY);
2329 }
2330 }
2331 try_again:
2332 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2333 path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
2334 (void *)(intptr_t)path_instance, &pip);
2335 if (rval == MDI_BUSY) {
2336 if (pgr_sema_held) {
2337 sema_v(&vlun->svl_pgr_sema);
2338 }
2339 return (TRAN_BUSY);
2340 } else if (rval == MDI_DEVI_ONLINING) {
2341 /*
2342 * if we are here then we are in the midst of
2343 * an attach/probe of the client device.
2344 		 * We attempt to bind to an ONLINE path if available;
2345 		 * else it is OK to bind to a STANDBY path (instead
2346 		 * of triggering a failover) because I/O associated
2347 		 * with attach/probe (e.g. INQUIRY, block 0 read)
2348 		 * is completed by targets even on passive paths.
2349 		 * If no ONLINE paths are available, it is important
2350 * to set svl_waiting_for_activepath for two
2351 * reasons: (1) avoid sense analysis in the
2352 * "external failure detection" codepath in
2353 * vhci_intr(). Failure to do so will result in
2354 * infinite loop (unless an ONLINE path becomes
2355 * available at some point) (2) avoid
2356 * unnecessary failover (see "---Waiting For Active
2357 * Path---" comment below).
2358 */
2359 VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2360 "state\n", (void *)cdip));
2361 pip = NULL;
2362 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2363 mps_flag, NULL, &pip);
2364 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2365 if (vlun->svl_waiting_for_activepath == 0) {
2366 vlun->svl_waiting_for_activepath = 1;
2367 vlun->svl_wfa_time = ddi_get_time();
2368 }
2369 mps_flag |= MDI_SELECT_STANDBY_PATH;
2370 rval = mdi_select_path(cdip,
2371 vpkt->vpkt_tgt_init_bp,
2372 mps_flag, NULL, &pip);
2373 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2374 if (pgr_sema_held) {
2375 sema_v(&vlun->svl_pgr_sema);
2376 }
2377 return (TRAN_FATAL_ERROR);
2378 }
2379 goto bind_path;
2380 }
2381 } else if ((rval == MDI_FAILURE) ||
2382 ((rval == MDI_NOPATH) && (path_instance))) {
2383 if (pgr_sema_held) {
2384 sema_v(&vlun->svl_pgr_sema);
2385 }
2386 return (TRAN_FATAL_ERROR);
2387 }
2388
2389 if ((pip == NULL) || (rval == MDI_NOPATH)) {
2390 while (vlun->svl_waiting_for_activepath) {
2391 /*
2392 * ---Waiting For Active Path---
2393 * This device was discovered across a
2394 				 * passive path; let's wait a little
2395 				 * bit, hopefully an active path will
2396 				 * show up, obviating the need for a
2397 				 * failover.
2398 */
2399 tnow = ddi_get_time();
2400 if (tnow - vlun->svl_wfa_time >= 60) {
2401 vlun->svl_waiting_for_activepath = 0;
2402 } else {
2403 drv_usecwait(1000);
2404 if (vlun->svl_waiting_for_activepath
2405 == 0) {
2406 /*
2407 * an active path has come
2408 * online!
2409 */
2410 goto try_again;
2411 }
2412 }
2413 }
2414 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2415 if (!held) {
2416 VHCI_DEBUG(4, (CE_NOTE, NULL,
2417 "!Lun not held\n"));
2418 if (pgr_sema_held) {
2419 sema_v(&vlun->svl_pgr_sema);
2420 }
2421 return (TRAN_BUSY);
2422 }
2423 /*
2424 * now that the LUN is stable, one last check
2425 * to make sure no other changes sneaked in
2426 * (like a path coming online or a
2427 * failover initiated by another thread)
2428 */
2429 pip = NULL;
2430 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2431 0, NULL, &pip);
2432 if (pip != NULL) {
2433 VHCI_RELEASE_LUN(vlun);
2434 vlun->svl_waiting_for_activepath = 0;
2435 goto bind_path;
2436 }
2437
2438 /*
2439 * Check if there is an ONLINE path OR a STANDBY path
2440 * available. If none is available, do not attempt
2441 * to do a failover, just return a fatal error at this
2442 * point.
2443 */
2444 npip = NULL;
2445 rval = mdi_select_path(cdip, NULL,
2446 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2447 NULL, &npip);
2448 if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2449 /*
2450 				 * No paths available, just return FATAL error.
2451 */
2452 VHCI_RELEASE_LUN(vlun);
2453 if (pgr_sema_held) {
2454 sema_v(&vlun->svl_pgr_sema);
2455 }
2456 return (TRAN_FATAL_ERROR);
2457 }
2458 mdi_rele_path(npip);
2459 if (!(vpkt->vpkt_state & VHCI_PKT_IN_FAILOVER)) {
2460 VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
2461 "mdi_failover\n"));
2462 rval = mdi_failover(vhci->vhci_dip, cdip,
2463 MDI_FAILOVER_ASYNC);
2464 } else {
2465 rval = vlun->svl_failover_status;
2466 }
2467 if (rval == MDI_FAILURE) {
2468 VHCI_RELEASE_LUN(vlun);
2469 if (pgr_sema_held) {
2470 sema_v(&vlun->svl_pgr_sema);
2471 }
2472 return (TRAN_FATAL_ERROR);
2473 } else if (rval == MDI_BUSY) {
2474 VHCI_RELEASE_LUN(vlun);
2475 if (pgr_sema_held) {
2476 sema_v(&vlun->svl_pgr_sema);
2477 }
2478 return (TRAN_BUSY);
2479 } else {
2480 if (pgr_sema_held) {
2481 sema_v(&vlun->svl_pgr_sema);
2482 }
2483 vpkt->vpkt_state |= VHCI_PKT_IN_FAILOVER;
2484 return (TRAN_BUSY);
2485 }
2486 }
2487 vlun->svl_waiting_for_activepath = 0;
2488 bind_path:
2489 vpkt->vpkt_path = pip;
2490 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2491 ASSERT(svp != NULL);
2492
2493 psd = svp->svp_psd;
2494 ASSERT(psd != NULL);
2495 address = &psd->sd_address;
2496 } else {
2497 pkt = vpkt->vpkt_hba_pkt;
2498 address = &pkt->pkt_address;
2499 }
2500
2501 /* Verify match of specified path_instance and selected path_instance */
2502 ASSERT((path_instance == 0) ||
2503 (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));
2504
2505 /*
2506 * For PKT_PARTIAL_DMA case, call pHCI's scsi_init_pkt whenever
2507 * target driver calls vhci_scsi_init_pkt.
2508 */
2509 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
2510 vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
2511 VHCI_DEBUG(4, (CE_NOTE, NULL,
2512 "vhci_bind_transport: PKT_PARTIAL_DMA "
2513 "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
2514 (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
2515 pkt = vpkt->vpkt_hba_pkt;
2516 address = &pkt->pkt_address;
2517 }
2518
2519 if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
2520 pkt = scsi_init_pkt(address, pkt,
2521 vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
2522 vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL);
2523
2524 if (pkt == NULL) {
2525 VHCI_DEBUG(4, (CE_NOTE, NULL,
2526 "!bind transport: 0x%p 0x%p 0x%p\n",
2527 (void *)vhci, (void *)psd, (void *)vpkt));
2528 if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
2529 MDI_PI_ERRSTAT(vpkt->vpkt_path,
2530 MDI_PI_TRANSERR);
2531 mdi_rele_path(vpkt->vpkt_path);
2532 vpkt->vpkt_path = NULL;
2533 }
2534 if (pgr_sema_held) {
2535 sema_v(&vlun->svl_pgr_sema);
2536 }
2537 /*
2538 * Consider it a fatal error if b_error is
2539 * set as a result of DMA binding failure
2540 * vs. a condition of being temporarily out of
2541 * some resource
2542 */
2543 if (vpkt->vpkt_tgt_init_bp == NULL ||
2544 geterror(vpkt->vpkt_tgt_init_bp))
2545 return (TRAN_FATAL_ERROR);
2546 else
2547 return (TRAN_BUSY);
2548 }
2549 }
2550
2551 pkt->pkt_private = vpkt;
2552 vpkt->vpkt_hba_pkt = pkt;
2553 return (TRAN_ACCEPT);
2554 }
2555
2556
2557 /*PRINTFLIKE3*/
2558 void
2559 vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
2560 {
2561 char buf[256];
2562 va_list ap;
2563
2564 va_start(ap, fmt);
2565 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
2566 va_end(ap);
2567
2568 scsi_log(dip, "scsi_vhci", level, buf);
2569 }
2570
2571 /* do a PGR out with the information we've saved away */
2572 static int
2573 vhci_do_prout(scsi_vhci_priv_t *svp)
2574 {
2575
2576 struct scsi_pkt *new_pkt;
2577 struct buf *bp;
2578 scsi_vhci_lun_t *vlun = svp->svp_svl;
2579 int rval, retry, nr_retry, ua_retry;
2580 uint8_t *sns, skey;
2581
2582 bp = getrbuf(KM_SLEEP);
2583 bp->b_flags = B_WRITE;
2584 bp->b_resid = 0;
2585 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2586 bp->b_bcount = vlun->svl_bcount;
2587
2588 VHCI_INCR_PATH_CMDCOUNT(svp);
2589
2590 new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
2591 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
2592 SLEEP_FUNC, NULL);
2593 if (new_pkt == NULL) {
2594 VHCI_DECR_PATH_CMDCOUNT(svp);
2595 freerbuf(bp);
2596 cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
2597 return (0);
2598 }
2599 mutex_enter(&vlun->svl_mutex);
2600 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2601 bp->b_bcount = vlun->svl_bcount;
2602 bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp,
2603 sizeof (vlun->svl_cdb));
2604 new_pkt->pkt_time = vlun->svl_time;
2605 mutex_exit(&vlun->svl_mutex);
2606 new_pkt->pkt_flags = FLAG_NOINTR;
2607
2608 ua_retry = nr_retry = retry = 0;
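/*
 * Retry policy of the loop below: NOT READY sense is retried up to
 * vhci_prout_not_ready_retry times (180 by default) with a one-second
 * delay between attempts, while other retryable senses (e.g. UNIT
 * ATTENTION) are bounded by VHCI_MAX_PGR_RETRIES.
 */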
2609 again:
2610 rval = vhci_do_scsi_cmd(new_pkt);
2611 if (rval != 1) {
2612 if ((new_pkt->pkt_reason == CMD_CMPLT) &&
2613 (SCBP_C(new_pkt) == STATUS_CHECK) &&
2614 (new_pkt->pkt_state & STATE_ARQ_DONE)) {
2615 sns = (uint8_t *)
2616 &(((struct scsi_arq_status *)(uintptr_t)
2617 (new_pkt->pkt_scbp))->sts_sensedata);
2618 skey = scsi_sense_key(sns);
2619 if ((skey == KEY_UNIT_ATTENTION) ||
2620 (skey == KEY_NOT_READY)) {
2621 int max_retry;
2622 struct scsi_failover_ops *fops;
2623 fops = vlun->svl_fops;
2624 rval = fops->sfo_analyze_sense(svp->svp_psd,
2625 sns, vlun->svl_fops_ctpriv);
2626 if (rval == SCSI_SENSE_NOT_READY) {
2627 max_retry = vhci_prout_not_ready_retry;
2628 retry = nr_retry++;
2629 delay(1*drv_usectohz(1000000));
2630 } else {
2631 /* chk for state change and update */
2632 if (rval == SCSI_SENSE_STATE_CHANGED) {
2633 int held;
2634 VHCI_HOLD_LUN(vlun,
2635 VH_NOSLEEP, held);
2636 if (!held) {
2637 rval = TRAN_BUSY;
2638 } else {
2639 /* chk for alua first */
2640 vhci_update_pathstates(
2641 (void *)vlun);
2642 }
2643 }
2644 retry = ua_retry++;
2645 max_retry = VHCI_MAX_PGR_RETRIES;
2646 }
2647 if (retry < max_retry) {
2648 VHCI_DEBUG(4, (CE_WARN, NULL,
2649 "!vhci_do_prout retry 0x%x "
2650 "(0x%x 0x%x 0x%x)",
2651 SCBP_C(new_pkt),
2652 new_pkt->pkt_cdbp[0],
2653 new_pkt->pkt_cdbp[1],
2654 new_pkt->pkt_cdbp[2]));
2655 goto again;
2656 }
2657 rval = 0;
2658 VHCI_DEBUG(4, (CE_WARN, NULL,
2659 "!vhci_do_prout 0x%x "
2660 "(0x%x 0x%x 0x%x)",
2661 SCBP_C(new_pkt),
2662 new_pkt->pkt_cdbp[0],
2663 new_pkt->pkt_cdbp[1],
2664 new_pkt->pkt_cdbp[2]));
2665 } else if (skey == KEY_ILLEGAL_REQUEST)
2666 rval = VHCI_PGR_ILLEGALOP;
2667 }
2668 } else {
2669 rval = 1;
2670 }
2671 scsi_destroy_pkt(new_pkt);
2672 VHCI_DECR_PATH_CMDCOUNT(svp);
2673 freerbuf(bp);
2674 return (rval);
2675 }
2676
2677 static void
2678 vhci_run_cmd(void *arg)
2679 {
2680 struct scsi_pkt *pkt = (struct scsi_pkt *)arg;
2681 struct scsi_pkt *tpkt;
2682 scsi_vhci_priv_t *svp;
2683 mdi_pathinfo_t *pip, *npip;
2684 scsi_vhci_lun_t *vlun;
2685 dev_info_t *cdip;
2686 scsi_vhci_priv_t *nsvp;
2687 int fail = 0;
2688 int rval;
2689 struct vhci_pkt *vpkt;
2690 uchar_t cdb_1;
2691 vhci_prout_t *prout;
2692
2693 vpkt = (struct vhci_pkt *)pkt->pkt_private;
2694 tpkt = vpkt->vpkt_tgt_pkt;
2695 pip = vpkt->vpkt_path;
2696 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2697 if (svp == NULL) {
2698 tpkt->pkt_reason = CMD_TRAN_ERR;
2699 tpkt->pkt_statistics = STAT_ABORTED;
2700 goto done;
2701 }
2702 vlun = svp->svp_svl;
2703 prout = &vlun->svl_prout;
2704 if (SCBP_C(pkt) != STATUS_GOOD)
2705 fail++;
2706 cdip = vlun->svl_dip;
2707 pip = npip = NULL;
2708 rval = mdi_select_path(cdip, NULL,
2709 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip);
2710 if ((rval != MDI_SUCCESS) || (npip == NULL)) {
2711 VHCI_DEBUG(4, (CE_NOTE, NULL,
2712 "vhci_run_cmd: no path! 0x%p\n", (void *)svp));
2713 tpkt->pkt_reason = CMD_TRAN_ERR;
2714 tpkt->pkt_statistics = STAT_ABORTED;
2715 goto done;
2716 }
2717
2718 cdb_1 = vlun->svl_cdb[1];
2719 vlun->svl_cdb[1] &= 0xe0;
2720 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
2721
2722 do {
2723 nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
2724 if (nsvp == NULL) {
2725 VHCI_DEBUG(4, (CE_NOTE, NULL,
2726 "vhci_run_cmd: no "
2727 "client priv! 0x%p offlined?\n",
2728 (void *)npip));
2729 goto next_path;
2730 }
2731 if (vlun->svl_first_path == npip) {
2732 goto next_path;
2733 } else {
2734 if (vhci_do_prout(nsvp) != 1)
2735 fail++;
2736 }
2737 next_path:
2738 pip = npip;
2739 rval = mdi_select_path(cdip, NULL,
2740 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
2741 pip, &npip);
2742 mdi_rele_path(pip);
2743 } while ((rval == MDI_SUCCESS) && (npip != NULL));
2744
2745 vlun->svl_cdb[1] = cdb_1;
2746
2747 if (fail) {
2748 VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, "
2749 "couldn't be replicated on all paths",
2750 ddi_driver_name(cdip), ddi_get_instance(cdip)));
2751 vhci_print_prout_keys(vlun, "vhci_run_cmd: ");
2752
2753 if (SCBP_C(pkt) != STATUS_GOOD) {
2754 tpkt->pkt_reason = CMD_TRAN_ERR;
2755 tpkt->pkt_statistics = STAT_ABORTED;
2756 }
2757 } else {
2758 vlun->svl_pgr_active = 1;
2759 vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:");
2760
2761 bcopy((const void *)prout->service_key,
2762 (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE);
2763 bcopy((const void *)prout->res_key,
2764 (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE);
2765
2766 vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:");
2767 }
2768 done:
2769 if (SCBP_C(pkt) == STATUS_GOOD)
2770 vlun->svl_first_path = NULL;
2771
2772 if (svp)
2773 VHCI_DECR_PATH_CMDCOUNT(svp);
2774
2775 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
2776 scsi_destroy_pkt(pkt);
2777 vpkt->vpkt_hba_pkt = NULL;
2778 if (vpkt->vpkt_path) {
2779 mdi_rele_path(vpkt->vpkt_path);
2780 vpkt->vpkt_path = NULL;
2781 }
2782 }
2783
2784 sema_v(&vlun->svl_pgr_sema);
2785 /*
2786 	 * The PROUT commands are not included in the automatic retry
2787 	 * mechanism; therefore, vpkt_org_vpkt should never be set here.
2788 */
2789 ASSERT(vpkt->vpkt_org_vpkt == NULL);
2790 scsi_hba_pkt_comp(tpkt);
2791 }
2792
2793 /*
2794 * Get the keys registered with this target. Since we will have
2795 * registered the same key with multiple initiators, strip out
2796 * any duplicate keys.
2797 *
2798 * The pointers which will be used to filter the registered keys from
2799 * the device will be stored in filter_prin and filter_pkt. If the
2800 * allocation length of the buffer was sufficient for the number of
2801 * parameter data bytes available to be returned by the device then the
2802 * key filtering will use the keylist returned from the original
2803 * request. If the allocation length of the buffer was not sufficient,
2804 * then the filtering will use the keylist returned from the request
2805 * that is resent below.
2806 *
2807 * If the device returns an additional length field that is greater than
2808 * the allocation length of the buffer, then allocate a new buffer which
2809 * can accommodate the number of parameter data bytes available to be
2810 * returned. Resend the scsi PRIN command, filter out the duplicate
2811 * keys and return as many of the unique keys found that was originally
2812 * requested and set the additional length field equal to the data bytes
2813 * of unique reservation keys available to be returned.
2814 *
2815 * If the device returns an additional length field that is less than or
2816 * equal to the allocation length of the buffer, then all the available
2817 * keys registered were returned by the device. Filter out the
2818 * duplicate keys and return all of the unique keys found and set the
2819 * additional length field equal to the data bytes of the reservation
2820 * keys to be returned.
2821 */
2822
2823 #define VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation))
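/*
 * PRIN READ KEYS parameter data layout (per SPC-3) assumed by the
 * filtering code below:
 *
 *	bytes 0-3	PRGENERATION (big-endian)
 *	bytes 4-7	ADDITIONAL LENGTH (big-endian, key bytes only)
 *	bytes 8-	reservation keys, MHIOC_RESV_KEY_SIZE (8) bytes each
 *
 * VHCI_PRIN_HEADER_SZ is thus the 8-byte generation-plus-length header.
 */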
2824
2825 static int
2826 vhci_do_prin(struct vhci_pkt **intr_vpkt)
2827 {
2828 scsi_vhci_priv_t *svp;
2829 struct vhci_pkt *vpkt = *intr_vpkt;
2830 vhci_prin_readkeys_t *prin;
2831 scsi_vhci_lun_t *vlun;
2832 struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address);
2833
2834 struct buf *new_bp = NULL;
2835 struct scsi_pkt *new_pkt = NULL;
2836 struct vhci_pkt *new_vpkt = NULL;
2837 uint32_t needed_length;
2838 int rval = VHCI_CMD_CMPLT;
2839 uint32_t prin_length = 0;
2840 uint32_t svl_prin_length = 0;
2841
2842 ASSERT(vpkt->vpkt_path);
2843 svp = mdi_pi_get_vhci_private(vpkt->vpkt_path);
2844 ASSERT(svp);
2845 vlun = svp->svp_svl;
2846 ASSERT(vlun);
2847
2848 /*
2849 * If the caller only asked for an amount of data that would not
2850  * be enough to include any key data, it is likely that they will
2851 * send the next command with a buffer size based on the information
2852 * from this header. Doing recovery on this would be a duplication
2853 * of efforts.
2854 */
2855 if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) {
2856 rval = VHCI_CMD_CMPLT;
2857 goto exit;
2858 }
2859
2860 if (vpkt->vpkt_org_vpkt == NULL) {
2861 /*
2862 * Can fail as sleep is not allowed.
2863 */
2864 prin = (vhci_prin_readkeys_t *)
2865 bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
2866 } else {
2867 /*
2868 * The retry buf doesn't need to be mapped in.
2869 */
2870 prin = (vhci_prin_readkeys_t *)
2871 vpkt->vpkt_tgt_init_bp->b_un.b_daddr;
2872 }
2873
2874 if (prin == NULL) {
2875 VHCI_DEBUG(5, (CE_WARN, NULL,
2876 "vhci_do_prin: bp_mapin_common failed."));
2877 rval = VHCI_CMD_ERROR;
2878 goto fail;
2879 }
2880
2881 prin_length = BE_32(prin->length);
2882
2883 /*
2884 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
2885 * information to be transferred exceeds the maximum value
2886 * that the ALLOCATION LENGTH field is capable of specifying,
2887 * the device server shall...terminate the command with CHECK
2888 * CONDITION status". The ALLOCATION LENGTH field of the
2889 * PERSISTENT RESERVE IN command is 2 bytes. We should never
2890 * get here with an ADDITIONAL LENGTH greater than 0xFFFF
2891 * so if we do, then it is an error!
2892 */
2893
2894
2895 if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) {
2896 VHCI_DEBUG(5, (CE_NOTE, NULL,
2897 "vhci_do_prin: Device returned invalid "
2898 "length 0x%x\n", prin_length));
2899 rval = VHCI_CMD_ERROR;
2900 goto fail;
2901 }
2902 needed_length = prin_length + VHCI_PRIN_HEADER_SZ;
2903
2904 /*
2905 * If prin->length is greater than the byte count allocated in the
2906 * original buffer, then resend the request with enough buffer
2907 * allocated to get all of the available registered keys.
2908 */
2909 if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) &&
2910 (vpkt->vpkt_org_vpkt == NULL)) {
2911
2912 new_pkt = vhci_create_retry_pkt(vpkt);
2913 if (new_pkt == NULL) {
2914 rval = VHCI_CMD_ERROR;
2915 goto fail;
2916 }
2917 new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2918
2919 /*
2920 		 * This is the buf whose data buffer will
2921 		 * receive the prin readkeys returned
2922 		 * from the device
2923 */
2924 new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
2925 NULL, needed_length, B_READ, NULL_FUNC, NULL);
2926 if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) {
2927 if (new_bp) {
2928 scsi_free_consistent_buf(new_bp);
2929 }
2930 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2931 rval = VHCI_CMD_ERROR;
2932 goto fail;
2933 }
2934 new_bp->b_bcount = needed_length;
2935 new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8);
2936 new_pkt->pkt_cdbp[8] = (uchar_t)needed_length;
2937
2938 rval = VHCI_CMD_RETRY;
2939
2940 new_vpkt->vpkt_tgt_init_bp = new_bp;
2941 }
2942
2943 if (rval == VHCI_CMD_RETRY) {
2944
2945 /*
2946 		 * There were more keys than the original request asked for.
2947 */
2948 mdi_pathinfo_t *path_holder = vpkt->vpkt_path;
2949
2950 /*
2951 * Release the old path because it does not matter which path
2952 * this command is sent down. This allows the normal bind
2953 * transport mechanism to be used.
2954 */
2955 if (vpkt->vpkt_path != NULL) {
2956 mdi_rele_path(vpkt->vpkt_path);
2957 vpkt->vpkt_path = NULL;
2958 }
2959
2960 /*
2961 * Dispatch the retry command
2962 */
2963 if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2964 (void *) new_vpkt, KM_NOSLEEP) == NULL) {
2965 if (path_holder) {
2966 vpkt->vpkt_path = path_holder;
2967 mdi_hold_path(path_holder);
2968 }
2969 scsi_free_consistent_buf(new_bp);
2970 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2971 rval = VHCI_CMD_ERROR;
2972 goto fail;
2973 }
2974
2975 /*
2976 * If we return VHCI_CMD_RETRY, that means the caller
2977 * is going to bail and wait for the reissued command
2978 * to complete. In that case, we need to decrement
2979 * the path command count right now. In any other
2980 * case, it'll be decremented by the caller.
2981 */
2982 VHCI_DECR_PATH_CMDCOUNT(svp);
2983 goto exit;
2984
2985 }
2986
2987 if (rval == VHCI_CMD_CMPLT) {
2988 /*
2989 		 * The original request got all of the keys, or the recovery
2990 		 * packet has returned.
2991 */
2992 int new;
2993 int old;
2994 int num_keys = prin_length / MHIOC_RESV_KEY_SIZE;
2995
2996 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
2997 num_keys));
2998
2999 #ifdef DEBUG
3000 VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
3001 if (vhci_debug == 5)
3002 vhci_print_prin_keys(prin, num_keys);
3003 VHCI_DEBUG(5, (CE_NOTE, NULL,
3004 "vhci_do_prin: MPxIO old keys:\n"));
3005 if (vhci_debug == 5)
3006 vhci_print_prin_keys(&vlun->svl_prin, num_keys);
3007 #endif
3008
3009 /*
3010 * Filter out all duplicate keys returned from the device
3011 * We know that we use a different key for every host, so we
3012 * can simply strip out duplicates. Otherwise we would need to
3013 * do more bookkeeping to figure out which keys to strip out.
3014 */
3015
3016 new = 0;
3017
3018 /*
3019 * If we got at least 1 key copy it.
3020 */
3021 if (num_keys > 0) {
3022 vlun->svl_prin.keylist[0] = prin->keylist[0];
3023 new++;
3024 }
3025
3026 /*
3027 * find next unique key.
3028 */
3029 for (old = 1; old < num_keys; old++) {
3030 int j;
3031 int match = 0;
3032
3033 if (new >= VHCI_NUM_RESV_KEYS)
3034 break;
3035 for (j = 0; j < new; j++) {
3036 if (bcmp(&prin->keylist[old],
3037 &vlun->svl_prin.keylist[j],
3038 sizeof (mhioc_resv_key_t)) == 0) {
3039 match = 1;
3040 break;
3041 }
3042 }
3043 if (!match) {
3044 vlun->svl_prin.keylist[new] =
3045 prin->keylist[old];
3046 new++;
3047 }
3048 }
3049
3050 /* Stored Big Endian */
3051 vlun->svl_prin.generation = prin->generation;
3052 svl_prin_length = new * sizeof (mhioc_resv_key_t);
3053 /* Stored Big Endian */
3054 vlun->svl_prin.length = BE_32(svl_prin_length);
3055 svl_prin_length += VHCI_PRIN_HEADER_SZ;
3056
3057 /*
3058 * If we arrived at this point after issuing a retry, make sure
3059 * that we put everything back the way it originally was so
3060 * that the target driver can complete the command correctly.
3061 */
3062 if (vpkt->vpkt_org_vpkt != NULL) {
3063 new_bp = vpkt->vpkt_tgt_init_bp;
3064
3065 scsi_free_consistent_buf(new_bp);
3066
3067 vpkt = vhci_sync_retry_pkt(vpkt);
3068 *intr_vpkt = vpkt;
3069
3070 /*
3071 * Make sure the original buffer is mapped into kernel
3072 * space before we try to copy the filtered keys into
3073 * it.
3074 */
3075 prin = (vhci_prin_readkeys_t *)bp_mapin_common(
3076 vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
3077 }
3078
3079 /*
3080 * Now copy the desired number of prin keys into the original
3081 * target buffer.
3082 */
3083 if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) {
3084 /*
3085 * It is safe to return all of the available unique
3086 * keys
3087 */
3088 bcopy(&vlun->svl_prin, prin, svl_prin_length);
3089 } else {
3090 /*
3091 * Not all of the available keys were requested by the
3092 * original command.
3093 */
3094 bcopy(&vlun->svl_prin, prin,
3095 vpkt->vpkt_tgt_init_bp->b_bcount);
3096 }
3097 #ifdef DEBUG
3098 VHCI_DEBUG(5, (CE_NOTE, NULL,
3099 "vhci_do_prin: To Application:\n"));
3100 if (vhci_debug == 5)
3101 vhci_print_prin_keys(prin, new);
3102 VHCI_DEBUG(5, (CE_NOTE, NULL,
3103 "vhci_do_prin: MPxIO new keys:\n"));
3104 if (vhci_debug == 5)
3105 vhci_print_prin_keys(&vlun->svl_prin, new);
3106 #endif
3107 }
3108 fail:
3109 if (rval == VHCI_CMD_ERROR) {
3110 /*
3111 * If we arrived at this point after issuing a
3112 * retry, make sure that we put everything back
3113 * the way it originally was so that ssd can
3114 * complete the command correctly.
3115 */
3116
3117 if (vpkt->vpkt_org_vpkt != NULL) {
3118 new_bp = vpkt->vpkt_tgt_init_bp;
3119 if (new_bp != NULL) {
3120 scsi_free_consistent_buf(new_bp);
3121 }
3122
3123 new_vpkt = vpkt;
3124 vpkt = vpkt->vpkt_org_vpkt;
3125
3126 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3127 new_vpkt->vpkt_tgt_pkt);
3128 }
3129
3130 /*
3131 * Mark this command completion as having an error so that
3132 * ssd will retry the command.
3133 */
3134
3135 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3136 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3137
3138 rval = VHCI_CMD_CMPLT;
3139 }
3140 exit:
3141 /*
3142 * Make sure that the semaphore is only released once.
3143 */
3144 if (rval == VHCI_CMD_CMPLT) {
3145 sema_v(&vlun->svl_pgr_sema);
3146 }
3147
3148 return (rval);
3149 }
3150
3151 static void
3152 vhci_intr(struct scsi_pkt *pkt)
3153 {
3154 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private;
3155 struct scsi_pkt *tpkt;
3156 scsi_vhci_priv_t *svp;
3157 scsi_vhci_lun_t *vlun;
3158 int rval, held;
3159 struct scsi_failover_ops *fops;
3160 uint8_t *sns, skey, asc, ascq;
3161 mdi_pathinfo_t *lpath;
3162 static char *timeout_err = "Command Timeout";
3163 static char *parity_err = "Parity Error";
3164 char *err_str = NULL;
3165 dev_info_t *vdip, *cdip;
3166 char *cpath;
3167
3168 ASSERT(vpkt != NULL);
3169 tpkt = vpkt->vpkt_tgt_pkt;
3170 ASSERT(tpkt != NULL);
3171 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3172 ASSERT(svp != NULL);
3173 vlun = svp->svp_svl;
3174 ASSERT(vlun != NULL);
3175 lpath = vpkt->vpkt_path;
3176
3177 /*
3178 * sync up the target driver's pkt with the pkt that
3179 * we actually used
3180 */
3181 *(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
3182 tpkt->pkt_resid = pkt->pkt_resid;
3183 tpkt->pkt_state = pkt->pkt_state;
3184 tpkt->pkt_statistics = pkt->pkt_statistics;
3185 tpkt->pkt_reason = pkt->pkt_reason;
3186
3187 /* Return path_instance information back to the target driver. */
3188 if (scsi_pkt_allocated_correctly(tpkt)) {
3189 if (scsi_pkt_allocated_correctly(pkt)) {
3190 /*
3191 * If both packets were correctly allocated,
3192 * return path returned by pHCI.
3193 */
3194 tpkt->pkt_path_instance = pkt->pkt_path_instance;
3195 } else {
3196 /* Otherwise return path of pHCI we used */
3197 tpkt->pkt_path_instance =
3198 mdi_pi_get_path_instance(lpath);
3199 }
3200 }
3201
3202 	if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3203 	    (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3204 	    ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3205 if ((SCBP_C(pkt) != STATUS_GOOD) ||
3206 (pkt->pkt_reason != CMD_CMPLT)) {
3207 sema_v(&vlun->svl_pgr_sema);
3208 }
3209 } else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3210 if (pkt->pkt_reason != CMD_CMPLT ||
3211 (SCBP_C(pkt) != STATUS_GOOD)) {
3212 sema_v(&vlun->svl_pgr_sema);
3213 }
3214 }
3215
3216 switch (pkt->pkt_reason) {
3217 case CMD_CMPLT:
3218 /*
3219 * cmd completed successfully, check for scsi errors
3220 */
3221 switch (*(pkt->pkt_scbp)) {
3222 case STATUS_CHECK:
3223 if (pkt->pkt_state & STATE_ARQ_DONE) {
3224 sns = (uint8_t *)
3225 &(((struct scsi_arq_status *)(uintptr_t)
3226 (pkt->pkt_scbp))->sts_sensedata);
3227 skey = scsi_sense_key(sns);
3228 asc = scsi_sense_asc(sns);
3229 ascq = scsi_sense_ascq(sns);
3230 fops = vlun->svl_fops;
3231 ASSERT(fops != NULL);
3232 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3233 "Received sns key %x esc %x escq %x\n",
3234 skey, asc, ascq));
3235
3236 if (vlun->svl_waiting_for_activepath == 1) {
3237 /*
3238 * if we are here it means we are
3239 * in the midst of a probe/attach
3240 * through a passive path; this
3241 * case is exempt from sense analysis
3242 * for detection of ext. failover
3243 * because that would unnecessarily
3244 * increase attach time.
3245 */
3246 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3247 vpkt->vpkt_tgt_init_scblen);
3248 break;
3249 }
3250 if (asc == VHCI_SCSI_PERR) {
3251 /*
3252 * parity error
3253 */
3254 err_str = parity_err;
3255 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3256 vpkt->vpkt_tgt_init_scblen);
3257 break;
3258 }
3259 rval = fops->sfo_analyze_sense(svp->svp_psd,
3260 sns, vlun->svl_fops_ctpriv);
3261 if ((rval == SCSI_SENSE_NOFAILOVER) ||
3262 (rval == SCSI_SENSE_UNKNOWN) ||
3263 (rval == SCSI_SENSE_NOT_READY)) {
3264 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3265 vpkt->vpkt_tgt_init_scblen);
3266 break;
3267 } else if (rval == SCSI_SENSE_STATE_CHANGED) {
3268 struct scsi_vhci *vhci;
3269 vhci = ADDR2VHCI(&tpkt->pkt_address);
3270 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3271 if (!held) {
3272 /*
3273 * looks like some other thread
3274 * has already detected this
3275 * condition
3276 */
3277 tpkt->pkt_state &=
3278 ~STATE_ARQ_DONE;
3279 *(tpkt->pkt_scbp) =
3280 STATUS_BUSY;
3281 break;
3282 }
3283 (void) taskq_dispatch(
3284 vhci->vhci_update_pathstates_taskq,
3285 vhci_update_pathstates,
3286 (void *)vlun, KM_SLEEP);
3287 } else {
3288 /*
3289 * externally initiated failover
3290 * has occurred or is in progress
3291 */
3292 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3293 if (!held) {
3294 /*
3295 * looks like some other thread
3296 * has already detected this
3297 * condition
3298 */
3299 tpkt->pkt_state &=
3300 ~STATE_ARQ_DONE;
3301 *(tpkt->pkt_scbp) =
3302 STATUS_BUSY;
3303 break;
3304 } else {
3305 rval = vhci_handle_ext_fo
3306 (pkt, rval);
3307 if (rval == BUSY_RETURN) {
3308 tpkt->pkt_state &=
3309 ~STATE_ARQ_DONE;
3310 *(tpkt->pkt_scbp) =
3311 STATUS_BUSY;
3312 break;
3313 }
3314 bcopy(pkt->pkt_scbp,
3315 tpkt->pkt_scbp,
3316 vpkt->vpkt_tgt_init_scblen);
3317 break;
3318 }
3319 }
3320 }
3321 break;
3322
3323 /*
3324 * If this is a good SCSI-II RELEASE cmd completion then restore
3325 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
3326 * If this is a good SCSI-II RESERVE cmd completion then set
3327 * VLUN_RESERVE_ACTIVE_FLG.
3328 */
3329 case STATUS_GOOD:
3330 if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3331 (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3332 (void) mdi_set_lb_policy(vlun->svl_dip,
3333 vlun->svl_lb_policy_save);
3334 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3335 VHCI_DEBUG(1, (CE_WARN, NULL,
3336 "!vhci_intr: vlun 0x%p release path 0x%p",
3337 (void *)vlun, (void *)vpkt->vpkt_path));
3338 }
3339
3340 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3341 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3342 vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3343 vlun->svl_resrv_pip = vpkt->vpkt_path;
3344 VHCI_DEBUG(1, (CE_WARN, NULL,
3345 "!vhci_intr: vlun 0x%p reserved path 0x%p",
3346 (void *)vlun, (void *)vpkt->vpkt_path));
3347 }
3348 break;
3349
3350 case STATUS_RESERVATION_CONFLICT:
3351 VHCI_DEBUG(1, (CE_WARN, NULL,
3352 "!vhci_intr: vlun 0x%p "
3353 "reserve conflict on path 0x%p",
3354 (void *)vlun, (void *)vpkt->vpkt_path));
3355 /* FALLTHROUGH */
3356 default:
3357 break;
3358 }
3359
3360 /*
3361 * Update I/O completion statistics for the path
3362 */
3363 mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3364
3365 /*
3366 * Command completed successfully, release the dma binding and
3367 * destroy the transport side of the packet.
3368 */
3369 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3370 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3371 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3372 if (SCBP_C(pkt) == STATUS_GOOD) {
3373 ASSERT(vlun->svl_taskq);
3374 svp->svp_last_pkt_reason = pkt->pkt_reason;
3375 (void) taskq_dispatch(vlun->svl_taskq,
3376 vhci_run_cmd, pkt, KM_SLEEP);
3377 return;
3378 }
3379 }
3380 if ((SCBP_C(pkt) == STATUS_GOOD) &&
3381 (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) {
3382 /*
3383 * If the action (value in byte 1 of the cdb) is zero,
3384 * we're reading keys, and that's the only condition
3385 * where we need to be concerned with filtering keys
3386 * and potential retries. Otherwise, we simply signal
3387 * the semaphore and move on.
3388 */
3389 if (pkt->pkt_cdbp[1] == 0) {
3390 /*
3391 * If this is the completion of an internal
3392 * retry then we need to make sure that the
3393 * pkt and tpkt pointers are readjusted so
3394 * the calls to scsi_destroy_pkt and pkt_comp
3395 			 * below work correctly.
3396 */
3397 if (vpkt->vpkt_org_vpkt != NULL) {
3398 pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3399 tpkt = vpkt->vpkt_org_vpkt->
3400 vpkt_tgt_pkt;
3401
3402 /*
3403 * If this command was issued through
3404 * the taskq then we need to clear
3405 * this flag for proper processing in
3406 * the case of a retry from the target
3407 * driver.
3408 */
3409 vpkt->vpkt_state &=
3410 ~VHCI_PKT_THRU_TASKQ;
3411 }
3412
3413 /*
3414 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3415 * vpkt will contain the address of the
3416 * original vpkt
3417 */
3418 if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3419 /*
3420 * The command has been resent to get
3421 * all the keys from the device. Don't
3422 * complete the command with ssd until
3423 * the retry completes.
3424 */
3425 return;
3426 }
3427 } else {
3428 sema_v(&vlun->svl_pgr_sema);
3429 }
3430 }
3431
3432 break;
3433
3434 case CMD_TIMEOUT:
3435 if ((pkt->pkt_statistics &
3436 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
3437
3438 VHCI_DEBUG(1, (CE_NOTE, NULL,
3439 "!scsi vhci timeout invoked\n"));
3440
3441 (void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3442 FALSE, VHCI_DEPTH_ALL);
3443 }
3444 MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3445 tpkt->pkt_statistics |= STAT_ABORTED;
3446 err_str = timeout_err;
3447 break;
3448
3449 case CMD_TRAN_ERR:
3450 /*
3451 * This status is returned if the transport has sent the cmd
3452 * down the link to the target and then some error occurs.
3453 * In case of SCSI-II RESERVE cmd, we don't know if the
3454 		 * reservation has been accepted by the target or not, so we
3455 		 * need
3455 * to clear the reservation.
3456 */
3457 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3458 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3459 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3460 " cmd_tran_err for scsi-2 reserve cmd\n"));
3461 if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3462 TRUE, VHCI_DEPTH_TARGET)) {
3463 VHCI_DEBUG(1, (CE_WARN, NULL,
3464 "!vhci_intr cmd_tran_err reset failed!"));
3465 }
3466 }
3467 break;
3468
3469 case CMD_DEV_GONE:
3470 /*
3471 * If this is the last path then report CMD_DEV_GONE to the
3472 		 * target driver; otherwise report BUSY to trigger retry.
3473 */
3474 if (vlun->svl_dip &&
3475 (mdi_client_get_path_count(vlun->svl_dip) <= 1)) {
3476 struct scsi_vhci *vhci;
3477 vhci = ADDR2VHCI(&tpkt->pkt_address);
3478 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3479 "cmd_dev_gone on last path\n"));
3480 (void) vhci_invalidate_mpapi_lu(vhci, vlun);
3481 break;
3482 }
3483
3484 /* Report CMD_CMPLT-with-BUSY to cause retry. */
3485 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3486 "cmd_dev_gone\n"));
3487 tpkt->pkt_reason = CMD_CMPLT;
3488 tpkt->pkt_state = STATE_GOT_BUS |
3489 STATE_GOT_TARGET | STATE_SENT_CMD |
3490 STATE_GOT_STATUS;
3491 *(tpkt->pkt_scbp) = STATUS_BUSY;
3492 break;
3493
3494 default:
3495 break;
3496 }
3497
3498 /*
3499 	 * A SCSI-II RESERVE cmd has been serviced by the lower layers, so
3500 	 * clear the flag so the lun is not QUIESCED any longer.
3501 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3502 * is retried, a taskq shall again be dispatched to service it. Else
3503 * it may lead to a system hang if the retry is within interrupt
3504 * context.
3505 */
3506 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3507 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3508 vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3509 vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3510 }
3511
3512 /*
3513 * vpkt_org_vpkt should always be NULL here if the retry command
3514 * has been successfully processed. If vpkt_org_vpkt != NULL at
3515 * this point, it is an error so restore the original vpkt and
3516 * return an error to the target driver so it can retry the
3517 * command as appropriate.
3518 */
3519 if (vpkt->vpkt_org_vpkt != NULL) {
3520 struct vhci_pkt *new_vpkt = vpkt;
3521 vpkt = vpkt->vpkt_org_vpkt;
3522
3523 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3524 new_vpkt->vpkt_tgt_pkt);
3525
3526 /*
3527 * Mark this command completion as having an error so that
3528 * ssd will retry the command.
3529 */
3530 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3531 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3532
3533 pkt = vpkt->vpkt_hba_pkt;
3534 tpkt = vpkt->vpkt_tgt_pkt;
3535 }
3536
3537 if ((err_str != NULL) && (pkt->pkt_reason !=
3538 svp->svp_last_pkt_reason)) {
3539 cdip = vlun->svl_dip;
3540 vdip = ddi_get_parent(cdip);
3541 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3542 vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s",
3543 ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
3544 ddi_get_instance(cdip), err_str,
3545 mdi_pi_spathname(vpkt->vpkt_path));
3546 kmem_free(cpath, MAXPATHLEN);
3547 }
3548 svp->svp_last_pkt_reason = pkt->pkt_reason;
3549 VHCI_DECR_PATH_CMDCOUNT(svp);
3550
3551 /*
3552 * For PARTIAL_DMA, vhci should not free the path.
3553 * Target driver will call into vhci_scsi_dmafree or
3554 * destroy pkt to release this path.
3555 */
3556 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
3557 scsi_destroy_pkt(pkt);
3558 vpkt->vpkt_hba_pkt = NULL;
3559 if (vpkt->vpkt_path) {
3560 mdi_rele_path(vpkt->vpkt_path);
3561 vpkt->vpkt_path = NULL;
3562 }
3563 }
3564
3565 scsi_hba_pkt_comp(tpkt);
3566 }
3567
3568 /*
3569  * Two possibilities: (1) failover has completed,
3570  * or (2) failover is in progress. Update our path states in
3571  * the former case; in the latter case,
3572  * initiate a scsi_watch request to
3573  * determine when failover completes - vlun is HELD
3574  * until failover completes. BUSY is returned to the upper
3575  * layer in both cases.
3576 */
3577 static int
3578 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat)
3579 {
3580 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private;
3581 struct scsi_pkt *tpkt;
3582 scsi_vhci_priv_t *svp;
3583 scsi_vhci_lun_t *vlun;
3584 struct scsi_vhci *vhci;
3585 scsi_vhci_swarg_t *swarg;
3586 char *path;
3587
3588 ASSERT(vpkt != NULL);
3589 tpkt = vpkt->vpkt_tgt_pkt;
3590 ASSERT(tpkt != NULL);
3591 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3592 ASSERT(svp != NULL);
3593 vlun = svp->svp_svl;
3594 ASSERT(vlun != NULL);
3595 ASSERT(VHCI_LUN_IS_HELD(vlun));
3596
3597 vhci = ADDR2VHCI(&tpkt->pkt_address);
3598
3599 if (fostat == SCSI_SENSE_INACTIVE) {
3600 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover "
3601 "detected for %s; updating path states...\n",
3602 vlun->svl_lun_wwn));
3603 /*
3604 * set the vlun flag to indicate to the task that the target
3605 * port group needs updating
3606 */
3607 vlun->svl_flags |= VLUN_UPDATE_TPG;
3608 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3609 vhci_update_pathstates, (void *)vlun, KM_SLEEP);
3610 } else {
3611 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3612 vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip),
3613 "!%s (%s%d): Waiting for externally initiated failover "
3614 "to complete", ddi_pathname(vlun->svl_dip, path),
3615 ddi_driver_name(vlun->svl_dip),
3616 ddi_get_instance(vlun->svl_dip));
3617 kmem_free(path, MAXPATHLEN);
3618 swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP);
3619 if (swarg == NULL) {
3620 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: "
3621 "request packet allocation for %s failed....\n",
3622 vlun->svl_lun_wwn));
3623 VHCI_RELEASE_LUN(vlun);
3624 return (PKT_RETURN);
3625 }
3626 swarg->svs_svp = svp;
3627 swarg->svs_tos = ddi_get_time();
3628 swarg->svs_pi = vpkt->vpkt_path;
3629 swarg->svs_release_lun = 0;
3630 swarg->svs_done = 0;
3631 /*
3632 * place a hold on the path...we don't want it to
3633 * vanish while scsi_watch is in progress
3634 */
3635 mdi_hold_path(vpkt->vpkt_path);
3636 svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
3637 VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3638 (caddr_t)swarg);
3639 }
3640 return (BUSY_RETURN);
3641 }
3642
3643 /*
3644 * vhci_efo_watch_cb:
3645 * Callback from scsi_watch request to check the failover status.
3646 * Completion is either due to successful failover or timeout.
3647  * Upon successful completion, vhci_update_pathstates is called.
3648  * For the timeout condition, vhci_efo_done is called.
3649  * Always returns 0 to scsi_watch to keep retrying until vhci_efo_done
3650  * terminates this request properly in a separate thread.
3651 */
3652
3653 static int
3654 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3655 {
3656 struct scsi_status *statusp = resultp->statusp;
3657 uint8_t *sensep = (uint8_t *)resultp->sensep;
3658 struct scsi_pkt *pkt = resultp->pkt;
3659 scsi_vhci_swarg_t *swarg;
3660 scsi_vhci_priv_t *svp;
3661 scsi_vhci_lun_t *vlun;
3662 struct scsi_vhci *vhci;
3663 dev_info_t *vdip;
3664 int rval, updt_paths;
3665
3666 swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3667 svp = swarg->svs_svp;
3668 if (swarg->svs_done) {
3669 /*
3670  * Failover has already completed or timed out.
3671 * Waiting for vhci_efo_done to terminate this scsi_watch.
3672 */
3673 return (0);
3674 }
3675
3676 ASSERT(svp != NULL);
3677 vlun = svp->svp_svl;
3678 ASSERT(vlun != NULL);
3679 ASSERT(VHCI_LUN_IS_HELD(vlun));
3680 vlun->svl_efo_update_path = 0;
3681 vdip = ddi_get_parent(vlun->svl_dip);
3682 vhci = ddi_get_soft_state(vhci_softstate,
3683 ddi_get_instance(vdip));
3684
3685 updt_paths = 0;
3686
3687 if (pkt->pkt_reason != CMD_CMPLT) {
3688 if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3689 swarg->svs_release_lun = 1;
3690 goto done;
3691 }
3692 return (0);
3693 }
3694 if (*((unsigned char *)statusp) == STATUS_CHECK) {
3695 rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep,
3696 vlun->svl_fops_ctpriv);
3697 switch (rval) {
3698 /*
3699 * Only update path states in case path is definitely
3700 * inactive, or no failover occurred. For all other
3701  * check conditions, continue pinging. An unexpected
3702 * check condition shouldn't cause pinging to complete
3703 * prematurely.
3704 */
3705 case SCSI_SENSE_INACTIVE:
3706 case SCSI_SENSE_NOFAILOVER:
3707 updt_paths = 1;
3708 break;
3709 default:
3710 if ((ddi_get_time() - swarg->svs_tos)
3711 >= VHCI_EXTFO_TIMEOUT) {
3712 swarg->svs_release_lun = 1;
3713 goto done;
3714 }
3715 return (0);
3716 }
3717 } else if (*((unsigned char *)statusp) ==
3718 STATUS_RESERVATION_CONFLICT) {
3719 updt_paths = 1;
3720 } else if ((*((unsigned char *)statusp)) &
3721 (STATUS_BUSY | STATUS_QFULL)) {
3722 return (0);
3723 }
3724 if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3725 (updt_paths == 1)) {
3726 /*
3727 * we got here because we had detected an
3728 * externally initiated failover; things
3729 * have settled down now, so let's
3730 * start up a task to update the
3731 * path states and target port group
3732 */
3733 vlun->svl_efo_update_path = 1;
3734 swarg->svs_done = 1;
3735 vlun->svl_swarg = swarg;
3736 vlun->svl_flags |= VLUN_UPDATE_TPG;
3737 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3738 vhci_update_pathstates, (void *)vlun,
3739 KM_SLEEP);
3740 return (0);
3741 }
3742 if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3743 swarg->svs_release_lun = 1;
3744 goto done;
3745 }
3746 return (0);
3747 done:
3748 swarg->svs_done = 1;
3749 (void) taskq_dispatch(vhci->vhci_taskq,
3750 vhci_efo_done, (void *)swarg, KM_SLEEP);
3751 return (0);
3752 }
3753
3754 /*
3755 * vhci_efo_done:
3756  * Cleanly terminates scsi_watch and frees up resources.
3757  * Called as a taskq function in vhci_efo_watch_cb for the EFO timeout
3758  * condition, or by vhci_update_pathstates invoked during externally
3759  * initiated failover completion.
3760 */
3761 static void
3762 vhci_efo_done(void *arg)
3763 {
3764 scsi_vhci_lun_t *vlun;
3765 scsi_vhci_swarg_t *swarg = (scsi_vhci_swarg_t *)arg;
3766 scsi_vhci_priv_t *svp = swarg->svs_svp;
3767 ASSERT(svp);
3768
3769 vlun = svp->svp_svl;
3770 ASSERT(vlun);
3771
3772 /* Wait for clean termination of scsi_watch */
3773 (void) scsi_watch_request_terminate(svp->svp_sw_token,
3774 SCSI_WATCH_TERMINATE_ALL_WAIT);
3775 svp->svp_sw_token = NULL;
3776
3777 /* release path and freeup resources to indicate failover completion */
3778 mdi_rele_path(swarg->svs_pi);
3779 if (swarg->svs_release_lun) {
3780 VHCI_RELEASE_LUN(vlun);
3781 }
3782 kmem_free((void *)swarg, sizeof (*swarg));
3783 }
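/*
 * Lifetime note: the scsi_vhci_swarg_t allocated in vhci_handle_ext_fo()
 * is owned by the watch callback until svs_done is set; vhci_efo_done()
 * is the single place that terminates the watch, drops the path hold and
 * frees the swarg, whether the EFO ended by timeout or by
 * vhci_update_pathstates() observing completion.
 */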
3784
3785 /*
3786 * Update the path states
3787 * vlun should be HELD when this is invoked.
3788 * Calls vhci_efo_done to cleanup resources allocated for EFO.
3789 */
3790 void
3791 vhci_update_pathstates(void *arg)
3792 {
3793 mdi_pathinfo_t *pip, *npip;
3794 dev_info_t *dip;
3795 struct scsi_failover_ops *fo;
3796 struct scsi_vhci_priv *svp;
3797 struct scsi_device *psd;
3798 struct scsi_path_opinfo opinfo;
3799 char *pclass, *tptr;
3800 struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
3801 int sps; /* mdi_select_path() status */
3802 char *cpath;
3803 struct scsi_vhci *vhci;
3804 struct scsi_pkt *pkt;
3805 struct buf *bp;
3806 struct scsi_vhci_priv *svp_conflict = NULL;
3807
3808 ASSERT(VHCI_LUN_IS_HELD(vlun));
3809 dip = vlun->svl_dip;
3810 pip = npip = NULL;
3811
3812 vhci = ddi_get_soft_state(vhci_softstate,
3813 ddi_get_instance(ddi_get_parent(dip)));
3814
3815 sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3816 MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
3817 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3818 goto done;
3819 }
3820
3821 fo = vlun->svl_fops;
3822 do {
3823 pip = npip;
3824 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3825 psd = svp->svp_psd;
3826 if (fo->sfo_path_get_opinfo(psd, &opinfo,
3827 vlun->svl_fops_ctpriv) != 0) {
3828 sps = mdi_select_path(dip, NULL,
3829 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3830 MDI_SELECT_NO_PREFERRED), pip, &npip);
3831 mdi_rele_path(pip);
3832 continue;
3833 }
3834
3835 if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3836 MDI_SUCCESS) {
3837 VHCI_DEBUG(1, (CE_NOTE, NULL,
3838 "!vhci_update_pathstates: prop lookup failed for "
3839 "path 0x%p\n", (void *)pip));
3840 sps = mdi_select_path(dip, NULL,
3841 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3842 MDI_SELECT_NO_PREFERRED), pip, &npip);
3843 mdi_rele_path(pip);
3844 continue;
3845 }
3846
3847 /*
3848 * Need to update the "path-class" property
3849 * value in the device tree if different
3850 * from the existing value.
3851 */
3852 if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3853 (void) mdi_prop_update_string(pip, "path-class",
3854 opinfo.opinfo_path_attr);
3855 }
3856
3857 /*
3858  * Only change the state if needed, i.e. don't call
3859  * mdi_pi_set_state to ONLINE a path if it is already
3860  * ONLINE. Same for STANDBY paths.
3861 */
3862
3863 if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE ||
3864 opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) {
3865 if (!(MDI_PI_IS_ONLINE(pip))) {
3866 VHCI_DEBUG(1, (CE_NOTE, NULL,
3867 "!vhci_update_pathstates: marking path"
3868 " 0x%p as ONLINE\n", (void *)pip));
3869 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3870 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3871 "(%s%d): path %s "
3872 "is now ONLINE because of "
3873 "an externally initiated failover",
3874 ddi_pathname(dip, cpath),
3875 ddi_driver_name(dip),
3876 ddi_get_instance(dip),
3877 mdi_pi_spathname(pip));
3878 kmem_free(cpath, MAXPATHLEN);
3879 mdi_pi_set_state(pip,
3880 MDI_PATHINFO_STATE_ONLINE);
3881 mdi_pi_set_preferred(pip,
3882 opinfo.opinfo_preferred);
3883 tptr = kmem_alloc(strlen
3884 (opinfo.opinfo_path_attr)+1, KM_SLEEP);
3885 (void) strlcpy(tptr, opinfo.opinfo_path_attr,
3886 (strlen(opinfo.opinfo_path_attr)+1));
3887 mutex_enter(&vlun->svl_mutex);
3888 if (vlun->svl_active_pclass != NULL) {
3889 kmem_free(vlun->svl_active_pclass,
3890 strlen(vlun->svl_active_pclass)+1);
3891 }
3892 vlun->svl_active_pclass = tptr;
3893 if (vlun->svl_waiting_for_activepath) {
3894 vlun->svl_waiting_for_activepath = 0;
3895 }
3896 mutex_exit(&vlun->svl_mutex);
3897 } else if (MDI_PI_IS_ONLINE(pip)) {
3898 if (strcmp(pclass, opinfo.opinfo_path_attr)
3899 != 0) {
3900 mdi_pi_set_preferred(pip,
3901 opinfo.opinfo_preferred);
3902 mutex_enter(&vlun->svl_mutex);
3903 if (vlun->svl_active_pclass == NULL ||
3904 strcmp(opinfo.opinfo_path_attr,
3905 vlun->svl_active_pclass) != 0) {
3906 mutex_exit(&vlun->svl_mutex);
3907 tptr = kmem_alloc(strlen
3908 (opinfo.opinfo_path_attr)+1,
3909 KM_SLEEP);
3910 (void) strlcpy(tptr,
3911 opinfo.opinfo_path_attr,
3912 (strlen
3913 (opinfo.opinfo_path_attr)
3914 +1));
3915 mutex_enter(&vlun->svl_mutex);
3916 } else {
3917 /*
3918 * No need to update
3919 * svl_active_pclass
3920 */
3921 tptr = NULL;
3922 mutex_exit(&vlun->svl_mutex);
3923 }
3924 if (tptr) {
3925 if (vlun->svl_active_pclass
3926 != NULL) {
3927 kmem_free(vlun->
3928 svl_active_pclass,
3929 strlen(vlun->
3930 svl_active_pclass)
3931 +1);
3932 }
3933 vlun->svl_active_pclass = tptr;
3934 mutex_exit(&vlun->svl_mutex);
3935 }
3936 }
3937 }
3938
3939 /* Check for Reservation Conflict */
3940 bp = scsi_alloc_consistent_buf(
3941 &svp->svp_psd->sd_address, (struct buf *)NULL,
3942 DEV_BSIZE, B_READ, NULL, NULL);
3943 if (!bp) {
3944 VHCI_DEBUG(1, (CE_NOTE, NULL,
3945 "!vhci_update_pathstates: No resources "
3946 "(buf)\n"));
3947 mdi_rele_path(pip);
3948 goto done;
3949 }
3950 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
3951 CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
3952 PKT_CONSISTENT, NULL, NULL);
3953 if (pkt) {
3954 (void) scsi_setup_cdb((union scsi_cdb *)
3955 (uintptr_t)pkt->pkt_cdbp, SCMD_READ, 1, 1,
3956 0);
3957 pkt->pkt_time = 3*30;
3958 pkt->pkt_flags = FLAG_NOINTR;
3959 pkt->pkt_path_instance =
3960 mdi_pi_get_path_instance(pip);
3961
3962 if ((scsi_transport(pkt) == TRAN_ACCEPT) &&
3963 (pkt->pkt_reason == CMD_CMPLT) &&
3964 (SCBP_C(pkt) ==
3965 STATUS_RESERVATION_CONFLICT)) {
3966 VHCI_DEBUG(1, (CE_NOTE, NULL,
3967 "!vhci_update_pathstates: reserv. "
3968 "conflict to be resolved on 0x%p\n",
3969 (void *)pip));
3970 svp_conflict = svp;
3971 }
3972 scsi_destroy_pkt(pkt);
3973 }
3974 scsi_free_consistent_buf(bp);
3975 } else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) &&
3976 !(MDI_PI_IS_STANDBY(pip))) {
3977 VHCI_DEBUG(1, (CE_NOTE, NULL,
3978 "!vhci_update_pathstates: marking path"
3979 " 0x%p as STANDBY\n", (void *)pip));
3980 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3981 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3982 "(%s%d): path %s "
3983 "is now STANDBY because of "
3984 "an externally initiated failover",
3985 ddi_pathname(dip, cpath),
3986 ddi_driver_name(dip),
3987 ddi_get_instance(dip),
3988 mdi_pi_spathname(pip));
3989 kmem_free(cpath, MAXPATHLEN);
3990 mdi_pi_set_state(pip,
3991 MDI_PATHINFO_STATE_STANDBY);
3992 mdi_pi_set_preferred(pip,
3993 opinfo.opinfo_preferred);
3994 mutex_enter(&vlun->svl_mutex);
3995 if (vlun->svl_active_pclass != NULL) {
3996 if (strcmp(vlun->svl_active_pclass,
3997 opinfo.opinfo_path_attr) == 0) {
3998 kmem_free(vlun->
3999 svl_active_pclass,
4000 strlen(vlun->
4001 svl_active_pclass)+1);
4002 vlun->svl_active_pclass = NULL;
4003 }
4004 }
4005 mutex_exit(&vlun->svl_mutex);
4006 }
4007 (void) mdi_prop_free(pclass);
4008 sps = mdi_select_path(dip, NULL,
4009 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
4010 MDI_SELECT_NO_PREFERRED), pip, &npip);
4011 mdi_rele_path(pip);
4012
4013 } while ((npip != NULL) && (sps == MDI_SUCCESS));
4014
4015 /*
4016 * Check to see if this vlun has an active SCSI-II RESERVE. If so
4017 * clear the reservation by sending a reset, so the host doesn't
4018 * receive a reservation conflict. The reset has to be sent via a
4019 * working path. Let's use a path referred to by svp_conflict as it
4020 * should be working.
4021 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd
4022 * of the reset, explicitly.
4023 */
4024 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4025 if (svp_conflict && (vlun->svl_xlf_capable == 0)) {
4026 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathstates:"
4027 " sending recovery reset on 0x%p, path_state: %x",
4028 svp_conflict->svp_psd->sd_private,
4029 mdi_pi_get_state((mdi_pathinfo_t *)
4030 svp_conflict->svp_psd->sd_private)));
4031
4032 (void) vhci_recovery_reset(vlun,
4033 &svp_conflict->svp_psd->sd_address, FALSE,
4034 VHCI_DEPTH_TARGET);
4035 }
4036 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
4037 mutex_enter(&vhci->vhci_mutex);
4038 scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
4039 &vhci->vhci_reset_notify_listf);
4040 mutex_exit(&vhci->vhci_mutex);
4041 }
4042 if (vlun->svl_flags & VLUN_UPDATE_TPG) {
4043 /*
4044 * Update the AccessState of related MP-API TPGs
4045 */
4046 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
4047 vlun->svl_flags &= ~VLUN_UPDATE_TPG;
4048 }
4049 done:
4050 if (vlun->svl_efo_update_path) {
4051 vlun->svl_efo_update_path = 0;
4052 vhci_efo_done(vlun->svl_swarg);
4053 vlun->svl_swarg = 0;
4054 }
4055 VHCI_RELEASE_LUN(vlun);
4056 }
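/*
 * The loop above follows the standard MDI path-iteration idiom: each
 * successful mdi_select_path() call returns the next path held, and the
 * previous path is released only after the next one has been selected.
 * In outline (a sketch of the pattern, with 'mask' standing for the
 * MDI_SELECT_* flags used above):
 *
 *	sps = mdi_select_path(dip, NULL, mask, NULL, &npip);
 *	while ((npip != NULL) && (sps == MDI_SUCCESS)) {
 *		pip = npip;
 *		... examine pip ...
 *		sps = mdi_select_path(dip, NULL, mask, pip, &npip);
 *		mdi_rele_path(pip);
 *	}
 */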
4057
4058 /* ARGSUSED */
4059 static int
4060 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4061 {
4062 scsi_hba_tran_t *hba = NULL;
4063 struct scsi_device *psd = NULL;
4064 scsi_vhci_lun_t *vlun = NULL;
4065 dev_info_t *pdip = NULL;
4066 dev_info_t *tgt_dip;
4067 struct scsi_vhci *vhci;
4068 char *guid;
4069 scsi_vhci_priv_t *svp = NULL;
4070 int rval = MDI_FAILURE;
4071 int vlun_alloced = 0;
4072
4073 ASSERT(vdip != NULL);
4074 ASSERT(pip != NULL);
4075
4076 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4077 ASSERT(vhci != NULL);
4078
4079 pdip = mdi_pi_get_phci(pip);
4080 ASSERT(pdip != NULL);
4081
4082 hba = ddi_get_driver_private(pdip);
4083 ASSERT(hba != NULL);
4084
4085 tgt_dip = mdi_pi_get_client(pip);
4086 ASSERT(tgt_dip != NULL);
4087
4088 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
4089 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
4090 VHCI_DEBUG(1, (CE_WARN, NULL,
4091 "vhci_pathinfo_init: lun guid property failed"));
4092 goto failure;
4093 }
4094
4095 vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced);
4096 ddi_prop_free(guid);
4097
4098 vlun->svl_dip = tgt_dip;
4099
4100 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
4101 svp->svp_svl = vlun;
4102
4103 /*
4104  * Initialize svl_lb_policy_save only for a newly allocated vlun. Writing
4105  * to svl_lb_policy_save later could accidentally overwrite the saved lb
4106  * policy.
4107 */
4108 if (vlun_alloced) {
4109 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
4110 }
4111
4112 mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL);
4113 cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL);
4114
4115 psd = kmem_zalloc(sizeof (*psd), KM_SLEEP);
4116 mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL);
4117
4118 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4119 /*
4120 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to
4121 * scsi_device in the scsi_address structure. This allows an
4122 * an HBA driver to find its scsi_device(9S) and
4123 * per-scsi_device(9S) HBA private data given a
4124 * scsi_address(9S) by using scsi_address_device(9F) and
4125 * scsi_device_hba_private_get(9F)).
4126 */
4127 psd->sd_address.a.a_sd = psd;
4128 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4129 /*
4130  * Clone the transport structure if requested;
4131  * self-enumerating HBAs always need to use cloning.
4132 */
4133 scsi_hba_tran_t *clone =
4134 kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP);
4135 bcopy(hba, clone, sizeof (scsi_hba_tran_t));
4136 hba = clone;
4137 hba->tran_sd = psd;
4138 } else {
4139 /*
4140 * SPI pHCI unit-address. If we ever need to support this
4141 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo
4142 * node unit-address properties. For now we fail...
4143 */
4144 goto failure;
4145 }
4146
4147 psd->sd_dev = tgt_dip;
4148 psd->sd_address.a_hba_tran = hba;
4149
4150 /*
4151 * Mark scsi_device as being associated with a pathinfo node. For
4152 * a scsi_device structure associated with a devinfo node,
4153 * scsi_ctlops_initchild sets this field to NULL.
4154 */
4155 psd->sd_pathinfo = pip;
4156
4157 /*
4158 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with
4159 * too much scsi_vhci/mdi/ndi knowledge. Remove this code when all
4160 * mpxio-capable pHCI drivers use SCSA enumeration services (or at
4161 * least have been changed to use sd_pathinfo instead).
4162 */
4163 psd->sd_private = (caddr_t)pip;
4164
4165 /* See scsi_hba.c for info on sd_tran_safe kludge */
4166 psd->sd_tran_safe = hba;
4167
4168 svp->svp_psd = psd;
4169 mdi_pi_set_vhci_private(pip, (caddr_t)svp);
4170
4171 /*
4172 * call hba's target init entry point if it exists
4173 */
4174 if (hba->tran_tgt_init != NULL) {
4175 psd->sd_tran_tgt_free_done = 0;
4176 if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip,
4177 hba, psd)) != DDI_SUCCESS) {
4178 VHCI_DEBUG(1, (CE_WARN, pdip,
4179 "!vhci_pathinfo_init: tran_tgt_init failed for "
4180 "path=0x%p rval=%x", (void *)pip, rval));
4181 goto failure;
4182 }
4183 }
4184
4185 svp->svp_new_path = 1;
4186
4187 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n",
4188 (void *)pip));
4189 return (MDI_SUCCESS);
4190
4191 failure:
4192 if (psd) {
4193 mutex_destroy(&psd->sd_mutex);
4194 kmem_free(psd, sizeof (*psd));
4195 }
4196 if (svp) {
4197 mdi_pi_set_vhci_private(pip, NULL);
4198 mutex_destroy(&svp->svp_mutex);
4199 cv_destroy(&svp->svp_cv);
4200 kmem_free(svp, sizeof (*svp));
4201 }
4202 if (hba && (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE))
4203 kmem_free(hba, sizeof (scsi_hba_tran_t));
4204
4205 if (vlun_alloced)
4206 vhci_lun_free(vlun, NULL);
4207
4208 return (rval);
4209 }
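/*
 * Illustration of the SCSI_HBA_ADDR_COMPLEX plumbing established above
 * (a sketch, assuming the pHCI stored per-device soft state via
 * scsi_device_hba_private_set(9F) in its tran_tgt_init): given only a
 * scsi_address, the pHCI can recover its data because sd_address.a.a_sd
 * points back at the scsi_device:
 *
 *	struct scsi_device *sd = scsi_address_device(ap);
 *	void *hba_priv = scsi_device_hba_private_get(sd);
 */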
4210
4211 /* ARGSUSED */
4212 static int
4213 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4214 {
4215 scsi_hba_tran_t *hba = NULL;
4216 struct scsi_device *psd = NULL;
4217 dev_info_t *pdip = NULL;
4218 dev_info_t *cdip = NULL;
4219 scsi_vhci_priv_t *svp = NULL;
4220
4221 ASSERT(vdip != NULL);
4222 ASSERT(pip != NULL);
4223
4224 pdip = mdi_pi_get_phci(pip);
4225 ASSERT(pdip != NULL);
4226
4227 cdip = mdi_pi_get_client(pip);
4228 ASSERT(cdip != NULL);
4229
4230 hba = ddi_get_driver_private(pdip);
4231 ASSERT(hba != NULL);
4232
4233 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_UNINIT);
4234 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4235 if (svp == NULL) {
4236 /* path already freed. Nothing to do. */
4237 return (MDI_SUCCESS);
4238 }
4239
4240 psd = svp->svp_psd;
4241 ASSERT(psd != NULL);
4242
4243 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4244 /* Verify plumbing */
4245 ASSERT(psd->sd_address.a_hba_tran == hba);
4246 ASSERT(psd->sd_address.a.a_sd == psd);
4247 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4248 /* Switch to cloned scsi_hba_tran(9S) structure */
4249 hba = psd->sd_address.a_hba_tran;
4250 ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE);
4251 ASSERT(hba->tran_sd == psd);
4252 }
4253
4254 if ((hba->tran_tgt_free != NULL) && !psd->sd_tran_tgt_free_done) {
4255 (*hba->tran_tgt_free) (pdip, cdip, hba, psd);
4256 psd->sd_tran_tgt_free_done = 1;
4257 }
4258 mutex_destroy(&psd->sd_mutex);
4259 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4260 kmem_free(hba, sizeof (*hba));
4261 }
4262
4263 mdi_pi_set_vhci_private(pip, NULL);
4264
4265 /*
4266 * Free the pathinfo related scsi_device inquiry data. Note that this
4267 * matches what happens for scsi_hba.c devinfo case at uninitchild time.
4268 */
4269 if (psd->sd_inq)
4270 kmem_free((caddr_t)psd->sd_inq, sizeof (struct scsi_inquiry));
4271 kmem_free((caddr_t)psd, sizeof (*psd));
4272
4273 mutex_destroy(&svp->svp_mutex);
4274 cv_destroy(&svp->svp_cv);
4275 kmem_free((caddr_t)svp, sizeof (*svp));
4276
4277 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n",
4278 (void *)pip));
4279 return (MDI_SUCCESS);
4280 }
4281
4282 /* ARGSUSED */
4283 static int
4284 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip,
4285 mdi_pathinfo_state_t state, uint32_t ext_state, int flags)
4286 {
4287 int rval = MDI_SUCCESS;
4288 scsi_vhci_priv_t *svp;
4289 scsi_vhci_lun_t *vlun;
4290 int held;
4291 int op = (flags & 0xf00) >> 8;
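/* op carries the DRIVER_DISABLE* operation code from bits 8-11 of flags */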
4292 struct scsi_vhci *vhci;
4293
4294 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4295
4296 if (flags & MDI_EXT_STATE_CHANGE) {
4297 /*
4298 * We do not want to issue any commands down the path in case
4299 * sync flag is set. Lower layers might not be ready to accept
4300 * any I/O commands.
4301 */
4302 if (op == DRIVER_DISABLE)
4303 return (MDI_SUCCESS);
4304
4305 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4306 if (svp == NULL) {
4307 return (MDI_FAILURE);
4308 }
4309 vlun = svp->svp_svl;
4310
4311 if (flags & MDI_BEFORE_STATE_CHANGE) {
4312 /*
4313 * Hold the LUN.
4314 */
4315 VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
4316 if (flags & MDI_DISABLE_OP) {
4317 /*
4318 * Issue scsi reset if it happens to be
4319 * reserved path.
4320 */
4321 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4322 /*
4323  * if a reservation is pending on
4324  * this path, don't mark the
4325  * path busy
4326 */
4327 if (op == DRIVER_DISABLE_TRANSIENT) {
4328 VHCI_DEBUG(1, (CE_NOTE, NULL,
4329 "!vhci_pathinfo"
4330 "_state_change (pip:%p): "
4331 " reservation: fail busy\n",
4332 (void *)pip));
4333 return (MDI_FAILURE);
4334 }
4335 if (pip == vlun->svl_resrv_pip) {
4336 if (vhci_recovery_reset(
4337 svp->svp_svl,
4338 &svp->svp_psd->sd_address,
4339 TRUE,
4340 VHCI_DEPTH_TARGET) == 0) {
4341 VHCI_DEBUG(1,
4342 (CE_NOTE, NULL,
4343 "!vhci_pathinfo"
4344 "_state_change "
4345 " (pip:%p): "
4346 "reset failed, "
4347 "give up!\n",
4348 (void *)pip));
4349 }
4350 vlun->svl_flags &=
4351 ~VLUN_RESERVE_ACTIVE_FLG;
4352 }
4353 }
4354 } else if (flags & MDI_ENABLE_OP) {
4355 if (((vhci->vhci_conf_flags &
4356 VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4357 VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4358 MDI_PI_IS_USER_DISABLE(pip) &&
4359 MDI_PI_IS_STANDBY(pip)) {
4360 struct scsi_failover_ops *fo;
4361 char *best_pclass, *pclass = NULL;
4362 int best_class, rv;
4363 /*
4364 * Failback if enabling a standby path
4365 * and it is the primary class or
4366 * preferred class
4367 */
4368 best_class = mdi_pi_get_preferred(pip);
4369 if (best_class == 0) {
4370 /*
4371 * if not preferred - compare
4372 * path-class with class
4373 */
4374 fo = vlun->svl_fops;
4375 (void) fo->sfo_pathclass_next(
4376 NULL, &best_pclass,
4377 vlun->svl_fops_ctpriv);
4378 pclass = NULL;
4379 rv = mdi_prop_lookup_string(pip,
4380 "path-class", &pclass);
4381 if (rv != MDI_SUCCESS ||
4382 pclass == NULL) {
4383 vhci_log(CE_NOTE, vdip,
4384 "!path-class "
4385 " lookup "
4386 "failed. rv: %d"
4387 "class: %p", rv,
4388 (void *)pclass);
4389 } else if (strncmp(pclass,
4390 best_pclass,
4391 strlen(best_pclass)) == 0) {
4392 best_class = 1;
4393 }
4394 if (rv == MDI_SUCCESS &&
4395 pclass != NULL) {
4396 rv = mdi_prop_free(
4397 pclass);
4398 if (rv !=
4399 DDI_PROP_SUCCESS) {
4400 vhci_log(
4401 CE_NOTE,
4402 vdip,
4403 "!path-"
4404 "class"
4405 " free"
4406 " failed"
4407 " rv: %d"
4408 " class: "
4409 "%p",
4410 rv,
4411 (void *)
4412 pclass);
4413 }
4414 }
4415 }
4416 if (best_class == 1) {
4417 VHCI_DEBUG(1, (CE_NOTE, NULL,
4418 "preferred path: %p "
4419 "USER_DISABLE->USER_ENABLE "
4420 "transition for lun %s\n",
4421 (void *)pip,
4422 vlun->svl_lun_wwn));
4423 (void) taskq_dispatch(
4424 vhci->vhci_taskq,
4425 vhci_initiate_auto_failback,
4426 (void *) vlun, KM_SLEEP);
4427 }
4428 }
4429 /*
4430 * if PGR is active, revalidate key and
4431 * register on this path also, if key is
4432 * still valid
4433 */
4434 sema_p(&vlun->svl_pgr_sema);
4435 if (vlun->svl_pgr_active)
4436 (void)
4437 vhci_pgr_validate_and_register(svp);
4438 sema_v(&vlun->svl_pgr_sema);
4439 /*
4440 * Inform target driver about any
4441 * reservations to be reinstated if target
4442 * has dropped reservation during the busy
4443 * period.
4444 */
4445 mutex_enter(&vhci->vhci_mutex);
4446 scsi_hba_reset_notify_callback(
4447 &vhci->vhci_mutex,
4448 &vhci->vhci_reset_notify_listf);
4449 mutex_exit(&vhci->vhci_mutex);
4450 }
4451 }
4452 if (flags & MDI_AFTER_STATE_CHANGE) {
4453 if (flags & MDI_ENABLE_OP) {
4454 mutex_enter(&vhci_global_mutex);
4455 cv_broadcast(&vhci_cv);
4456 mutex_exit(&vhci_global_mutex);
4457 }
4458 if (vlun->svl_setcap_done) {
4459 (void) vhci_pHCI_cap(&svp->svp_psd->sd_address,
4460 "sector-size", vlun->svl_sector_size,
4461 1, pip);
4462 }
4463
4464 /*
4465 * Release the LUN
4466 */
4467 VHCI_RELEASE_LUN(vlun);
4468
4469 /*
4470 * Path transition is complete.
4471  * Run the callback to tell the target driver to
4472  * retry, in order to prevent IO starvation.
4473 */
4474 if (scsi_callback_id != 0) {
4475 ddi_run_callback(&scsi_callback_id);
4476 }
4477 }
4478 } else {
4479 switch (state) {
4480 case MDI_PATHINFO_STATE_ONLINE:
4481 rval = vhci_pathinfo_online(vdip, pip, flags);
4482 break;
4483
4484 case MDI_PATHINFO_STATE_OFFLINE:
4485 rval = vhci_pathinfo_offline(vdip, pip, flags);
4486 break;
4487
4488 default:
4489 break;
4490 }
4491 /*
4492 * Path transition is complete.
4493  * Run the callback to tell the target driver to
4494  * retry, in order to prevent IO starvation.
4495 */
4496 if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) {
4497 ddi_run_callback(&scsi_callback_id);
4498 }
4499 return (rval);
4500 }
4501
4502 return (MDI_SUCCESS);
4503 }
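/*
 * Note on the MDI_EXT_STATE_CHANGE protocol handled above: the LUN is
 * held (VHCI_HOLD_LUN) in the MDI_BEFORE_STATE_CHANGE phase and released
 * (VHCI_RELEASE_LUN) in the matching MDI_AFTER_STATE_CHANGE phase, so the
 * framework is expected to deliver the two callbacks as a pair.
 */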
4504
4505 /*
4506 * Parse the mpxio load balancing options. The datanameptr
4507 * will point to a string containing the load-balance-options value.
4508 * The load-balance-options value will be a property that
4509 * defines the load-balance algorithm and any arguments to that
4510 * algorithm.
4511 * For example:
4512 * device-type-mpxio-options-list=
4513 * "device-type=SUN SENA", "load-balance-options=logical-block-options"
4514 * "device-type=SUN SE6920", "round-robin-options";
4515 * logical-block-options="load-balance=logical-block", "region-size=15";
4516 * round-robin-options="load-balance=round-robin";
4517 *
4518  * If load-balance is not defined, the load-balance algorithm will
4519  * default to the global setting. Default values are assigned
4520  * to the arguments (region-size=18), and any argument that is
4521  * not known will be ignored.
4522 */
4523 static void
4524 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip,
4525 caddr_t datanameptr)
4526 {
4527 char *dataptr, *next_entry;
4528 caddr_t config_list = NULL;
4529 int config_list_len = 0, list_len = 0;
4530 int region_size = -1;
4531 client_lb_t load_balance = LOAD_BALANCE_NONE; /* until parsed below */
4532
4533 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr,
4534 (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) {
4535 return;
4536 }
4537
4538 list_len = config_list_len;
4539 next_entry = config_list;
4540 while (config_list_len > 0) {
4541 dataptr = next_entry;
4542
4543 if (strncmp(mdi_load_balance, dataptr,
4544 strlen(mdi_load_balance)) == 0) {
4545 /* get the load-balance scheme */
4546 dataptr += strlen(mdi_load_balance) + 1;
4547 if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) {
4548 (void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
4549 load_balance = LOAD_BALANCE_RR;
4550 } else if (strcmp(dataptr,
4551 LOAD_BALANCE_PROP_LBA) == 0) {
4552 (void) mdi_set_lb_policy(cdip,
4553 LOAD_BALANCE_LBA);
4554 load_balance = LOAD_BALANCE_LBA;
4555 } else if (strcmp(dataptr,
4556 LOAD_BALANCE_PROP_NONE) == 0) {
4557 (void) mdi_set_lb_policy(cdip,
4558 LOAD_BALANCE_NONE);
4559 load_balance = LOAD_BALANCE_NONE;
4560 }
4561 } else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE,
4562 strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
4563 int i = 0;
4564 char *ptr;
4565 char *tmp;
4566
4567 tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1);
4568 /* check for numeric value */
4569 for (ptr = tmp; i < strlen(tmp); i++, ptr++) {
4570 if (!isdigit(*ptr)) {
4571 cmn_err(CE_WARN,
4572 "Illegal region size: %s."
4573 " Setting to default value: %d",
4574 tmp,
4575 LOAD_BALANCE_DEFAULT_REGION_SIZE);
4576 region_size =
4577 LOAD_BALANCE_DEFAULT_REGION_SIZE;
4578 break;
4579 }
4580 }
4581 if (i >= strlen(tmp)) {
4582 region_size = stoi(&tmp);
4583 }
4584 (void) mdi_set_lb_region_size(cdip, region_size);
4585 }
4586 config_list_len -= (strlen(next_entry) + 1);
4587 next_entry += strlen(next_entry) + 1;
4588 }
4589 #ifdef DEBUG
4590 if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) {
4591 VHCI_DEBUG(1, (CE_NOTE, dip,
4592 "!vhci_parse_mpxio_lb_options: region-size: %d"
4593 "only valid for load-balance=logical-block\n",
4594 region_size));
4595 }
4596 #endif
4597 if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) {
4598 VHCI_DEBUG(1, (CE_NOTE, dip,
4599 "!vhci_parse_mpxio_lb_options: No region-size"
4600 " defined load-balance=logical-block."
4601 " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
4602 (void) mdi_set_lb_region_size(cdip,
4603 LOAD_BALANCE_DEFAULT_REGION_SIZE);
4604 }
4605 if (list_len > 0) {
4606 kmem_free(config_list, list_len);
4607 }
4608 }
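/*
 * Note on the walk above: ddi_getlongprop() returns the string-array
 * property as one contiguous buffer of NUL-separated strings, so the
 * parser advances entry by entry with strlen(next_entry) + 1 until
 * config_list_len is exhausted.
 */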
4609
4610 /*
4611 * Parse the device-type-mpxio-options-list looking for the key of
4612 * "load-balance-options". If found, parse the load balancing options.
4613 * Check the comment of the vhci_get_device_type_mpxio_options()
4614 * for the device-type-mpxio-options-list.
4615 */
4616 static void
4617 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4618 caddr_t datanameptr, int list_len)
4619 {
4620 char *dataptr;
4621 int len;
4622
4623 /*
4624 * get the data list
4625 */
4626 dataptr = datanameptr;
4627 len = 0;
4628 while (len < list_len &&
4629 strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
4630 != 0) {
4631 if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
4632 strlen(LOAD_BALANCE_OPTIONS)) == 0) {
4633 len += strlen(LOAD_BALANCE_OPTIONS) + 1;
4634 dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
4635 vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
4636 }
4637 len += strlen(dataptr) + 1;
4638 dataptr += strlen(dataptr) + 1;
4639 }
4640 }
4641
4642 /*
4643  * Compare the inquiry string returned from the device with the device-type.
4644  * Check for the existence of the device-type-mpxio-options-list and,
4645  * if found, parse the list checking for a match with the device-type
4646 * value and the inquiry string returned from the device. If a match
4647 * is found, parse the mpxio options list. The format of the
4648 * device-type-mpxio-options-list is:
4649 * device-type-mpxio-options-list=
4650 * "device-type=SUN SENA", "load-balance-options=logical-block-options"
4651 * "device-type=SUN SE6920", "round-robin-options";
4652 * logical-block-options="load-balance=logical-block", "region-size=15";
4653 * round-robin-options="load-balance=round-robin";
4654 */
4655 void
4656 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4657 struct scsi_device *devp)
4658 {
4659
4660 caddr_t config_list = NULL;
4661 caddr_t vidptr, datanameptr;
4662 int vidlen, dupletlen = 0;
4663 int config_list_len = 0, len;
4664 struct scsi_inquiry *inq = devp->sd_inq;
4665
4666 /*
4667  * Look up the device-type-mpxio-options-list and walk through
4668  * the list, comparing the vendor id from the earlier inquiry command
4669  * with the vids in the list; if there is a match, look up
4670  * the mpxio-options value.
4671 */
4672 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
4673 MPXIO_OPTIONS_LIST,
4674 (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {
4675
4676 /*
4677 * Compare vids in each duplet - if it matches,
4678 * parse the mpxio options list.
4679 */
4680 for (len = config_list_len, vidptr = config_list; len > 0;
4681 len -= dupletlen) {
4682
4683 dupletlen = 0;
4684
4685 if (strlen(vidptr) != 0 &&
4686 strncmp(vidptr, DEVICE_TYPE_STR,
4687 strlen(DEVICE_TYPE_STR)) == 0) {
4688 /* point to next duplet */
4689 datanameptr = vidptr + strlen(vidptr) + 1;
4690 /* add len of this duplet */
4691 dupletlen += strlen(vidptr) + 1;
4692 /* get to device type */
4693 vidptr += strlen(DEVICE_TYPE_STR) + 1;
4694 vidlen = strlen(vidptr);
4695 if ((vidlen != 0) &&
4696 bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
4697 vhci_parse_mpxio_options(dip, cdip,
4698 datanameptr, len - dupletlen);
4699 break;
4700 }
4701 /* get to next duplet */
4702 vidptr += strlen(vidptr) + 1;
4703 }
4704 /* get to the next device-type */
4705 while (len - dupletlen > 0 &&
4706 strlen(vidptr) != 0 &&
4707 strncmp(vidptr, DEVICE_TYPE_STR,
4708 strlen(DEVICE_TYPE_STR)) != 0) {
4709 dupletlen += strlen(vidptr) + 1;
4710 vidptr += strlen(vidptr) + 1;
4711 }
4712 }
4713 if (config_list_len > 0) {
4714 kmem_free(config_list, config_list_len);
4715 }
4716 }
4717 }
4718
4719 static int
4720 vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip,
4721 struct scsi_failover_ops *fo,
4722 scsi_vhci_lun_t *vlun,
4723 struct scsi_vhci *vhci)
4724 {
4725 struct scsi_path_opinfo opinfo;
4726 char *pclass, *best_pclass;
4727 char *resrv_pclass = NULL;
4728 int force_rereserve = 0;
4729 int update_pathinfo_done = 0;
4730
4731 if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) {
4732 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
4733 "Failed to get operation info for path:%p\n", (void *)pip));
4734 return (MDI_FAILURE);
4735 }
4736 /* set the xlf capable flag in the vlun for future use */
4737 vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
4738 (void) mdi_prop_update_string(pip, "path-class",
4739 opinfo.opinfo_path_attr);
4740
4741 pclass = opinfo.opinfo_path_attr;
4742 if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
4743 mutex_enter(&vlun->svl_mutex);
4744 if (vlun->svl_active_pclass != NULL) {
4745 if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
4746 mutex_exit(&vlun->svl_mutex);
4747 /*
4748 * Externally initiated failover has happened;
4749 * force the path state to be STANDBY/ONLINE,
4750 * next IO will trigger failover and thus
4751 * sync-up the pathstates. Reason we don't
4752 * sync-up immediately by invoking
4753 * vhci_update_pathstates() is because it
4754 * needs a VHCI_HOLD_LUN() and we don't
4755 * want to block here.
4756 *
4757 * Further, if the device is an ALUA device,
4758 * then failure to exactly match 'pclass' and
4759 * 'svl_active_pclass'(as is the case here)
4760 * indicates that the currently active path
4761 * is a 'non-optimized' path - which means
4762 * that 'svl_active_pclass' needs to be
4763  * replaced with the opinfo.opinfo_path_attr
4764 * value.
4765 */
4766
4767 if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
4768 char *tptr;
4769
4770 /*
4771  * The device is ALUA compliant. The
4772  * state needs to be changed to online
4773  * rather than standby, which is what
4774  * is typically done for an asymmetric
4775  * device that is not ALUA compliant.
4776 */
4777 mdi_pi_set_state(pip,
4778 MDI_PATHINFO_STATE_ONLINE);
4779 tptr = kmem_alloc(strlen
4780 (opinfo.opinfo_path_attr)+1,
4781 KM_SLEEP);
4782 (void) strlcpy(tptr,
4783 opinfo.opinfo_path_attr,
4784 (strlen(opinfo.opinfo_path_attr)
4785 +1));
4786 mutex_enter(&vlun->svl_mutex);
4787 kmem_free(vlun->svl_active_pclass,
4788 strlen(vlun->svl_active_pclass)+1);
4789 vlun->svl_active_pclass = tptr;
4790 mutex_exit(&vlun->svl_mutex);
4791 } else {
4792 /*
4793 * Non ALUA device case.
4794 */
4795 mdi_pi_set_state(pip,
4796 MDI_PATHINFO_STATE_STANDBY);
4797 }
4798 vlun->svl_fo_support = opinfo.opinfo_mode;
4799 mdi_pi_set_preferred(pip,
4800 opinfo.opinfo_preferred);
4801 update_pathinfo_done = 1;
4802 }
4803
4804 /*
4805  * Find out the class of the currently reserved path, if
4806  * there is one.
4807 */
4808 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) &&
4809 mdi_prop_lookup_string(vlun->svl_resrv_pip,
4810 "path-class", &resrv_pclass) != MDI_SUCCESS) {
4811 VHCI_DEBUG(1, (CE_NOTE, NULL,
4812 "!vhci_update_pathinfo: prop lookup "
4813 "failed for path 0x%p\n",
4814 (void *)vlun->svl_resrv_pip));
4815 /*
4816 * Something is wrong with the reserved path.
4817 * We can't do much with that right here. Just
4818 * force re-reservation to another path.
4819 */
4820 force_rereserve = 1;
4821 }
4822
4823 (void) fo->sfo_pathclass_next(NULL, &best_pclass,
4824 vlun->svl_fops_ctpriv);
4825 if ((force_rereserve == 1) || ((resrv_pclass != NULL) &&
4826 (strcmp(pclass, best_pclass) == 0) &&
4827 (strcmp(resrv_pclass, best_pclass) != 0))) {
4828 /*
4829 * Inform target driver that a reservation
4830 * should be reinstated because the reserved
4831 * path is not the most preferred one.
4832 */
4833 mutex_enter(&vhci->vhci_mutex);
4834 scsi_hba_reset_notify_callback(
4835 &vhci->vhci_mutex,
4836 &vhci->vhci_reset_notify_listf);
4837 mutex_exit(&vhci->vhci_mutex);
4838 }
4839
4840 if (update_pathinfo_done == 1) {
4841 return (MDI_SUCCESS);
4842 }
4843 } else {
4844 char *tptr;
4845
4846 /*
4847  * Let's release the mutex before we try to
4848  * allocate, since the allocation may sleep.
4850 */
4851 mutex_exit(&vlun->svl_mutex);
4852 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4853 (void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4854 mutex_enter(&vlun->svl_mutex);
4855 vlun->svl_active_pclass = tptr;
4856 }
4857 mutex_exit(&vlun->svl_mutex);
4858 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4859 vlun->svl_waiting_for_activepath = 0;
4860 } else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
4861 mutex_enter(&vlun->svl_mutex);
4862 if (vlun->svl_active_pclass == NULL) {
4863 char *tptr;
4864
4865 mutex_exit(&vlun->svl_mutex);
4866 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4867 (void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4868 mutex_enter(&vlun->svl_mutex);
4869 vlun->svl_active_pclass = tptr;
4870 }
4871 mutex_exit(&vlun->svl_mutex);
4872 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4873 vlun->svl_waiting_for_activepath = 0;
4874 } else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
4875 mutex_enter(&vlun->svl_mutex);
4876 if (vlun->svl_active_pclass != NULL) {
4877 if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
4878 mutex_exit(&vlun->svl_mutex);
4879 /*
4880 * externally initiated failover has happened;
4881 * force state to ONLINE (see comment above)
4882 */
4883 mdi_pi_set_state(pip,
4884 MDI_PATHINFO_STATE_ONLINE);
4885 vlun->svl_fo_support = opinfo.opinfo_mode;
4886 mdi_pi_set_preferred(pip,
4887 opinfo.opinfo_preferred);
4888 return (MDI_SUCCESS);
4889 }
4890 }
4891 mutex_exit(&vlun->svl_mutex);
4892 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);
4893
4894 /*
4895 * Initiate auto-failback, if enabled, for path if path-state
4896 * is transitioning from OFFLINE->STANDBY and pathclass is the
4897 * preferred pathclass for this storage.
4898  * NOTE: In the case where opinfo_path_state is SCSI_PATH_ACTIVE
4899  * (above), where the pi state is set to STANDBY, we don't
4900  * initiate auto-failback as the next IO shall take care of
4901  * this. See comment above.
4902 */
4903 (void) fo->sfo_pathclass_next(NULL, &best_pclass,
4904 vlun->svl_fops_ctpriv);
4905 if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4906 VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4907 (strcmp(pclass, best_pclass) == 0) &&
4908 ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)||
4909 (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
4910 VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
4911 " OFFLINE->STANDBY transition for lun %s\n",
4912 best_pclass, (void *)pip, vlun->svl_lun_wwn));
4913 (void) taskq_dispatch(vhci->vhci_taskq,
4914 vhci_initiate_auto_failback, (void *) vlun,
4915 KM_SLEEP);
4916 }
4917 }
4918 vlun->svl_fo_support = opinfo.opinfo_mode;
4919 mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);
4920
4921 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
4922 " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
4923 opinfo.opinfo_rev, opinfo.opinfo_path_state,
4924 opinfo.opinfo_preferred, opinfo.opinfo_mode));
4925
4926 return (MDI_SUCCESS);
4927 }
4928
4929 /*
4930  * Form the kstat name and call mdi_pi_kstat_create()
4931 */
4932 void
4933 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
4934 {
4935 dev_info_t *tgt_dip;
4936 dev_info_t *pdip;
4937 char *guid;
4938 char *target_port, *target_port_dup;
4939 char ks_name[KSTAT_STRLEN];
4940 uint_t pid;
4941 int by_id;
4942 mod_hash_val_t hv;
4943
4944
4945 /* return if we have already allocated kstats */
4946 if (mdi_pi_kstat_exists(pip))
4947 return;
4948
4949 /*
4950  * We need instance numbers to create a kstat name; return if we don't
4951  * have instance numbers assigned yet.
4952 */
4953 tgt_dip = mdi_pi_get_client(pip);
4954 pdip = mdi_pi_get_phci(pip);
4955 if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
4956 return;
4957
4958 /*
4959 * A path oriented kstat has a ks_name of the form:
4960 *
4961 * <client-driver><instance>.t<pid>.<pHCI-driver><instance>
4962 *
4963 * We maintain a bidirectional 'target-port' to <pid> map,
4964 * called targetmap. All pathinfo nodes with the same
4965 * 'target-port' map to the same <pid>. The iostat(1M) code,
4966 * when parsing a path oriented kstat name, uses the <pid> as
4967 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
4968 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
4969 * this ioctl needs to translate a <pid> to a 'target-port'
4970 * even after all pathinfo nodes associated with the
4971 * 'target-port' have been destroyed. This is needed to support
4972 * consistent first-iteration activity-since-boot iostat(1M)
4973 * output. Because of this requirement, the mapping can't be
4974 * based on pathinfo information in a devinfo snapshot.
4975 */
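 /*
  * For example (hypothetical instance numbers): a path between client
  * "ssd2" and pHCI "fp0" whose 'target-port' maps to <pid> 5 yields the
  * ks_name "ssd2.t5.fp0".
  */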
4976
4977 /* determine 'target-port' */
4978 if (mdi_prop_lookup_string(pip,
4979 SCSI_ADDR_PROP_TARGET_PORT, &target_port) == MDI_SUCCESS) {
4980 target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
4981 (void) mdi_prop_free(target_port);
4982 by_id = 1;
4983 } else {
4984 /*
4985 * If the pHCI did not set up 'target-port' on this
4986 * pathinfo node, assume that our client is the only
4987 * one with paths to the device by using the guid
4988 * value as the 'target-port'. Since no other client
4989 * will have the same guid, no other client will use
4990 * the same <pid>. NOTE: a client with an instance
4991 * number always has a guid.
4992 */
4993 (void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
4994 PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid);
4995 target_port_dup = i_ddi_strdup(guid, KM_SLEEP);
4996 ddi_prop_free(guid);
4997
4998 /*
4999 * For this type of mapping we don't want the
5000 * <id> -> 'target-port' mapping to be made. This
5001 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
5002 * to fail, and the iostat(1M) long '-n' output will
5003 * still use the <pid>. We do this because we just
5004 * made up the 'target-port' using the guid, and we
5005 * don't want to expose that fact in iostat output.
5006 */
5007 by_id = 0;
5008 }
5009
5010 /* find/establish <pid> given 'target-port' */
5011 mutex_enter(&vhci_targetmap_mutex);
5012 if (mod_hash_find(vhci_targetmap_byport,
5013 (mod_hash_key_t)target_port_dup, &hv) == 0) {
5014 pid = (int)(intptr_t)hv; /* mapping exists */
5015 } else {
5016 pid = vhci_targetmap_pid++; /* new mapping */
5017
5018 (void) mod_hash_insert(vhci_targetmap_byport,
5019 (mod_hash_key_t)target_port_dup,
5020 (mod_hash_val_t)(intptr_t)pid);
5021 if (by_id) {
5022 (void) mod_hash_insert(vhci_targetmap_bypid,
5023 (mod_hash_key_t)(uintptr_t)pid,
5024 (mod_hash_val_t)(uintptr_t)target_port_dup);
5025 }
5026 target_port_dup = NULL; /* owned by hash */
5027 }
5028 mutex_exit(&vhci_targetmap_mutex);
5029
5030 /* form kstat name */
5031 (void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
5032 ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip),
5033 pid, ddi_driver_name(pdip), ddi_get_instance(pdip));
5034
5035 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p "
5036 "kstat %s: pid %x <-> port %s\n", (void *)pip,
5037 ks_name, pid, target_port_dup));
5038 if (target_port_dup)
5039 kmem_free(target_port_dup, strlen(target_port_dup) + 1);
5040
5041 /* call mdi to create kstats with the name we built */
5042 (void) mdi_pi_kstat_create(pip, ks_name);
5043 }
5044
5045 /* ARGSUSED */
5046 static int
5047 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5048 {
5049 scsi_hba_tran_t *hba = NULL;
5050 struct scsi_device *psd = NULL;
5051 scsi_vhci_lun_t *vlun = NULL;
5052 dev_info_t *pdip = NULL;
5053 dev_info_t *cdip;
5054 dev_info_t *tgt_dip;
5055 struct scsi_vhci *vhci;
5056 char *guid;
5057 struct scsi_failover_ops *sfo;
5058 scsi_vhci_priv_t *svp = NULL;
5059 struct scsi_address *ap;
5060 struct scsi_pkt *pkt;
5061 int rval = MDI_FAILURE;
5062 mpapi_item_list_t *list_ptr;
5063 mpapi_lu_data_t *ld;
5064
5065 ASSERT(vdip != NULL);
5066 ASSERT(pip != NULL);
5067
5068 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
5069 ASSERT(vhci != NULL);
5070
5071 pdip = mdi_pi_get_phci(pip);
5072 hba = ddi_get_driver_private(pdip);
5073 ASSERT(hba != NULL);
5074
5075 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5076 ASSERT(svp != NULL);
5077
5078 cdip = mdi_pi_get_client(pip);
5079 ASSERT(cdip != NULL);
5080 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
5081 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
5082 VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid "
5083 "property failed"));
5084 goto failure;
5085 }
5086
5087 vlun = vhci_lun_lookup(cdip);
5088 ASSERT(vlun != NULL);
5089
5090 ddi_prop_free(guid);
5091
5092 vlun->svl_dip = mdi_pi_get_client(pip);
5093 ASSERT(vlun->svl_dip != NULL);
5094
5095 psd = svp->svp_psd;
5096 ASSERT(psd != NULL);
5097
5098 ap = &psd->sd_address;
5099
5100 /*
5101 * Get inquiry data into pathinfo related scsi_device structure.
5102  * Free sd_inq when the pathinfo related scsi_device structure is destroyed
5103 * by vhci_pathinfo_uninit(). In other words, vhci maintains its own
5104 * copy of scsi_device and scsi_inquiry data on a per-path basis.
5105 */
5106 if (scsi_probe(psd, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
5107 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
5108 "scsi_probe failed path:%p rval:%x\n", (void *)pip, rval));
5109 rval = MDI_FAILURE;
5110 goto failure;
5111 }
5112
5113 /*
5114 * See if we have a failover module to support the device.
5115 *
5116 * We re-probe to determine the failover ops for each path. This
5117 * is done in case there are any path-specific side-effects associated
5118 * with the sfo_device_probe implementation.
5119 *
5120  * Give the first successful sfo_device_probe the opportunity to
5121 * establish 'ctpriv', vlun/client private data. The ctpriv will
5122 * then be passed into the failover module on all other sfo_device_*()
5123 * operations (and must be freed by sfo_device_unprobe implementation).
5124 *
5125 * NOTE: While sfo_device_probe is done once per path,
5126 * sfo_device_unprobe only occurs once - when the vlun is destroyed.
5127 *
5128 * NOTE: We don't currently support per-path fops private data
5129 * mechanism.
5130 */
5131 sfo = vhci_dev_fo(vdip, psd,
5132 &vlun->svl_fops_ctpriv, &vlun->svl_fops_name);
5133
5134 /* check path configuration result with current vlun state */
5135 if (((sfo && vlun->svl_fops) && (sfo != vlun->svl_fops)) ||
5136 (sfo && vlun->svl_not_supported) ||
5137 ((sfo == NULL) && vlun->svl_fops)) {
5138 /* Getting different results for different paths. */
5139 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5140 "!vhci_pathinfo_online: dev (path 0x%p) contradiction\n",
5141 (void *)pip));
5142 cmn_err(CE_WARN, "scsi_vhci: failover contradiction: "
5143 "'%s'.vs.'%s': path %s\n",
5144 vlun->svl_fops ? vlun->svl_fops->sfo_name : "NULL",
5145 sfo ? sfo->sfo_name : "NULL", mdi_pi_pathname(pip));
5146 vlun->svl_not_supported = 1;
5147 rval = MDI_NOT_SUPPORTED;
5148 goto done;
5149 } else if (sfo == NULL) {
5150 /* No failover module - device not supported under vHCI. */
5151 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5152 "!vhci_pathinfo_online: dev (path 0x%p) not "
5153 "supported\n", (void *)pip));
5154
5155 /* XXX does this contradict vhci_is_dev_supported ? */
5156 vlun->svl_not_supported = 1;
5157 rval = MDI_NOT_SUPPORTED;
5158 goto done;
5159 }
5160
5161 /* failover supported for device - save failover_ops in vlun */
5162 vlun->svl_fops = sfo;
5163 ASSERT(vlun->svl_fops_name != NULL);
5164
5165 /*
5166 * Obtain the device-type based mpxio options as specified in
5167 * scsi_vhci.conf file.
5168 *
5169 * NOTE: currently, the end result is a call to
5170 * mdi_set_lb_region_size().
5171 */
5172 tgt_dip = psd->sd_dev;
5173 ASSERT(tgt_dip != NULL);
5174 vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);
5175
5176 /*
5177 * if PGR is active, revalidate key and register on this path also,
5178 * if key is still valid
5179 */
5180 sema_p(&vlun->svl_pgr_sema);
5181 if (vlun->svl_pgr_active) {
5182 rval = vhci_pgr_validate_and_register(svp);
5183 if (rval != 1) {
5184 rval = MDI_FAILURE;
5185 sema_v(&vlun->svl_pgr_sema);
5186 goto failure;
5187 }
5188 }
5189 sema_v(&vlun->svl_pgr_sema);
5190
5191 if (svp->svp_new_path) {
5192 /*
5193 * Last chance to perform any cleanup operations on this
5194 * new path before making this path completely online.
5195 */
5196 svp->svp_new_path = 0;
5197
5198 /*
5199  * If scsi_vhci knows the lun is already RESERVE'd,
5200 * then skip the issue of RELEASE on new path.
5201 */
5202 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) {
5203 /*
5204 * Issue SCSI-2 RELEASE only for the first time on
5205 * a new path just in case the host rebooted and
5206 * a reservation is still pending on this path.
5207 * IBM Shark storage does not clear RESERVE upon
5208 * host reboot.
5209 */
5210 pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0,
5211 sizeof (struct scsi_arq_status), 0, 0,
5212 SLEEP_FUNC, NULL);
5213 if (pkt == NULL) {
5214 VHCI_DEBUG(1, (CE_NOTE, NULL,
5215 "!vhci_pathinfo_online: "
5216 "Release init_pkt failed :%p\n",
5217 (void *)pip));
5218 rval = MDI_FAILURE;
5219 goto failure;
5220 }
5221 pkt->pkt_cdbp[0] = SCMD_RELEASE;
5222 pkt->pkt_time = 60;
5223
5224 VHCI_DEBUG(1, (CE_NOTE, NULL,
5225 "!vhci_path_online: path:%p "
5226 "Issued SCSI-2 RELEASE\n", (void *)pip));
5227
5228 /* Ignore the return value */
5229 (void) vhci_do_scsi_cmd(pkt);
5230 scsi_destroy_pkt(pkt);
5231 }
5232 }
5233
5234 rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci);
5235 if (rval == MDI_FAILURE) {
5236 goto failure;
5237 }
5238
5239 /* Initialize MP-API data */
5240 vhci_update_mpapi_data(vhci, vlun, pip);
5241
5242 /*
5243 * MP-API also needs the Inquiry data to be maintained in the
5244 * mp_vendor_prop_t structure, so find the lun and update its
5245 * structure with this data.
5246 */
5247 list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL,
5248 MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
5249 ld = (mpapi_lu_data_t *)list_ptr->item->idata;
5250 if (ld != NULL) {
5251 bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8);
5252 bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16);
5253 bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4);
5254 } else {
5255 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: "
5256 "mpapi_lu_data_t is NULL"));
5257 }
5258
5259 /* create kstats for path */
5260 vhci_kstat_create_pathinfo(pip);
5261
5262 done:
5263 mutex_enter(&vhci_global_mutex);
5264 cv_broadcast(&vhci_cv);
5265 mutex_exit(&vhci_global_mutex);
5266
5267 if (vlun->svl_setcap_done) {
5268 (void) vhci_pHCI_cap(ap, "sector-size",
5269 vlun->svl_sector_size, 1, pip);
5270 }
5271
5272 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n",
5273 (void *)pip));
5274
5275 failure:
5276 return (rval);
5277 }
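/*
 * Note on the SCSI-2 RELEASE issued above for a new path: SCMD_RELEASE
 * is a 6-byte (CDB_GROUP0) command, so beyond setting pkt_cdbp[0] and a
 * timeout no further CDB setup is needed; the command is sent down the
 * new path purely to clear any reservation left over from before a
 * reboot.
 */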
5278
5279 /*
5280 * path offline handler. Release all bindings that will not be
5281 * released by the normal packet transport/completion code path.
5282 * Since we don't (presently) keep any bindings alive outside of
5283 * the in-transport packets (which will be released on completion)
5284 * there is not much to do here.
5285 */
5286 /* ARGSUSED */
5287 static int
5288 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5289 {
5290 scsi_hba_tran_t *hba = NULL;
5291 struct scsi_device *psd = NULL;
5292 dev_info_t *pdip = NULL;
5293 dev_info_t *cdip = NULL;
5294 scsi_vhci_priv_t *svp = NULL;
5295
5296 ASSERT(vdip != NULL);
5297 ASSERT(pip != NULL);
5298
5299 pdip = mdi_pi_get_phci(pip);
5300 ASSERT(pdip != NULL);
5301 if (pdip == NULL) {
5302 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5303 "phci dip", (void *)pip));
5304 return (MDI_FAILURE);
5305 }
5306
5307 cdip = mdi_pi_get_client(pip);
5308 ASSERT(cdip != NULL);
5309 if (cdip == NULL) {
5310 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5311 "client dip", (void *)pip));
5312 return (MDI_FAILURE);
5313 }
5314
5315 hba = ddi_get_driver_private(pdip);
5316 ASSERT(hba != NULL);
5317
5318 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5319 if (svp == NULL) {
5320 /*
5321 * mdi_pathinfo node in INIT state can have vHCI private
5322 * information set to null
5323 */
5324 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5325 "svp is NULL for pip 0x%p\n", (void *)pip));
5326 return (MDI_SUCCESS);
5327 }
5328
5329 psd = svp->svp_psd;
5330 ASSERT(psd != NULL);
5331
5332 mutex_enter(&svp->svp_mutex);
5333
5334 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5335 "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
5336 while (svp->svp_cmds != 0) {
5337 if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
5338 drv_usectohz(vhci_path_quiesce_timeout * 1000000),
5339 TR_CLOCK_TICK) == -1) {
5340 /*
5341  * The timeout was reached without the condition
5342  * being signaled.
5343 */
5344 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5345 "Timeout reached on path 0x%p without the cond\n",
5346 (void *)pip));
5347 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5348 "%d cmds still pending on path: 0x%p\n",
5349 svp->svp_cmds, (void *)pip));
5350 break;
5351 }
5352 }
5353 mutex_exit(&svp->svp_mutex);
5354
5355 /*
5356  * Check to see if this vlun has an active SCSI-II RESERVE and whether
5357  * this is the pip for the path that has been reserved.
5358  * If so, clear the reservation by sending a reset, so the host will not
5359 * get a reservation conflict. Reset the flag VLUN_RESERVE_ACTIVE_FLG
5360 * for this lun. Also a reset notify is sent to the target driver
5361 * just in case the POR check condition is cleared by some other layer
5362 * in the stack.
5363 */
5364 if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
5365 if (pip == svp->svp_svl->svl_resrv_pip) {
5366 if (vhci_recovery_reset(svp->svp_svl,
5367 &svp->svp_psd->sd_address, TRUE,
5368 VHCI_DEPTH_TARGET) == 0) {
5369 VHCI_DEBUG(1, (CE_NOTE, NULL,
5370 "!vhci_pathinfo_offline (pip:%p):"
5371 "reset failed, retrying\n", (void *)pip));
5372 delay(1*drv_usectohz(1000000));
5373 if (vhci_recovery_reset(svp->svp_svl,
5374 &svp->svp_psd->sd_address, TRUE,
5375 VHCI_DEPTH_TARGET) == 0) {
5376 VHCI_DEBUG(1, (CE_NOTE, NULL,
5377 "!vhci_pathinfo_offline "
5378 "(pip:%p): reset failed, "
5379 "giving up!\n", (void *)pip));
5380 }
5381 }
5382 svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
5383 }
5384 }
5385
5386 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE);
5387 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
5388
5389 VHCI_DEBUG(1, (CE_NOTE, NULL,
5390 "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
5391 return (MDI_SUCCESS);
5392 }
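/*
 * Note on the quiesce loop above: each cv_reltimedwait() call waits at
 * most vhci_path_quiesce_timeout seconds (converted to ticks with
 * drv_usectohz()); if commands are still pending when the wait times
 * out, the offline deliberately proceeds rather than hanging forever.
 */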
5393
5394
5395 /*
5396 * routine for SCSI VHCI IOCTL implementation.
5397 */
5398 /* ARGSUSED */
5399 static int
5400 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
5401 {
5402 struct scsi_vhci *vhci;
5403 dev_info_t *vdip;
5404 mdi_pathinfo_t *pip;
5405 int instance, held;
5406 int retval = 0;
5407 caddr_t phci_path = NULL, client_path = NULL;
5408 caddr_t paddr = NULL;
5409 sv_iocdata_t ioc;
5410 sv_iocdata_t *pioc = &ioc;
5411 sv_switch_to_cntlr_iocdata_t iocsc;
5412 sv_switch_to_cntlr_iocdata_t *piocsc = &iocsc;
5413 caddr_t s;
5414 scsi_vhci_lun_t *vlun;
5415 struct scsi_failover_ops *fo;
5416 char *pclass;
5417
5418 /* Check for validity of vhci structure */
5419 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
5420 if (vhci == NULL) {
5421 return (ENXIO);
5422 }
5423
5424 mutex_enter(&vhci->vhci_mutex);
5425 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5426 mutex_exit(&vhci->vhci_mutex);
5427 return (ENXIO);
5428 }
5429 mutex_exit(&vhci->vhci_mutex);
5430
5431 /* Get the vhci dip */
5432 vdip = vhci->vhci_dip;
5433 ASSERT(vdip != NULL);
5434 instance = ddi_get_instance(vdip);
5435
5436 /* Allocate memory for getting parameters from userland */
5437 phci_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5438 client_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5439 paddr = kmem_zalloc(MAXNAMELEN, KM_SLEEP);
5440
5441 /*
5442 * Set a local variable indicating the ioctl name. Used for
5443 * printing debug strings.
5444 */
5445 switch (cmd) {
5446 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5447 s = "GET_CLIENT_MULTIPATH_INFO";
5448 break;
5449
5450 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5451 s = "GET_PHCI_MULTIPATH_INFO";
5452 break;
5453
5454 case SCSI_VHCI_GET_CLIENT_NAME:
5455 s = "GET_CLIENT_NAME";
5456 break;
5457
5458 case SCSI_VHCI_PATH_ONLINE:
5459 s = "PATH_ONLINE";
5460 break;
5461
5462 case SCSI_VHCI_PATH_OFFLINE:
5463 s = "PATH_OFFLINE";
5464 break;
5465
5466 case SCSI_VHCI_PATH_STANDBY:
5467 s = "PATH_STANDBY";
5468 break;
5469
5470 case SCSI_VHCI_PATH_TEST:
5471 s = "PATH_TEST";
5472 break;
5473
5474 case SCSI_VHCI_SWITCH_TO_CNTLR:
5475 s = "SWITCH_TO_CNTLR";
5476 break;
5477 case SCSI_VHCI_PATH_DISABLE:
5478 s = "PATH_DISABLE";
5479 break;
5480 case SCSI_VHCI_PATH_ENABLE:
5481 s = "PATH_ENABLE";
5482 break;
5483
5484 case SCSI_VHCI_GET_TARGET_LONGNAME:
5485 s = "GET_TARGET_LONGNAME";
5486 break;
5487
5488 #ifdef DEBUG
5489 case SCSI_VHCI_CONFIGURE_PHCI:
5490 s = "CONFIGURE_PHCI";
5491 break;
5492
5493 case SCSI_VHCI_UNCONFIGURE_PHCI:
5494 s = "UNCONFIGURE_PHCI";
5495 break;
5496 #endif
5497
5498 default:
5499 s = "Unknown";
5500 vhci_log(CE_NOTE, vdip,
5501 "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd);
5502 retval = ENOTSUP;
5503 break;
5504 }
5505 if (retval != 0) {
5506 goto end;
5507 }
5508
5509 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s));
5510
5511 /*
5512 * Get IOCTL parameters from userland
5513 */
5514 switch (cmd) {
5515 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5516 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5517 case SCSI_VHCI_GET_CLIENT_NAME:
5518 case SCSI_VHCI_PATH_ONLINE:
5519 case SCSI_VHCI_PATH_OFFLINE:
5520 case SCSI_VHCI_PATH_STANDBY:
5521 case SCSI_VHCI_PATH_TEST:
5522 case SCSI_VHCI_PATH_DISABLE:
5523 case SCSI_VHCI_PATH_ENABLE:
5524 case SCSI_VHCI_GET_TARGET_LONGNAME:
5525 #ifdef DEBUG
5526 case SCSI_VHCI_CONFIGURE_PHCI:
5527 case SCSI_VHCI_UNCONFIGURE_PHCI:
5528 #endif
5529 retval = vhci_get_iocdata((const void *)data, pioc, mode, s);
5530 break;
5531
5532 case SCSI_VHCI_SWITCH_TO_CNTLR:
5533 retval = vhci_get_iocswitchdata((const void *)data, piocsc,
5534 mode, s);
5535 break;
5536 }
5537 if (retval != 0) {
5538 goto end;
5539 }
5540
5541
5542 /*
5543 * Process the IOCTL
5544 */
5545 switch (cmd) {
5546 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5547 {
5548 uint_t num_paths; /* Num paths to client dev */
5549 sv_path_info_t *upibuf = NULL; /* To keep userland values */
5550 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */
5551 dev_info_t *cdip; /* Client device dip */
5552
5553 if (pioc->ret_elem == NULL) {
5554 retval = EINVAL;
5555 break;
5556 }
5557
5558 /* Get client device path from user land */
5559 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5560 retval = EFAULT;
5561 break;
5562 }
5563
5564 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5565 "client <%s>", s, client_path));
5566
5567 /* Get number of paths to this client device */
5568 if ((cdip = mdi_client_path2devinfo(vdip, client_path))
5569 == NULL) {
5570 retval = ENXIO;
5571 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5572 "client dip doesn't exist. invalid path <%s>",
5573 s, client_path));
5574 break;
5575 }
5576 num_paths = mdi_client_get_path_count(cdip);
5577
5578 if (ddi_copyout(&num_paths, pioc->ret_elem,
5579 sizeof (num_paths), mode)) {
5580 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5581 "num_paths copyout failed", s));
5582 retval = EFAULT;
5583 break;
5584 }
5585
5586 /* If user just wanted num_paths, then return */
5587 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5588 num_paths == 0) {
5589 break;
5590 }
5591
5592 /* Clamp num_paths to the number of elements we can send to userland */
5593 if (num_paths > pioc->buf_elem) {
5594 num_paths = pioc->buf_elem;
5595 }
5596
5597 /* Allocate memory and get userland pointers */
5598 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5599 pioc, mode, s) != 0) {
5600 retval = EFAULT;
5601 break;
5602 }
5603 ASSERT(upibuf != NULL);
5604 ASSERT(kpibuf != NULL);
5605
5606 /*
5607 * Get the path information and send it to userland.
5608 */
5609 if (vhci_get_client_path_list(cdip, kpibuf, num_paths)
5610 != MDI_SUCCESS) {
5611 retval = ENXIO;
5612 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5613 break;
5614 }
5615
5616 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5617 pioc, mode, s)) {
5618 retval = EFAULT;
5619 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5620 break;
5621 }
5622
5623 /* Free the memory allocated for path information */
5624 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5625 break;
5626 }
5627
5628 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5629 {
5630 uint_t num_paths; /* Num paths to client dev */
5631 sv_path_info_t *upibuf = NULL; /* To keep userland values */
5632 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */
5633 dev_info_t *pdip; /* PHCI device dip */
5634
5635 if (pioc->ret_elem == NULL) {
5636 retval = EINVAL;
5637 break;
5638 }
5639
5640 /* Get PHCI device path from user land */
5641 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5642 retval = EFAULT;
5643 break;
5644 }
5645
5646 VHCI_DEBUG(6, (CE_WARN, vdip,
5647 "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));
5648
5649 /* Get number of devices associated with this PHCI device */
5650 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5651 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5652 "phci dip doesn't exist. invalid path <%s>",
5653 s, phci_path));
5654 retval = ENXIO;
5655 break;
5656 }
5657
5658 num_paths = mdi_phci_get_path_count(pdip);
5659
5660 if (ddi_copyout(&num_paths, pioc->ret_elem,
5661 sizeof (num_paths), mode)) {
5662 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5663 "num_paths copyout failed", s));
5664 retval = EFAULT;
5665 break;
5666 }
5667
5668 /* If user just wanted num_paths, then return */
5669 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5670 num_paths == 0) {
5671 break;
5672 }
5673
5674 /* Clamp num_paths to the number of elements we can send to userland */
5675 if (num_paths > pioc->buf_elem) {
5676 num_paths = pioc->buf_elem;
5677 }
5678
5679 /* Allocate memory and get userland pointers */
5680 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5681 pioc, mode, s) != 0) {
5682 retval = EFAULT;
5683 break;
5684 }
5685 ASSERT(upibuf != NULL);
5686 ASSERT(kpibuf != NULL);
5687
5688 /*
5689 * Get the path information and send it to userland.
5690 */
5691 if (vhci_get_phci_path_list(pdip, kpibuf, num_paths)
5692 != MDI_SUCCESS) {
5693 retval = ENXIO;
5694 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5695 break;
5696 }
5697
5698 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5699 pioc, mode, s)) {
5700 retval = EFAULT;
5701 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5702 break;
5703 }
5704
5705 /* Free the memory allocated for path information */
5706 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5707 break;
5708 }
5709
5710 case SCSI_VHCI_GET_CLIENT_NAME:
5711 {
5712 dev_info_t *cdip, *pdip;
5713
5714 /* Get PHCI path and device address from user land */
5715 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5716 vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5717 retval = EFAULT;
5718 break;
5719 }
5720
5721 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5722 "phci <%s>, paddr <%s>", s, phci_path, paddr));
5723
5724 /* Get the PHCI dip */
5725 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5726 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5727 "phci dip doesn't exist. invalid path <%s>",
5728 s, phci_path));
5729 retval = ENXIO;
5730 break;
5731 }
5732
5733 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5734 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5735 "pathinfo doesn't exist. invalid device addr", s));
5736 retval = ENXIO;
5737 break;
5738 }
5739
5740 /* Get the client device pathname and send to userland */
5741 cdip = mdi_pi_get_client(pip);
5742 vhci_ioc_devi_to_path(cdip, client_path);
5743
5744 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5745 "client <%s>", s, client_path));
5746
5747 if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) {
5748 retval = EFAULT;
5749 break;
5750 }
5751 break;
5752 }
5753
5754 case SCSI_VHCI_PATH_ONLINE:
5755 case SCSI_VHCI_PATH_OFFLINE:
5756 case SCSI_VHCI_PATH_STANDBY:
5757 case SCSI_VHCI_PATH_TEST:
5758 {
5759 dev_info_t *pdip; /* PHCI dip */
5760
5761 /* Get PHCI path and device address from user land */
5762 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5763 vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5764 retval = EFAULT;
5765 break;
5766 }
5767
5768 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5769 "phci <%s>, paddr <%s>", s, phci_path, paddr));
5770
5771 /* Get the PHCI dip */
5772 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5773 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5774 "phci dip doesn't exist. invalid path <%s>",
5775 s, phci_path));
5776 retval = ENXIO;
5777 break;
5778 }
5779
5780 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5781 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5782 "pathinfo doesn't exist. invalid device addr", s));
5783 retval = ENXIO;
5784 break;
5785 }
5786
5787 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5788 "Calling MDI function to change device state", s));
5789
5790 switch (cmd) {
5791 case SCSI_VHCI_PATH_ONLINE:
5792 retval = mdi_pi_online(pip, 0);
5793 break;
5794
5795 case SCSI_VHCI_PATH_OFFLINE:
5796 retval = mdi_pi_offline(pip, 0);
5797 break;
5798
5799 case SCSI_VHCI_PATH_STANDBY:
5800 retval = mdi_pi_standby(pip, 0);
5801 break;
5802
5803 case SCSI_VHCI_PATH_TEST:
5804 break;
5805 }
5806 break;
5807 }
5808
5809 case SCSI_VHCI_SWITCH_TO_CNTLR:
5810 {
5811 dev_info_t *cdip;
5812 struct scsi_device *devp;
5813
5814 /* Get the client device pathname */
5815 if (ddi_copyin(piocsc->client, client_path,
5816 MAXPATHLEN, mode)) {
5817 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5818 "client_path copyin failed", s));
5819 retval = EFAULT;
5820 break;
5821 }
5822
5823 /* Get the path class to which user wants to switch */
5824 if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) {
5825 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5826 "controller_class copyin failed", s));
5827 retval = EFAULT;
5828 break;
5829 }
5830
5831 /* Perform validity checks */
5832 if ((cdip = mdi_client_path2devinfo(vdip,
5833 client_path)) == NULL) {
5834 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5835 "client dip doesn't exist. invalid path <%s>",
5836 s, client_path));
5837 retval = ENXIO;
5838 break;
5839 }
5840
5841 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func "
5842 "to switch controller"));
5843 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> "
5844 "class <%s>", client_path, paddr));
5845
5846 if (strcmp(paddr, PCLASS_PRIMARY) &&
5847 strcmp(paddr, PCLASS_SECONDARY)) {
5848 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5849 "invalid path class <%s>", s, paddr));
5850 retval = ENXIO;
5851 break;
5852 }
5853
5854 devp = ddi_get_driver_private(cdip);
5855 if (devp == NULL) {
5856 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5857 "invalid scsi device <%s>", s, client_path));
5858 retval = ENXIO;
5859 break;
5860 }
5861 vlun = ADDR2VLUN(&devp->sd_address);
5862 ASSERT(vlun);
5863
5864 /*
5865 * Check whether the device has only one pclass, PRIMARY. If
5866 * so, this device does not support failover. It is assumed
5867 * that a device with a single pclass has PRIMARY, as that is
5868 * the case today. If this changes and symmetric devices with
5869 * some other pclass are supported in the future, this IOCTL
5870 * will have to be overhauled anyway, as the only arguments it
5871 * accepts now are PRIMARY and SECONDARY.
5872 */
5873 fo = vlun->svl_fops;
5874 if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass,
5875 vlun->svl_fops_ctpriv)) {
5876 retval = ENOTSUP;
5877 break;
5878 }
5879
5880 VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
5881 mutex_enter(&vlun->svl_mutex);
5882 if (vlun->svl_active_pclass != NULL) {
5883 if (strcmp(vlun->svl_active_pclass, paddr) == 0) {
5884 mutex_exit(&vlun->svl_mutex);
5885 retval = EALREADY;
5886 VHCI_RELEASE_LUN(vlun);
5887 break;
5888 }
5889 }
5890 mutex_exit(&vlun->svl_mutex);
5891 /* Call mdi function to cause a switch over */
5892 retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
5893 if (retval == MDI_SUCCESS) {
5894 retval = 0;
5895 } else if (retval == MDI_BUSY) {
5896 retval = EBUSY;
5897 } else {
5898 retval = EIO;
5899 }
5900 VHCI_RELEASE_LUN(vlun);
5901 break;
5902 }
5903
5904 case SCSI_VHCI_PATH_ENABLE:
5905 case SCSI_VHCI_PATH_DISABLE:
5906 {
5907 dev_info_t *cdip, *pdip;
5908
5909 /*
5910 * Get client device path from user land
5911 */
5912 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5913 retval = EFAULT;
5914 break;
5915 }
5916
5917 /*
5918 * Get Phci device path from user land
5919 */
5920 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5921 retval = EFAULT;
5922 break;
5923 }
5924
5925 /*
5926 * Get the devinfo for the Phci.
5927 */
5928 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5929 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5930 "phci dip doesn't exist. invalid path <%s>",
5931 s, phci_path));
5932 retval = ENXIO;
5933 break;
5934 }
5935
5936 /*
5937 * If the client path is set to /scsi_vhci then we need
5938 * to do the operation on all the clients so set cdip to NULL.
5939 * Else, try to get the client dip.
5940 */
5941 if (strcmp(client_path, "/scsi_vhci") == 0) {
5942 cdip = NULL;
5943 } else {
5944 if ((cdip = mdi_client_path2devinfo(vdip,
5945 client_path)) == NULL) {
5946 retval = ENXIO;
5947 VHCI_DEBUG(1, (CE_WARN, NULL,
5948 "!vhci_ioctl: ioctl <%s> client dip "
5949 "doesn't exist. invalid path <%s>",
5950 s, client_path));
5951 break;
5952 }
5953 }
5954
5955 if (cmd == SCSI_VHCI_PATH_ENABLE)
5956 retval = mdi_pi_enable(cdip, pdip, USER_DISABLE);
5957 else
5958 retval = mdi_pi_disable(cdip, pdip, USER_DISABLE);
5959
5960 break;
5961 }
5962
5963 case SCSI_VHCI_GET_TARGET_LONGNAME:
5964 {
5965 uint_t pid = pioc->buf_elem;
5966 char *target_port;
5967 mod_hash_val_t hv;
5968
5969 /* targetmap lookup of 'target-port' by <pid> */
5970 if (mod_hash_find(vhci_targetmap_bypid,
5971 (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) {
5972 /*
5973 * NOTE: failure to find the mapping is OK for guid
5974 * based 'target-port' values.
5975 */
5976 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5977 "targetport mapping doesn't exist: pid %d",
5978 s, pid));
5979 retval = ENXIO;
5980 break;
5981 }
5982
5983 /* copyout 'target-port' result */
5984 target_port = (char *)hv;
5985 if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) {
5986 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5987 "targetport copyout failed: len: %d",
5988 s, (int)strlen(target_port)));
5989 retval = EFAULT;
5990 }
5991 break;
5992 }
5993
5994 #ifdef DEBUG
5995 case SCSI_VHCI_CONFIGURE_PHCI:
5996 {
5997 dev_info_t *pdip;
5998
5999 /* Get PHCI path and device address from user land */
6000 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6001 retval = EFAULT;
6002 break;
6003 }
6004
6005 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6006 "phci <%s>", s, phci_path));
6007
6008 /* Get the PHCI dip */
6009 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6010 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6011 "phci dip doesn't exist. invalid path <%s>",
6012 s, phci_path));
6013 retval = ENXIO;
6014 break;
6015 }
6016
6017 if (ndi_devi_config(pdip,
6018 NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) {
6019 retval = EIO;
6020 }
6021
6022 ddi_release_devi(pdip);
6023 break;
6024 }
6025
6026 case SCSI_VHCI_UNCONFIGURE_PHCI:
6027 {
6028 dev_info_t *pdip;
6029
6030 /* Get PHCI path and device address from user land */
6031 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6032 retval = EFAULT;
6033 break;
6034 }
6035
6036 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6037 "phci <%s>", s, phci_path));
6038
6039 /* Get the PHCI dip */
6040 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6041 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6042 "phci dip doesn't exist. invalid path <%s>",
6043 s, phci_path));
6044 retval = ENXIO;
6045 break;
6046 }
6047
6048 if (ndi_devi_unconfig(pdip,
6049 NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
6050 retval = EBUSY;
6051 }
6052
6053 ddi_release_devi(pdip);
6054 break;
6055 }
6056 #endif
6057 }
6058
6059 end:
6060 /* Free the memory allocated above */
6061 if (phci_path != NULL) {
6062 kmem_free(phci_path, MAXPATHLEN);
6063 }
6064 if (client_path != NULL) {
6065 kmem_free(client_path, MAXPATHLEN);
6066 }
6067 if (paddr != NULL) {
6068 kmem_free(paddr, MAXNAMELEN);
6069 }
6070 return (retval);
6071 }
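/*
 * Illustrative sketch (not part of the driver): a userland consumer of
 * SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO typically uses the two-call pattern
 * handled above -- first with buf_elem == 0 to learn the path count, then
 * again with ret_buf sized to that count. The device node path and the
 * error handling are assumptions for illustration only.
 *
 *	int fd = open("/devices/scsi_vhci:devctl", O_RDWR);
 *	char client[MAXPATHLEN];	/- client device path -/
 *	uint_t nelem = 0;
 *	sv_iocdata_t ioc;
 *
 *	bzero(&ioc, sizeof (ioc));
 *	ioc.client = client;		/- driver copies in MAXPATHLEN bytes -/
 *	ioc.ret_elem = &nelem;
 *	ioc.buf_elem = 0;		/- first call: just get the count -/
 *	(void) ioctl(fd, SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO, &ioc);
 *
 *	ioc.ret_buf = calloc(nelem, sizeof (sv_path_info_t));
 *	ioc.buf_elem = nelem;		/- second call: fetch per-path info -/
 *	(void) ioctl(fd, SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO, &ioc);
 */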
6072
6073 /*
6074 * devctl IOCTL support for client device DR
6075 */
6076 /* ARGSUSED */
6077 int
6078 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
6079 int *rvalp)
6080 {
6081 dev_info_t *self;
6082 dev_info_t *child;
6083 scsi_hba_tran_t *hba;
6084 struct devctl_iocdata *dcp;
6085 struct scsi_vhci *vhci;
6086 int rv = 0;
6087 int retval = 0;
6088 scsi_vhci_priv_t *svp;
6089 mdi_pathinfo_t *pip;
6090
6091 if ((vhci = ddi_get_soft_state(vhci_softstate,
6092 MINOR2INST(getminor(dev)))) == NULL)
6093 return (ENXIO);
6094
6095 /*
6096 * check if :devctl minor device has been opened
6097 */
6098 mutex_enter(&vhci->vhci_mutex);
6099 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
6100 mutex_exit(&vhci->vhci_mutex);
6101 return (ENXIO);
6102 }
6103 mutex_exit(&vhci->vhci_mutex);
6104
6105 self = vhci->vhci_dip;
6106 hba = ddi_get_driver_private(self);
6107 if (hba == NULL)
6108 return (ENXIO);
6109
6110 /*
6111 * We can use the generic implementation for these ioctls
6112 */
6113 switch (cmd) {
6114 case DEVCTL_DEVICE_GETSTATE:
6115 case DEVCTL_DEVICE_ONLINE:
6116 case DEVCTL_DEVICE_OFFLINE:
6117 case DEVCTL_DEVICE_REMOVE:
6118 case DEVCTL_BUS_GETSTATE:
6119 return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
6120 }
6121
6122 /*
6123 * read devctl ioctl data
6124 */
6125 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
6126 return (EFAULT);
6127
6128 switch (cmd) {
6129
6130 case DEVCTL_DEVICE_RESET:
6131 /*
6132 * lookup and hold child device
6133 */
6134 if ((child = ndi_devi_find(self, ndi_dc_getname(dcp),
6135 ndi_dc_getaddr(dcp))) == NULL) {
6136 rv = ENXIO;
6137 break;
6138 }
6139 retval = mdi_select_path(child, NULL,
6140 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
6141 NULL, &pip);
6142 if ((retval != MDI_SUCCESS) || (pip == NULL)) {
6143 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl:"
6144 "Unable to get a path, dip 0x%p", (void *)child));
6145 rv = ENXIO;
6146 break;
6147 }
6148 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
6149 if (vhci_recovery_reset(svp->svp_svl,
6150 &svp->svp_psd->sd_address, TRUE,
6151 VHCI_DEPTH_TARGET) == 0) {
6152 VHCI_DEBUG(1, (CE_NOTE, NULL,
6153 "!vhci_ioctl(pip:%p): "
6154 "reset failed\n", (void *)pip));
6155 rv = ENXIO;
6156 }
6157 mdi_rele_path(pip);
6158 break;
6159
6160 case DEVCTL_BUS_QUIESCE:
6161 case DEVCTL_BUS_UNQUIESCE:
6162 case DEVCTL_BUS_RESET:
6163 case DEVCTL_BUS_RESETALL:
6164 #ifdef DEBUG
6165 case DEVCTL_BUS_CONFIGURE:
6166 case DEVCTL_BUS_UNCONFIGURE:
6167 #endif
6168 rv = ENOTSUP;
6169 break;
6170
6171 default:
6172 rv = ENOTTY;
6173 } /* end of outer switch */
6174
6175 ndi_dc_freehdl(dcp);
6176 return (rv);
6177 }
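/*
 * Illustrative sketch (an assumption, not part of this file): the
 * DEVCTL_DEVICE_RESET case above is normally reached through the
 * libdevice(3LIB) wrappers rather than a hand-rolled ioctl(2):
 *
 *	devctl_hdl_t hdl;
 *
 *	if ((hdl = devctl_device_acquire(dev_path, 0)) != NULL) {
 *		(void) devctl_device_reset(hdl);
 *		devctl_release(hdl);
 *	}
 */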
6178
6179 /*
6180 * Routine to get the PHCI pathname from ioctl structures in userland
6181 */
6182 /* ARGSUSED */
6183 static int
6184 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path,
6185 int mode, caddr_t s)
6186 {
6187 int retval = 0;
6188
6189 if (ddi_copyin(pioc->phci, phci_path, MAXPATHLEN, mode)) {
6190 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> "
6191 "phci_path copyin failed", s));
6192 retval = EFAULT;
6193 }
6194 return (retval);
6195
6196 }
6197
6198
6199 /*
6200 * Routine to get the Client device pathname from ioctl structures in userland
6201 */
6202 /* ARGSUSED */
6203 static int
6204 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path,
6205 int mode, caddr_t s)
6206 {
6207 int retval = 0;
6208
6209 if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) {
6210 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: "
6211 "ioctl <%s> client_path copyin failed", s));
6212 retval = EFAULT;
6213 }
6214 return (retval);
6215 }
6216
6217
6218 /*
6219 * Routine to get physical device address from ioctl structure in userland
6220 */
6221 /* ARGSUSED */
6222 static int
6223 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s)
6224 {
6225 int retval = 0;
6226
6227 if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) {
6228 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: "
6229 "ioctl <%s> device addr copyin failed", s));
6230 retval = EFAULT;
6231 }
6232 return (retval);
6233 }
6234
6235
6236 /*
6237 * Routine to send client device pathname to userland.
6238 */
6239 /* ARGSUSED */
6240 static int
6241 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc,
6242 int mode, caddr_t s)
6243 {
6244 int retval = 0;
6245
6246 if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) {
6247 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: "
6248 "ioctl <%s> client_path copyout failed", s));
6249 retval = EFAULT;
6250 }
6251 return (retval);
6252 }
6253
6254
6255 /*
6256 * Routine to translate a dev_info pointer (dip) to a device pathname.
6257 */
6258 static void
6259 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path)
6260 {
6261 (void) ddi_pathname(dip, path);
6262 }
6263
6264
6265 /*
6266 * vhci_get_phci_path_list:
6267 * get information about devices associated with a
6268 * given PHCI device.
6269 *
6270 * Return Values:
6271 * MDI_SUCCESS, with path information returned in pibuf
6272 */
6273 int
6274 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf,
6275 uint_t num_elems)
6276 {
6277 uint_t count, done;
6278 mdi_pathinfo_t *pip;
6279 sv_path_info_t *ret_pip;
6280 int status;
6281 size_t prop_size;
6282 int circular;
6283
6284 /*
6285 * Walk the paths associated with this PHCI and collect the
6286 * path information for each one.
6287 */
6288
6289 ret_pip = pibuf;
6290 count = 0;
6291
6292 ndi_devi_enter(pdip, &circular);
6293
6294 done = (count >= num_elems);
6295 pip = mdi_get_next_client_path(pdip, NULL);
6296 while (pip && !done) {
6297 mdi_pi_lock(pip);
6298 (void) ddi_pathname(mdi_pi_get_phci(pip),
6299 ret_pip->device.ret_phci);
6300 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6301 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6302 &ret_pip->ret_ext_state);
6303
6304 status = mdi_prop_size(pip, &prop_size);
6305 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6306 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6307 }
6308
6309 #ifdef DEBUG
6310 if (status != MDI_SUCCESS) {
6311 VHCI_DEBUG(2, (CE_WARN, NULL,
6312 "!vhci_get_phci_path_list: "
6313 "phci <%s>, prop size failure 0x%x",
6314 ret_pip->device.ret_phci, status));
6315 }
6316 #endif /* DEBUG */
6317
6318
6319 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6320 prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6321 status = mdi_prop_pack(pip,
6322 &ret_pip->ret_prop.buf,
6323 ret_pip->ret_prop.buf_size);
6324
6325 #ifdef DEBUG
6326 if (status != MDI_SUCCESS) {
6327 VHCI_DEBUG(2, (CE_WARN, NULL,
6328 "!vhci_get_phci_path_list: "
6329 "phci <%s>, prop pack failure 0x%x",
6330 ret_pip->device.ret_phci, status));
6331 }
6332 #endif /* DEBUG */
6333 }
6334
6335 mdi_pi_unlock(pip);
6336 pip = mdi_get_next_client_path(pdip, pip);
6337 ret_pip++;
6338 count++;
6339 done = (count >= num_elems);
6340 }
6341
6342 ndi_devi_exit(pdip, circular);
6343
6344 return (MDI_SUCCESS);
6345 }
6346
6347
6348 /*
6349 * vhci_get_client_path_list:
6350 * get information about various paths associated with a
6351 * given client device.
6352 *
6353 * Return Values:
6354 * MDI_SUCCESS, with path information returned in pibuf
6355 */
6356 int
6357 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf,
6358 uint_t num_elems)
6359 {
6360 uint_t count, done;
6361 mdi_pathinfo_t *pip;
6362 sv_path_info_t *ret_pip;
6363 int status;
6364 size_t prop_size;
6365 int circular;
6366
6367 ret_pip = pibuf;
6368 count = 0;
6369
6370 ndi_devi_enter(cdip, &circular);
6371
6372 done = (count >= num_elems);
6373 pip = mdi_get_next_phci_path(cdip, NULL);
6374 while (pip && !done) {
6375 mdi_pi_lock(pip);
6376 (void) ddi_pathname(mdi_pi_get_phci(pip),
6377 ret_pip->device.ret_phci);
6378 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6379 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6380 &ret_pip->ret_ext_state);
6381
6382 status = mdi_prop_size(pip, &prop_size);
6383 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6384 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6385 }
6386
6387 #ifdef DEBUG
6388 if (status != MDI_SUCCESS) {
6389 VHCI_DEBUG(2, (CE_WARN, NULL,
6390 "!vhci_get_client_path_list: "
6391 "phci <%s>, prop size failure 0x%x",
6392 ret_pip->device.ret_phci, status));
6393 }
6394 #endif /* DEBUG */
6395
6396
6397 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6398 prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6399 status = mdi_prop_pack(pip,
6400 &ret_pip->ret_prop.buf,
6401 ret_pip->ret_prop.buf_size);
6402
6403 #ifdef DEBUG
6404 if (status != MDI_SUCCESS) {
6405 VHCI_DEBUG(2, (CE_WARN, NULL,
6406 "!vhci_get_client_path_list: "
6407 "phci <%s>, prop pack failure 0x%x",
6408 ret_pip->device.ret_phci, status));
6409 }
6410 #endif /* DEBUG */
6411 }
6412
6413 mdi_pi_unlock(pip);
6414 pip = mdi_get_next_phci_path(cdip, pip);
6415 ret_pip++;
6416 count++;
6417 done = (count >= num_elems);
6418 }
6419
6420 ndi_devi_exit(cdip, circular);
6421
6422 return (MDI_SUCCESS);
6423 }
6424
6425
6426 /*
6427 * Routine to get ioctl argument structure from userland.
6428 */
6429 /* ARGSUSED */
6430 static int
6431 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s)
6432 {
6433 int retval = 0;
6434
6435 #ifdef _MULTI_DATAMODEL
6436 switch (ddi_model_convert_from(mode & FMODELS)) {
6437 case DDI_MODEL_ILP32:
6438 {
6439 sv_iocdata32_t ioc32;
6440
6441 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6442 retval = EFAULT;
6443 break;
6444 }
6445 pioc->client = (caddr_t)(uintptr_t)ioc32.client;
6446 pioc->phci = (caddr_t)(uintptr_t)ioc32.phci;
6447 pioc->addr = (caddr_t)(uintptr_t)ioc32.addr;
6448 pioc->buf_elem = (uint_t)ioc32.buf_elem;
6449 pioc->ret_buf = (sv_path_info_t *)(uintptr_t)ioc32.ret_buf;
6450 pioc->ret_elem = (uint_t *)(uintptr_t)ioc32.ret_elem;
6451 break;
6452 }
6453
6454 case DDI_MODEL_NONE:
6455 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6456 retval = EFAULT;
6457 break;
6458 }
6459 break;
6460 }
6461 #else /* _MULTI_DATAMODEL */
6462 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6463 retval = EFAULT;
6464 }
6465 #endif /* _MULTI_DATAMODEL */
6466
6467 #ifdef DEBUG
6468 if (retval) {
6469 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6470 "iocdata copyin failed", s));
6471 }
6472 #endif
6473
6474 return (retval);
6475 }
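/*
 * The _MULTI_DATAMODEL handling above is the standard
 * ddi_model_convert_from(9F) pattern: copy in the ILP32 layout and
 * explicitly widen each 32-bit pointer through uintptr_t. A minimal
 * sketch of the same idiom with a hypothetical structure (the names
 * here are for illustration only):
 *
 *	struct foo32 { caddr32_t buf; uint32_t len; };	/- ILP32 view -/
 *	struct foo { caddr_t buf; uint_t len; };	/- native view -/
 *
 *	case DDI_MODEL_ILP32: {
 *		struct foo32 f32;
 *
 *		if (ddi_copyin(data, &f32, sizeof (f32), mode))
 *			return (EFAULT);
 *		f->buf = (caddr_t)(uintptr_t)f32.buf;	/- widen pointer -/
 *		f->len = (uint_t)f32.len;
 *		break;
 *	}
 */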
6476
6477
6478 /*
6479 * Routine to get the ioctl argument for ioctl causing controller switchover.
6480 */
6481 /* ARGSUSED */
6482 static int
6483 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc,
6484 int mode, caddr_t s)
6485 {
6486 int retval = 0;
6487
6488 #ifdef _MULTI_DATAMODEL
6489 switch (ddi_model_convert_from(mode & FMODELS)) {
6490 case DDI_MODEL_ILP32:
6491 {
6492 sv_switch_to_cntlr_iocdata32_t ioc32;
6493
6494 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6495 retval = EFAULT;
6496 break;
6497 }
6498 piocsc->client = (caddr_t)(uintptr_t)ioc32.client;
6499 piocsc->class = (caddr_t)(uintptr_t)ioc32.class;
6500 break;
6501 }
6502
6503 case DDI_MODEL_NONE:
6504 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6505 retval = EFAULT;
6506 }
6507 break;
6508 }
6509 #else /* _MULTI_DATAMODEL */
6510 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6511 retval = EFAULT;
6512 }
6513 #endif /* _MULTI_DATAMODEL */
6514
6515 #ifdef DEBUG
6516 if (retval) {
6517 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6518 "switch_to_cntlr_iocdata copyin failed", s));
6519 }
6520 #endif
6521
6522 return (retval);
6523 }
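/*
 * Illustrative sketch (assumptions, not part of the driver): a userland
 * caller of SCSI_VHCI_SWITCH_TO_CNTLR fills in the client device path and
 * the destination path class, which vhci_ctl() above restricts to
 * PCLASS_PRIMARY or PCLASS_SECONDARY:
 *
 *	sv_switch_to_cntlr_iocdata_t iocsc;
 *
 *	iocsc.client = client;		/- MAXPATHLEN client device path -/
 *	iocsc.class = pclass;		/- MAXNAMELEN path class buffer -/
 *	(void) ioctl(fd, SCSI_VHCI_SWITCH_TO_CNTLR, &iocsc);
 */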
6524
6525
6526 /*
6527 * Routine to allocate memory for the path information structures.
6528 * It allocates two chunks of memory - one for keeping userland
6529 * pointers/values for path information and path properties, and a
6530 * second for allocating kernel memory for path properties. These path
6531 * properties are finally copied to userland.
6532 */
6533 /* ARGSUSED */
6534 static int
6535 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf,
6536 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6537 {
6538 sv_path_info_t *pi;
6539 uint_t bufsize;
6540 int retval = 0;
6541 int index;
6542
6543 /* Allocate memory */
6544 *upibuf = (sv_path_info_t *)
6545 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6546 ASSERT(*upibuf != NULL);
6547 *kpibuf = (sv_path_info_t *)
6548 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6549 ASSERT(*kpibuf != NULL);
6550
6551 /*
6552 * Get the path info structure from the user space.
6553 * We are interested in the following fields:
6554 * - user size of buffer for per path properties.
6555 * - user address of buffer for path info properties.
6556 * - user pointer for returning actual buffer size
6557 * Keep these fields in the 'upibuf' structures.
6558 * Allocate buffer for per path info properties in kernel
6559 * structure ('kpibuf').
6560 * Size of these buffers will be equal to the size of buffers
6561 * in the user space.
6562 */
6563 #ifdef _MULTI_DATAMODEL
6564 switch (ddi_model_convert_from(mode & FMODELS)) {
6565 case DDI_MODEL_ILP32:
6566 {
6567 sv_path_info32_t *src;
6568 sv_path_info32_t pi32;
6569
6570 src = (sv_path_info32_t *)pioc->ret_buf;
6571 pi = (sv_path_info_t *)*upibuf;
6572 for (index = 0; index < num_paths; index++, src++, pi++) {
6573 if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) {
6574 retval = EFAULT;
6575 break;
6576 }
6577
6578 pi->ret_prop.buf_size =
6579 (uint_t)pi32.ret_prop.buf_size;
6580 pi->ret_prop.ret_buf_size =
6581 (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size;
6582 pi->ret_prop.buf =
6583 (caddr_t)(uintptr_t)pi32.ret_prop.buf;
6584 }
6585 break;
6586 }
6587
6588 case DDI_MODEL_NONE:
6589 if (ddi_copyin(pioc->ret_buf, *upibuf,
6590 sizeof (sv_path_info_t) * num_paths, mode)) {
6591 retval = EFAULT;
6592 }
6593 break;
6594 }
6595 #else /* _MULTI_DATAMODEL */
6596 if (ddi_copyin(pioc->ret_buf, *upibuf,
6597 sizeof (sv_path_info_t) * num_paths, mode)) {
6598 retval = EFAULT;
6599 }
6600 #endif /* _MULTI_DATAMODEL */
6601
6602 if (retval != 0) {
6603 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: "
6604 "ioctl <%s> normal: path_info copyin failed", s));
6605 kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths);
6606 kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths);
6607 *upibuf = NULL;
6608 *kpibuf = NULL;
6609 return (retval);
6610 }
6611
6612 /*
6613 * Allocate memory for per path properties.
6614 */
6615 for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) {
6616 bufsize = (*upibuf)[index].ret_prop.buf_size;
6617
6618 if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) {
6619 pi->ret_prop.buf_size = bufsize;
6620 pi->ret_prop.buf = (caddr_t)
6621 kmem_zalloc(bufsize, KM_SLEEP);
6622 ASSERT(pi->ret_prop.buf != NULL);
6623 } else {
6624 pi->ret_prop.buf_size = 0;
6625 pi->ret_prop.buf = NULL;
6626 }
6627
6628 if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) {
6629 pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc(
6630 sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP);
6631 ASSERT(pi->ret_prop.ret_buf_size != NULL);
6632 } else {
6633 pi->ret_prop.ret_buf_size = NULL;
6634 }
6635 }
6636
6637 return (0);
6638 }
6639
6640
6641 /*
6642 * Routine to free memory for the path information structures.
6643 * This is the memory which was allocated earlier.
6644 */
6645 /* ARGSUSED */
6646 static void
6647 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6648 uint_t num_paths)
6649 {
6650 sv_path_info_t *pi;
6651 int index;
6652
6653 /* Free memory for per path properties */
6654 for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) {
6655 if (pi->ret_prop.ret_buf_size != NULL) {
6656 kmem_free(pi->ret_prop.ret_buf_size,
6657 sizeof (*pi->ret_prop.ret_buf_size));
6658 }
6659
6660 if (pi->ret_prop.buf != NULL) {
6661 kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size);
6662 }
6663 }
6664
6665 /* Free memory for path info structures */
6666 kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths);
6667 kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths);
6668 }
6669
6670
6671 /*
6672 * Routine to copy path information and path properties to userland.
6673 */
6674 /* ARGSUSED */
6675 static int
6676 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6677 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6678 {
6679 int retval = 0, index;
6680 sv_path_info_t *upi_ptr;
6681 sv_path_info32_t *upi32_ptr;
6682
6683 #ifdef _MULTI_DATAMODEL
6684 switch (ddi_model_convert_from(mode & FMODELS)) {
6685 case DDI_MODEL_ILP32:
6686 goto copy_32bit;
6687
6688 case DDI_MODEL_NONE:
6689 goto copy_normal;
6690 }
6691 #else /* _MULTI_DATAMODEL */
6692
6693 goto copy_normal;
6694
6695 #endif /* _MULTI_DATAMODEL */
6696
6697 copy_normal:
6698
6699 /*
6700 * Copy path information and path properties to user land.
6701 * Pointer fields inside the path property structure were
6702 * saved in the 'upibuf' structure earlier.
6703 */
6704 upi_ptr = pioc->ret_buf;
6705 for (index = 0; index < num_paths; index++) {
6706 if (ddi_copyout(kpibuf[index].device.ret_ct,
6707 upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6708 retval = EFAULT;
6709 break;
6710 }
6711
6712 if (ddi_copyout(kpibuf[index].ret_addr,
6713 upi_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6714 retval = EFAULT;
6715 break;
6716 }
6717
6718 if (ddi_copyout(&kpibuf[index].ret_state,
6719 &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state),
6720 mode)) {
6721 retval = EFAULT;
6722 break;
6723 }
6724
6725 if (ddi_copyout(&kpibuf[index].ret_ext_state,
6726 &upi_ptr[index].ret_ext_state,
6727 sizeof (kpibuf[index].ret_ext_state), mode)) {
6728 retval = EFAULT;
6729 break;
6730 }
6731
6732 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6733 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6734 upibuf[index].ret_prop.ret_buf_size,
6735 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6736 retval = EFAULT;
6737 break;
6738 }
6739
6740 if ((kpibuf[index].ret_prop.buf != NULL) &&
6741 ddi_copyout(kpibuf[index].ret_prop.buf,
6742 upibuf[index].ret_prop.buf,
6743 upibuf[index].ret_prop.buf_size, mode)) {
6744 retval = EFAULT;
6745 break;
6746 }
6747 }
6748
6749 #ifdef DEBUG
6750 if (retval) {
6751 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6752 "normal: path_info copyout failed", s));
6753 }
6754 #endif
6755
6756 return (retval);
6757
6758 copy_32bit:
6759 /*
6760 * Copy path information and path properties to user land.
6761 * Pointer fields inside the path property structure were
6762 * saved in the 'upibuf' structure earlier.
6763 */
6764 upi32_ptr = (sv_path_info32_t *)pioc->ret_buf;
6765 for (index = 0; index < num_paths; index++) {
6766 if (ddi_copyout(kpibuf[index].device.ret_ct,
6767 upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6768 retval = EFAULT;
6769 break;
6770 }
6771
6772 if (ddi_copyout(kpibuf[index].ret_addr,
6773 upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6774 retval = EFAULT;
6775 break;
6776 }
6777
6778 if (ddi_copyout(&kpibuf[index].ret_state,
6779 &upi32_ptr[index].ret_state,
6780 sizeof (kpibuf[index].ret_state), mode)) {
6781 retval = EFAULT;
6782 break;
6783 }
6784
6785 if (ddi_copyout(&kpibuf[index].ret_ext_state,
6786 &upi32_ptr[index].ret_ext_state,
6787 sizeof (kpibuf[index].ret_ext_state), mode)) {
6788 retval = EFAULT;
6789 break;
6790 }
6791 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6792 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6793 upibuf[index].ret_prop.ret_buf_size,
6794 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6795 retval = EFAULT;
6796 break;
6797 }
6798
6799 if ((kpibuf[index].ret_prop.buf != NULL) &&
6800 ddi_copyout(kpibuf[index].ret_prop.buf,
6801 upibuf[index].ret_prop.buf,
6802 upibuf[index].ret_prop.buf_size, mode)) {
6803 retval = EFAULT;
6804 break;
6805 }
6806 }
6807
6808 #ifdef DEBUG
6809 if (retval) {
6810 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6811 "32bit: path_info copyout failed", s));
6812 }
6813 #endif
6814
6815 return (retval);
6816 }
6817
6818
6819 /*
6820 * vhci_failover()
6821 * This routine expects VHCI_HOLD_LUN before being invoked. It can be invoked
6822 * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC. For asynchronous failovers
6823 * this routine shall VHCI_RELEASE_LUN on exiting. For synchronous failovers
6824 * it is the caller's responsibility to release the lun.
6825 */
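/*
 * In outline, the failover sequence implemented below is:
 *
 *   1. Quiesce all paths of the currently active path class
 *	(vhci_quiesce_paths()).
 *   2. Ask the failover module for the next candidate path class
 *	(sfo_pathclass_next()).
 *   3. Probe a STANDBY path of that class with a READ to detect an
 *	already-completed failover, a pending reservation, or a POR
 *	unit attention.
 *   4. Activate the path via sfo_path_activate().
 *   5. Ping the remaining paths of the new class and mark the good
 *	ones ONLINE; move paths of the old class to STANDBY.
 *   6. Record the new active path class, update MP-API TPG state,
 *	and notify reset listeners.
 */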
6826
6827 /* ARGSUSED */
6828 static int
6829 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags)
6830 {
6831 char *guid;
6832 scsi_vhci_lun_t *vlun = NULL;
6833 struct scsi_vhci *vhci;
6834 mdi_pathinfo_t *pip, *npip;
6835 char *s_pclass, *pclass1, *pclass2, *pclass;
6836 char active_pclass_copy[255], *active_pclass_ptr;
6837 char *ptr1, *ptr2;
6838 mdi_pathinfo_state_t pi_state;
6839 uint32_t pi_ext_state;
6840 scsi_vhci_priv_t *svp;
6841 struct scsi_device *sd;
6842 struct scsi_failover_ops *sfo;
6843 int sps; /* mdi_select_path() status */
6844 int activation_done = 0;
6845 int rval, retval = MDI_FAILURE;
6846 int reserve_pending, check_condition, UA_condition;
6847 struct scsi_pkt *pkt;
6848 struct buf *bp;
6849
6850 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
6851 sd = ddi_get_driver_private(cdip);
6852 vlun = ADDR2VLUN(&sd->sd_address);
6853 ASSERT(vlun != 0);
6854 ASSERT(VHCI_LUN_IS_HELD(vlun));
6855 guid = vlun->svl_lun_wwn;
6856 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid));
6857 vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s "
6858 "(GUID %s)", ddi_node_name(cdip), guid);
6859
6860 /*
6861 * Maintain a local copy of vlun->svl_active_pclass for the
6862 * rest of the processing. Accessing the field directly in the
6863 * loop below breaks the loop logic when the field is updated
6864 * by other threads (e.g. while updating path status) and causes
6865 * the 'paths are not currently available' condition to be
6866 * declared prematurely.
6867 */
6868 mutex_enter(&vlun->svl_mutex);
6869 if (vlun->svl_active_pclass != NULL) {
6870 (void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
6871 sizeof (active_pclass_copy));
6872 active_pclass_ptr = &active_pclass_copy[0];
6873 mutex_exit(&vlun->svl_mutex);
6874 if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
6875 active_pclass_ptr) != 0) {
6876 retval = MDI_FAILURE;
6877 }
6878 } else {
6879 /*
6880 * can happen only when the available path to device
6881 * discovered is a STANDBY path.
6882 */
6883 mutex_exit(&vlun->svl_mutex);
6884 active_pclass_copy[0] = '\0';
6885 active_pclass_ptr = NULL;
6886 }
6887
6888 sfo = vlun->svl_fops;
6889 ASSERT(sfo != NULL);
6890 pclass1 = s_pclass = active_pclass_ptr;
6891 VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
6892 (s_pclass == NULL ? "<none>" : s_pclass)));
6893
6894 next_pathclass:
6895
6896 rval = sfo->sfo_pathclass_next(pclass1, &pclass2,
6897 vlun->svl_fops_ctpriv);
6898 if (rval == ENOENT) {
6899 if (s_pclass == NULL) {
6900 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
6901 "failed, no more pathclasses\n", guid));
6902 goto done;
6903 } else {
6904 (void) sfo->sfo_pathclass_next(NULL, &pclass2,
6905 vlun->svl_fops_ctpriv);
6906 }
6907 } else if (rval == EINVAL) {
6908 vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
6909 "device %s (GUID %s): Invalid path-class %s",
6910 ddi_node_name(cdip), guid,
6911 ((pclass1 == NULL) ? "<none>" : pclass1));
6912 goto done;
6913 }
6914 if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
6915 /*
6916 * paths are not currently available
6917 */
6918 vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
6919 " for device %s (GUID %s)",
6920 ddi_node_name(cdip), guid);
6921 goto done;
6922 }
6923 pip = npip = NULL;
6924 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
6925 "%s as failover destination\n", guid, pclass2));
6926 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
6927 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6928 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
6929 "STANDBY paths found (status:%x)!\n", guid, sps));
6930 pclass1 = pclass2;
6931 goto next_pathclass;
6932 }
6933 do {
6934 pclass = NULL;
6935 if ((mdi_prop_lookup_string(npip, "path-class",
6936 &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
6937 pclass) != 0)) {
6938 VHCI_DEBUG(1, (CE_NOTE, NULL,
6939 "!vhci_failover(5.5)(%s): skipping path "
6940 "%p(%s)...\n", guid, (void *)npip, pclass));
6941 pip = npip;
6942 sps = mdi_select_path(cdip, NULL,
6943 MDI_SELECT_STANDBY_PATH, pip, &npip);
6944 mdi_rele_path(pip);
6945 (void) mdi_prop_free(pclass);
6946 continue;
6947 }
6948 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
6949
6950 /*
6951 * Issue a READ at a non-zero block on this STANDBY path.
6952 * Purple returns
6953 * 1. RESERVATION_CONFLICT if a reservation is pending
6954 * 2. POR check condition if a reset happened.
6955 * 3. failover check conditions if one is already in progress.
6956 */
6957 reserve_pending = 0;
6958 check_condition = 0;
6959 UA_condition = 0;
6960
6961 bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
6962 (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL);
6963 if (!bp) {
6964 VHCI_DEBUG(1, (CE_NOTE, NULL,
6965 "vhci_failover !No resources (buf)\n"));
6966 mdi_rele_path(npip);
6967 goto done;
6968 }
6969 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
6970 CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
6971 PKT_CONSISTENT, NULL, NULL);
6972 if (pkt) {
6973 (void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t)
6974 pkt->pkt_cdbp, SCMD_READ, 1, 1, 0);
6975 pkt->pkt_flags = FLAG_NOINTR;
6976 check_path_again:
6977 pkt->pkt_path_instance = mdi_pi_get_path_instance(npip);
6978 pkt->pkt_time = 3*30;
6979
6980 if (scsi_transport(pkt) == TRAN_ACCEPT) {
6981 switch (pkt->pkt_reason) {
6982 case CMD_CMPLT:
6983 switch (SCBP_C(pkt)) {
6984 case STATUS_GOOD:
6985 /* Already failed over */
6986 activation_done = 1;
6987 break;
6988 case STATUS_RESERVATION_CONFLICT:
6989 reserve_pending = 1;
6990 break;
6991 case STATUS_CHECK:
6992 check_condition = 1;
6993 break;
6994 }
6995 }
6996 }
6997 if (check_condition &&
6998 (pkt->pkt_state & STATE_ARQ_DONE)) {
6999 uint8_t *sns, skey, asc, ascq;
7000 sns = (uint8_t *)
7001 &(((struct scsi_arq_status *)(uintptr_t)
7002 (pkt->pkt_scbp))->sts_sensedata);
7003 skey = scsi_sense_key(sns);
7004 asc = scsi_sense_asc(sns);
7005 ascq = scsi_sense_ascq(sns);
7006 if (skey == KEY_UNIT_ATTENTION &&
7007 asc == 0x29) {
7008 /* Already failed over */
7009 VHCI_DEBUG(1, (CE_NOTE, NULL,
7010 "!vhci_failover(7)(%s): "
7011 "path 0x%p POR UA condition\n",
7012 guid, (void *)npip));
7013 if (UA_condition == 0) {
7014 UA_condition = 1;
7015 goto check_path_again;
7016 }
7017 } else {
7018 activation_done = 0;
7019 VHCI_DEBUG(1, (CE_NOTE, NULL,
7020 "!vhci_failover(%s): path 0x%p "
7021 "unhandled chkcond %x %x %x\n",
7022 guid, (void *)npip, skey,
7023 asc, ascq));
7024 }
7025 }
7026 scsi_destroy_pkt(pkt);
7027 }
7028 scsi_free_consistent_buf(bp);
7029
7030 if (activation_done) {
7031 mdi_rele_path(npip);
7032 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7033 "path 0x%p already failedover\n", guid,
7034 (void *)npip));
7035 break;
7036 }
7037 if (reserve_pending && (vlun->svl_xlf_capable == 0)) {
7038 (void) vhci_recovery_reset(vlun,
7039 &svp->svp_psd->sd_address,
7040 FALSE, VHCI_DEPTH_ALL);
7041 }
7042 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): "
7043 "activating path 0x%p(psd:%p)\n", guid, (void *)npip,
7044 (void *)svp->svp_psd));
7045 if (sfo->sfo_path_activate(svp->svp_psd, pclass2,
7046 vlun->svl_fops_ctpriv) == 0) {
7047 activation_done = 1;
7048 mdi_rele_path(npip);
7049 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7050 "path 0x%p successfully activated\n", guid,
7051 (void *)npip));
7052 break;
7053 }
7054 pip = npip;
7055 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
7056 pip, &npip);
7057 mdi_rele_path(pip);
7058 } while ((npip != NULL) && (sps == MDI_SUCCESS));
7059 if (activation_done == 0) {
7060 pclass1 = pclass2;
7061 goto next_pathclass;
7062 }
7063
7064 /*
7065 * If we are here, we have succeeded in activating path npip of
7066 * pathclass pclass2; let us validate all paths of pclass2 by
7067 * "ping"-ing each one and marking the good ones ONLINE.
7068 * Also, set the state of the paths belonging to the previously
7069 * active pathclass to STANDBY.
7070 */
7071 pip = npip = NULL;
7072 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7073 MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
7074 NULL, &npip);
7075 if (npip == NULL || sps != MDI_SUCCESS) {
7076 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for "
7077 "device %s (GUID %s): paths may be busy\n",
7078 ddi_node_name(cdip), guid));
7079 goto done;
7080 }
7081 do {
7082 (void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state);
7083 if (mdi_prop_lookup_string(npip, "path-class", &pclass)
7084 != MDI_SUCCESS) {
7085 pip = npip;
7086 sps = mdi_select_path(cdip, NULL,
7087 (MDI_SELECT_ONLINE_PATH |
7088 MDI_SELECT_STANDBY_PATH |
7089 MDI_SELECT_USER_DISABLE_PATH),
7090 pip, &npip);
7091 mdi_rele_path(pip);
7092 continue;
7093 }
7094 if (strcmp(pclass, pclass2) == 0) {
7095 if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
7096 svp = (scsi_vhci_priv_t *)
7097 mdi_pi_get_vhci_private(npip);
7098 VHCI_DEBUG(1, (CE_NOTE, NULL,
7099 "!vhci_failover(8)(%s): "
7100 "pinging path 0x%p\n",
7101 guid, (void *)npip));
7102 if (sfo->sfo_path_ping(svp->svp_psd,
7103 vlun->svl_fops_ctpriv) == 1) {
7104 mdi_pi_set_state(npip,
7105 MDI_PATHINFO_STATE_ONLINE);
7106 VHCI_DEBUG(1, (CE_NOTE, NULL,
7107 "!vhci_failover(9)(%s): "
7108 "path 0x%p ping successful, "
7109 "marked online\n", guid,
7110 (void *)npip));
7111 MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO);
7112 }
7113 }
7114 } else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass)
7115 == 0)) {
7116 if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
7117 mdi_pi_set_state(npip,
7118 MDI_PATHINFO_STATE_STANDBY);
7119 VHCI_DEBUG(1, (CE_NOTE, NULL,
7120 "!vhci_failover(10)(%s): path 0x%p marked "
7121 "STANDBY\n", guid, (void *)npip));
7122 MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM);
7123 }
7124 }
7125 (void) mdi_prop_free(pclass);
7126 pip = npip;
7127 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7128 MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH),
7129 pip, &npip);
7130 mdi_rele_path(pip);
7131 } while ((npip != NULL) && (sps == MDI_SUCCESS));
7132
7133 /*
7134 * Update the AccessState of related MP-API TPGs
7135 */
7136 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
7137
7138 vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully "
7139 "for device %s (GUID %s): failed over from %s to %s",
7140 ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" :
7141 s_pclass), pclass2);
7142 ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP);
7143 (void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1));
7144 mutex_enter(&vlun->svl_mutex);
7145 ptr2 = vlun->svl_active_pclass;
7146 vlun->svl_active_pclass = ptr1;
7147 mutex_exit(&vlun->svl_mutex);
7148 if (ptr2) {
7149 kmem_free(ptr2, strlen(ptr2)+1);
7150 }
7151 mutex_enter(&vhci->vhci_mutex);
7152 scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
7153 &vhci->vhci_reset_notify_listf);
7154 /* All reservations are cleared upon these resets. */
7155 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
7156 mutex_exit(&vhci->vhci_mutex);
7157 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active "
7158 "pathclass for %s is now %s\n", guid, pclass2));
7159 retval = MDI_SUCCESS;
7160
7161 done:
7162 vlun->svl_failover_status = retval;
7163 if (flags == MDI_FAILOVER_ASYNC) {
7164 VHCI_RELEASE_LUN(vlun);
7165 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7166 "releasing lun, as failover was ASYNC\n"));
7167 } else {
7168 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7169 "NOT releasing lun, as failover was SYNC\n"));
7170 }
7171 return (retval);
7172 }
7173
7174 /*
7175 * vhci_client_attached is called after the successful attach of a
7176 * client devinfo node.
7177 */
7178 static void
7179 vhci_client_attached(dev_info_t *cdip)
7180 {
7181 mdi_pathinfo_t *pip;
7182 int circular;
7183
7184 /*
7185 * At this point the client has attached and its instance number is
7186 * valid, so we can set up kstats. We need to do this here because it
7187 * is possible for paths to go online prior to client attach, in which
7188 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
7189 * was a noop.
7190 */
7191 ndi_devi_enter(cdip, &circular);
7192 for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
7193 pip = mdi_get_next_phci_path(cdip, pip))
7194 vhci_kstat_create_pathinfo(pip);
7195 ndi_devi_exit(cdip, circular);
7196 }
7197
7198 /*
7199 * quiesce all of the online paths
7200 */
7201 static int
7202 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
7203 char *guid, char *active_pclass_ptr)
7204 {
7205 scsi_vhci_priv_t *svp;
7206 char *s_pclass = NULL;
7207 mdi_pathinfo_t *npip, *pip;
7208 int sps;
7209
7210 /* quiesce currently active paths */
7211 s_pclass = NULL;
7212 pip = npip = NULL;
7213 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
7214 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
7215 return (1);
7216 }
7217 do {
7218 if (mdi_prop_lookup_string(npip, "path-class",
7219 &s_pclass) != MDI_SUCCESS) {
7220 mdi_rele_path(npip);
7221 vhci_log(CE_NOTE, vdip, "!Failover operation failed "
7222 "for device %s (GUID %s) due to an internal "
7223 "error", ddi_node_name(cdip), guid);
7224 return (1);
7225 }
7226 if (strcmp(s_pclass, active_pclass_ptr) == 0) {
7227 /*
7228 * quiesce path. Free s_pclass since
7229 * we don't need it anymore
7230 */
7231 VHCI_DEBUG(1, (CE_NOTE, NULL,
7232 "!vhci_failover(2)(%s): failing over "
7233 "from %s; quiescing path %p\n",
7234 guid, s_pclass, (void *)npip));
7235 (void) mdi_prop_free(s_pclass);
7236 svp = (scsi_vhci_priv_t *)
7237 mdi_pi_get_vhci_private(npip);
7238 if (svp == NULL) {
7239 VHCI_DEBUG(1, (CE_NOTE, NULL,
7240 "!vhci_failover(2.5)(%s): no "
7241 "client priv! %p offlined?\n",
7242 guid, (void *)npip));
7243 pip = npip;
7244 sps = mdi_select_path(cdip, NULL,
7245 MDI_SELECT_ONLINE_PATH, pip, &npip);
7246 mdi_rele_path(pip);
7247 continue;
7248 }
7249 if (scsi_abort(&svp->svp_psd->sd_address, NULL)
7250 == 0) {
7251 (void) vhci_recovery_reset(vlun,
7252 &svp->svp_psd->sd_address, FALSE,
7253 VHCI_DEPTH_TARGET);
7254 }
7255 mutex_enter(&svp->svp_mutex);
7256 if (svp->svp_cmds == 0) {
7257 VHCI_DEBUG(1, (CE_NOTE, NULL,
7258 "!vhci_failover(3)(%s):"
7259 "quiesced path %p\n", guid, (void *)npip));
7260 } else {
7261 while (svp->svp_cmds != 0) {
7262 cv_wait(&svp->svp_cv, &svp->svp_mutex);
7263 VHCI_DEBUG(1, (CE_NOTE, NULL,
7264 "!vhci_failover(3.cv)(%s):"
7265 "quiesced path %p\n", guid,
7266 (void *)npip));
7267 }
7268 }
7269 mutex_exit(&svp->svp_mutex);
7270 } else {
7271 /*
7272 * make sure we free up the memory
7273 */
7274 (void) mdi_prop_free(s_pclass);
7275 }
7276 pip = npip;
7277 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH,
7278 pip, &npip);
7279 mdi_rele_path(pip);
7280 } while ((npip != NULL) && (sps == MDI_SUCCESS));
7281 return (0);
7282 }
7283
7284 static struct scsi_vhci_lun *
7285 vhci_lun_lookup(dev_info_t *tgt_dip)
7286 {
7287 return ((struct scsi_vhci_lun *)
7288 mdi_client_get_vhci_private(tgt_dip));
7289 }
7290
7291 static struct scsi_vhci_lun *
7292 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc)
7293 {
7294 struct scsi_vhci_lun *svl;
7295
7296 if (svl = vhci_lun_lookup(tgt_dip)) {
7297 return (svl);
7298 }
7299
7300 svl = kmem_zalloc(sizeof (*svl), KM_SLEEP);
7301 svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP);
7302 (void) strcpy(svl->svl_lun_wwn, guid);
7303 mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL);
7304 cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL);
7305 sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL);
7306 svl->svl_waiting_for_activepath = 1;
7307 svl->svl_sector_size = 1;
7308 mdi_client_set_vhci_private(tgt_dip, svl);
7309 *didalloc = 1;
7310 VHCI_DEBUG(1, (CE_NOTE, NULL,
7311 "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
7312 guid, (void *)svl));
7313 return (svl);
7314 }
7315
7316 static void
7317 vhci_lun_free(struct scsi_vhci_lun *dvlp, struct scsi_device *sd)
7318 {
7319 char *guid;
7320
7321 guid = dvlp->svl_lun_wwn;
7322 ASSERT(guid != NULL);
7323 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid));
7324
7325 mutex_enter(&dvlp->svl_mutex);
7326 if (dvlp->svl_active_pclass != NULL) {
7327 kmem_free(dvlp->svl_active_pclass,
7328 strlen(dvlp->svl_active_pclass)+1);
7329 }
7330 dvlp->svl_active_pclass = NULL;
7331 mutex_exit(&dvlp->svl_mutex);
7332
7333 if (dvlp->svl_lun_wwn != NULL) {
7334 kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1);
7335 }
7336 dvlp->svl_lun_wwn = NULL;
7337
7338 if (dvlp->svl_fops_name) {
7339 kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1);
7340 }
7341 dvlp->svl_fops_name = NULL;
7342
7343 if (dvlp->svl_fops_ctpriv != NULL &&
7344 dvlp->svl_fops != NULL) {
7345 dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv);
7346 }
7347
7348 if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG)
7349 taskq_destroy(dvlp->svl_taskq);
7350
7351 mutex_destroy(&dvlp->svl_mutex);
7352 cv_destroy(&dvlp->svl_cv);
7353 sema_destroy(&dvlp->svl_pgr_sema);
7354 kmem_free(dvlp, sizeof (*dvlp));
7355 /*
7356 * vhci_lun_free may be called before the tgt_dip
7357 * initialization so check if the sd is NULL.
7358 */
7359 if (sd != NULL)
7360 scsi_device_hba_private_set(sd, NULL);
7361 }
7362
int
vhci_do_scsi_cmd(struct scsi_pkt *pkt)
{
	int err = 0;
	int retry_cnt = 0;
	uint8_t *sns, skey;

#ifdef DEBUG
	if (vhci_debug > 5) {
		vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip,
		    CE_WARN, "Vhci command", pkt->pkt_cdbp);
	}
#endif

retry:
	err = scsi_poll(pkt);
	if (err) {
		if (pkt->pkt_cdbp[0] == SCMD_RELEASE) {
			if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) {
				VHCI_DEBUG(1, (CE_NOTE, NULL,
				    "!v_s_do_s_c: RELEASE conflict\n"));
				return (0);
			}
		}
		if (retry_cnt++ < 6) {
			VHCI_DEBUG(1, (CE_WARN, NULL,
			    "!v_s_do_s_c:retry packet 0x%p "
			    "status 0x%x reason %s",
			    (void *)pkt, SCBP_C(pkt),
			    scsi_rname(pkt->pkt_reason)));
			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (SCBP_C(pkt) == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				sns = (uint8_t *)
				    &(((struct scsi_arq_status *)(uintptr_t)
				    (pkt->pkt_scbp))->sts_sensedata);
				skey = scsi_sense_key(sns);
				VHCI_DEBUG(1, (CE_WARN, NULL,
				    "!v_s_do_s_c:retry "
				    "packet 0x%p sense data %s", (void *)pkt,
				    scsi_sname(skey)));
			}
			goto retry;
		}
		VHCI_DEBUG(1, (CE_WARN, NULL,
		    "!v_s_do_s_c: failed transport 0x%p 0x%x",
		    (void *)pkt, SCBP_C(pkt)));
		return (0);
	}

	switch (pkt->pkt_reason) {
	case CMD_TIMEOUT:
		VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed "
		    "out (pkt 0x%p)", (void *)pkt));
		return (0);
	case CMD_CMPLT:
		switch (SCBP_C(pkt)) {
		case STATUS_GOOD:
			break;
		case STATUS_CHECK:
			if (pkt->pkt_state & STATE_ARQ_DONE) {
				sns = (uint8_t *)
				    &(((struct scsi_arq_status *)(uintptr_t)
				    (pkt->pkt_scbp))->sts_sensedata);
				skey = scsi_sense_key(sns);
				if ((skey == KEY_UNIT_ATTENTION) ||
				    (skey == KEY_NOT_READY)) {
					/*
					 * clear unit attn.
					 */
					VHCI_DEBUG(1, (CE_WARN, NULL,
					    "!v_s_do_s_c: retry "
					    "packet 0x%p sense data %s",
					    (void *)pkt,
					    scsi_sname(skey)));
					goto retry;
				}
				VHCI_DEBUG(4, (CE_WARN, NULL,
				    "!ARQ while transporting "
				    "(pkt 0x%p)", (void *)pkt));
				return (0);
			}
			return (0);
		default:
			VHCI_DEBUG(1, (CE_WARN, NULL,
			    "!Bad status returned "
			    "(pkt 0x%p, status %x)",
			    (void *)pkt, SCBP_C(pkt)));
			return (0);
		}
		break;
	case CMD_INCOMPLETE:
	case CMD_RESET:
	case CMD_ABORTED:
	case CMD_TRAN_ERR:
		if (retry_cnt++ < 1) {
			VHCI_DEBUG(1, (CE_WARN, NULL,
			    "!v_s_do_s_c: retry packet 0x%p %s",
			    (void *)pkt, scsi_rname(pkt->pkt_reason)));
			goto retry;
		}
		/* FALLTHROUGH */
	default:
		VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not "
		    "complete successfully (pkt 0x%p,"
		    "reason %x)", (void *)pkt, pkt->pkt_reason));
		return (0);
	}
	return (1);
}

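/*
 * Wait for all outstanding commands on every online path of the LUN to
 * drain. Returns 1 once the LUN is quiesced; returns 0 if any path still
 * has commands pending after vhci_path_quiesce_timeout seconds.
 */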
static int
vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
{
	mdi_pathinfo_t *pip, *spip;
	dev_info_t *cdip;
	struct scsi_vhci_priv *svp;
	mdi_pathinfo_state_t pstate;
	uint32_t p_ext_state;
	int circular;

	cdip = vlun->svl_dip;
	pip = spip = NULL;
	ndi_devi_enter(cdip, &circular);
	pip = mdi_get_next_phci_path(cdip, NULL);
	while (pip != NULL) {
		(void) mdi_pi_get_state2(pip, &pstate, &p_ext_state);
		if (pstate != MDI_PATHINFO_STATE_ONLINE) {
			spip = pip;
			pip = mdi_get_next_phci_path(cdip, spip);
			continue;
		}
		mdi_hold_path(pip);
		ndi_devi_exit(cdip, circular);
		svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
		mutex_enter(&svp->svp_mutex);
		while (svp->svp_cmds != 0) {
			if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
			    drv_usectohz(vhci_path_quiesce_timeout * 1000000),
			    TR_CLOCK_TICK) == -1) {
				mutex_exit(&svp->svp_mutex);
				mdi_rele_path(pip);
				VHCI_DEBUG(1, (CE_WARN, NULL,
				    "Quiesce of lun is not successful "
				    "vlun: 0x%p.", (void *)vlun));
				return (0);
			}
		}
		mutex_exit(&svp->svp_mutex);
		ndi_devi_enter(cdip, &circular);
		spip = pip;
		pip = mdi_get_next_phci_path(cdip, spip);
		mdi_rele_path(spip);
	}
	ndi_devi_exit(cdip, circular);
	return (1);
}

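/*
 * Validate the PGR key after a path change and re-register it:
 *
 *  1) Validate the existing key by registering it through some other
 *     (pre-existing) path.
 *  2) If that succeeds, force-register the key (Register-And-Ignore)
 *     on the new path.
 *  3) Re-validate on another pre-existing path, since some other host
 *     could have cleared the key in the meantime.
 *
 * On validation failure, back the key out by registering a zero key.
 * Returns 1 on success, 0 on failure.
 */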
static int
vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp)
{
	scsi_vhci_lun_t *vlun;
	vhci_prout_t *prout;
	int rval, success;
	mdi_pathinfo_t *pip, *npip;
	scsi_vhci_priv_t *osvp;
	dev_info_t *cdip;
	uchar_t cdb_1;
	uchar_t temp_res_key[MHIOC_RESV_KEY_SIZE];

	/*
	 * see if there are any other paths available; if none,
	 * then there is nothing to do.
	 */
	cdip = svp->svp_svl->svl_dip;
	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "%s%d: vhci_pgr_validate_and_register: first path\n",
		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
		return (1);
	}

	vlun = svp->svp_svl;
	prout = &vlun->svl_prout;
	ASSERT(vlun->svl_pgr_active != 0);

	/*
	 * While the path was busy or offlined, some other host might have
	 * cleared this key. Validate the key on some other path first.
	 * If that fails, return failure.
	 */

	npip = pip;
	pip = NULL;
	success = 0;

	/* Save the res key */
	bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE);

	/*
	 * The CDB from the application may be a Register-And-Ignore, which
	 * would force registration rather than validate the key. Convert
	 * it to a normal Register CDB for validation, and be sure to
	 * restore the original CDB afterwards.
	 */
	cdb_1 = vlun->svl_cdb[1];
	vlun->svl_cdb[1] &= 0xe0;
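	/*
	 * (Sketch of the layout this masking assumes, per SPC-3: the low
	 * five bits of PROUT CDB byte 1 hold the service action, so
	 * "&= 0xe0" turns REGISTER AND IGNORE EXISTING KEY (0x06) into a
	 * plain REGISTER (0x00) while leaving the upper bits untouched.)
	 */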

	do {
		osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
		if (osvp == NULL) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: no "
			    "client priv! 0x%p offlined?\n",
			    (void *)npip));
			goto next_path_1;
		}

		if (osvp == svp) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: same svp 0x%p"
			    " npip 0x%p vlun 0x%p\n",
			    (void *)svp, (void *)npip, (void *)vlun));
			goto next_path_1;
		}

		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: First validate on"
		    " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
		    " cdb1 %x\n", (void *)osvp, (void *)vlun,
		    (void *)curthread, vlun->svl_cdb[1]));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:");

		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);

		VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy",
		    (void *)vlun));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");

		rval = vhci_do_prout(osvp);
		if (rval == 1) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "%s%d: vhci_pgr_validate_and_register: key"
			    " validated thread 0x%p\n", ddi_driver_name(cdip),
			    ddi_get_instance(cdip), (void *)curthread));
			pip = npip;
			success = 1;
			break;
		} else {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: First validation"
			    " on osvp 0x%p failed %x\n", (void *)osvp, rval));
			vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:");
		}

		/*
		 * Try other paths
		 */
next_path_1:
		pip = npip;
		rval = mdi_select_path(cdip, NULL,
		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
		    pip, &npip);
		mdi_rele_path(pip);
	} while ((rval == MDI_SUCCESS) && (npip != NULL));

	/* Be sure to restore original cdb */
	vlun->svl_cdb[1] = cdb_1;

	/* Restore the res_key */
	bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);

	/*
	 * If key could not be registered on any path for the first time,
	 * return success as online should still continue.
	 */
	if (success == 0) {
		return (1);
	}

	ASSERT(pip != NULL);

	/*
	 * Force register on new path
	 */
	cdb_1 = vlun->svl_cdb[1];	/* store the cdb */

	vlun->svl_cdb[1] &= 0xe0;
	vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;

	vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: ");

	bcopy(prout->active_service_key, prout->service_key,
	    MHIOC_RESV_KEY_SIZE);
	bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);

	vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys after bcopy: ");

	rval = vhci_do_prout(svp);
	vlun->svl_cdb[1] = cdb_1;	/* restore the cdb */
	if (rval != 1) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: register on new"
		    " path 0x%p svp 0x%p failed %x\n",
		    (void *)pip, (void *)svp, rval));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: ");
		mdi_rele_path(pip);
		return (0);
	}

	if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: zero service key\n"));
		mdi_rele_path(pip);
		return (rval);
	}

	/*
	 * While the key was force registered, some other host might have
	 * cleared the key. Re-validate key on another pre-existing path
	 * before declaring success.
	 */
	npip = pip;
	pip = NULL;

	/*
	 * The CDB from the application may be a Register-And-Ignore, which
	 * would force registration rather than validate the key. Convert
	 * it to a normal Register CDB for validation, and be sure to
	 * restore the original CDB afterwards.
	 */
	cdb_1 = vlun->svl_cdb[1];
	vlun->svl_cdb[1] &= 0xe0;
	success = 0;

	do {
		osvp = (scsi_vhci_priv_t *)
		    mdi_pi_get_vhci_private(npip);
		if (osvp == NULL) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: no "
			    "client priv! 0x%p offlined?\n",
			    (void *)npip));
			goto next_path_2;
		}

		if (osvp == svp) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: same osvp 0x%p"
			    " npip 0x%p vlun 0x%p\n",
			    (void *)svp, (void *)npip, (void *)vlun));
			goto next_path_2;
		}

		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: Re-validation on"
		    " osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
		    (void *)osvp, (void *)vlun, vlun->svl_cdb[1]));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");

		bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);

		vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");

		rval = vhci_do_prout(osvp);
		if (rval == 1) {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "%s%d: vhci_pgr_validate_and_register: key"
			    " validated thread 0x%p\n", ddi_driver_name(cdip),
			    ddi_get_instance(cdip), (void *)curthread));
			pip = npip;
			success = 1;
			break;
		} else {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_pgr_validate_and_register: Re-validation on"
			    " osvp 0x%p failed %x\n", (void *)osvp, rval));
			vhci_print_prout_keys(vlun,
			    "v_pgr_val_reg: reval failed: ");
		}

		/*
		 * Try other paths
		 */
next_path_2:
		pip = npip;
		rval = mdi_select_path(cdip, NULL,
		    MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
		    pip, &npip);
		mdi_rele_path(pip);
	} while ((rval == MDI_SUCCESS) && (npip != NULL));

	/* Be sure to restore original cdb */
	vlun->svl_cdb[1] = cdb_1;

	if (success == 1) {
		/* Successfully validated registration */
		mdi_rele_path(pip);
		return (1);
	}

	VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed"));

	/*
	 * key invalid, back out by registering key value of 0
	 */
	VHCI_DEBUG(4, (CE_NOTE, NULL,
	    "vhci_pgr_validate_and_register: backout on"
	    " svp 0x%p being done\n", (void *)svp));
	vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");

	bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
	bzero(prout->service_key, MHIOC_RESV_KEY_SIZE);

	vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bzero: ");

	/*
	 * Get a new path
	 */
	rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
	    MDI_SELECT_STANDBY_PATH, NULL, &pip);
	if ((rval != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "%s%d: vhci_pgr_validate_and_register: no valid pip\n",
		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
		return (0);
	}

	if ((rval = vhci_do_prout(svp)) != 1) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_pgr_validate_and_register: backout on"
		    " svp 0x%p failed\n", (void *)svp));
		vhci_print_prout_keys(vlun, "backout failed");

		VHCI_DEBUG(4, (CE_WARN, NULL,
		    "%s%d: vhci_pgr_validate_and_register: key"
		    " validation and backout failed", ddi_driver_name(cdip),
		    ddi_get_instance(cdip)));
		if (rval == VHCI_PGR_ILLEGALOP) {
			VHCI_DEBUG(4, (CE_WARN, NULL,
			    "%s%d: vhci_pgr_validate_and_register: key"
			    " already cleared", ddi_driver_name(cdip),
			    ddi_get_instance(cdip)));
			rval = 1;
		} else
			rval = 0;
	} else {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "%s%d: vhci_pgr_validate_and_register: key"
		    " validation failed, key backed out\n",
		    ddi_driver_name(cdip), ddi_get_instance(cdip)));
		vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
	}
	mdi_rele_path(pip);

	return (rval);
}

/*
 * taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures
 * that vhci_scsi_start is not called in interrupt context.
 * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we
 * need to complete the command if something goes wrong.
 */
static void
vhci_dispatch_scsi_start(void *arg)
{
	struct vhci_pkt *vpkt = (struct vhci_pkt *)arg;
	struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
	int rval = TRAN_BUSY;

	VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
	    " scsi-2 reserve for 0x%p\n",
	    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));

	/*
	 * To prevent the taskq from being called recursively we set
	 * the VHCI_PKT_THRU_TASKQ bit in the vhci_pkt_states.
	 */
	vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;

	/*
	 * Wait for the transport to get ready to send packets
	 * and if it times out, it will return something other than
	 * TRAN_BUSY. The vhci_reserve_delay may want to
	 * get tuned for other transports and is therefore a global.
	 * Using delay since this routine is called by taskq dispatch
	 * and not called during interrupt context.
	 */
	while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
	    vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
		delay(drv_usectohz(vhci_reserve_delay));
	}

	switch (rval) {
	case TRAN_ACCEPT:
		return;

	default:
		/*
		 * This pkt shall be retried, and to ensure another taskq
		 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
		 * flag.
		 */
		vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;

		/* Ensure that the pkt is retried without a reset */
		tpkt->pkt_reason = CMD_ABORTED;
		tpkt->pkt_statistics |= STAT_ABORTED;
		VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
		    "TRAN_rval %d returned for dip 0x%p", rval,
		    (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
		break;
	}

	/*
	 * vpkt_org_vpkt should always be NULL here if the retry command
	 * has been successfully dispatched. If vpkt_org_vpkt != NULL at
	 * this point, it is an error so restore the original vpkt and
	 * return an error to the target driver so it can retry the
	 * command as appropriate.
	 */
	if (vpkt->vpkt_org_vpkt != NULL) {
		struct vhci_pkt *new_vpkt = vpkt;
		scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
		    mdi_pi_get_vhci_private(vpkt->vpkt_path);

		vpkt = vpkt->vpkt_org_vpkt;

		vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
		vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;

		vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
		    new_vpkt->vpkt_tgt_pkt);

		tpkt = vpkt->vpkt_tgt_pkt;
	}

	scsi_hba_pkt_comp(tpkt);
}

static void
vhci_initiate_auto_failback(void *arg)
{
	struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
	dev_info_t *vdip, *cdip;
	int held;

	cdip = vlun->svl_dip;
	vdip = ddi_get_parent(cdip);

	VHCI_HOLD_LUN(vlun, VH_SLEEP, held);

	/*
	 * Perform a final check to see if the active path class is indeed
	 * not the preferred path class. In the time since the auto failback
	 * was dispatched, an external failover could have been detected.
	 * (Some other host could have detected this condition and triggered
	 * the auto failback first.)
	 * In such a case, going ahead with failover would negate the whole
	 * purpose of auto failback.
	 */
	mutex_enter(&vlun->svl_mutex);
	if (vlun->svl_active_pclass != NULL) {
		char *best_pclass;
		struct scsi_failover_ops *fo;

		fo = vlun->svl_fops;

		(void) fo->sfo_pathclass_next(NULL, &best_pclass,
		    vlun->svl_fops_ctpriv);
		if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
			mutex_exit(&vlun->svl_mutex);
			VHCI_RELEASE_LUN(vlun);
			VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
			    "auto failback for %s as %s pathclass already "
			    "active.\n", vlun->svl_lun_wwn, best_pclass));
			return;
		}
	}
	mutex_exit(&vlun->svl_mutex);
	if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
	    == MDI_SUCCESS) {
		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
		    "succeeded for device %s (GUID %s)",
		    ddi_node_name(cdip), vlun->svl_lun_wwn);
	} else {
		vhci_log(CE_NOTE, vdip, "!Auto failback operation "
		    "failed for device %s (GUID %s)",
		    ddi_node_name(cdip), vlun->svl_lun_wwn);
	}
	VHCI_RELEASE_LUN(vlun);
}

#ifdef DEBUG
static void
vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
{
	vhci_clean_print(NULL, 5, "Current PGR Keys",
	    (uchar_t *)prin, numkeys * 8);
}
#endif

static void
vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
{
	int i;
	vhci_prout_t *prout;
	char buf1[4 * MHIOC_RESV_KEY_SIZE + 1];
	char buf2[4 * MHIOC_RESV_KEY_SIZE + 1];
	char buf3[4 * MHIOC_RESV_KEY_SIZE + 1];
	char buf4[4 * MHIOC_RESV_KEY_SIZE + 1];
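
	/*
	 * Each key byte is rendered as "[%02x]" (four characters), which
	 * is why the buffers above are sized 4 * MHIOC_RESV_KEY_SIZE + 1,
	 * the +1 leaving room for the terminating NUL.
	 */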
	prout = &vlun->svl_prout;

	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf1[4 * i], "[%02x]", prout->res_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf2[4 * i], "[%02x]", prout->service_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf3[4 * i], "[%02x]",
		    prout->active_res_key[i]);
	for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
		(void) sprintf(&buf4[4 * i], "[%02x]",
		    prout->active_service_key[i]);

	/* Print everything in one call; otherwise the output interleaves. */
	VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
	    "res_key : %s\n"
8006 "service_key : %s\n"
8007 "active_res_key : %s\n"
8008 "active_service_key: %s\n",
8009 msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
8010 }
8011
8012 /*
8013 * Called from vhci_scsi_start to update the pHCI pkt with target packet.
8014 */
8015 static void
vhci_update_pHCI_pkt(struct vhci_pkt * vpkt,struct scsi_pkt * pkt)8016 vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
8017 {
8018
8019 ASSERT(vpkt->vpkt_hba_pkt);
8020
8021 vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
8022 vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;
8023
8024 if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
8025 MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
8026 /*
8027 * Polled Command is requested or HBA is in
8028 * suspended state
8029 */
8030 vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
8031 vpkt->vpkt_hba_pkt->pkt_comp = NULL;
8032 } else {
8033 vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
8034 }
8035 vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
8036 bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
8037 vpkt->vpkt_tgt_init_cdblen);
8038 vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;
8039
8040 /* Re-initialize the following pHCI packet state information */
8041 vpkt->vpkt_hba_pkt->pkt_state = 0;
8042 vpkt->vpkt_hba_pkt->pkt_statistics = 0;
8043 vpkt->vpkt_hba_pkt->pkt_reason = 0;
8044 }
8045
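/*
 * Bus power entry point: delegate to the MPxIO framework and map the
 * MDI return value onto the corresponding DDI status.
 */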
static int
vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
    void *arg, void *result)
{
	int ret = DDI_SUCCESS;

	/*
	 * Generic processing in MPxIO framework
	 */
	ret = mdi_bus_power(parent, impl_arg, op, arg, result);

	switch (ret) {
	case MDI_SUCCESS:
		ret = DDI_SUCCESS;
		break;
	case MDI_FAILURE:
		ret = DDI_FAILURE;
		break;
	default:
		break;
	}

	return (ret);
}

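/*
 * Set a SCSI capability. When a specific pip is supplied (the call came
 * from vhci_pathinfo_state_change for a path that just became ready),
 * set the capability on that path only; otherwise walk every online and
 * standby path and set it on each pHCI in turn.
 */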
static int
vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
    mdi_pathinfo_t *pip)
{
	dev_info_t *cdip;
	mdi_pathinfo_t *npip = NULL;
	scsi_vhci_priv_t *svp = NULL;
	struct scsi_address *pap = NULL;
	scsi_hba_tran_t *hba = NULL;
	int sps;
	int mps_flag;
	int rval = 0;

	mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
	if (pip) {
		/*
		 * If the call is from vhci_pathinfo_state_change,
		 * then this path was busy and is becoming ready to accept IO.
		 */
		ASSERT(ap != NULL);
		hba = ap->a_hba_tran;
		ASSERT(hba != NULL);
		rval = scsi_ifsetcap(ap, cap, val, whom);

		VHCI_DEBUG(2, (CE_NOTE, NULL,
		    "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
		    (void *)pip, (void *)ap, rval));

		return (rval);
	}

	/*
	 * Set capability on all the pHCIs.
	 * If any path is busy, then the capability would be set by
	 * vhci_pathinfo_state_change.
	 */

	cdip = ADDR2DIP(ap);
	ASSERT(cdip != NULL);
	sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
	if ((sps != MDI_SUCCESS) || (pip == NULL)) {
		VHCI_DEBUG(2, (CE_WARN, NULL,
		    "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
		    (void *)cdip));
		return (0);
	}

again:
	svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
	if (svp == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
		    "priv is NULL, pip 0x%p", (void *)pip));
		mdi_rele_path(pip);
		return (rval);
	}

	if (svp->svp_psd == NULL) {
		VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
		    "psd is NULL, pip 0x%p, svp 0x%p",
		    (void *)pip, (void *)svp));
		mdi_rele_path(pip);
		return (rval);
	}

	pap = &svp->svp_psd->sd_address;
	ASSERT(pap != NULL);
	hba = pap->a_hba_tran;
	ASSERT(hba != NULL);

	if (hba->tran_setcap != NULL) {
		rval = scsi_ifsetcap(pap, cap, val, whom);

		VHCI_DEBUG(2, (CE_NOTE, NULL,
		    "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
		    (void *)pip, (void *)ap, rval));

		/*
		 * Select next path and issue the setcap, repeat
		 * until all paths are exhausted
		 */
		sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
		if ((sps != MDI_SUCCESS) || (npip == NULL)) {
			mdi_rele_path(pip);
			return (1);
		}
		mdi_rele_path(pip);
		pip = npip;
		goto again;
	}
	mdi_rele_path(pip);
	return (rval);
}

static int
vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **child)
{
	char *guid;

	if (vhci_bus_config_debug)
		flags |= NDI_DEVI_DEBUG;

	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
		guid = vhci_devnm_to_guid((char *)arg);
	else
		guid = NULL;

	if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
	    == MDI_SUCCESS)
		return (NDI_SUCCESS);
	else
		return (NDI_FAILURE);
}

static int
vhci_scsi_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
	if (vhci_bus_config_debug)
		flags |= NDI_DEVI_DEBUG;

	return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
}

/*
 * Take the original vhci_pkt, create a duplicate of the pkt for resending
 * as though it originated in ssd.
 */
static struct scsi_pkt *
vhci_create_retry_pkt(struct vhci_pkt *vpkt)
{
	struct vhci_pkt *new_vpkt = NULL;
	struct scsi_pkt *pkt = NULL;

	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
	    mdi_pi_get_vhci_private(vpkt->vpkt_path);

	/*
	 * Ensure consistent data at completion time by setting PKT_CONSISTENT
	 */
	pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
	    vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
	    vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
	if (pkt != NULL) {
		new_vpkt = TGTPKT2VHCIPKT(pkt);

		pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
		pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
		pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
		pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;

		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;

		bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
		    pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);

		/*
		 * Save a pointer to the original vhci_pkt
		 */
		new_vpkt->vpkt_org_vpkt = vpkt;
	}

	return (pkt);
}

/*
 * Copy the successful completion information from the hba packet into
 * the original target pkt from the upper layer. Returns the original
 * vpkt and destroys the new vpkt from the internal retry.
 */
static struct vhci_pkt *
vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
{
	struct vhci_pkt *ret_vpkt = NULL;
	struct scsi_pkt *tpkt = NULL;
	struct scsi_pkt *hba_pkt = NULL;
	scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
	    mdi_pi_get_vhci_private(vpkt->vpkt_path);

	ASSERT(vpkt->vpkt_org_vpkt != NULL);
	VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
	    "completed successfully!\n"));

	ret_vpkt = vpkt->vpkt_org_vpkt;
	tpkt = ret_vpkt->vpkt_tgt_pkt;
	hba_pkt = vpkt->vpkt_hba_pkt;

	/*
	 * Copy the good status into the target driver's packet
	 */
	*(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
	tpkt->pkt_resid = hba_pkt->pkt_resid;
	tpkt->pkt_state = hba_pkt->pkt_state;
	tpkt->pkt_statistics = hba_pkt->pkt_statistics;
	tpkt->pkt_reason = hba_pkt->pkt_reason;

	/*
	 * Destroy the internally created vpkt for the retry
	 */
	vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
	    vpkt->vpkt_tgt_pkt);

	return (ret_vpkt);
}

/* restart the request sense request */
static void
vhci_uscsi_restart_sense(void *arg)
{
	struct buf *rqbp;
	struct buf *bp;
	struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
	mp_uscsi_cmd_t *mp_uscmdp;

	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));

	if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
		/* if it fails - need to wakeup the original command */
		mp_uscmdp = rqpkt->pkt_private;
		bp = mp_uscmdp->cmdbp;
		rqbp = mp_uscmdp->rqbp;
		ASSERT(mp_uscmdp && bp && rqbp);
		scsi_free_consistent_buf(rqbp);
		scsi_destroy_pkt(rqpkt);
		bp->b_resid = bp->b_bcount;
		bioerror(bp, EIO);
		biodone(bp);
	}
}

/*
 * auto-rqsense is not enabled so we have to retrieve the request sense
 * manually.
 */
static int
vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
{
	struct buf *rqbp, *cmdbp;
	struct scsi_pkt *rqpkt;
	int rval = 0;

	cmdbp = mp_uscmdp->cmdbp;
	ASSERT(cmdbp != NULL);

	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
	    (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
	/* set up the packet information and cdb */
	if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
	    SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
		return (-1);
	}

	if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
		scsi_free_consistent_buf(rqbp);
		return (-1);
	}

	(void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);

	mp_uscmdp->rqbp = rqbp;
	rqbp->b_private = mp_uscmdp;
	rqpkt->pkt_flags |= FLAG_SENSING;
	rqpkt->pkt_time = 60;
	rqpkt->pkt_comp = vhci_uscsi_iodone;
	rqpkt->pkt_private = mp_uscmdp;

	/*
	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
	 * selection is not based on path_instance.
	 */
	if (scsi_pkt_allocated_correctly(rqpkt))
		rqpkt->pkt_path_instance = 0;

	/* transport the request sense packet */
	switch (scsi_transport(rqpkt)) {
	case TRAN_ACCEPT:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport accepted."));
		break;
	case TRAN_BUSY:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport busy, setting timeout."));
		vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
		    (drv_usectohz(5 * 1000000)));
		break;
	default:
		VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
		    "transport failed"));
		scsi_free_consistent_buf(rqbp);
		scsi_destroy_pkt(rqpkt);
		rval = -1;
	}

	return (rval);
}

/*
 * Completion routine for the MPAPI uscsi command. It behaves as though
 * FLAG_DIAGNOSE were set, meaning there are no retries except for a
 * manual request sense.
 */
void
vhci_uscsi_iodone(struct scsi_pkt *pkt)
{
	struct buf *bp;
	mp_uscsi_cmd_t *mp_uscmdp;
	struct uscsi_cmd *uscmdp;
	struct scsi_arq_status *arqstat;
	int err;

	mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
	uscmdp = mp_uscmdp->uscmdp;
	bp = mp_uscmdp->cmdbp;
	ASSERT(bp != NULL);
	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
	    (void *)bp, (void *)pkt, (void *)mp_uscmdp));
	/* Save the status and the residual into the uscsi_cmd struct */
	uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
	uscmdp->uscsi_resid = bp->b_resid;

	/* return on a very successful command */
	if (pkt->pkt_reason == CMD_CMPLT &&
	    SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
	    pkt->pkt_resid == 0) {
		mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
		scsi_destroy_pkt(pkt);
		biodone(bp);
		return;
	}
	VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
	    " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
	    pkt->pkt_reason, pkt->pkt_resid,
	    pkt->pkt_state, bp->b_bcount, bp->b_resid));

	err = EIO;

	arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
	if (pkt->pkt_reason != CMD_CMPLT) {
		/*
		 * The command did not complete.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: command did not complete."
		    " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
		if (pkt->pkt_flags & FLAG_SENSING) {
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
		} else if (pkt->pkt_reason == CMD_TIMEOUT) {
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
			err = ETIMEDOUT;
		}
	} else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
		/*
		 * The auto-rqsense happened, and the packet has a filled-in
		 * scsi_arq_status structure, pointed to by pkt_scbp.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: received auto-requested sense"));
		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
			/* get the amount of data to copy into rqbuf */
			int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;

			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
			uscmdp->uscsi_rqstatus =
			    *((char *)&arqstat->sts_rqpkt_status);
			if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
			    rqlen != 0) {
				bcopy(&(arqstat->sts_sensedata),
				    uscmdp->uscsi_rqbuf, rqlen);
			}
			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_uscsi_iodone: ARQ "
			    "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
			    "xfer: %d rqpkt_resid: %d\n",
			    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
			    uscmdp->uscsi_rqlen, rqlen,
			    arqstat->sts_rqpkt_resid));
		}
	} else if (pkt->pkt_flags & FLAG_SENSING) {
		struct buf *rqbp;
		struct scsi_status *rqstatus;

		rqstatus = (struct scsi_status *)pkt->pkt_scbp;
		/* a manual request sense was done - get the information */
		if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
			int rqlen = SENSE_LENGTH - pkt->pkt_resid;

			rqbp = mp_uscmdp->rqbp;
			/* get the amount of data to copy into rqbuf */
			rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
			uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
			uscmdp->uscsi_rqstatus = *((char *)rqstatus);
			if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
				bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
				    rqlen);
			}
			MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
			scsi_free_consistent_buf(rqbp);
		}
		VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING"
		    "uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
		    uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
	} else {
		struct scsi_status *status =
		    (struct scsi_status *)pkt->pkt_scbp;
		/*
		 * Command completed and we're not getting sense. Check for
		 * errors and decide what to do next.
		 */
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iodone: command appears complete: reason: %x",
		    pkt->pkt_reason));
		if (status->sts_chk) {
			/* need to manually get the request sense */
			if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
				scsi_destroy_pkt(pkt);
				return;
			}
		} else {
			VHCI_DEBUG(4, (CE_NOTE, NULL,
			    "vhci_chk_err: appears complete"));
			err = 0;
			mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
			if (pkt->pkt_resid) {
				bp->b_resid += pkt->pkt_resid;
			}
		}
	}

	if (err) {
		if (bp->b_resid == 0)
			bp->b_resid = bp->b_bcount;
		bioerror(bp, err);
		bp->b_flags |= B_ERROR;
	}

	scsi_destroy_pkt(pkt);
	biodone(bp);

	VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
}

/*
 * start routine for the mpapi uscsi command
 */
int
vhci_uscsi_iostart(struct buf *bp)
{
	struct scsi_pkt *pkt;
	struct uscsi_cmd *uscmdp;
	mp_uscsi_cmd_t *mp_uscmdp;
	int stat_size, rval;
	int retry = 0;

	ASSERT(bp->b_private != NULL);

	mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
	uscmdp = mp_uscmdp->uscmdp;
	if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
		stat_size = SENSE_LENGTH;
	} else {
		stat_size = 1;
	}

	pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
	    stat_size, 0, 0, SLEEP_FUNC, NULL);
	if (pkt == NULL) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: rval: EINVAL"));
		bp->b_resid = bp->b_bcount;
		uscmdp->uscsi_resid = bp->b_bcount;
		bioerror(bp, EINVAL);
		biodone(bp);
		return (EINVAL);
	}

	pkt->pkt_time = uscmdp->uscsi_timeout;
	bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
	pkt->pkt_comp = vhci_uscsi_iodone;
	pkt->pkt_private = mp_uscmdp;
	if (uscmdp->uscsi_flags & USCSI_SILENT)
		pkt->pkt_flags |= FLAG_SILENT;
	if (uscmdp->uscsi_flags & USCSI_ISOLATE)
		pkt->pkt_flags |= FLAG_ISOLATE;
	if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
		pkt->pkt_flags |= FLAG_DIAGNOSE;
	if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
		pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
	}
	VHCI_DEBUG(4, (CE_WARN, NULL,
	    "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
	    " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
	    " stat_size: %d",
	    (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
	    (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
	    (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));

	/*
	 * NOTE: This code path is related to MPAPI uscsi(7I), so path
	 * selection is not based on path_instance.
	 */
	if (scsi_pkt_allocated_correctly(pkt))
		pkt->pkt_path_instance = 0;

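	/*
	 * If the transport is busy, retry up to vhci_uscsi_retry_count
	 * times, pausing vhci_uscsi_delay microseconds between attempts.
	 */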
	while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
	    retry < vhci_uscsi_retry_count) {
		delay(drv_usectohz(vhci_uscsi_delay));
		retry++;
	}
	if (retry >= vhci_uscsi_retry_count) {
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
	}
	switch (rval) {
	case TRAN_ACCEPT:
		rval = 0;
		break;

	default:
		VHCI_DEBUG(4, (CE_NOTE, NULL,
		    "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
		    rval, bp->b_bcount, bp->b_resid));
		bp->b_resid = bp->b_bcount;
		uscmdp->uscsi_resid = bp->b_bcount;
		bioerror(bp, EIO);
		scsi_destroy_pkt(pkt);
		biodone(bp);
		rval = EIO;
		MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
		break;
	}
	VHCI_DEBUG(4, (CE_NOTE, NULL,
	    "vhci_uscsi_iostart: exit: rval: %d", rval));
	return (rval);
}

/* ARGSUSED */
static struct scsi_failover_ops *
vhci_dev_fo(dev_info_t *vdip, struct scsi_device *psd,
    void **ctprivp, char **fo_namep)
{
	struct scsi_failover_ops *sfo;
	char *sfo_name;
	char *override;
	struct scsi_failover *sf;

	ASSERT(psd && psd->sd_inq);
	if ((psd == NULL) || (psd->sd_inq == NULL)) {
		VHCI_DEBUG(1, (CE_NOTE, NULL,
		    "!vhci_dev_fo:return NULL no scsi_device or inquiry"));
		return (NULL);
	}

	/*
	 * Determine if device is supported under scsi_vhci, and select
	 * failover module.
	 *
	 * See if there is a scsi_vhci.conf file override for this device's
	 * VID/PID. The following values can be returned:
	 *
	 * NULL		If NULL is returned then there is no scsi_vhci.conf
	 *		override. For NULL, we determine the failover_ops for
	 *		this device by checking the sfo_device_probe entry
	 *		point for each 'fops' module, in order.
	 *
	 *		NOTE: Correct operation may depend on module ordering
	 *		of 'specific' (failover modules that are completely
	 *		VID/PID table based) to 'generic' (failover modules
	 *		that are based on T10 standards like TPGS). Currently,
	 *		the value of 'ddi-forceload' in scsi_vhci.conf is used
	 *		to establish the module list and probe order.
	 *
	 * "NONE"	If the value "NONE" is returned then there is a
	 *		scsi_vhci.conf VID/PID override to indicate the device
	 *		should not be supported under scsi_vhci (even if there
	 *		is an 'fops' module supporting the device).
	 *
	 * "<other>"	If another value is returned then that value is the
	 *		name of the 'fops' module that should be used.
	 */
	sfo = NULL;	/* "NONE" */
	override = scsi_get_device_type_string(
	    "scsi-vhci-failover-override", vdip, psd);
	if (override == NULL) {
		/* NULL: default: select based on sfo_device_probe results */
		for (sf = scsi_failover_table; sf->sf_mod; sf++) {
			if ((sf->sf_sfo == NULL) ||
			    sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
			    ctprivp) == SFO_DEVICE_PROBE_PHCI)
				continue;

			/* found failover module, supported under scsi_vhci */
			sfo = sf->sf_sfo;
			if (fo_namep && (*fo_namep == NULL)) {
				sfo_name = i_ddi_strdup(sfo->sfo_name,
				    KM_SLEEP);
				*fo_namep = sfo_name;
			}
			break;
		}
	} else if (strcasecmp(override, "NONE")) {
		/* !"NONE": select based on driver.conf specified name */
		for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
			if ((sf->sf_sfo == NULL) ||
			    (sf->sf_sfo->sfo_name == NULL) ||
			    strcmp(override, sf->sf_sfo->sfo_name))
				continue;

			/*
			 * NOTE: If sfo_device_probe() has side-effects,
			 * including setting *ctprivp, these are not going
			 * to occur with override config.
			 */

			/* found failover module, supported under scsi_vhci */
			sfo = sf->sf_sfo;
			if (fo_namep && (*fo_namep == NULL)) {
				sfo_name = kmem_alloc(strlen("conf ") +
				    strlen(sfo->sfo_name) + 1, KM_SLEEP);
				(void) sprintf(sfo_name, "conf %s",
				    sfo->sfo_name);
				*fo_namep = sfo_name;
			}
			break;
		}
	}
	if (override)
		kmem_free(override, strlen(override) + 1);
	return (sfo);
}

/*
 * Determine whether the device described by cinfo should be enumerated
 * under the vHCI or the pHCI - if there is a failover ops then the device
 * is supported under the vHCI. By agreement with SCSA, cinfo is a pointer
 * to a scsi_device structure associated with a decorated pHCI probe node.
 */
/* ARGSUSED */
int
vhci_is_dev_supported(dev_info_t *vdip, dev_info_t *pdip, void *cinfo)
{
	struct scsi_device *psd = (struct scsi_device *)cinfo;

	return (vhci_dev_fo(vdip, psd, NULL, NULL) ? MDI_SUCCESS : MDI_FAILURE);
}

#ifdef DEBUG
extern struct scsi_key_strings scsi_cmds[];

static char *
vhci_print_scsi_cmd(char cmd)
{
	char tmp[64];
	char *cpnt;

	cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
	/* tmp goes out of scope on return, so don't return it to the caller */
	if (cpnt == tmp) {
		cpnt = "Unknown Command";
	}
	return (cpnt);
}

extern uchar_t scsi_cdb_size[];

static void
vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
{
	int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
	char buf[256];

	if (level == CE_NOTE) {
		vhci_log(level, dip, "path cmd %s\n",
		    vhci_print_scsi_cmd(*cdb));
		return;
	}

	(void) sprintf(buf, "%s for cmd(%s)", title, vhci_print_scsi_cmd(*cdb));
	vhci_clean_print(dip, level, buf, cdb, len);
}

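/*
 * Log a titled hex dump of 'len' bytes of 'data', eight bytes per line.
 */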
static void
vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
    int len)
{
	int i;
	int c;
	char *format;
	char buf[256];
	uchar_t byte;

	(void) sprintf(buf, "%s:\n", title);
	vhci_log(level, dev, "%s", buf);
	level = CE_CONT;
	for (i = 0; i < len; ) {
		buf[0] = 0;
		for (c = 0; c < 8 && i < len; c++, i++) {
			byte = (uchar_t)data[i];
			if (byte < 0x10)
				format = "0x0%x ";
			else
				format = "0x%x ";
			(void) sprintf(&buf[(int)strlen(buf)], format, byte);
		}
		(void) sprintf(&buf[(int)strlen(buf)], "\n");

		vhci_log(level, dev, "%s\n", buf);
	}
}
#endif
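
/*
 * Mark the cached MPAPI LU data for this vlun invalid so that the MPAPI
 * layer rebuilds it on the next access.
 */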
static void
vhci_invalidate_mpapi_lu(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun)
{
	char *svl_wwn;
	mpapi_item_list_t *ilist;
	mpapi_lu_data_t *ld;

	if (vlun == NULL) {
		return;
	} else {
		svl_wwn = vlun->svl_lun_wwn;
	}

	ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head;

	while (ilist != NULL) {
		ld = (mpapi_lu_data_t *)(ilist->item->idata);
		if ((ld != NULL) && (strncmp(ld->prop.name, svl_wwn,
		    strlen(svl_wwn)) == 0)) {
			ld->valid = 0;
			VHCI_DEBUG(6, (CE_WARN, NULL,
			    "vhci_invalidate_mpapi_lu: "
			    "Invalidated LU(%s)", svl_wwn));
			return;
		}
		ilist = ilist->next;
	}
	VHCI_DEBUG(6, (CE_WARN, NULL, "vhci_invalidate_mpapi_lu: "
	    "Could not find LU(%s) to invalidate.", svl_wwn));
}