1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #include <sys/conf.h>
26 #include <sys/file.h>
27 #include <sys/ddi.h>
28 #include <sys/sunddi.h>
29 #include <sys/modctl.h>
30 #include <sys/scsi/scsi.h>
31 #include <sys/scsi/generic/persist.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/disp.h>
34 #include <sys/byteorder.h>
35 #include <sys/atomic.h>
36 #include <sys/ethernet.h>
37 #include <sys/sdt.h>
38 #include <sys/nvpair.h>
39 #include <sys/zone.h>
40 #include <sys/id_space.h>
41
42 #include <sys/stmf.h>
43 #include <sys/lpif.h>
44 #include <sys/portif.h>
45 #include <sys/stmf_ioctl.h>
46 #include <sys/pppt_ic_if.h>
47
48 #include "stmf_impl.h"
49 #include "lun_map.h"
50 #include "stmf_state.h"
51 #include "stmf_stats.h"
52
53 /*
54 * Lock order:
55 * stmf_state_lock --> ilport_lock/iss_lockp --> ilu_task_lock
56 */
57
58 static uint64_t stmf_session_counter = 0;
59 static uint16_t stmf_rtpid_counter = 0;
60 /* start messages at 1 */
61 static uint64_t stmf_proxy_msg_id = 1;
62 #define MSG_ID_TM_BIT 0x8000000000000000
63 #define ALIGNED_TO_8BYTE_BOUNDARY(i) (((i) + 7) & ~7)
64
65 static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
66 static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
67 static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
68 void **result);
69 static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
70 static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
71 static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
72 cred_t *credp, int *rval);
73 static int stmf_get_stmf_state(stmf_state_desc_t *std);
74 static int stmf_set_stmf_state(stmf_state_desc_t *std);
75 static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
76 char *info);
77 static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state);
78 static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state);
79
80 static void stmf_task_audit(stmf_i_scsi_task_t *itask,
81 task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf);
82
83 static boolean_t stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp);
84 static char stmf_ctoi(char c);
85 stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua);
86 void stmf_svc_init();
87 stmf_status_t stmf_svc_fini();
88 void stmf_svc(void *arg);
89 void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info);
90 void stmf_check_freetask();
91 void stmf_abort_target_reset(scsi_task_t *task);
92 stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
93 int target_reset);
94 void stmf_target_reset_poll(struct scsi_task *task);
95 void stmf_handle_lun_reset(scsi_task_t *task);
96 void stmf_handle_target_reset(scsi_task_t *task);
97 void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off);
98 int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
99 uint32_t *err_ret);
100 int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi);
101 int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
102 uint32_t *err_ret);
103 void stmf_delete_ppd(stmf_pp_data_t *ppd);
104 void stmf_delete_all_ppds();
105 void stmf_trace_clear();
106 void stmf_worker_init();
107 stmf_status_t stmf_worker_fini();
108 void stmf_worker_mgmt();
109 void stmf_worker_task(void *arg);
110 static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss);
111 static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg,
112 uint32_t type);
113 static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg);
114 static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg);
115 static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg);
116 static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg);
117 void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s);
118
119 /* pppt modhandle */
120 ddi_modhandle_t pppt_mod;
121
122 /* pppt modload imported functions */
123 stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc;
124 stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc;
125 stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc;
126 stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc;
127 stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc;
128 stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc;
129 stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc;
130 stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc;
131 stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc;
132 stmf_ic_tx_msg_func_t ic_tx_msg;
133 stmf_ic_msg_free_func_t ic_msg_free;
134
135 static void stmf_itl_task_start(stmf_i_scsi_task_t *itask);
136 static void stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask);
137 static void stmf_itl_task_done(stmf_i_scsi_task_t *itask);
138
139 static void stmf_lport_xfer_start(stmf_i_scsi_task_t *itask,
140 stmf_data_buf_t *dbuf);
141 static void stmf_lport_xfer_done(stmf_i_scsi_task_t *itask,
142 stmf_data_buf_t *dbuf);
143
144 static void stmf_update_kstat_lu_q(scsi_task_t *, void());
145 static void stmf_update_kstat_lport_q(scsi_task_t *, void());
146 static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *);
147 static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *);
148
149 static int stmf_irport_compare(const void *void_irport1,
150 const void *void_irport2);
151 static stmf_i_remote_port_t *stmf_irport_create(scsi_devid_desc_t *rport_devid);
152 static void stmf_irport_destroy(stmf_i_remote_port_t *irport);
153 static stmf_i_remote_port_t *stmf_irport_register(
154 scsi_devid_desc_t *rport_devid);
155 static stmf_i_remote_port_t *stmf_irport_lookup_locked(
156 scsi_devid_desc_t *rport_devid);
157 static void stmf_irport_deregister(stmf_i_remote_port_t *irport);
158
159 static void stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks);
160 static void stmf_delete_itl_kstat_by_lport(char *);
161 static void stmf_delete_itl_kstat_by_guid(char *);
162 static int stmf_itl_kstat_compare(const void*, const void*);
163 static stmf_i_itl_kstat_t *stmf_itl_kstat_lookup(char *kstat_nm);
164 static stmf_i_itl_kstat_t *stmf_itl_kstat_create(stmf_itl_data_t *itl,
165 char *nm, scsi_devid_desc_t *lport, scsi_devid_desc_t *lun);
166
167 extern struct mod_ops mod_driverops;
168
169 /* =====[ Tunables ]===== */
170 /* Internal tracing */
171 volatile int stmf_trace_on = 1;
172 volatile int stmf_trace_buf_size = (1 * 1024 * 1024);
173 /*
174 * The reason default task timeout is 75 is because we want the
175 * host to timeout 1st and mostly host timeout is 60 seconds.
176 */
177 volatile int stmf_default_task_timeout = 75;
178 /*
179 * Setting this to one means, you are responsible for config load and keeping
180 * things in sync with persistent database.
181 */
182 volatile int stmf_allow_modunload = 0;
183
184 volatile int stmf_max_nworkers = 256;
185 volatile int stmf_min_nworkers = 4;
186 volatile int stmf_worker_scale_down_delay = 20;
187
188 /* === [ Debugging and fault injection ] === */
189 #ifdef DEBUG
190 volatile int stmf_drop_task_counter = 0;
191 volatile int stmf_drop_buf_counter = 0;
192
193 #endif
194
/* Global STMF soft state and the internal LU used for task management. */
stmf_state_t stmf_state;
static stmf_lu_t *dlun0;

/*
 * Nibble lookup tables: for a 4-bit value i, the index of the first
 * zero/one bit (from the low end); 0xff means no such bit exists.
 */
static uint8_t stmf_first_zero[] =
	{ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
static uint8_t stmf_first_one[] =
	{ 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };

/* Internal trace ring buffer; size is latched from the tunable at _init. */
static kmutex_t trace_buf_lock;
static int trace_buf_size;
static int trace_buf_curndx;
caddr_t stmf_trace_buf;

/* Worker-pool lifecycle state machine. */
static enum {
	STMF_WORKERS_DISABLED = 0,
	STMF_WORKERS_ENABLING,
	STMF_WORKERS_ENABLED
} stmf_workers_state = STMF_WORKERS_DISABLED;
static int stmf_i_max_nworkers;
static int stmf_i_min_nworkers;
static int stmf_nworkers_cur;		/* # of workers currently running */
static int stmf_nworkers_needed;	/* # of workers need to be running */
static int stmf_worker_sel_counter = 0;
static uint32_t stmf_cur_ntasks = 0;
static clock_t stmf_wm_last = 0;
/*
 * This is equal to stmf_nworkers_cur while we are increasing # workers and
 * stmf_nworkers_needed while we are decreasing the worker count.
 */
static int stmf_nworkers_accepting_cmds;
static stmf_worker_t *stmf_workers = NULL;
static clock_t stmf_worker_mgmt_delay = 2;	/* mgmt wakeup, in ticks */
static clock_t stmf_worker_scale_down_timer = 0;
static int stmf_worker_scale_down_qd = 0;
229
/*
 * Character device entry points for the stmf control node; the driver is
 * ioctl-only (no read/write/strategy).
 */
static struct cb_ops stmf_cb_ops = {
	stmf_open,			/* open */
	stmf_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	stmf_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP,			/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

/* DDI device operations; only attach/detach/getinfo are implemented. */
static struct dev_ops stmf_ops = {
	DEVO_REV,
	0,
	stmf_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	stmf_attach,
	stmf_detach,
	nodev,			/* reset */
	&stmf_cb_ops,
	NULL,			/* bus_ops */
	NULL			/* power */
};

#define	STMF_NAME		"COMSTAR STMF"
#define	STMF_MODULE_NAME	"stmf"

/* Loadable-module linkage: a single driver module. */
static struct modldrv modldrv = {
	&mod_driverops,
	STMF_NAME,
	&stmf_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
279
int
_init(void)
{
	int ret;

	ret = mod_install(&modlinkage);
	if (ret)
		return (ret);
	/*
	 * NOTE(review): global state below is set up only after mod_install()
	 * succeeds.  This relies on no entry point being invoked before
	 * _init() returns -- confirm before reordering any of this.
	 */
	stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP);
	/* Latch the allocated size; the tunable may change at runtime. */
	trace_buf_size = stmf_trace_buf_size;
	trace_buf_curndx = 0;
	mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0);
	bzero(&stmf_state, sizeof (stmf_state_t));
	/* STMF service is off by default */
	stmf_state.stmf_service_running = 0;
	/* default lu/lport states are online */
	stmf_state.stmf_default_lu_state = STMF_STATE_ONLINE;
	stmf_state.stmf_default_lport_state = STMF_STATE_ONLINE;
	mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL);
	/* Start session ids from the current lbolt value. */
	stmf_session_counter = (uint64_t)ddi_get_lbolt();
	/* Remote ports are tracked in an AVL tree keyed by devid. */
	avl_create(&stmf_state.stmf_irportlist,
	    stmf_irport_compare, sizeof (stmf_i_remote_port_t),
	    offsetof(stmf_i_remote_port_t, irport_ln));
	stmf_state.stmf_ilport_inst_space =
	    id_space_create("lport-instances", 0, MAX_ILPORT);
	stmf_state.stmf_irport_inst_space =
	    id_space_create("rport-instances", 0, MAX_IRPORT);
	avl_create(&stmf_state.stmf_itl_kstat_list,
	    stmf_itl_kstat_compare, sizeof (stmf_i_itl_kstat_t),
	    offsetof(stmf_i_itl_kstat_t, iitl_kstat_ln));
	stmf_view_init();
	stmf_svc_init();
	stmf_dlun_init();
	return (ret);
}
316
317 int
_fini(void)318 _fini(void)
319 {
320 int ret;
321 stmf_i_remote_port_t *irport;
322 stmf_i_itl_kstat_t *ks_itl;
323 void *avl_dest_cookie = NULL;
324
325 if (stmf_state.stmf_service_running)
326 return (EBUSY);
327 if ((!stmf_allow_modunload) &&
328 (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) {
329 return (EBUSY);
330 }
331 if (stmf_state.stmf_nlps || stmf_state.stmf_npps) {
332 return (EBUSY);
333 }
334 if (stmf_dlun_fini() != STMF_SUCCESS)
335 return (EBUSY);
336 if (stmf_worker_fini() != STMF_SUCCESS) {
337 stmf_dlun_init();
338 return (EBUSY);
339 }
340 if (stmf_svc_fini() != STMF_SUCCESS) {
341 stmf_dlun_init();
342 stmf_worker_init();
343 return (EBUSY);
344 }
345
346 ret = mod_remove(&modlinkage);
347 if (ret) {
348 stmf_svc_init();
349 stmf_dlun_init();
350 stmf_worker_init();
351 return (ret);
352 }
353
354 stmf_view_clear_config();
355
356 while ((irport = avl_destroy_nodes(&stmf_state.stmf_irportlist,
357 &avl_dest_cookie)) != NULL)
358 stmf_irport_destroy(irport);
359 avl_destroy(&stmf_state.stmf_irportlist);
360 id_space_destroy(stmf_state.stmf_ilport_inst_space);
361 id_space_destroy(stmf_state.stmf_irport_inst_space);
362
363 avl_dest_cookie = NULL;
364 while ((ks_itl = avl_destroy_nodes(&stmf_state.stmf_itl_kstat_list,
365 &avl_dest_cookie)) != NULL) {
366 stmf_teardown_itl_kstats(ks_itl);
367 kmem_free(ks_itl, sizeof (ks_itl));
368 }
369 avl_destroy(&stmf_state.stmf_itl_kstat_list);
370
371 kmem_free(stmf_trace_buf, stmf_trace_buf_size);
372 mutex_destroy(&trace_buf_lock);
373 mutex_destroy(&stmf_state.stmf_lock);
374 cv_destroy(&stmf_state.stmf_cv);
375 return (ret);
376 }
377
/* Report module information via the standard modlinkage. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
383
384 /* ARGSUSED */
385 static int
stmf_getinfo(dev_info_t * dip,ddi_info_cmd_t cmd,void * arg,void ** result)386 stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
387 {
388 switch (cmd) {
389 case DDI_INFO_DEVT2DEVINFO:
390 *result = stmf_state.stmf_dip;
391 break;
392 case DDI_INFO_DEVT2INSTANCE:
393 *result =
394 (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip);
395 break;
396 default:
397 return (DDI_FAILURE);
398 }
399
400 return (DDI_SUCCESS);
401 }
402
403 static int
stmf_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)404 stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
405 {
406 switch (cmd) {
407 case DDI_ATTACH:
408 stmf_state.stmf_dip = dip;
409
410 if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
411 DDI_NT_STMF, 0) != DDI_SUCCESS) {
412 break;
413 }
414 ddi_report_dev(dip);
415 return (DDI_SUCCESS);
416 }
417
418 return (DDI_FAILURE);
419 }
420
421 static int
stmf_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)422 stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
423 {
424 switch (cmd) {
425 case DDI_DETACH:
426 ddi_remove_minor_node(dip, 0);
427 return (DDI_SUCCESS);
428 }
429
430 return (DDI_FAILURE);
431 }
432
433 /* ARGSUSED */
434 static int
stmf_open(dev_t * devp,int flag,int otype,cred_t * credp)435 stmf_open(dev_t *devp, int flag, int otype, cred_t *credp)
436 {
437 mutex_enter(&stmf_state.stmf_lock);
438 if (stmf_state.stmf_exclusive_open) {
439 mutex_exit(&stmf_state.stmf_lock);
440 return (EBUSY);
441 }
442 if (flag & FEXCL) {
443 if (stmf_state.stmf_opened) {
444 mutex_exit(&stmf_state.stmf_lock);
445 return (EBUSY);
446 }
447 stmf_state.stmf_exclusive_open = 1;
448 }
449 stmf_state.stmf_opened = 1;
450 mutex_exit(&stmf_state.stmf_lock);
451 return (0);
452 }
453
/*
 * close(9E) entry point.  If an exclusive opener closes without having
 * completed configuration initialization, the partial configuration is
 * discarded and the view database is reset to a clean state.
 */
/* ARGSUSED */
static int
stmf_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_opened = 0;
	if (stmf_state.stmf_exclusive_open &&
	    (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) {
		/* abandon the half-built config: ppds first, then views */
		stmf_state.stmf_config_state = STMF_CONFIG_NONE;
		stmf_delete_all_ppds();
		stmf_view_clear_config();
		stmf_view_init();
	}
	stmf_state.stmf_exclusive_open = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}
471
472 int
stmf_copyin_iocdata(intptr_t data,int mode,stmf_iocdata_t ** iocd,void ** ibuf,void ** obuf)473 stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd,
474 void **ibuf, void **obuf)
475 {
476 int ret;
477
478 *ibuf = NULL;
479 *obuf = NULL;
480 *iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP);
481
482 ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode);
483 if (ret)
484 return (EFAULT);
485 if ((*iocd)->stmf_version != STMF_VERSION_1) {
486 ret = EINVAL;
487 goto copyin_iocdata_done;
488 }
489 if ((*iocd)->stmf_ibuf_size) {
490 *ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP);
491 ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf),
492 *ibuf, (*iocd)->stmf_ibuf_size, mode);
493 }
494 if ((*iocd)->stmf_obuf_size)
495 *obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP);
496
497 if (ret == 0)
498 return (0);
499 ret = EFAULT;
500 copyin_iocdata_done:;
501 if (*obuf) {
502 kmem_free(*obuf, (*iocd)->stmf_obuf_size);
503 *obuf = NULL;
504 }
505 if (*ibuf) {
506 kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
507 *ibuf = NULL;
508 }
509 kmem_free(*iocd, sizeof (stmf_iocdata_t));
510 return (ret);
511 }
512
513 int
stmf_copyout_iocdata(intptr_t data,int mode,stmf_iocdata_t * iocd,void * obuf)514 stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
515 {
516 int ret;
517
518 if (iocd->stmf_obuf_size) {
519 ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
520 iocd->stmf_obuf_size, mode);
521 if (ret)
522 return (EFAULT);
523 }
524 ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
525 if (ret)
526 return (EFAULT);
527 return (0);
528 }
529
530 /* ARGSUSED */
531 static int
stmf_ioctl(dev_t dev,int cmd,intptr_t data,int mode,cred_t * credp,int * rval)532 stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
533 cred_t *credp, int *rval)
534 {
535 stmf_iocdata_t *iocd;
536 void *ibuf = NULL, *obuf = NULL;
537 slist_lu_t *luid_list;
538 slist_target_port_t *lportid_list;
539 stmf_i_lu_t *ilu;
540 stmf_i_local_port_t *ilport;
541 stmf_i_scsi_session_t *iss;
542 slist_scsi_session_t *iss_list;
543 sioc_lu_props_t *lup;
544 sioc_target_port_props_t *lportp;
545 stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
546 uint64_t *ppi_token = NULL;
547 uint8_t *p_id, *id;
548 stmf_state_desc_t *std;
549 stmf_status_t ctl_ret;
550 stmf_state_change_info_t ssi;
551 int ret = 0;
552 uint32_t n;
553 int i;
554 stmf_group_op_data_t *grp_entry;
555 stmf_group_name_t *grpname;
556 stmf_view_op_entry_t *ve;
557 stmf_id_type_t idtype;
558 stmf_id_data_t *id_entry;
559 stmf_id_list_t *id_list;
560 stmf_view_entry_t *view_entry;
561 stmf_set_props_t *stmf_set_props;
562 uint32_t veid;
563 if ((cmd & 0xff000000) != STMF_IOCTL) {
564 return (ENOTTY);
565 }
566
567 if (drv_priv(credp) != 0) {
568 return (EPERM);
569 }
570
571 ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
572 if (ret)
573 return (ret);
574 iocd->stmf_error = 0;
575
576 switch (cmd) {
577 case STMF_IOCTL_LU_LIST:
578 /* retrieves both registered/unregistered */
579 mutex_enter(&stmf_state.stmf_lock);
580 id_list = &stmf_state.stmf_luid_list;
581 n = min(id_list->id_count,
582 (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
583 iocd->stmf_obuf_max_nentries = id_list->id_count;
584 luid_list = (slist_lu_t *)obuf;
585 id_entry = id_list->idl_head;
586 for (i = 0; i < n; i++) {
587 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
588 id_entry = id_entry->id_next;
589 }
590
591 n = iocd->stmf_obuf_size/sizeof (slist_lu_t);
592 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
593 id = (uint8_t *)ilu->ilu_lu->lu_id;
594 if (stmf_lookup_id(id_list, 16, id + 4) == NULL) {
595 iocd->stmf_obuf_max_nentries++;
596 if (i < n) {
597 bcopy(id + 4, luid_list[i].lu_guid,
598 sizeof (slist_lu_t));
599 i++;
600 }
601 }
602 }
603 iocd->stmf_obuf_nentries = i;
604 mutex_exit(&stmf_state.stmf_lock);
605 break;
606
607 case STMF_IOCTL_REG_LU_LIST:
608 mutex_enter(&stmf_state.stmf_lock);
609 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus;
610 n = min(stmf_state.stmf_nlus,
611 (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
612 iocd->stmf_obuf_nentries = n;
613 ilu = stmf_state.stmf_ilulist;
614 luid_list = (slist_lu_t *)obuf;
615 for (i = 0; i < n; i++) {
616 uint8_t *id;
617 id = (uint8_t *)ilu->ilu_lu->lu_id;
618 bcopy(id + 4, luid_list[i].lu_guid, 16);
619 ilu = ilu->ilu_next;
620 }
621 mutex_exit(&stmf_state.stmf_lock);
622 break;
623
624 case STMF_IOCTL_VE_LU_LIST:
625 mutex_enter(&stmf_state.stmf_lock);
626 id_list = &stmf_state.stmf_luid_list;
627 n = min(id_list->id_count,
628 (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
629 iocd->stmf_obuf_max_nentries = id_list->id_count;
630 iocd->stmf_obuf_nentries = n;
631 luid_list = (slist_lu_t *)obuf;
632 id_entry = id_list->idl_head;
633 for (i = 0; i < n; i++) {
634 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
635 id_entry = id_entry->id_next;
636 }
637 mutex_exit(&stmf_state.stmf_lock);
638 break;
639
640 case STMF_IOCTL_TARGET_PORT_LIST:
641 mutex_enter(&stmf_state.stmf_lock);
642 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports;
643 n = min(stmf_state.stmf_nlports,
644 (iocd->stmf_obuf_size)/sizeof (slist_target_port_t));
645 iocd->stmf_obuf_nentries = n;
646 ilport = stmf_state.stmf_ilportlist;
647 lportid_list = (slist_target_port_t *)obuf;
648 for (i = 0; i < n; i++) {
649 uint8_t *id;
650 id = (uint8_t *)ilport->ilport_lport->lport_id;
651 bcopy(id, lportid_list[i].target, id[3] + 4);
652 ilport = ilport->ilport_next;
653 }
654 mutex_exit(&stmf_state.stmf_lock);
655 break;
656
657 case STMF_IOCTL_SESSION_LIST:
658 p_id = (uint8_t *)ibuf;
659 if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
660 (iocd->stmf_ibuf_size < (p_id[3] + 4))) {
661 ret = EINVAL;
662 break;
663 }
664 mutex_enter(&stmf_state.stmf_lock);
665 for (ilport = stmf_state.stmf_ilportlist; ilport; ilport =
666 ilport->ilport_next) {
667 uint8_t *id;
668 id = (uint8_t *)ilport->ilport_lport->lport_id;
669 if ((p_id[3] == id[3]) &&
670 (bcmp(p_id + 4, id + 4, id[3]) == 0)) {
671 break;
672 }
673 }
674 if (ilport == NULL) {
675 mutex_exit(&stmf_state.stmf_lock);
676 ret = ENOENT;
677 break;
678 }
679 iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions;
680 n = min(ilport->ilport_nsessions,
681 (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t));
682 iocd->stmf_obuf_nentries = n;
683 iss = ilport->ilport_ss_list;
684 iss_list = (slist_scsi_session_t *)obuf;
685 for (i = 0; i < n; i++) {
686 uint8_t *id;
687 id = (uint8_t *)iss->iss_ss->ss_rport_id;
688 bcopy(id, iss_list[i].initiator, id[3] + 4);
689 iss_list[i].creation_time = (uint32_t)
690 iss->iss_creation_time;
691 if (iss->iss_ss->ss_rport_alias) {
692 (void) strncpy(iss_list[i].alias,
693 iss->iss_ss->ss_rport_alias, 255);
694 iss_list[i].alias[255] = 0;
695 } else {
696 iss_list[i].alias[0] = 0;
697 }
698 iss = iss->iss_next;
699 }
700 mutex_exit(&stmf_state.stmf_lock);
701 break;
702
703 case STMF_IOCTL_GET_LU_PROPERTIES:
704 p_id = (uint8_t *)ibuf;
705 if ((iocd->stmf_ibuf_size < 16) ||
706 (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) ||
707 (p_id[0] == 0)) {
708 ret = EINVAL;
709 break;
710 }
711 mutex_enter(&stmf_state.stmf_lock);
712 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
713 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
714 break;
715 }
716 if (ilu == NULL) {
717 mutex_exit(&stmf_state.stmf_lock);
718 ret = ENOENT;
719 break;
720 }
721 lup = (sioc_lu_props_t *)obuf;
722 bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16);
723 lup->lu_state = ilu->ilu_state & 0x0f;
724 lup->lu_present = 1; /* XXX */
725 (void) strncpy(lup->lu_provider_name,
726 ilu->ilu_lu->lu_lp->lp_name, 255);
727 lup->lu_provider_name[254] = 0;
728 if (ilu->ilu_lu->lu_alias) {
729 (void) strncpy(lup->lu_alias,
730 ilu->ilu_lu->lu_alias, 255);
731 lup->lu_alias[255] = 0;
732 } else {
733 lup->lu_alias[0] = 0;
734 }
735 mutex_exit(&stmf_state.stmf_lock);
736 break;
737
738 case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES:
739 p_id = (uint8_t *)ibuf;
740 if ((p_id == NULL) ||
741 (iocd->stmf_ibuf_size < (p_id[3] + 4)) ||
742 (iocd->stmf_obuf_size <
743 sizeof (sioc_target_port_props_t))) {
744 ret = EINVAL;
745 break;
746 }
747 mutex_enter(&stmf_state.stmf_lock);
748 for (ilport = stmf_state.stmf_ilportlist; ilport;
749 ilport = ilport->ilport_next) {
750 uint8_t *id;
751 id = (uint8_t *)ilport->ilport_lport->lport_id;
752 if ((p_id[3] == id[3]) &&
753 (bcmp(p_id+4, id+4, id[3]) == 0))
754 break;
755 }
756 if (ilport == NULL) {
757 mutex_exit(&stmf_state.stmf_lock);
758 ret = ENOENT;
759 break;
760 }
761 lportp = (sioc_target_port_props_t *)obuf;
762 bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id,
763 ilport->ilport_lport->lport_id->ident_length + 4);
764 lportp->tgt_state = ilport->ilport_state & 0x0f;
765 lportp->tgt_present = 1; /* XXX */
766 (void) strncpy(lportp->tgt_provider_name,
767 ilport->ilport_lport->lport_pp->pp_name, 255);
768 lportp->tgt_provider_name[254] = 0;
769 if (ilport->ilport_lport->lport_alias) {
770 (void) strncpy(lportp->tgt_alias,
771 ilport->ilport_lport->lport_alias, 255);
772 lportp->tgt_alias[255] = 0;
773 } else {
774 lportp->tgt_alias[0] = 0;
775 }
776 mutex_exit(&stmf_state.stmf_lock);
777 break;
778
779 case STMF_IOCTL_SET_STMF_STATE:
780 if ((ibuf == NULL) ||
781 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
782 ret = EINVAL;
783 break;
784 }
785 ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf);
786 break;
787
788 case STMF_IOCTL_GET_STMF_STATE:
789 if ((obuf == NULL) ||
790 (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) {
791 ret = EINVAL;
792 break;
793 }
794 ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf);
795 break;
796
797 case STMF_IOCTL_SET_ALUA_STATE:
798 if ((ibuf == NULL) ||
799 (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) {
800 ret = EINVAL;
801 break;
802 }
803 ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf);
804 break;
805
806 case STMF_IOCTL_GET_ALUA_STATE:
807 if ((obuf == NULL) ||
808 (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) {
809 ret = EINVAL;
810 break;
811 }
812 stmf_get_alua_state((stmf_alua_state_desc_t *)obuf);
813 break;
814
815 case STMF_IOCTL_SET_LU_STATE:
816 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
817 ssi.st_additional_info = NULL;
818 std = (stmf_state_desc_t *)ibuf;
819 if ((ibuf == NULL) ||
820 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
821 ret = EINVAL;
822 break;
823 }
824 p_id = std->ident;
825 mutex_enter(&stmf_state.stmf_lock);
826 if (stmf_state.stmf_inventory_locked) {
827 mutex_exit(&stmf_state.stmf_lock);
828 ret = EBUSY;
829 break;
830 }
831 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
832 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
833 break;
834 }
835 if (ilu == NULL) {
836 mutex_exit(&stmf_state.stmf_lock);
837 ret = ENOENT;
838 break;
839 }
840 stmf_state.stmf_inventory_locked = 1;
841 mutex_exit(&stmf_state.stmf_lock);
842 cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE :
843 STMF_CMD_LU_OFFLINE;
844 ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi);
845 if (ctl_ret == STMF_ALREADY)
846 ret = 0;
847 else if (ctl_ret == STMF_BUSY)
848 ret = EBUSY;
849 else if (ctl_ret != STMF_SUCCESS)
850 ret = EIO;
851 mutex_enter(&stmf_state.stmf_lock);
852 stmf_state.stmf_inventory_locked = 0;
853 mutex_exit(&stmf_state.stmf_lock);
854 break;
855
856 case STMF_IOCTL_SET_STMF_PROPS:
857 if ((ibuf == NULL) ||
858 (iocd->stmf_ibuf_size < sizeof (stmf_set_props_t))) {
859 ret = EINVAL;
860 break;
861 }
862 stmf_set_props = (stmf_set_props_t *)ibuf;
863 mutex_enter(&stmf_state.stmf_lock);
864 if ((stmf_set_props->default_lu_state_value ==
865 STMF_STATE_OFFLINE) ||
866 (stmf_set_props->default_lu_state_value ==
867 STMF_STATE_ONLINE)) {
868 stmf_state.stmf_default_lu_state =
869 stmf_set_props->default_lu_state_value;
870 }
871 if ((stmf_set_props->default_target_state_value ==
872 STMF_STATE_OFFLINE) ||
873 (stmf_set_props->default_target_state_value ==
874 STMF_STATE_ONLINE)) {
875 stmf_state.stmf_default_lport_state =
876 stmf_set_props->default_target_state_value;
877 }
878
879 mutex_exit(&stmf_state.stmf_lock);
880 break;
881
882 case STMF_IOCTL_SET_TARGET_PORT_STATE:
883 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
884 ssi.st_additional_info = NULL;
885 std = (stmf_state_desc_t *)ibuf;
886 if ((ibuf == NULL) ||
887 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
888 ret = EINVAL;
889 break;
890 }
891 p_id = std->ident;
892 mutex_enter(&stmf_state.stmf_lock);
893 if (stmf_state.stmf_inventory_locked) {
894 mutex_exit(&stmf_state.stmf_lock);
895 ret = EBUSY;
896 break;
897 }
898 for (ilport = stmf_state.stmf_ilportlist; ilport;
899 ilport = ilport->ilport_next) {
900 uint8_t *id;
901 id = (uint8_t *)ilport->ilport_lport->lport_id;
902 if ((id[3] == p_id[3]) &&
903 (bcmp(id+4, p_id+4, id[3]) == 0)) {
904 break;
905 }
906 }
907 if (ilport == NULL) {
908 mutex_exit(&stmf_state.stmf_lock);
909 ret = ENOENT;
910 break;
911 }
912 stmf_state.stmf_inventory_locked = 1;
913 mutex_exit(&stmf_state.stmf_lock);
914 cmd = (std->state == STMF_STATE_ONLINE) ?
915 STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE;
916 ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi);
917 if (ctl_ret == STMF_ALREADY)
918 ret = 0;
919 else if (ctl_ret == STMF_BUSY)
920 ret = EBUSY;
921 else if (ctl_ret != STMF_SUCCESS)
922 ret = EIO;
923 mutex_enter(&stmf_state.stmf_lock);
924 stmf_state.stmf_inventory_locked = 0;
925 mutex_exit(&stmf_state.stmf_lock);
926 break;
927
928 case STMF_IOCTL_ADD_HG_ENTRY:
929 idtype = STMF_ID_TYPE_HOST;
930 /* FALLTHROUGH */
931 case STMF_IOCTL_ADD_TG_ENTRY:
932 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
933 ret = EACCES;
934 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
935 break;
936 }
937 if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
938 idtype = STMF_ID_TYPE_TARGET;
939 }
940 grp_entry = (stmf_group_op_data_t *)ibuf;
941 if ((ibuf == NULL) ||
942 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
943 ret = EINVAL;
944 break;
945 }
946 if (grp_entry->group.name[0] == '*') {
947 ret = EINVAL;
948 break; /* not allowed */
949 }
950 mutex_enter(&stmf_state.stmf_lock);
951 ret = stmf_add_group_member(grp_entry->group.name,
952 grp_entry->group.name_size,
953 grp_entry->ident + 4,
954 grp_entry->ident[3],
955 idtype,
956 &iocd->stmf_error);
957 mutex_exit(&stmf_state.stmf_lock);
958 break;
959 case STMF_IOCTL_REMOVE_HG_ENTRY:
960 idtype = STMF_ID_TYPE_HOST;
961 /* FALLTHROUGH */
962 case STMF_IOCTL_REMOVE_TG_ENTRY:
963 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
964 ret = EACCES;
965 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
966 break;
967 }
968 if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
969 idtype = STMF_ID_TYPE_TARGET;
970 }
971 grp_entry = (stmf_group_op_data_t *)ibuf;
972 if ((ibuf == NULL) ||
973 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
974 ret = EINVAL;
975 break;
976 }
977 if (grp_entry->group.name[0] == '*') {
978 ret = EINVAL;
979 break; /* not allowed */
980 }
981 mutex_enter(&stmf_state.stmf_lock);
982 ret = stmf_remove_group_member(grp_entry->group.name,
983 grp_entry->group.name_size,
984 grp_entry->ident + 4,
985 grp_entry->ident[3],
986 idtype,
987 &iocd->stmf_error);
988 mutex_exit(&stmf_state.stmf_lock);
989 break;
990 case STMF_IOCTL_CREATE_HOST_GROUP:
991 idtype = STMF_ID_TYPE_HOST_GROUP;
992 /* FALLTHROUGH */
993 case STMF_IOCTL_CREATE_TARGET_GROUP:
994 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
995 ret = EACCES;
996 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
997 break;
998 }
999 grpname = (stmf_group_name_t *)ibuf;
1000
1001 if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
1002 idtype = STMF_ID_TYPE_TARGET_GROUP;
1003 if ((ibuf == NULL) ||
1004 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1005 ret = EINVAL;
1006 break;
1007 }
1008 if (grpname->name[0] == '*') {
1009 ret = EINVAL;
1010 break; /* not allowed */
1011 }
1012 mutex_enter(&stmf_state.stmf_lock);
1013 ret = stmf_add_group(grpname->name,
1014 grpname->name_size, idtype, &iocd->stmf_error);
1015 mutex_exit(&stmf_state.stmf_lock);
1016 break;
1017 case STMF_IOCTL_REMOVE_HOST_GROUP:
1018 idtype = STMF_ID_TYPE_HOST_GROUP;
1019 /* FALLTHROUGH */
1020 case STMF_IOCTL_REMOVE_TARGET_GROUP:
1021 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1022 ret = EACCES;
1023 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1024 break;
1025 }
1026 grpname = (stmf_group_name_t *)ibuf;
1027 if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
1028 idtype = STMF_ID_TYPE_TARGET_GROUP;
1029 if ((ibuf == NULL) ||
1030 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1031 ret = EINVAL;
1032 break;
1033 }
1034 if (grpname->name[0] == '*') {
1035 ret = EINVAL;
1036 break; /* not allowed */
1037 }
1038 mutex_enter(&stmf_state.stmf_lock);
1039 ret = stmf_remove_group(grpname->name,
1040 grpname->name_size, idtype, &iocd->stmf_error);
1041 mutex_exit(&stmf_state.stmf_lock);
1042 break;
1043 case STMF_IOCTL_VALIDATE_VIEW:
1044 case STMF_IOCTL_ADD_VIEW_ENTRY:
1045 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1046 ret = EACCES;
1047 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1048 break;
1049 }
1050 ve = (stmf_view_op_entry_t *)ibuf;
1051 if ((ibuf == NULL) ||
1052 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1053 ret = EINVAL;
1054 break;
1055 }
1056 if (!ve->ve_lu_number_valid)
1057 ve->ve_lu_nbr[2] = 0xFF;
1058 if (ve->ve_all_hosts) {
1059 ve->ve_host_group.name[0] = '*';
1060 ve->ve_host_group.name_size = 1;
1061 }
1062 if (ve->ve_all_targets) {
1063 ve->ve_target_group.name[0] = '*';
1064 ve->ve_target_group.name_size = 1;
1065 }
1066 if (ve->ve_ndx_valid)
1067 veid = ve->ve_ndx;
1068 else
1069 veid = 0xffffffff;
1070 mutex_enter(&stmf_state.stmf_lock);
1071 if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) {
1072 ret = stmf_add_ve(ve->ve_host_group.name,
1073 ve->ve_host_group.name_size,
1074 ve->ve_target_group.name,
1075 ve->ve_target_group.name_size,
1076 ve->ve_guid,
1077 &veid,
1078 ve->ve_lu_nbr,
1079 &iocd->stmf_error);
1080 } else { /* STMF_IOCTL_VALIDATE_VIEW */
1081 ret = stmf_validate_lun_ve(ve->ve_host_group.name,
1082 ve->ve_host_group.name_size,
1083 ve->ve_target_group.name,
1084 ve->ve_target_group.name_size,
1085 ve->ve_lu_nbr,
1086 &iocd->stmf_error);
1087 }
1088 mutex_exit(&stmf_state.stmf_lock);
1089 if (ret == 0 &&
1090 (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) &&
1091 iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) {
1092 stmf_view_op_entry_t *ve_ret =
1093 (stmf_view_op_entry_t *)obuf;
1094 iocd->stmf_obuf_nentries = 1;
1095 iocd->stmf_obuf_max_nentries = 1;
1096 if (!ve->ve_ndx_valid) {
1097 ve_ret->ve_ndx = veid;
1098 ve_ret->ve_ndx_valid = 1;
1099 }
1100 if (!ve->ve_lu_number_valid) {
1101 ve_ret->ve_lu_number_valid = 1;
1102 bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8);
1103 }
1104 }
1105 break;
1106 case STMF_IOCTL_REMOVE_VIEW_ENTRY:
1107 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1108 ret = EACCES;
1109 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1110 break;
1111 }
1112 ve = (stmf_view_op_entry_t *)ibuf;
1113 if ((ibuf == NULL) ||
1114 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1115 ret = EINVAL;
1116 break;
1117 }
1118 if (!ve->ve_ndx_valid) {
1119 ret = EINVAL;
1120 break;
1121 }
1122 mutex_enter(&stmf_state.stmf_lock);
1123 ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx,
1124 &iocd->stmf_error);
1125 mutex_exit(&stmf_state.stmf_lock);
1126 break;
1127 case STMF_IOCTL_GET_HG_LIST:
1128 id_list = &stmf_state.stmf_hg_list;
1129 /* FALLTHROUGH */
1130 case STMF_IOCTL_GET_TG_LIST:
1131 if (cmd == STMF_IOCTL_GET_TG_LIST)
1132 id_list = &stmf_state.stmf_tg_list;
1133 mutex_enter(&stmf_state.stmf_lock);
1134 iocd->stmf_obuf_max_nentries = id_list->id_count;
1135 n = min(id_list->id_count,
1136 (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t));
1137 iocd->stmf_obuf_nentries = n;
1138 id_entry = id_list->idl_head;
1139 grpname = (stmf_group_name_t *)obuf;
1140 for (i = 0; i < n; i++) {
1141 if (id_entry->id_data[0] == '*') {
1142 if (iocd->stmf_obuf_nentries > 0) {
1143 iocd->stmf_obuf_nentries--;
1144 }
1145 id_entry = id_entry->id_next;
1146 continue;
1147 }
1148 grpname->name_size = id_entry->id_data_size;
1149 bcopy(id_entry->id_data, grpname->name,
1150 id_entry->id_data_size);
1151 grpname++;
1152 id_entry = id_entry->id_next;
1153 }
1154 mutex_exit(&stmf_state.stmf_lock);
1155 break;
1156 case STMF_IOCTL_GET_HG_ENTRIES:
1157 id_list = &stmf_state.stmf_hg_list;
1158 /* FALLTHROUGH */
1159 case STMF_IOCTL_GET_TG_ENTRIES:
1160 grpname = (stmf_group_name_t *)ibuf;
1161 if ((ibuf == NULL) ||
1162 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1163 ret = EINVAL;
1164 break;
1165 }
1166 if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
1167 id_list = &stmf_state.stmf_tg_list;
1168 }
1169 mutex_enter(&stmf_state.stmf_lock);
1170 id_entry = stmf_lookup_id(id_list, grpname->name_size,
1171 grpname->name);
1172 if (!id_entry)
1173 ret = ENODEV;
1174 else {
1175 stmf_ge_ident_t *grp_entry;
1176 id_list = (stmf_id_list_t *)id_entry->id_impl_specific;
1177 iocd->stmf_obuf_max_nentries = id_list->id_count;
1178 n = min(id_list->id_count,
1179 iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t));
1180 iocd->stmf_obuf_nentries = n;
1181 id_entry = id_list->idl_head;
1182 grp_entry = (stmf_ge_ident_t *)obuf;
1183 for (i = 0; i < n; i++) {
1184 bcopy(id_entry->id_data, grp_entry->ident,
1185 id_entry->id_data_size);
1186 grp_entry->ident_size = id_entry->id_data_size;
1187 id_entry = id_entry->id_next;
1188 grp_entry++;
1189 }
1190 }
1191 mutex_exit(&stmf_state.stmf_lock);
1192 break;
1193
1194 case STMF_IOCTL_GET_VE_LIST:
1195 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1196 mutex_enter(&stmf_state.stmf_lock);
1197 ve = (stmf_view_op_entry_t *)obuf;
1198 for (id_entry = stmf_state.stmf_luid_list.idl_head;
1199 id_entry; id_entry = id_entry->id_next) {
1200 for (view_entry = (stmf_view_entry_t *)
1201 id_entry->id_impl_specific; view_entry;
1202 view_entry = view_entry->ve_next) {
1203 iocd->stmf_obuf_max_nentries++;
1204 if (iocd->stmf_obuf_nentries >= n)
1205 continue;
1206 ve->ve_ndx_valid = 1;
1207 ve->ve_ndx = view_entry->ve_id;
1208 ve->ve_lu_number_valid = 1;
1209 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1210 bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1211 view_entry->ve_luid->id_data_size);
1212 if (view_entry->ve_hg->id_data[0] == '*') {
1213 ve->ve_all_hosts = 1;
1214 } else {
1215 bcopy(view_entry->ve_hg->id_data,
1216 ve->ve_host_group.name,
1217 view_entry->ve_hg->id_data_size);
1218 ve->ve_host_group.name_size =
1219 view_entry->ve_hg->id_data_size;
1220 }
1221
1222 if (view_entry->ve_tg->id_data[0] == '*') {
1223 ve->ve_all_targets = 1;
1224 } else {
1225 bcopy(view_entry->ve_tg->id_data,
1226 ve->ve_target_group.name,
1227 view_entry->ve_tg->id_data_size);
1228 ve->ve_target_group.name_size =
1229 view_entry->ve_tg->id_data_size;
1230 }
1231 ve++;
1232 iocd->stmf_obuf_nentries++;
1233 }
1234 }
1235 mutex_exit(&stmf_state.stmf_lock);
1236 break;
1237
1238 case STMF_IOCTL_LU_VE_LIST:
1239 p_id = (uint8_t *)ibuf;
1240 if ((iocd->stmf_ibuf_size != 16) ||
1241 (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) {
1242 ret = EINVAL;
1243 break;
1244 }
1245
1246 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1247 mutex_enter(&stmf_state.stmf_lock);
1248 ve = (stmf_view_op_entry_t *)obuf;
1249 for (id_entry = stmf_state.stmf_luid_list.idl_head;
1250 id_entry; id_entry = id_entry->id_next) {
1251 if (bcmp(id_entry->id_data, p_id, 16) != 0)
1252 continue;
1253 for (view_entry = (stmf_view_entry_t *)
1254 id_entry->id_impl_specific; view_entry;
1255 view_entry = view_entry->ve_next) {
1256 iocd->stmf_obuf_max_nentries++;
1257 if (iocd->stmf_obuf_nentries >= n)
1258 continue;
1259 ve->ve_ndx_valid = 1;
1260 ve->ve_ndx = view_entry->ve_id;
1261 ve->ve_lu_number_valid = 1;
1262 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1263 bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1264 view_entry->ve_luid->id_data_size);
1265 if (view_entry->ve_hg->id_data[0] == '*') {
1266 ve->ve_all_hosts = 1;
1267 } else {
1268 bcopy(view_entry->ve_hg->id_data,
1269 ve->ve_host_group.name,
1270 view_entry->ve_hg->id_data_size);
1271 ve->ve_host_group.name_size =
1272 view_entry->ve_hg->id_data_size;
1273 }
1274
1275 if (view_entry->ve_tg->id_data[0] == '*') {
1276 ve->ve_all_targets = 1;
1277 } else {
1278 bcopy(view_entry->ve_tg->id_data,
1279 ve->ve_target_group.name,
1280 view_entry->ve_tg->id_data_size);
1281 ve->ve_target_group.name_size =
1282 view_entry->ve_tg->id_data_size;
1283 }
1284 ve++;
1285 iocd->stmf_obuf_nentries++;
1286 }
1287 break;
1288 }
1289 mutex_exit(&stmf_state.stmf_lock);
1290 break;
1291
1292 case STMF_IOCTL_LOAD_PP_DATA:
1293 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1294 ret = EACCES;
1295 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1296 break;
1297 }
1298 ppi = (stmf_ppioctl_data_t *)ibuf;
1299 if ((ppi == NULL) ||
1300 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1301 ret = EINVAL;
1302 break;
1303 }
1304 /* returned token */
1305 ppi_token = (uint64_t *)obuf;
1306 if ((ppi_token == NULL) ||
1307 (iocd->stmf_obuf_size < sizeof (uint64_t))) {
1308 ret = EINVAL;
1309 break;
1310 }
1311 ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error);
1312 break;
1313
1314 case STMF_IOCTL_GET_PP_DATA:
1315 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1316 ret = EACCES;
1317 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1318 break;
1319 }
1320 ppi = (stmf_ppioctl_data_t *)ibuf;
1321 if (ppi == NULL ||
1322 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1323 ret = EINVAL;
1324 break;
1325 }
1326 ppi_out = (stmf_ppioctl_data_t *)obuf;
1327 if ((ppi_out == NULL) ||
1328 (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) {
1329 ret = EINVAL;
1330 break;
1331 }
1332 ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error);
1333 break;
1334
1335 case STMF_IOCTL_CLEAR_PP_DATA:
1336 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1337 ret = EACCES;
1338 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1339 break;
1340 }
1341 ppi = (stmf_ppioctl_data_t *)ibuf;
1342 if ((ppi == NULL) ||
1343 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1344 ret = EINVAL;
1345 break;
1346 }
1347 ret = stmf_delete_ppd_ioctl(ppi);
1348 break;
1349
1350 case STMF_IOCTL_CLEAR_TRACE:
1351 stmf_trace_clear();
1352 break;
1353
1354 case STMF_IOCTL_ADD_TRACE:
1355 if (iocd->stmf_ibuf_size && ibuf) {
1356 ((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0;
1357 stmf_trace("\nstradm", "%s\n", ibuf);
1358 }
1359 break;
1360
1361 case STMF_IOCTL_GET_TRACE_POSITION:
1362 if (obuf && (iocd->stmf_obuf_size > 3)) {
1363 mutex_enter(&trace_buf_lock);
1364 *((int *)obuf) = trace_buf_curndx;
1365 mutex_exit(&trace_buf_lock);
1366 } else {
1367 ret = EINVAL;
1368 }
1369 break;
1370
1371 case STMF_IOCTL_GET_TRACE:
1372 if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) {
1373 ret = EINVAL;
1374 break;
1375 }
1376 i = *((int *)ibuf);
1377 if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) >
1378 trace_buf_size)) {
1379 ret = EINVAL;
1380 break;
1381 }
1382 mutex_enter(&trace_buf_lock);
1383 bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
1384 mutex_exit(&trace_buf_lock);
1385 break;
1386
1387 default:
1388 ret = ENOTTY;
1389 }
1390
1391 if (ret == 0) {
1392 ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1393 } else if (iocd->stmf_error) {
1394 (void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1395 }
1396 if (obuf) {
1397 kmem_free(obuf, iocd->stmf_obuf_size);
1398 obuf = NULL;
1399 }
1400 if (ibuf) {
1401 kmem_free(ibuf, iocd->stmf_ibuf_size);
1402 ibuf = NULL;
1403 }
1404 kmem_free(iocd, sizeof (stmf_iocdata_t));
1405 return (ret);
1406 }
1407
1408 static int
stmf_get_service_state()1409 stmf_get_service_state()
1410 {
1411 stmf_i_local_port_t *ilport;
1412 stmf_i_lu_t *ilu;
1413 int online = 0;
1414 int offline = 0;
1415 int onlining = 0;
1416 int offlining = 0;
1417
1418 ASSERT(mutex_owned(&stmf_state.stmf_lock));
1419 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1420 ilport = ilport->ilport_next) {
1421 if (ilport->ilport_state == STMF_STATE_OFFLINE)
1422 offline++;
1423 else if (ilport->ilport_state == STMF_STATE_ONLINE)
1424 online++;
1425 else if (ilport->ilport_state == STMF_STATE_ONLINING)
1426 onlining++;
1427 else if (ilport->ilport_state == STMF_STATE_OFFLINING)
1428 offlining++;
1429 }
1430
1431 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1432 ilu = ilu->ilu_next) {
1433 if (ilu->ilu_state == STMF_STATE_OFFLINE)
1434 offline++;
1435 else if (ilu->ilu_state == STMF_STATE_ONLINE)
1436 online++;
1437 else if (ilu->ilu_state == STMF_STATE_ONLINING)
1438 onlining++;
1439 else if (ilu->ilu_state == STMF_STATE_OFFLINING)
1440 offlining++;
1441 }
1442
1443 if (stmf_state.stmf_service_running) {
1444 if (onlining)
1445 return (STMF_STATE_ONLINING);
1446 else
1447 return (STMF_STATE_ONLINE);
1448 }
1449
1450 if (offlining) {
1451 return (STMF_STATE_OFFLINING);
1452 }
1453
1454 return (STMF_STATE_OFFLINE);
1455 }
1456
/*
 * Handler for the SET_STMF_STATE ioctl: transitions the overall STMF
 * service between ONLINE and OFFLINE and manages the config-init state.
 *
 * std->state must be STMF_STATE_ONLINE or STMF_STATE_OFFLINE; other
 * values yield EINVAL.  Requires the device to have been opened
 * exclusively (EACCES otherwise) and the inventory to be unlocked
 * (EBUSY otherwise).  A transition already in flight (ONLINING or
 * OFFLINING) also yields EBUSY.
 *
 * On the way up (service currently OFFLINE):
 *   - config_state == STMF_CONFIG_INIT wipes all provider data and the
 *     view configuration and re-initializes views.
 *   - Otherwise the config state is advanced to INIT_DONE if needed and,
 *     for an ONLINE request, every port/LU is brought online (subject to
 *     the default port/LU states).
 * On the way down (service currently ONLINE): only an OFFLINE request is
 * legal, and every online port/LU is taken offline.
 *
 * The inventory is locked (stmf_inventory_locked) around the stmf_ctl()
 * loops, which are deliberately run with stmf_lock dropped since
 * stmf_ctl() may block; the lock is retaken only to clear the flag.
 *
 * Returns 0 on success or an errno value.
 */
static int
stmf_set_stmf_state(stmf_state_desc_t *std)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_state_change_info_t ssi;
	int svc_state;

	ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
	ssi.st_additional_info = NULL;

	mutex_enter(&stmf_state.stmf_lock);
	/* State changes require the exclusive (control) open. */
	if (!stmf_state.stmf_exclusive_open) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	/* Another state change is mid-flight; its ctl loops own the lists. */
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if ((std->state != STMF_STATE_ONLINE) &&
	    (std->state != STMF_STATE_OFFLINE)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EINVAL);
	}

	/* Refuse to stack a new transition on top of one in progress. */
	svc_state = stmf_get_service_state();
	if ((svc_state == STMF_STATE_OFFLINING) ||
	    (svc_state == STMF_STATE_ONLINING)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if (svc_state == STMF_STATE_OFFLINE) {
		if (std->config_state == STMF_CONFIG_INIT) {
			/* Config re-init is only legal while staying offline. */
			if (std->state != STMF_STATE_OFFLINE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT;
			/* Start from a clean slate: drop PPD and view config. */
			stmf_delete_all_ppds();
			stmf_view_clear_config();
			stmf_view_init();
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
		    (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
			if (std->config_state != STMF_CONFIG_INIT_DONE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
		}
		/* Offline -> offline with config already settled: no-op. */
		if (std->state == STMF_STATE_OFFLINE) {
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		/* Cannot go online while configuration init is incomplete. */
		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		stmf_state.stmf_inventory_locked = 1;
		stmf_state.stmf_service_running = 1;
		/*
		 * Drop the lock for the ctl loops; inventory_locked keeps
		 * other state changes out while we walk the lists.
		 */
		mutex_exit(&stmf_state.stmf_lock);

		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			/* Honor the administratively-set default port state. */
			if (stmf_state.stmf_default_lport_state !=
			    STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
			    ilport->ilport_lport, &ssi);
		}

		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			/* Honor the administratively-set default LU state. */
			if (stmf_state.stmf_default_lu_state !=
			    STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
		}
		mutex_enter(&stmf_state.stmf_lock);
		stmf_state.stmf_inventory_locked = 0;
		mutex_exit(&stmf_state.stmf_lock);
		return (0);
	}

	/* svc_state is STMF_STATE_ONLINE here */
	if ((std->state != STMF_STATE_OFFLINE) ||
	    (std->config_state == STMF_CONFIG_INIT)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	stmf_state.stmf_inventory_locked = 1;
	stmf_state.stmf_service_running = 0;

	/* As above: walk the lists with stmf_lock dropped. */
	mutex_exit(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
		    ilport->ilport_lport, &ssi);
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
	}
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_inventory_locked = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}
1577
1578 static int
stmf_get_stmf_state(stmf_state_desc_t * std)1579 stmf_get_stmf_state(stmf_state_desc_t *std)
1580 {
1581 mutex_enter(&stmf_state.stmf_lock);
1582 std->state = stmf_get_service_state();
1583 std->config_state = stmf_state.stmf_config_state;
1584 mutex_exit(&stmf_state.stmf_lock);
1585
1586 return (0);
1587 }
1588 /*
1589 * handles registration message from pppt for a logical unit
1590 */
1591 stmf_status_t
stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t * msg,uint32_t type)1592 stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
1593 {
1594 stmf_i_lu_provider_t *ilp;
1595 stmf_lu_provider_t *lp;
1596 mutex_enter(&stmf_state.stmf_lock);
1597 for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1598 if (strcmp(msg->icrl_lu_provider_name,
1599 ilp->ilp_lp->lp_name) == 0) {
1600 lp = ilp->ilp_lp;
1601 mutex_exit(&stmf_state.stmf_lock);
1602 lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
1603 msg->icrl_cb_arg_len, type);
1604 return (STMF_SUCCESS);
1605 }
1606 }
1607 mutex_exit(&stmf_state.stmf_lock);
1608 return (STMF_SUCCESS);
1609 }
1610
1611 /*
1612 * handles de-registration message from pppt for a logical unit
1613 */
1614 stmf_status_t
stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t * msg)1615 stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
1616 {
1617 stmf_i_lu_provider_t *ilp;
1618 stmf_lu_provider_t *lp;
1619 mutex_enter(&stmf_state.stmf_lock);
1620 for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1621 if (strcmp(msg->icrl_lu_provider_name,
1622 ilp->ilp_lp->lp_name) == 0) {
1623 lp = ilp->ilp_lp;
1624 mutex_exit(&stmf_state.stmf_lock);
1625 lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
1626 STMF_MSG_LU_DEREGISTER);
1627 return (STMF_SUCCESS);
1628 }
1629 }
1630 mutex_exit(&stmf_state.stmf_lock);
1631 return (STMF_SUCCESS);
1632 }
1633
1634 /*
1635 * helper function to find a task that matches a task_msgid
1636 */
1637 scsi_task_t *
find_task_from_msgid(uint8_t * lu_id,stmf_ic_msgid_t task_msgid)1638 find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
1639 {
1640 stmf_i_lu_t *ilu;
1641 stmf_i_scsi_task_t *itask;
1642
1643 mutex_enter(&stmf_state.stmf_lock);
1644 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
1645 if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
1646 break;
1647 }
1648 }
1649
1650 if (ilu == NULL) {
1651 mutex_exit(&stmf_state.stmf_lock);
1652 return (NULL);
1653 }
1654
1655 mutex_enter(&ilu->ilu_task_lock);
1656 for (itask = ilu->ilu_tasks; itask != NULL;
1657 itask = itask->itask_lu_next) {
1658 if (itask->itask_flags & (ITASK_IN_FREE_LIST |
1659 ITASK_BEING_ABORTED)) {
1660 continue;
1661 }
1662 if (itask->itask_proxy_msg_id == task_msgid) {
1663 break;
1664 }
1665 }
1666 mutex_exit(&ilu->ilu_task_lock);
1667 mutex_exit(&stmf_state.stmf_lock);
1668
1669 if (itask != NULL) {
1670 return (itask->itask_task);
1671 } else {
1672 /* task not found. Likely already aborted. */
1673 return (NULL);
1674 }
1675 }
1676
1677 /*
1678 * message received from pppt/ic
1679 */
1680 stmf_status_t
stmf_msg_rx(stmf_ic_msg_t * msg)1681 stmf_msg_rx(stmf_ic_msg_t *msg)
1682 {
1683 mutex_enter(&stmf_state.stmf_lock);
1684 if (stmf_state.stmf_alua_state != 1) {
1685 mutex_exit(&stmf_state.stmf_lock);
1686 cmn_err(CE_WARN, "stmf alua state is disabled");
1687 ic_msg_free(msg);
1688 return (STMF_FAILURE);
1689 }
1690 mutex_exit(&stmf_state.stmf_lock);
1691
1692 switch (msg->icm_msg_type) {
1693 case STMF_ICM_REGISTER_LUN:
1694 (void) stmf_ic_lu_reg(
1695 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1696 STMF_MSG_LU_REGISTER);
1697 break;
1698 case STMF_ICM_LUN_ACTIVE:
1699 (void) stmf_ic_lu_reg(
1700 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1701 STMF_MSG_LU_ACTIVE);
1702 break;
1703 case STMF_ICM_DEREGISTER_LUN:
1704 (void) stmf_ic_lu_dereg(
1705 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
1706 break;
1707 case STMF_ICM_SCSI_DATA:
1708 (void) stmf_ic_rx_scsi_data(
1709 (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
1710 break;
1711 case STMF_ICM_SCSI_STATUS:
1712 (void) stmf_ic_rx_scsi_status(
1713 (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
1714 break;
1715 case STMF_ICM_STATUS:
1716 (void) stmf_ic_rx_status(
1717 (stmf_ic_status_msg_t *)msg->icm_msg);
1718 break;
1719 default:
1720 cmn_err(CE_WARN, "unknown message received %d",
1721 msg->icm_msg_type);
1722 ic_msg_free(msg);
1723 return (STMF_FAILURE);
1724 }
1725 ic_msg_free(msg);
1726 return (STMF_SUCCESS);
1727 }
1728
1729 stmf_status_t
stmf_ic_rx_status(stmf_ic_status_msg_t * msg)1730 stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
1731 {
1732 stmf_i_local_port_t *ilport;
1733
1734 if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
1735 /* for now, ignore other message status */
1736 return (STMF_SUCCESS);
1737 }
1738
1739 if (msg->ics_status != STMF_SUCCESS) {
1740 return (STMF_SUCCESS);
1741 }
1742
1743 mutex_enter(&stmf_state.stmf_lock);
1744 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1745 ilport = ilport->ilport_next) {
1746 if (msg->ics_msgid == ilport->ilport_reg_msgid) {
1747 ilport->ilport_proxy_registered = 1;
1748 break;
1749 }
1750 }
1751 mutex_exit(&stmf_state.stmf_lock);
1752 return (STMF_SUCCESS);
1753 }
1754
1755 /*
1756 * handles scsi status message from pppt
1757 */
1758 stmf_status_t
stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t * msg)1759 stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
1760 {
1761 scsi_task_t *task;
1762
1763 /* is this a task management command */
1764 if (msg->icss_task_msgid & MSG_ID_TM_BIT) {
1765 return (STMF_SUCCESS);
1766 }
1767
1768 task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);
1769
1770 if (task == NULL) {
1771 return (STMF_SUCCESS);
1772 }
1773
1774 task->task_scsi_status = msg->icss_status;
1775 task->task_sense_data = msg->icss_sense;
1776 task->task_sense_length = msg->icss_sense_len;
1777 (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
1778
1779 return (STMF_SUCCESS);
1780 }
1781
1782 /*
1783 * handles scsi data message from pppt
1784 */
1785 stmf_status_t
stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t * msg)1786 stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
1787 {
1788 stmf_i_scsi_task_t *itask;
1789 scsi_task_t *task;
1790 stmf_xfer_data_t *xd = NULL;
1791 stmf_data_buf_t *dbuf;
1792 uint32_t sz, minsz, xd_sz, asz;
1793
1794 /* is this a task management command */
1795 if (msg->icsd_task_msgid & MSG_ID_TM_BIT) {
1796 return (STMF_SUCCESS);
1797 }
1798
1799 task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
1800 if (task == NULL) {
1801 stmf_ic_msg_t *ic_xfer_done_msg = NULL;
1802 static uint64_t data_msg_id;
1803 stmf_status_t ic_ret = STMF_FAILURE;
1804 mutex_enter(&stmf_state.stmf_lock);
1805 data_msg_id = stmf_proxy_msg_id++;
1806 mutex_exit(&stmf_state.stmf_lock);
1807 /*
1808 * send xfer done status to pppt
1809 * for now, set the session id to 0 as we cannot
1810 * ascertain it since we cannot find the task
1811 */
1812 ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
1813 msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
1814 if (ic_xfer_done_msg) {
1815 ic_ret = ic_tx_msg(ic_xfer_done_msg);
1816 if (ic_ret != STMF_IC_MSG_SUCCESS) {
1817 cmn_err(CE_WARN, "unable to xmit proxy msg");
1818 }
1819 }
1820 return (STMF_FAILURE);
1821 }
1822
1823 itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
1824 dbuf = itask->itask_proxy_dbuf;
1825
1826 task->task_cmd_xfer_length += msg->icsd_data_len;
1827
1828 if (task->task_additional_flags &
1829 TASK_AF_NO_EXPECTED_XFER_LENGTH) {
1830 task->task_expected_xfer_length =
1831 task->task_cmd_xfer_length;
1832 }
1833
1834 sz = min(task->task_expected_xfer_length,
1835 task->task_cmd_xfer_length);
1836
1837 xd_sz = msg->icsd_data_len;
1838 asz = xd_sz + sizeof (*xd) - 4;
1839 xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
1840
1841 if (xd == NULL) {
1842 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1843 STMF_ALLOC_FAILURE, NULL);
1844 return (STMF_FAILURE);
1845 }
1846
1847 xd->alloc_size = asz;
1848 xd->size_left = xd_sz;
1849 bcopy(msg->icsd_data, xd->buf, xd_sz);
1850
1851 sz = min(sz, xd->size_left);
1852 xd->size_left = sz;
1853 minsz = min(512, sz);
1854
1855 if (dbuf == NULL)
1856 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
1857 if (dbuf == NULL) {
1858 kmem_free(xd, xd->alloc_size);
1859 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1860 STMF_ALLOC_FAILURE, NULL);
1861 return (STMF_FAILURE);
1862 }
1863 dbuf->db_lu_private = xd;
1864 dbuf->db_relative_offset = task->task_nbytes_transferred;
1865 stmf_xd_to_dbuf(dbuf, 0);
1866
1867 dbuf->db_flags = DB_DIRECTION_TO_RPORT;
1868 (void) stmf_xfer_data(task, dbuf, 0);
1869 return (STMF_SUCCESS);
1870 }
1871
/*
 * Forward a SCSI command (and optionally its first data buffer) to the
 * peer node over the interconnect for proxy handling.
 *
 * Fails fast when ALUA is disabled or the local port has not completed
 * proxy registration.  Otherwise a fresh proxy msgid is allocated under
 * stmf_lock, the task is flagged for default/proxy handling (or, for a
 * task management function, only the TM bit is set in the msgid since
 * the command is merely duplicated to the peer), and a scsi-cmd message
 * is transmitted.  Returns STMF_SUCCESS only if the message was both
 * allocated and transmitted; STMF_FAILURE otherwise.
 *
 * NOTE(review): stmf_alua_state is read here without stmf_lock, unlike
 * stmf_msg_rx() which samples it under the lock — presumably a benign
 * racy fast-path check; confirm before relying on it.
 */
stmf_status_t
stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	stmf_ic_msg_t *ic_cmd_msg;
	stmf_ic_msg_status_t ic_ret;
	stmf_status_t ret = STMF_FAILURE;

	if (stmf_state.stmf_alua_state != 1) {
		cmn_err(CE_WARN, "stmf alua state is disabled");
		return (STMF_FAILURE);
	}

	/* The peer must have acknowledged this port's registration. */
	if (ilport->ilport_proxy_registered == 0) {
		return (STMF_FAILURE);
	}

	/* Allocate a unique msgid for matching the peer's responses. */
	mutex_enter(&stmf_state.stmf_lock);
	itask->itask_proxy_msg_id = stmf_proxy_msg_id++;
	mutex_exit(&stmf_state.stmf_lock);
	itask->itask_proxy_dbuf = dbuf;

	/*
	 * stmf will now take over the task handling for this task
	 * but it still needs to be treated differently from other
	 * default handled tasks, hence the ITASK_PROXY_TASK.
	 * If this is a task management function, we're really just
	 * duping the command to the peer. Set the TM bit so that
	 * we can recognize this on return since we won't be completing
	 * the proxied task in that case.
	 */
	if (task->task_mgmt_function) {
		itask->itask_proxy_msg_id |= MSG_ID_TM_BIT;
	} else {
		uint32_t new, old;
		/* CAS loop: set the handling flags without taking a lock. */
		do {
			new = old = itask->itask_flags;
			/* An abort in progress wins; do not proxy the task. */
			if (new & ITASK_BEING_ABORTED)
				return (STMF_FAILURE);
			new |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}
	/* Include the first buffer's data when one was supplied. */
	if (dbuf) {
		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
		    task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr,
		    itask->itask_proxy_msg_id);
	} else {
		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
		    task, 0, NULL, itask->itask_proxy_msg_id);
	}
	if (ic_cmd_msg) {
		ic_ret = ic_tx_msg(ic_cmd_msg);
		if (ic_ret == STMF_IC_MSG_SUCCESS) {
			ret = STMF_SUCCESS;
		}
	}
	return (ret);
}
1933
1934
1935 stmf_status_t
pppt_modload()1936 pppt_modload()
1937 {
1938 int error;
1939
1940 if (pppt_mod == NULL && ((pppt_mod =
1941 ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) {
1942 cmn_err(CE_WARN, "Unable to load pppt");
1943 return (STMF_FAILURE);
1944 }
1945
1946 if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc =
1947 (stmf_ic_reg_port_msg_alloc_func_t)
1948 ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc",
1949 &error)) == NULL)) {
1950 cmn_err(CE_WARN,
1951 "Unable to find symbol - stmf_ic_reg_port_msg_alloc");
1952 return (STMF_FAILURE);
1953 }
1954
1955
1956 if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc =
1957 (stmf_ic_dereg_port_msg_alloc_func_t)
1958 ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc",
1959 &error)) == NULL)) {
1960 cmn_err(CE_WARN,
1961 "Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
1962 return (STMF_FAILURE);
1963 }
1964
1965 if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc =
1966 (stmf_ic_reg_lun_msg_alloc_func_t)
1967 ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc",
1968 &error)) == NULL)) {
1969 cmn_err(CE_WARN,
1970 "Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
1971 return (STMF_FAILURE);
1972 }
1973
1974 if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc =
1975 (stmf_ic_lun_active_msg_alloc_func_t)
1976 ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc",
1977 &error)) == NULL)) {
1978 cmn_err(CE_WARN,
1979 "Unable to find symbol - stmf_ic_lun_active_msg_alloc");
1980 return (STMF_FAILURE);
1981 }
1982
1983 if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc =
1984 (stmf_ic_dereg_lun_msg_alloc_func_t)
1985 ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc",
1986 &error)) == NULL)) {
1987 cmn_err(CE_WARN,
1988 "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
1989 return (STMF_FAILURE);
1990 }
1991
1992 if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc =
1993 (stmf_ic_scsi_cmd_msg_alloc_func_t)
1994 ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc",
1995 &error)) == NULL)) {
1996 cmn_err(CE_WARN,
1997 "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
1998 return (STMF_FAILURE);
1999 }
2000
2001 if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
2002 ((ic_scsi_data_xfer_done_msg_alloc =
2003 (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t)
2004 ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc",
2005 &error)) == NULL)) {
2006 cmn_err(CE_WARN,
2007 "Unable to find symbol -"
2008 "stmf_ic_scsi_data_xfer_done_msg_alloc");
2009 return (STMF_FAILURE);
2010 }
2011
2012 if (ic_session_reg_msg_alloc == NULL &&
2013 ((ic_session_reg_msg_alloc =
2014 (stmf_ic_session_create_msg_alloc_func_t)
2015 ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc",
2016 &error)) == NULL)) {
2017 cmn_err(CE_WARN,
2018 "Unable to find symbol -"
2019 "stmf_ic_session_create_msg_alloc");
2020 return (STMF_FAILURE);
2021 }
2022
2023 if (ic_session_dereg_msg_alloc == NULL &&
2024 ((ic_session_dereg_msg_alloc =
2025 (stmf_ic_session_destroy_msg_alloc_func_t)
2026 ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc",
2027 &error)) == NULL)) {
2028 cmn_err(CE_WARN,
2029 "Unable to find symbol -"
2030 "stmf_ic_session_destroy_msg_alloc");
2031 return (STMF_FAILURE);
2032 }
2033
2034 if (ic_tx_msg == NULL && ((ic_tx_msg =
2035 (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg",
2036 &error)) == NULL)) {
2037 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg");
2038 return (STMF_FAILURE);
2039 }
2040
2041 if (ic_msg_free == NULL && ((ic_msg_free =
2042 (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free",
2043 &error)) == NULL)) {
2044 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free");
2045 return (STMF_FAILURE);
2046 }
2047 return (STMF_SUCCESS);
2048 }
2049
2050 static void
stmf_get_alua_state(stmf_alua_state_desc_t * alua_state)2051 stmf_get_alua_state(stmf_alua_state_desc_t *alua_state)
2052 {
2053 mutex_enter(&stmf_state.stmf_lock);
2054 alua_state->alua_node = stmf_state.stmf_alua_node;
2055 alua_state->alua_state = stmf_state.stmf_alua_state;
2056 mutex_exit(&stmf_state.stmf_lock);
2057 }
2058
2059
/*
 * Enable or disable ALUA (proxy) mode for this node.
 *
 * Enabling (alua_state == 1) loads the pppt proxy module and registers
 * every eligible existing local port and every active logical unit with
 * the peer node.  Disabling only clears the state flag; nothing is
 * deregistered.  Returns 0, EINVAL on bad arguments, or EIO if the
 * proxy module cannot be loaded.
 */
static int
stmf_set_alua_state(stmf_alua_state_desc_t *alua_state)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_lu_t *lu;
	stmf_ic_msg_status_t ic_ret;
	stmf_ic_msg_t *ic_reg_lun, *ic_reg_port;
	stmf_local_port_t *lport;
	int ret = 0;

	/* Only state values 0/1 and node ids 0/1 are defined */
	if (alua_state->alua_state > 1 || alua_state->alua_node > 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	if (alua_state->alua_state == 1) {
		/* The proxy (pppt) module is required for ALUA operation */
		if (pppt_modload() == STMF_FAILURE) {
			ret = EIO;
			goto err;
		}
		if (alua_state->alua_node != 0) {
			/* reset existing rtpids to new base */
			stmf_rtpid_counter = 255;
		}
		stmf_state.stmf_alua_node = alua_state->alua_node;
		stmf_state.stmf_alua_state = 1;
		/* register existing local ports with ppp */
		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			/* skip standby ports and non-alua participants */
			if (ilport->ilport_standby == 1 ||
			    ilport->ilport_alua == 0) {
				continue;
			}
			if (alua_state->alua_node != 0) {
				/* non-zero node: assign rtpid above new base */
				ilport->ilport_rtpid =
				    atomic_add_16_nv(&stmf_rtpid_counter, 1);
			}
			lport = ilport->ilport_lport;
			ic_reg_port = ic_reg_port_msg_alloc(
			    lport->lport_id, ilport->ilport_rtpid,
			    0, NULL, stmf_proxy_msg_id);
			if (ic_reg_port) {
				ic_ret = ic_tx_msg(ic_reg_port);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					/*
					 * Record the msg id for later
					 * matching; the global id is only
					 * consumed on successful transmit.
					 */
					ilport->ilport_reg_msgid =
					    stmf_proxy_msg_id++;
				} else {
					cmn_err(CE_WARN,
					    "error on port registration "
					    "port - %s",
					    ilport->ilport_kstat_tgt_name);
				}
			}
		}
		/* register existing logical units */
		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			/* only active LUs are proxied */
			if (ilu->ilu_access != STMF_LU_ACTIVE) {
				continue;
			}
			/* register with proxy module */
			lu = ilu->ilu_lu;
			/* provider must be LPIF rev 2 and support ALUA */
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the register message */
				ic_reg_lun = ic_reg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name,
				    lu->lu_proxy_reg_arg_len,
				    (uint8_t *)lu->lu_proxy_reg_arg,
				    stmf_proxy_msg_id);
				/* send the message */
				if (ic_reg_lun) {
					ic_ret = ic_tx_msg(ic_reg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						stmf_proxy_msg_id++;
					}
				}
			}
		}
	} else {
		/* disable: clear the flag only, nothing is deregistered */
		stmf_state.stmf_alua_state = 0;
	}

err:
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}
2150
2151
/*
 * Layout descriptors for stmf_alloc()/stmf_free().  Each allocated object
 * is one kmem chunk laid out as:
 *	[ shared (caller-visible) | additional_size | framework private ]
 * __istmf_t is the framework-private trailer; __stmf_t describes the
 * pointers stored at the front of the shared section.
 */
typedef struct {
	void *bp;	/* back pointer from internal struct to main struct */
	int alloc_size;	/* total size of the kmem allocation */
} __istmf_t;

typedef struct {
	__istmf_t *fp;	/* Framework private */
	void *cp;	/* Caller private */
	void *ss;	/* struct specific */
} __stmf_t;

/*
 * Shared/framework-private sizes per struct id, indexed by
 * stmf_struct_id_t; entry 0 is a placeholder (ids start at 1).
 */
static struct {
	int shared;
	int fw_private;
} stmf_sizes[] = { { 0, 0 },
	{ GET_STRUCT_SIZE(stmf_lu_provider_t),
		GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
	{ GET_STRUCT_SIZE(stmf_port_provider_t),
		GET_STRUCT_SIZE(stmf_i_port_provider_t) },
	{ GET_STRUCT_SIZE(stmf_local_port_t),
		GET_STRUCT_SIZE(stmf_i_local_port_t) },
	{ GET_STRUCT_SIZE(stmf_lu_t),
		GET_STRUCT_SIZE(stmf_i_lu_t) },
	{ GET_STRUCT_SIZE(stmf_scsi_session_t),
		GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
	{ GET_STRUCT_SIZE(scsi_task_t),
		GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
	{ GET_STRUCT_SIZE(stmf_data_buf_t),
		GET_STRUCT_SIZE(__istmf_t) },
	{ GET_STRUCT_SIZE(stmf_dbuf_store_t),
		GET_STRUCT_SIZE(__istmf_t) }

};
2185
2186 void *
stmf_alloc(stmf_struct_id_t struct_id,int additional_size,int flags)2187 stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
2188 {
2189 int stmf_size;
2190 int kmem_flag;
2191 __stmf_t *sh;
2192
2193 if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
2194 return (NULL);
2195
2196 if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) {
2197 kmem_flag = KM_NOSLEEP;
2198 } else {
2199 kmem_flag = KM_SLEEP;
2200 }
2201
2202 additional_size = (additional_size + 7) & (~7);
2203 stmf_size = stmf_sizes[struct_id].shared +
2204 stmf_sizes[struct_id].fw_private + additional_size;
2205
2206 if (flags & AF_DONTZERO)
2207 sh = (__stmf_t *)kmem_alloc(stmf_size, kmem_flag);
2208 else
2209 sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);
2210
2211 if (sh == NULL)
2212 return (NULL);
2213
2214 /*
2215 * In principle, the implementation inside stmf_alloc should not
2216 * be changed anyway. But the original order of framework private
2217 * data and caller private data does not support sglist in the caller
2218 * private data.
2219 * To work around this, the memory segments of framework private
2220 * data and caller private data are re-ordered here.
2221 * A better solution is to provide a specific interface to allocate
2222 * the sglist, then we will not need this workaround any more.
2223 * But before the new interface is available, the memory segment
2224 * ordering should be kept as is.
2225 */
2226 sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
2227 sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
2228 stmf_sizes[struct_id].shared + additional_size);
2229
2230 sh->fp->bp = sh;
2231 /* Just store the total size instead of storing additional size */
2232 sh->fp->alloc_size = stmf_size;
2233
2234 return (sh);
2235 }
2236
2237 void
stmf_free(void * ptr)2238 stmf_free(void *ptr)
2239 {
2240 __stmf_t *sh = (__stmf_t *)ptr;
2241
2242 /*
2243 * So far we dont need any struct specific processing. If such
2244 * a need ever arises, then store the struct id in the framework
2245 * private section and get it here as sh->fp->struct_id.
2246 */
2247 kmem_free(ptr, sh->fp->alloc_size);
2248 }
2249
2250 /*
2251 * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
2252 * framework and returns a pointer to framework private data for the lu.
2253 * Returns NULL if the lu was not found.
2254 */
2255 stmf_i_lu_t *
stmf_lookup_lu(stmf_lu_t * lu)2256 stmf_lookup_lu(stmf_lu_t *lu)
2257 {
2258 stmf_i_lu_t *ilu;
2259 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2260
2261 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2262 if (ilu->ilu_lu == lu)
2263 return (ilu);
2264 }
2265 return (NULL);
2266 }
2267
2268 /*
2269 * Given a pointer to stmf_local_port_t, verifies if this lport is registered
2270 * with the framework and returns a pointer to framework private data for
2271 * the lport.
2272 * Returns NULL if the lport was not found.
2273 */
2274 stmf_i_local_port_t *
stmf_lookup_lport(stmf_local_port_t * lport)2275 stmf_lookup_lport(stmf_local_port_t *lport)
2276 {
2277 stmf_i_local_port_t *ilport;
2278 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2279
2280 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2281 ilport = ilport->ilport_next) {
2282 if (ilport->ilport_lport == lport)
2283 return (ilport);
2284 }
2285 return (NULL);
2286 }
2287
/*
 * Register a logical unit provider with the framework.  If persisted
 * provider data exists for this provider name, link it up and deliver
 * it through the provider's callback (with stmf_lock dropped).
 */
stmf_status_t
stmf_register_lu_provider(stmf_lu_provider_t *lp)
{
	stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	/* only LPIF revisions 1 and 2 are supported */
	if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	ilp->ilp_next = stmf_state.stmf_ilplist;
	stmf_state.stmf_ilplist = ilp;
	stmf_state.stmf_nlps++;

	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, lp->lp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rlp_bail_out;
	}
	ilp->ilp_ppd = ppd;
	ppd->ppd_provider = ilp;
	if (lp->lp_cb == NULL)
		goto rlp_bail_out;
	/*
	 * ilp_cb_in_progress makes stmf_deregister_lu_provider() return
	 * STMF_BUSY while the callback runs without stmf_lock held.
	 */
	ilp->ilp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ilp->ilp_cb_in_progress = 0;

rlp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
2330
2331 stmf_status_t
stmf_deregister_lu_provider(stmf_lu_provider_t * lp)2332 stmf_deregister_lu_provider(stmf_lu_provider_t *lp)
2333 {
2334 stmf_i_lu_provider_t **ppilp;
2335 stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2336
2337 mutex_enter(&stmf_state.stmf_lock);
2338 if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) {
2339 mutex_exit(&stmf_state.stmf_lock);
2340 return (STMF_BUSY);
2341 }
2342 for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
2343 ppilp = &((*ppilp)->ilp_next)) {
2344 if (*ppilp == ilp) {
2345 *ppilp = ilp->ilp_next;
2346 stmf_state.stmf_nlps--;
2347 if (ilp->ilp_ppd) {
2348 ilp->ilp_ppd->ppd_provider = NULL;
2349 ilp->ilp_ppd = NULL;
2350 }
2351 mutex_exit(&stmf_state.stmf_lock);
2352 return (STMF_SUCCESS);
2353 }
2354 }
2355 mutex_exit(&stmf_state.stmf_lock);
2356 return (STMF_NOT_FOUND);
2357 }
2358
/*
 * Register a port provider with the framework.  If persisted provider
 * data exists for this provider name, link it up and deliver it through
 * the provider's callback (with stmf_lock dropped).
 */
stmf_status_t
stmf_register_port_provider(stmf_port_provider_t *pp)
{
	stmf_i_port_provider_t *ipp =
	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	/* only PORTIF revision 1 is supported */
	if (pp->pp_portif_rev != PORTIF_REV_1)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_next = stmf_state.stmf_ipplist;
	stmf_state.stmf_ipplist = ipp;
	stmf_state.stmf_npps++;
	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rpp_bail_out;
	}
	ipp->ipp_ppd = ppd;
	ppd->ppd_provider = ipp;
	if (pp->pp_cb == NULL)
		goto rpp_bail_out;
	/*
	 * ipp_cb_in_progress makes stmf_deregister_port_provider() return
	 * STMF_BUSY while the callback runs without stmf_lock held.
	 */
	ipp->ipp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_cb_in_progress = 0;

rpp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
2401
2402 stmf_status_t
stmf_deregister_port_provider(stmf_port_provider_t * pp)2403 stmf_deregister_port_provider(stmf_port_provider_t *pp)
2404 {
2405 stmf_i_port_provider_t *ipp =
2406 (stmf_i_port_provider_t *)pp->pp_stmf_private;
2407 stmf_i_port_provider_t **ppipp;
2408
2409 mutex_enter(&stmf_state.stmf_lock);
2410 if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
2411 mutex_exit(&stmf_state.stmf_lock);
2412 return (STMF_BUSY);
2413 }
2414 for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
2415 ppipp = &((*ppipp)->ipp_next)) {
2416 if (*ppipp == ipp) {
2417 *ppipp = ipp->ipp_next;
2418 stmf_state.stmf_npps--;
2419 if (ipp->ipp_ppd) {
2420 ipp->ipp_ppd->ppd_provider = NULL;
2421 ipp->ipp_ppd = NULL;
2422 }
2423 mutex_exit(&stmf_state.stmf_lock);
2424 return (STMF_SUCCESS);
2425 }
2426 }
2427 mutex_exit(&stmf_state.stmf_lock);
2428 return (STMF_NOT_FOUND);
2429 }
2430
/*
 * Store persistent provider data delivered from userland (an XDR-packed
 * nvlist in ppi->ppi_data) against the named lu or port provider,
 * creating the stmf_pp_data_t on first use.  If the provider is already
 * registered, its data-updated callback is invoked with stmf_lock
 * dropped.  The write token is bumped on every store and returned via
 * *ppi_token; if ppi_token_valid is set, a stale caller token fails the
 * request with STMF_IOCERR_PPD_UPDATED/EINVAL.
 */
int
stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
    uint32_t *err_ret)
{
	stmf_i_port_provider_t *ipp;
	stmf_i_lu_provider_t *ilp;
	stmf_pp_data_t *ppd;
	nvlist_t *nv;
	int s;
	int ret;

	*err_ret = 0;

	/* exactly one of the two provider classes must be selected */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	/* look for an existing entry of the right class and name */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd == NULL) {
		/* New provider */
		s = strlen(ppi->ppi_name);
		if (s > 254) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * Size the allocation so the trailing ppd_name field holds
		 * the name plus NUL; the "- 7" presumably accounts for the
		 * bytes already declared in stmf_pp_data_t's name field —
		 * TODO confirm against the struct definition.
		 */
		s += sizeof (stmf_pp_data_t) - 7;

		ppd = kmem_zalloc(s, KM_NOSLEEP);
		if (ppd == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			return (ENOMEM);
		}
		ppd->ppd_alloc_size = s;
		(void) strcpy(ppd->ppd_name, ppi->ppi_name);

		/* See if this provider already exists */
		if (ppi->ppi_lu_provider) {
			ppd->ppd_lu_provider = 1;
			for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
			    ilp = ilp->ilp_next) {
				if (strcmp(ppi->ppi_name,
				    ilp->ilp_lp->lp_name) == 0) {
					ppd->ppd_provider = ilp;
					ilp->ilp_ppd = ppd;
					break;
				}
			}
		} else {
			ppd->ppd_port_provider = 1;
			for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
			    ipp = ipp->ipp_next) {
				if (strcmp(ppi->ppi_name,
				    ipp->ipp_pp->pp_name) == 0) {
					ppd->ppd_provider = ipp;
					ipp->ipp_ppd = ppd;
					break;
				}
			}
		}

		/* Link this ppd in */
		ppd->ppd_next = stmf_state.stmf_ppdlist;
		stmf_state.stmf_ppdlist = ppd;
	}

	/*
	 * User is requesting that the token be checked.
	 * If there was another set after the user's get
	 * it's an error
	 */
	if (ppi->ppi_token_valid) {
		if (ppi->ppi_token != ppd->ppd_token) {
			*err_ret = STMF_IOCERR_PPD_UPDATED;
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
	}

	/* unpack the caller's XDR-encoded nvlist (non-blocking alloc) */
	if ((ret = nvlist_unpack((char *)ppi->ppi_data,
	    (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
		mutex_exit(&stmf_state.stmf_lock);
		return (ret);
	}

	/* Free any existing lists and add this one to the ppd */
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);
	ppd->ppd_nv = nv;

	/* set the token for writes */
	ppd->ppd_token++;
	/* return token to caller */
	if (ppi_token) {
		*ppi_token = ppd->ppd_token;
	}

	/* If there is a provider registered, do the notifications */
	if (ppd->ppd_provider) {
		uint32_t cb_flags = 0;

		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
			cb_flags |= STMF_PCB_STMF_ONLINING;
		if (ppi->ppi_lu_provider) {
			ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
			if (ilp->ilp_lp->lp_cb == NULL)
				goto bail_out;
			/* cb_in_progress blocks deregistration meanwhile */
			ilp->ilp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ilp->ilp_lp->lp_cb(ilp->ilp_lp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ilp->ilp_cb_in_progress = 0;
		} else {
			ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
			if (ipp->ipp_pp->pp_cb == NULL)
				goto bail_out;
			/* cb_in_progress blocks deregistration meanwhile */
			ipp->ipp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ipp->ipp_pp->pp_cb(ipp->ipp_pp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ipp->ipp_cb_in_progress = 0;
		}
	}

bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}
2573
2574 void
stmf_delete_ppd(stmf_pp_data_t * ppd)2575 stmf_delete_ppd(stmf_pp_data_t *ppd)
2576 {
2577 stmf_pp_data_t **pppd;
2578
2579 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2580 if (ppd->ppd_provider) {
2581 if (ppd->ppd_lu_provider) {
2582 ((stmf_i_lu_provider_t *)
2583 ppd->ppd_provider)->ilp_ppd = NULL;
2584 } else {
2585 ((stmf_i_port_provider_t *)
2586 ppd->ppd_provider)->ipp_ppd = NULL;
2587 }
2588 ppd->ppd_provider = NULL;
2589 }
2590
2591 for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
2592 pppd = &((*pppd)->ppd_next)) {
2593 if (*pppd == ppd)
2594 break;
2595 }
2596
2597 if (*pppd == NULL)
2598 return;
2599
2600 *pppd = ppd->ppd_next;
2601 if (ppd->ppd_nv)
2602 nvlist_free(ppd->ppd_nv);
2603
2604 kmem_free(ppd, ppd->ppd_alloc_size);
2605 }
2606
2607 int
stmf_delete_ppd_ioctl(stmf_ppioctl_data_t * ppi)2608 stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
2609 {
2610 stmf_pp_data_t *ppd;
2611 int ret = ENOENT;
2612
2613 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2614 return (EINVAL);
2615 }
2616
2617 mutex_enter(&stmf_state.stmf_lock);
2618
2619 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2620 if (ppi->ppi_lu_provider) {
2621 if (!ppd->ppd_lu_provider)
2622 continue;
2623 } else if (ppi->ppi_port_provider) {
2624 if (!ppd->ppd_port_provider)
2625 continue;
2626 }
2627 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2628 break;
2629 }
2630
2631 if (ppd) {
2632 ret = 0;
2633 stmf_delete_ppd(ppd);
2634 }
2635 mutex_exit(&stmf_state.stmf_lock);
2636
2637 return (ret);
2638 }
2639
/*
 * Fetch the persisted provider data for the provider named in 'ppi' as
 * an XDR-packed nvlist written into the caller's buffer at
 * ppi_out->ppi_data.  On success ppi_out->ppi_token and ppi_data_size
 * are filled in.  If the caller's buffer (ppi->ppi_data_size) is too
 * small, *err_ret is set to STMF_IOCERR_INSUFFICIENT_BUF and EINVAL is
 * returned with ppi_data_size holding the required length.  Returns
 * ENOENT when no matching entry (with data) exists.
 */
int
stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
    uint32_t *err_ret)
{
	stmf_pp_data_t *ppd;
	size_t req_size;
	int ret = ENOENT;
	char *bufp = (char *)ppi_out->ppi_data;

	/* exactly one of the two provider classes must be selected */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);

	/* find an entry of the requested class with a matching name */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd && ppd->ppd_nv) {
		/* token lets the caller detect concurrent updates */
		ppi_out->ppi_token = ppd->ppd_token;
		if ((ret = nvlist_size(ppd->ppd_nv, &req_size,
		    NV_ENCODE_XDR)) != 0) {
			goto done;
		}
		ppi_out->ppi_data_size = req_size;
		if (req_size > ppi->ppi_data_size) {
			*err_ret = STMF_IOCERR_INSUFFICIENT_BUF;
			ret = EINVAL;
			goto done;
		}

		if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size,
		    NV_ENCODE_XDR, 0)) != 0) {
			goto done;
		}
		ret = 0;
	}

done:
	mutex_exit(&stmf_state.stmf_lock);

	return (ret);
}
2692
2693 void
stmf_delete_all_ppds()2694 stmf_delete_all_ppds()
2695 {
2696 stmf_pp_data_t *ppd, *nppd;
2697
2698 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2699 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) {
2700 nppd = ppd->ppd_next;
2701 stmf_delete_ppd(ppd);
2702 }
2703 }
2704
/*
 * 16 is the max string length of a protocol_ident, increase
 * the size if needed.
 */
#define	STMF_KSTAT_LU_SZ	(STMF_GUID_INPUT + 1 + 256)
#define	STMF_KSTAT_TGT_SZ	(256 * 2 + 16)

/*
 * This array matches the Protocol Identifier in stmf_ioctl.h
 */
#define	MAX_PROTO_STR_LEN	32

/*
 * Protocol name lookup table indexed by protocol_id.  The array holds
 * PROTOCOL_ANY entries, so valid indices are 0 .. PROTOCOL_ANY - 1;
 * callers must range-check the id before indexing.
 */
char *protocol_ident[PROTOCOL_ANY] = {
	"Fibre Channel",
	"Parallel SCSI",
	"SSA",
	"IEEE_1394",
	"SRP",
	"iSCSI",
	"SAS",
	"ADT",
	"ATAPI",
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
};
2729
2730 /*
2731 * Update the lun wait/run queue count
2732 */
2733 static void
stmf_update_kstat_lu_q(scsi_task_t * task,void func ())2734 stmf_update_kstat_lu_q(scsi_task_t *task, void func())
2735 {
2736 stmf_i_lu_t *ilu;
2737 kstat_io_t *kip;
2738
2739 if (task->task_lu == dlun0)
2740 return;
2741 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2742 if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2743 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2744 if (kip != NULL) {
2745 func(kip);
2746 }
2747 }
2748 }
2749
2750 /*
2751 * Update the target(lport) wait/run queue count
2752 */
2753 static void
stmf_update_kstat_lport_q(scsi_task_t * task,void func ())2754 stmf_update_kstat_lport_q(scsi_task_t *task, void func())
2755 {
2756 stmf_i_local_port_t *ilp;
2757 kstat_io_t *kip;
2758
2759 ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2760 if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2761 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2762 if (kip != NULL) {
2763 mutex_enter(ilp->ilport_kstat_io->ks_lock);
2764 func(kip);
2765 mutex_exit(ilp->ilport_kstat_io->ks_lock);
2766 }
2767 }
2768 }
2769
2770 static void
stmf_update_kstat_lport_io(scsi_task_t * task,stmf_data_buf_t * dbuf)2771 stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2772 {
2773 stmf_i_local_port_t *ilp;
2774 kstat_io_t *kip;
2775
2776 ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2777 if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2778 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2779 if (kip != NULL) {
2780 mutex_enter(ilp->ilport_kstat_io->ks_lock);
2781 STMF_UPDATE_KSTAT_IO(kip, dbuf);
2782 mutex_exit(ilp->ilport_kstat_io->ks_lock);
2783 }
2784 }
2785 }
2786
2787 static void
stmf_update_kstat_lu_io(scsi_task_t * task,stmf_data_buf_t * dbuf)2788 stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2789 {
2790 stmf_i_lu_t *ilu;
2791 kstat_io_t *kip;
2792
2793 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2794 if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2795 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2796 if (kip != NULL) {
2797 mutex_enter(ilu->ilu_kstat_io->ks_lock);
2798 STMF_UPDATE_KSTAT_IO(kip, dbuf);
2799 mutex_exit(ilu->ilu_kstat_io->ks_lock);
2800 }
2801 }
2802 }
2803
/*
 * Create the per-LU kstats: a named "misc" kstat exporting the lun guid
 * (as hex) and alias, and an "io" kstat for transfer/queue accounting.
 * Failures are logged and otherwise ignored; the LU works without kstats.
 */
static void
stmf_create_kstat_lu(stmf_i_lu_t *ilu)
{
	char ks_nm[KSTAT_STRLEN];
	stmf_kstat_lu_info_t *ks_lu;

	/* create kstat lun info */
	ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ,
	    KM_NOSLEEP);
	if (ks_lu == NULL) {
		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
		return;
	}

	/* kstat name is keyed off the ilu's address, guaranteeing uniqueness */
	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "misc", KSTAT_TYPE_NAMED,
	    sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		kmem_free(ks_lu, STMF_KSTAT_LU_SZ);
		cmn_err(CE_WARN, "STMF: kstat_create lu failed");
		return;
	}

	/* KSTAT_FLAG_VIRTUAL: we supply (and later free) the data buffer */
	ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ;
	ilu->ilu_kstat_info->ks_data = ks_lu;

	kstat_named_init(&ks_lu->i_lun_guid, "lun-guid",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_lu->i_lun_alias, "lun-alias",
	    KSTAT_DATA_STRING);

	/* convert guid to hex string */
	int i;
	uint8_t *p = ilu->ilu_lu->lu_id->ident;
	bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid));
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]);
	}
	kstat_named_setstr(&ks_lu->i_lun_guid,
	    (const char *)ilu->ilu_ascii_hex_guid);
	/* NOTE(review): lu_alias may be NULL here — confirm setstr copes */
	kstat_named_setstr(&ks_lu->i_lun_alias,
	    (const char *)ilu->ilu_lu->lu_alias);
	kstat_install(ilu->ilu_kstat_info);

	/* create kstat lun io */
	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		cmn_err(CE_WARN, "STMF: kstat_create lu_io failed");
		return;
	}
	/* io kstat updates are serialized through ilu_kstat_lock */
	mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0);
	ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock;
	kstat_install(ilu->ilu_kstat_io);
}
2862
2863 static void
stmf_create_kstat_lport(stmf_i_local_port_t * ilport)2864 stmf_create_kstat_lport(stmf_i_local_port_t *ilport)
2865 {
2866 char ks_nm[KSTAT_STRLEN];
2867 stmf_kstat_tgt_info_t *ks_tgt;
2868 int id, len;
2869
2870 /* create kstat lport info */
2871 ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ,
2872 KM_NOSLEEP);
2873 if (ks_tgt == NULL) {
2874 cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2875 return;
2876 }
2877
2878 bzero(ks_nm, sizeof (ks_nm));
2879 (void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport);
2880 if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME,
2881 0, ks_nm, "misc", KSTAT_TYPE_NAMED,
2882 sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
2883 KSTAT_FLAG_VIRTUAL)) == NULL) {
2884 kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
2885 cmn_err(CE_WARN, "STMF: kstat_create target failed");
2886 return;
2887 }
2888
2889 ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
2890 ilport->ilport_kstat_info->ks_data = ks_tgt;
2891
2892 kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
2893 KSTAT_DATA_STRING);
2894 kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias",
2895 KSTAT_DATA_STRING);
2896 kstat_named_init(&ks_tgt->i_protocol, "protocol",
2897 KSTAT_DATA_STRING);
2898
2899 /* ident might not be null terminated */
2900 len = ilport->ilport_lport->lport_id->ident_length;
2901 bcopy(ilport->ilport_lport->lport_id->ident,
2902 ilport->ilport_kstat_tgt_name, len);
2903 ilport->ilport_kstat_tgt_name[len + 1] = NULL;
2904 kstat_named_setstr(&ks_tgt->i_tgt_name,
2905 (const char *)ilport->ilport_kstat_tgt_name);
2906 kstat_named_setstr(&ks_tgt->i_tgt_alias,
2907 (const char *)ilport->ilport_lport->lport_alias);
2908 /* protocol */
2909 if ((id = ilport->ilport_lport->lport_id->protocol_id) > PROTOCOL_ANY) {
2910 cmn_err(CE_WARN, "STMF: protocol_id out of bound");
2911 id = PROTOCOL_ANY;
2912 }
2913 kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]);
2914 kstat_install(ilport->ilport_kstat_info);
2915
2916 /* create kstat lport io */
2917 bzero(ks_nm, sizeof (ks_nm));
2918 (void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport);
2919 if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
2920 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
2921 cmn_err(CE_WARN, "STMF: kstat_create target_io failed");
2922 return;
2923 }
2924 mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0);
2925 ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock;
2926 kstat_install(ilport->ilport_kstat_io);
2927 }
2928
/*
 * set the asymmetric access state for a logical unit
 * caller is responsible for establishing SCSI unit attention on
 * state change
 */
stmf_status_t
stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state)
{
	stmf_i_lu_t *ilu;
	uint8_t *p1, *p2;

	/* only ACTIVE and STANDBY are valid target states */
	if ((access_state != STMF_LU_STANDBY) &&
	    (access_state != STMF_LU_ACTIVE)) {
		return (STMF_INVALID_ARG);
	}

	p1 = &lu->lu_id->ident[0];
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* look for a registered LU with the same 16-byte guid */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		p2 = &ilu->ilu_lu->lu_id->ident[0];
		if (bcmp(p1, p2, 16) == 0) {
			break;
		}
	}

	if (!ilu) {
		/* not registered yet: just record the state on its private */
		ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	} else {
		/*
		 * We're changing access state on an existing logical unit
		 * Send the proxy registration message for this logical unit
		 * if we're in alua mode.
		 * If the requested state is STMF_LU_ACTIVE, we want to register
		 * this logical unit.
		 * If the requested state is STMF_LU_STANDBY, we're going to
		 * abort all tasks for this logical unit.
		 */
		if (stmf_state.stmf_alua_state == 1 &&
		    access_state == STMF_LU_ACTIVE) {
			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
			stmf_ic_msg_t *ic_reg_lun;
			/* provider must be LPIF rev 2 and support ALUA */
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the register message */
				ic_reg_lun = ic_lun_active_msg_alloc(p1,
				    lu->lu_lp->lp_name,
				    lu->lu_proxy_reg_arg_len,
				    (uint8_t *)lu->lu_proxy_reg_arg,
				    stmf_proxy_msg_id);
				/* send the message */
				if (ic_reg_lun) {
					ic_ret = ic_tx_msg(ic_reg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						/* id consumed only on success */
						stmf_proxy_msg_id++;
					}
				}
			}
		} else if (stmf_state.stmf_alua_state == 1 &&
		    access_state == STMF_LU_STANDBY) {
			/* abort all tasks for this lu */
			stmf_task_lu_killall(lu, NULL, STMF_ABORTED);
		}
	}

	ilu->ilu_access = access_state;

	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3004
3005
/*
 * Register a logical unit with the framework: validate its NAA-type
 * 16-byte guid, splice its private struct onto the global LU list, wire
 * up any pre-existing view-entry id, create its kstats, register it
 * with the proxy when in ALUA mode, and bring it online if the service
 * default allows.  Returns STMF_ALREADY for a duplicate guid, STMF_BUSY
 * while the inventory is locked, STMF_INVALID_ARG for a bad id.
 */
stmf_status_t
stmf_register_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	uint8_t *p1, *p2;
	stmf_state_change_info_t ssci;
	stmf_id_data_t *luid;

	/* guid must be a 16-byte NAA ident whose high nibble is 6 */
	if ((lu->lu_id->ident_type != ID_TYPE_NAA) ||
	    (lu->lu_id->ident_length != 16) ||
	    ((lu->lu_id->ident[0] & 0xf0) != 0x60)) {
		return (STMF_INVALID_ARG);
	}
	p1 = &lu->lu_id->ident[0];
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* reject duplicate guids */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		p2 = &ilu->ilu_lu->lu_id->ident[0];
		if (bcmp(p1, p2, 16) == 0) {
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_ALREADY);
		}
	}

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	/* bind to a pre-existing id entry (e.g. from view config), if any */
	luid = stmf_lookup_id(&stmf_state.stmf_luid_list,
	    lu->lu_id->ident_length, lu->lu_id->ident);
	if (luid) {
		luid->id_pt_to_object = (void *)ilu;
		ilu->ilu_luid = luid;
	}
	ilu->ilu_alias = NULL;

	/* splice onto the head of the doubly linked LU list */
	ilu->ilu_next = stmf_state.stmf_ilulist;
	ilu->ilu_prev = NULL;
	if (ilu->ilu_next)
		ilu->ilu_next->ilu_prev = ilu;
	stmf_state.stmf_ilulist = ilu;
	stmf_state.stmf_nlus++;
	if (lu->lu_lp) {
		/* provider's LU count blocks its deregistration */
		((stmf_i_lu_provider_t *)
		    (lu->lu_lp->lp_stmf_private))->ilp_nlus++;
	}
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
	STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl);
	stmf_create_kstat_lu(ilu);
	/*
	 * register with proxy module if available and logical unit
	 * is in active state
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilu->ilu_access == STMF_LU_ACTIVE) {
		stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
		stmf_ic_msg_t *ic_reg_lun;
		/* provider must be LPIF rev 2 and support ALUA */
		if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
		    lu->lu_lp->lp_alua_support) {
			ilu->ilu_alua = 1;
			/* allocate the register message */
			ic_reg_lun = ic_reg_lun_msg_alloc(p1,
			    lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len,
			    (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id);
			/* send the message */
			if (ic_reg_lun) {
				ic_ret = ic_tx_msg(ic_reg_lun);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					stmf_proxy_msg_id++;
				}
			}
		}
	}
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * check the default state for lu
	 * NOTE(review): stmf_default_lu_state and ilu_prev_state are
	 * read/written here after dropping stmf_lock — confirm no
	 * concurrent mutator relies on the lock for these fields.
	 */
	if (stmf_state.stmf_default_lu_state == STMF_STATE_OFFLINE) {
		ilu->ilu_prev_state = STMF_STATE_OFFLINE;
	} else {
		ilu->ilu_prev_state = STMF_STATE_ONLINE;
		if (stmf_state.stmf_service_running) {
			ssci.st_rflags = 0;
			ssci.st_additional_info = NULL;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci);
		}
	}

	/* XXX: Generate event */
	return (STMF_SUCCESS);
}
3097
/*
 * Remove a previously registered logical unit from the framework.
 *
 * The LU must already be offline; an online (or transitioning) LU, or a
 * locked inventory, yields STMF_BUSY.  An LU that was never registered
 * yields STMF_INVALID_ARG.  On success the LU's cached task pool, proxy
 * (ALUA) registration, inventory linkage and kstats are all torn down.
 */
stmf_status_t
stmf_deregister_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	ilu = stmf_lookup_lu(lu);
	if (ilu == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_INVALID_ARG);
	}
	if (ilu->ilu_state == STMF_STATE_OFFLINE) {
		/* offline implies no tasks can be outstanding */
		ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
		/* wait for any in-flight task drain to release the LU */
		while (ilu->ilu_flags & ILU_STALL_DEREGISTER) {
			cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock);
		}
		if (ilu->ilu_ntasks) {
			stmf_i_scsi_task_t *itask, *nitask;

			/* free every cached task on the LU's task list */
			nitask = ilu->ilu_tasks;
			do {
				itask = nitask;
				nitask = itask->itask_lu_next;
				lu->lu_task_free(itask->itask_task);
				stmf_free(itask->itask_task);
			} while (nitask != NULL);

			ilu->ilu_tasks = ilu->ilu_free_tasks = NULL;
			ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0;
		}
		/* de-register with proxy if available */
		if (ilu->ilu_access == STMF_LU_ACTIVE &&
		    stmf_state.stmf_alua_state == 1) {
			/* de-register with proxy module */
			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
			stmf_ic_msg_t *ic_dereg_lun;
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the de-register message */
				ic_dereg_lun = ic_dereg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name, 0,
				    NULL, stmf_proxy_msg_id);
				/* send the message */
				if (ic_dereg_lun) {
					ic_ret = ic_tx_msg(ic_dereg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						/*
						 * message id is only consumed
						 * on a successful transmit
						 */
						stmf_proxy_msg_id++;
					}
				}
			}
		}

		/* unlink from the global LU inventory list */
		if (ilu->ilu_next)
			ilu->ilu_next->ilu_prev = ilu->ilu_prev;
		if (ilu->ilu_prev)
			ilu->ilu_prev->ilu_next = ilu->ilu_next;
		else
			stmf_state.stmf_ilulist = ilu->ilu_next;
		stmf_state.stmf_nlus--;

		/* advance the service thread cursors past this LU */
		if (ilu == stmf_state.stmf_svc_ilu_draining) {
			stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		}
		if (ilu == stmf_state.stmf_svc_ilu_timing) {
			stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		}
		if (lu->lu_lp) {
			/* drop the LU provider's LU count */
			((stmf_i_lu_provider_t *)
			    (lu->lu_lp->lp_stmf_private))->ilp_nlus--;
		}
		if (ilu->ilu_luid) {
			/* break the GUID-to-LU back pointer */
			((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object =
			    NULL;
			ilu->ilu_luid = NULL;
		}
		STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl);
	} else {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	/* tear down the per-LU kstats */
	if (ilu->ilu_kstat_info) {
		kmem_free(ilu->ilu_kstat_info->ks_data,
		    ilu->ilu_kstat_info->ks_data_size);
		kstat_delete(ilu->ilu_kstat_info);
	}
	if (ilu->ilu_kstat_io) {
		kstat_delete(ilu->ilu_kstat_io);
		mutex_destroy(&ilu->ilu_kstat_lock);
	}
	stmf_delete_itl_kstat_by_guid(ilu->ilu_ascii_hex_guid);
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3196
3197 void
stmf_set_port_standby(stmf_local_port_t * lport,uint16_t rtpid)3198 stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid)
3199 {
3200 stmf_i_local_port_t *ilport =
3201 (stmf_i_local_port_t *)lport->lport_stmf_private;
3202 ilport->ilport_rtpid = rtpid;
3203 ilport->ilport_standby = 1;
3204 }
3205
3206 void
stmf_set_port_alua(stmf_local_port_t * lport)3207 stmf_set_port_alua(stmf_local_port_t *lport)
3208 {
3209 stmf_i_local_port_t *ilport =
3210 (stmf_i_local_port_t *)lport->lport_stmf_private;
3211 ilport->ilport_alua = 1;
3212 }
3213
3214 stmf_status_t
stmf_register_local_port(stmf_local_port_t * lport)3215 stmf_register_local_port(stmf_local_port_t *lport)
3216 {
3217 stmf_i_local_port_t *ilport;
3218 stmf_state_change_info_t ssci;
3219 int start_workers = 0;
3220
3221 mutex_enter(&stmf_state.stmf_lock);
3222 if (stmf_state.stmf_inventory_locked) {
3223 mutex_exit(&stmf_state.stmf_lock);
3224 return (STMF_BUSY);
3225 }
3226 ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3227 rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL);
3228
3229 ilport->ilport_instance =
3230 id_alloc_nosleep(stmf_state.stmf_ilport_inst_space);
3231 if (ilport->ilport_instance == -1) {
3232 mutex_exit(&stmf_state.stmf_lock);
3233 return (STMF_FAILURE);
3234 }
3235 ilport->ilport_next = stmf_state.stmf_ilportlist;
3236 ilport->ilport_prev = NULL;
3237 if (ilport->ilport_next)
3238 ilport->ilport_next->ilport_prev = ilport;
3239 stmf_state.stmf_ilportlist = ilport;
3240 stmf_state.stmf_nlports++;
3241 if (lport->lport_pp) {
3242 ((stmf_i_port_provider_t *)
3243 (lport->lport_pp->pp_stmf_private))->ipp_npps++;
3244 }
3245 ilport->ilport_tg =
3246 stmf_lookup_group_for_target(lport->lport_id->ident,
3247 lport->lport_id->ident_length);
3248
3249 /*
3250 * rtpid will/must be set if this is a standby port
3251 * only register ports that are not standby (proxy) ports
3252 * and ports that are alua participants (ilport_alua == 1)
3253 */
3254 if (ilport->ilport_standby == 0) {
3255 ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1);
3256 }
3257
3258 if (stmf_state.stmf_alua_state == 1 &&
3259 ilport->ilport_standby == 0 &&
3260 ilport->ilport_alua == 1) {
3261 stmf_ic_msg_t *ic_reg_port;
3262 stmf_ic_msg_status_t ic_ret;
3263 stmf_local_port_t *lport;
3264 lport = ilport->ilport_lport;
3265 ic_reg_port = ic_reg_port_msg_alloc(
3266 lport->lport_id, ilport->ilport_rtpid,
3267 0, NULL, stmf_proxy_msg_id);
3268 if (ic_reg_port) {
3269 ic_ret = ic_tx_msg(ic_reg_port);
3270 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3271 ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
3272 } else {
3273 cmn_err(CE_WARN, "error on port registration "
3274 "port - %s", ilport->ilport_kstat_tgt_name);
3275 }
3276 }
3277 }
3278 STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl);
3279 stmf_create_kstat_lport(ilport);
3280 if (stmf_workers_state == STMF_WORKERS_DISABLED) {
3281 stmf_workers_state = STMF_WORKERS_ENABLING;
3282 start_workers = 1;
3283 }
3284 mutex_exit(&stmf_state.stmf_lock);
3285
3286 if (start_workers)
3287 stmf_worker_init();
3288
3289 /* the default state of LPORT */
3290
3291 if (stmf_state.stmf_default_lport_state == STMF_STATE_OFFLINE) {
3292 ilport->ilport_prev_state = STMF_STATE_OFFLINE;
3293 } else {
3294 ilport->ilport_prev_state = STMF_STATE_ONLINE;
3295 if (stmf_state.stmf_service_running) {
3296 ssci.st_rflags = 0;
3297 ssci.st_additional_info = NULL;
3298 (void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
3299 }
3300 }
3301
3302 /* XXX: Generate event */
3303 return (STMF_SUCCESS);
3304 }
3305
/*
 * Remove a previously registered local port from the framework.
 *
 * Fails with STMF_BUSY if the inventory is locked or the port still has
 * active sessions.  On success the port's proxy (ALUA) registration,
 * inventory linkage, instance id, lock and kstats are all released.
 */
stmf_status_t
stmf_deregister_local_port(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;

	/*
	 * deregister ports that are not standby (proxy)
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		stmf_ic_msg_t *ic_dereg_port;
		stmf_ic_msg_status_t ic_ret;
		ic_dereg_port = ic_dereg_port_msg_alloc(
		    lport->lport_id, 0, NULL, stmf_proxy_msg_id);
		if (ic_dereg_port) {
			ic_ret = ic_tx_msg(ic_dereg_port);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				/* msgid consumed only on successful send */
				stmf_proxy_msg_id++;
			}
		}
	}

	if (ilport->ilport_nsessions == 0) {
		/* unlink from the global port list */
		if (ilport->ilport_next)
			ilport->ilport_next->ilport_prev = ilport->ilport_prev;
		if (ilport->ilport_prev)
			ilport->ilport_prev->ilport_next = ilport->ilport_next;
		else
			stmf_state.stmf_ilportlist = ilport->ilport_next;
		id_free(stmf_state.stmf_ilport_inst_space,
		    ilport->ilport_instance);
		rw_destroy(&ilport->ilport_lock);
		stmf_state.stmf_nlports--;
		if (lport->lport_pp) {
			/* drop the port provider's port count */
			((stmf_i_port_provider_t *)
			    (lport->lport_pp->pp_stmf_private))->ipp_npps--;
		}
		ilport->ilport_tg = NULL;
		STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
	} else {
		/* sessions still attached; caller must retry later */
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	/* tear down the per-port kstats */
	if (ilport->ilport_kstat_info) {
		kmem_free(ilport->ilport_kstat_info->ks_data,
		    ilport->ilport_kstat_info->ks_data_size);
		kstat_delete(ilport->ilport_kstat_info);
	}
	if (ilport->ilport_kstat_io) {
		kstat_delete(ilport->ilport_kstat_io);
		mutex_destroy(&ilport->ilport_kstat_lock);
	}
	stmf_delete_itl_kstat_by_lport(ilport->ilport_kstat_tgt_name);
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3371
3372 /*
3373 * Rport id/instance mappings remain valid until STMF is unloaded
3374 */
3375 static int
stmf_irport_compare(const void * void_irport1,const void * void_irport2)3376 stmf_irport_compare(const void *void_irport1, const void *void_irport2)
3377 {
3378 const stmf_i_remote_port_t *irport1 = void_irport1;
3379 const stmf_i_remote_port_t *irport2 = void_irport2;
3380 int result;
3381
3382 /* Sort by code set then ident */
3383 if (irport1->irport_id->code_set <
3384 irport2->irport_id->code_set) {
3385 return (-1);
3386 } else if (irport1->irport_id->code_set >
3387 irport2->irport_id->code_set) {
3388 return (1);
3389 }
3390
3391 /* Next by ident length */
3392 if (irport1->irport_id->ident_length <
3393 irport2->irport_id->ident_length) {
3394 return (-1);
3395 } else if (irport1->irport_id->ident_length >
3396 irport2->irport_id->ident_length) {
3397 return (1);
3398 }
3399
3400 /* Code set and ident length both match, now compare idents */
3401 result = memcmp(irport1->irport_id->ident,
3402 irport2->irport_id->ident,
3403 irport1->irport_id->ident_length);
3404
3405 if (result < 0) {
3406 return (-1);
3407 } else if (result > 0) {
3408 return (1);
3409 }
3410
3411 return (0);
3412 }
3413
/*
 * Allocate and initialize a new remote port node for the given device
 * identifier.  The devid is copied into trailing storage of the same
 * allocation (single kmem block).  Returns NULL if memory or an
 * instance id cannot be obtained.  Initial refcnt is 1 (the caller's
 * hold).  Caller must hold stmf_state.stmf_lock.
 */
static stmf_i_remote_port_t *
stmf_irport_create(scsi_devid_desc_t *rport_devid)
{
	int alloc_len;
	stmf_i_remote_port_t *irport;

	/*
	 * Lookup will bump the refcnt if there's an existing rport
	 * context for this identifier.
	 */
	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	/* "- 1" because scsi_devid_desc_t already contains ident[1] */
	alloc_len = sizeof (*irport) + sizeof (scsi_devid_desc_t) +
	    rport_devid->ident_length - 1;
	irport = kmem_zalloc(alloc_len, KM_NOSLEEP);
	if (irport == NULL) {
		return (NULL);
	}

	irport->irport_instance =
	    id_alloc_nosleep(stmf_state.stmf_irport_inst_space);
	if (irport->irport_instance == -1) {
		kmem_free(irport, alloc_len);
		return (NULL);
	}

	/* the devid copy lives immediately after the irport struct */
	irport->irport_id =
	    (struct scsi_devid_desc *)(irport + 1); /* Ptr. Arith. */
	bcopy(rport_devid, irport->irport_id,
	    sizeof (scsi_devid_desc_t) + rport_devid->ident_length - 1);
	irport->irport_refcnt = 1;
	mutex_init(&irport->irport_mutex, NULL, MUTEX_DEFAULT, NULL);

	return (irport);
}
3449
3450 static void
stmf_irport_destroy(stmf_i_remote_port_t * irport)3451 stmf_irport_destroy(stmf_i_remote_port_t *irport)
3452 {
3453 id_free(stmf_state.stmf_irport_inst_space, irport->irport_instance);
3454 mutex_destroy(&irport->irport_mutex);
3455 kmem_free(irport, sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3456 irport->irport_id->ident_length - 1);
3457 }
3458
3459 static stmf_i_remote_port_t *
stmf_irport_register(scsi_devid_desc_t * rport_devid)3460 stmf_irport_register(scsi_devid_desc_t *rport_devid)
3461 {
3462 stmf_i_remote_port_t *irport;
3463
3464 mutex_enter(&stmf_state.stmf_lock);
3465
3466 /*
3467 * Lookup will bump the refcnt if there's an existing rport
3468 * context for this identifier.
3469 */
3470 if ((irport = stmf_irport_lookup_locked(rport_devid)) != NULL) {
3471 mutex_exit(&stmf_state.stmf_lock);
3472 return (irport);
3473 }
3474
3475 irport = stmf_irport_create(rport_devid);
3476 if (irport == NULL) {
3477 mutex_exit(&stmf_state.stmf_lock);
3478 return (NULL);
3479 }
3480
3481 avl_add(&stmf_state.stmf_irportlist, irport);
3482 mutex_exit(&stmf_state.stmf_lock);
3483
3484 return (irport);
3485 }
3486
3487 static stmf_i_remote_port_t *
stmf_irport_lookup_locked(scsi_devid_desc_t * rport_devid)3488 stmf_irport_lookup_locked(scsi_devid_desc_t *rport_devid)
3489 {
3490 stmf_i_remote_port_t *irport;
3491 stmf_i_remote_port_t tmp_irport;
3492
3493 ASSERT(mutex_owned(&stmf_state.stmf_lock));
3494 tmp_irport.irport_id = rport_devid;
3495 irport = avl_find(&stmf_state.stmf_irportlist, &tmp_irport, NULL);
3496 if (irport != NULL) {
3497 mutex_enter(&irport->irport_mutex);
3498 irport->irport_refcnt++;
3499 mutex_exit(&irport->irport_mutex);
3500 }
3501
3502 return (irport);
3503 }
3504
3505 static void
stmf_irport_deregister(stmf_i_remote_port_t * irport)3506 stmf_irport_deregister(stmf_i_remote_port_t *irport)
3507 {
3508 /*
3509 * If we were actually going to remove unreferenced remote ports
3510 * we would want to acquire stmf_state.stmf_lock before getting
3511 * the irport mutex.
3512 *
3513 * Instead we're just going to leave it there even if unreferenced.
3514 */
3515 mutex_enter(&irport->irport_mutex);
3516 irport->irport_refcnt--;
3517 mutex_exit(&irport->irport_mutex);
3518 }
3519
3520 /*
3521 * Port provider has to make sure that register/deregister session and
3522 * port are serialized calls.
3523 */
/*
 * Port provider has to make sure that register/deregister session and
 * port are serialized calls.
 *
 * Register a new SCSI session on the given local port: validate port
 * state, attach a remote port node, validate or synthesize the remote
 * port transport id, create the session's LUN map and link it onto the
 * port's session list.  Returns STMF_FAILURE on any validation or
 * allocation error, STMF_SUCCESS otherwise.
 */
stmf_status_t
stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_scsi_session_t *iss;
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	uint8_t lun[8];

	/*
	 * Port state has to be online to register a scsi session. It is
	 * possible that we started an offline operation and a new SCSI
	 * session started at the same time (in that case also we are going
	 * to fail the registeration). But any other state is simply
	 * a bad port provider implementation.
	 */
	if (ilport->ilport_state != STMF_STATE_ONLINE) {
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			stmf_trace(lport->lport_alias, "Port is trying to "
			    "register a session while the state is neither "
			    "online nor offlining");
		}
		return (STMF_FAILURE);
	}
	bzero(lun, 8);
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	if ((iss->iss_irport = stmf_irport_register(ss->ss_rport_id)) == NULL) {
		stmf_trace(lport->lport_alias, "Could not register "
		    "remote port during session registration");
		return (STMF_FAILURE);
	}

	/* flag the session as under construction until fully linked in */
	iss->iss_flags |= ISS_BEING_CREATED;

	if (ss->ss_rport == NULL) {
		/* no transport id supplied; synthesize one from the devid */
		iss->iss_flags |= ISS_NULL_TPTID;
		ss->ss_rport = stmf_scsilib_devid_to_remote_port(
		    ss->ss_rport_id);
		if (ss->ss_rport == NULL) {
			iss->iss_flags &= ~(ISS_NULL_TPTID | ISS_BEING_CREATED);
			stmf_trace(lport->lport_alias, "Device id to "
			    "remote port conversion failed");
			return (STMF_FAILURE);
		}
	} else {
		/* caller supplied a transport id; sanity check it */
		if (!stmf_scsilib_tptid_validate(ss->ss_rport->rport_tptid,
		    ss->ss_rport->rport_tptid_sz, NULL)) {
			iss->iss_flags &= ~ISS_BEING_CREATED;
			stmf_trace(lport->lport_alias, "Remote port "
			    "transport id validation failed");
			return (STMF_FAILURE);
		}
	}

	/* sessions use the ilport_lock. No separate lock is required */
	iss->iss_lockp = &ilport->ilport_lock;

	if (iss->iss_sm != NULL)
		cmn_err(CE_PANIC, "create lun map called with non NULL map");
	iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
	    KM_SLEEP);

	/* lock order: stmf_lock before ilport_lock (see top of file) */
	mutex_enter(&stmf_state.stmf_lock);
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	(void) stmf_session_create_lun_map(ilport, iss);
	ilport->ilport_nsessions++;
	iss->iss_next = ilport->ilport_ss_list;
	ilport->ilport_ss_list = iss;
	rw_exit(&ilport->ilport_lock);
	mutex_exit(&stmf_state.stmf_lock);

	iss->iss_creation_time = ddi_get_time();
	/* hand out a globally unique session id */
	ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1);
	iss->iss_flags &= ~ISS_BEING_CREATED;
	/* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
	iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
	DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);
	return (STMF_SUCCESS);
}
3603
/*
 * Remove a SCSI session from its local port.  Waits (by retrying) for
 * any in-flight event delivery on the session to finish, notifies the
 * ALUA proxy when applicable, unlinks the session from the port's list
 * (panicking if it was never registered), releases the remote port
 * hold and destroys the session LUN map.
 */
void
stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	stmf_i_scsi_session_t *iss, **ppss;
	int found = 0;
	stmf_ic_msg_t *ic_session_dereg;
	stmf_status_t ic_ret = STMF_FAILURE;

	DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);

	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	if (ss->ss_rport_alias) {
		ss->ss_rport_alias = NULL;
	}

try_dereg_ss_again:
	mutex_enter(&stmf_state.stmf_lock);
	atomic_and_32(&iss->iss_flags,
	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
	if (iss->iss_flags & ISS_EVENT_ACTIVE) {
		/* event delivery in progress; back off and retry */
		mutex_exit(&stmf_state.stmf_lock);
		delay(1);
		goto try_dereg_ss_again;
	}

	/* dereg proxy session if not standby port */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		ic_session_dereg = ic_session_dereg_msg_alloc(
		    ss, stmf_proxy_msg_id);
		if (ic_session_dereg) {
			ic_ret = ic_tx_msg(ic_session_dereg);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				/* msgid consumed only on successful send */
				stmf_proxy_msg_id++;
			}
		}
	}

	/* unlink the session from the port's singly linked list */
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
	    ppss = &((*ppss)->iss_next)) {
		if (iss == (*ppss)) {
			*ppss = (*ppss)->iss_next;
			found = 1;
			break;
		}
	}
	if (!found) {
		cmn_err(CE_PANIC, "Deregister session called for non existent"
		    " session");
	}
	ilport->ilport_nsessions--;

	stmf_irport_deregister(iss->iss_irport);
	(void) stmf_session_destroy_lun_map(ilport, iss);
	rw_exit(&ilport->ilport_lock);
	mutex_exit(&stmf_state.stmf_lock);

	/* free the remote port only if stmf itself synthesized it */
	if (iss->iss_flags & ISS_NULL_TPTID) {
		stmf_remote_port_free(ss->ss_rport);
	}
}
3670
3671 stmf_i_scsi_session_t *
stmf_session_id_to_issptr(uint64_t session_id,int stay_locked)3672 stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
3673 {
3674 stmf_i_local_port_t *ilport;
3675 stmf_i_scsi_session_t *iss;
3676
3677 mutex_enter(&stmf_state.stmf_lock);
3678 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
3679 ilport = ilport->ilport_next) {
3680 rw_enter(&ilport->ilport_lock, RW_WRITER);
3681 for (iss = ilport->ilport_ss_list; iss != NULL;
3682 iss = iss->iss_next) {
3683 if (iss->iss_ss->ss_session_id == session_id) {
3684 if (!stay_locked)
3685 rw_exit(&ilport->ilport_lock);
3686 mutex_exit(&stmf_state.stmf_lock);
3687 return (iss);
3688 }
3689 }
3690 rw_exit(&ilport->ilport_lock);
3691 }
3692 mutex_exit(&stmf_state.stmf_lock);
3693 return (NULL);
3694 }
3695
3696 #define MAX_ALIAS 128
3697
3698 static int
stmf_itl_kstat_compare(const void * itl_kstat_1,const void * itl_kstat_2)3699 stmf_itl_kstat_compare(const void *itl_kstat_1, const void *itl_kstat_2)
3700 {
3701 const stmf_i_itl_kstat_t *kstat_nm1 = itl_kstat_1;
3702 const stmf_i_itl_kstat_t *kstat_nm2 = itl_kstat_2;
3703 int ret;
3704
3705 ret = strcmp(kstat_nm1->iitl_kstat_nm, kstat_nm2->iitl_kstat_nm);
3706 if (ret < 0) {
3707 return (-1);
3708 } else if (ret > 0) {
3709 return (1);
3710 }
3711 return (0);
3712 }
3713
3714 static stmf_i_itl_kstat_t *
stmf_itl_kstat_lookup(char * kstat_nm)3715 stmf_itl_kstat_lookup(char *kstat_nm)
3716 {
3717 stmf_i_itl_kstat_t tmp;
3718 stmf_i_itl_kstat_t *itl_kstat;
3719
3720 ASSERT(mutex_owned(&stmf_state.stmf_lock));
3721 (void) strcpy(tmp.iitl_kstat_nm, kstat_nm);
3722 itl_kstat = avl_find(&stmf_state.stmf_itl_kstat_list, &tmp, NULL);
3723 return (itl_kstat);
3724 }
3725
3726 static void
stmf_delete_itl_kstat_by_lport(char * tgt)3727 stmf_delete_itl_kstat_by_lport(char *tgt)
3728 {
3729 stmf_i_itl_kstat_t *ks_itl, *next;
3730
3731 ASSERT(mutex_owned(&stmf_state.stmf_lock));
3732 ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3733 for (; ks_itl != NULL; ks_itl = next) {
3734 next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3735 if (strcmp(ks_itl->iitl_kstat_lport, tgt) == 0) {
3736 stmf_teardown_itl_kstats(ks_itl);
3737 avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3738 kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3739 }
3740 }
3741 }
3742
3743 static void
stmf_delete_itl_kstat_by_guid(char * guid)3744 stmf_delete_itl_kstat_by_guid(char *guid)
3745 {
3746 stmf_i_itl_kstat_t *ks_itl, *next;
3747
3748 ASSERT(mutex_owned(&stmf_state.stmf_lock));
3749 ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3750 for (; ks_itl != NULL; ks_itl = next) {
3751 next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3752 if (strcmp(ks_itl->iitl_kstat_guid, guid) == 0) {
3753 stmf_teardown_itl_kstats(ks_itl);
3754 avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3755 kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3756 }
3757 }
3758 }
3759
/*
 * Insert (or find) a tracking node for an ITL's kstats in the global
 * AVL list, keyed by kstat name.  The node caches the kstat pointers
 * owned by the itl so they can be torn down later by lport/guid even
 * after the itl itself is gone.  Returns NULL only on allocation
 * failure.  Caller must hold stmf_state.stmf_lock.
 */
static stmf_i_itl_kstat_t *
stmf_itl_kstat_create(stmf_itl_data_t *itl, char *nm,
    scsi_devid_desc_t *lport, scsi_devid_desc_t *lun)
{
	stmf_i_itl_kstat_t *ks_itl;
	int i, len;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	/* an existing node for this name is simply reused */
	if ((ks_itl = stmf_itl_kstat_lookup(nm)) != NULL)
		return (ks_itl);

	len = sizeof (stmf_i_itl_kstat_t);
	ks_itl = kmem_zalloc(len, KM_NOSLEEP);
	if (ks_itl == NULL)
		return (NULL);

	(void) strcpy(ks_itl->iitl_kstat_nm, nm);
	/* record the lport ident as a NUL-terminated string key */
	bcopy(lport->ident, ks_itl->iitl_kstat_lport, lport->ident_length);
	ks_itl->iitl_kstat_lport[lport->ident_length] = '\0';
	/* record the LU GUID as lowercase hex for later lookup by guid */
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&ks_itl->iitl_kstat_guid[i * 2], "%02x",
		    lun->ident[i]);
	}
	/* cache the itl's kstat handles; this node outlives the itl */
	ks_itl->iitl_kstat_strbuf = itl->itl_kstat_strbuf;
	ks_itl->iitl_kstat_strbuflen = itl->itl_kstat_strbuflen;
	ks_itl->iitl_kstat_info = itl->itl_kstat_info;
	ks_itl->iitl_kstat_taskq = itl->itl_kstat_taskq;
	ks_itl->iitl_kstat_lu_xfer = itl->itl_kstat_lu_xfer;
	ks_itl->iitl_kstat_lport_xfer = itl->itl_kstat_lport_xfer;
	avl_add(&stmf_state.stmf_itl_kstat_list, ks_itl);

	return (ks_itl);
}
3793
/*
 * Create (or attach to) the kstats for one I-T-L nexus: a "misc" named
 * kstat holding identity strings plus three "io" kstats (task queue,
 * LU transfer, lport transfer).  If a kstat set with the same
 * "itl_<rport>.<lport>.<lun>" name already exists the itl simply
 * borrows its handles.  Returns STMF_SUCCESS or STMF_ALLOC_FAILURE
 * (all partially created kstats are cleaned up on failure).
 * Caller must hold stmf_state.stmf_lock (for stmf_itl_kstat_lookup).
 */
stmf_status_t
stmf_setup_itl_kstats(stmf_itl_data_t *itl)
{
	char ks_itl_id[32];
	char ks_nm[KSTAT_STRLEN];
	char ks_itl_nm[KSTAT_STRLEN];
	stmf_kstat_itl_info_t *ks_itl;
	stmf_scsi_session_t *ss;
	stmf_i_scsi_session_t *iss;
	stmf_i_local_port_t *ilport;
	char *strbuf;
	int id, len, i;
	char *rport_alias;
	char *lport_alias;
	char *lu_alias;
	stmf_i_itl_kstat_t *tmp_kstat;

	/*
	 * Allocate enough memory in the ITL to hold the relevant
	 * identifiers.
	 * rport and lport identifiers come from the stmf_scsi_session_t.
	 * ident might not be null terminated.
	 */
	ss = itl->itl_session->iss_ss;
	iss = ss->ss_stmf_private;
	ilport = ss->ss_lport->lport_stmf_private;
	/* kstat name is keyed on rport instance, lport instance and LUN */
	(void) snprintf(ks_itl_id, 32, "%d.%d.%d",
	    iss->iss_irport->irport_instance, ilport->ilport_instance,
	    itl->itl_lun);

	(void) snprintf(ks_itl_nm, KSTAT_STRLEN, "itl_%s", ks_itl_id);
	/*
	 * let's verify this itl_kstat already exist
	 */
	if ((tmp_kstat = stmf_itl_kstat_lookup(ks_itl_nm)) != NULL) {
		/* reuse the existing kstat set; the itl just borrows it */
		itl->itl_kstat_strbuf = tmp_kstat->iitl_kstat_strbuf;
		itl->itl_kstat_strbuflen = tmp_kstat->iitl_kstat_strbuflen;
		itl->itl_kstat_info = tmp_kstat->iitl_kstat_info;
		itl->itl_kstat_taskq = tmp_kstat->iitl_kstat_taskq;
		itl->itl_kstat_lu_xfer = tmp_kstat->iitl_kstat_lu_xfer;
		itl->itl_kstat_lport_xfer = tmp_kstat->iitl_kstat_lport_xfer;
		return (STMF_SUCCESS);
	}

	/* New itl_kstat */
	rport_alias = (ss->ss_rport_alias == NULL) ?
	    "" : ss->ss_rport_alias;
	lport_alias = (ss->ss_lport->lport_alias == NULL) ?
	    "" : ss->ss_lport->lport_alias;
	lu_alias = (itl->itl_ilu->ilu_lu->lu_alias == NULL) ?
	    "" : itl->itl_ilu->ilu_lu->lu_alias;

	/* one backing buffer holds every NUL-terminated identity string */
	itl->itl_kstat_strbuflen = (ss->ss_rport_id->ident_length + 1) +
	    (strnlen(rport_alias, MAX_ALIAS) + 1) +
	    (ss->ss_lport->lport_id->ident_length + 1) +
	    (strnlen(lport_alias, MAX_ALIAS) + 1) +
	    (STMF_GUID_INPUT + 1) +
	    (strnlen(lu_alias, MAX_ALIAS) + 1) +
	    MAX_PROTO_STR_LEN;
	itl->itl_kstat_strbuf = kmem_zalloc(itl->itl_kstat_strbuflen,
	    KM_NOSLEEP);
	if (itl->itl_kstat_strbuf == NULL) {
		return (STMF_ALLOC_FAILURE);
	}

	ks_itl = (stmf_kstat_itl_info_t *)kmem_zalloc(sizeof (*ks_itl),
	    KM_NOSLEEP);
	if (ks_itl == NULL) {
		kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
		return (STMF_ALLOC_FAILURE);
	}

	/* virtual kstat: we supply and own the ks_data buffer */
	if ((itl->itl_kstat_info = kstat_create(STMF_MODULE_NAME,
	    0, ks_itl_nm, "misc", KSTAT_TYPE_NAMED,
	    sizeof (stmf_kstat_itl_info_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		goto itl_kstat_cleanup;
	}

	/* account for the string buffer so kstat reads size it correctly */
	itl->itl_kstat_info->ks_data_size += itl->itl_kstat_strbuflen;
	itl->itl_kstat_info->ks_data = ks_itl;

	kstat_named_init(&ks_itl->i_rport_name, "rport-name",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_rport_alias, "rport-alias",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lport_name, "lport-name",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lport_alias, "lport-alias",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_protocol, "protocol",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lu_guid, "lu-guid",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lu_alias, "lu-alias",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lu_number, "lu-number",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_task_waitq_elapsed, "task-waitq-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_task_read_elapsed, "task-read-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_task_write_elapsed, "task-write-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lu_read_elapsed, "lu-read-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lu_write_elapsed, "lu-write-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lport_read_elapsed, "lport-read-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lport_write_elapsed, "lport-write-elapsed",
	    KSTAT_DATA_UINT64);

	/* pack the identity strings into the backing buffer in order */
	strbuf = itl->itl_kstat_strbuf;

	/* Rport */
	len = ss->ss_rport_id->ident_length;
	bcopy(ss->ss_rport_id->ident, strbuf, len);
	strbuf += len;
	*strbuf = '\0';
	kstat_named_setstr(&ks_itl->i_rport_name, strbuf - len);
	strbuf++;

	len = strnlen(rport_alias, MAX_ALIAS);
	(void) strncpy(strbuf, rport_alias, len + 1);
	kstat_named_setstr(&ks_itl->i_rport_alias, strbuf);
	strbuf += len + 1;

	/* Lport */
	len = ss->ss_lport->lport_id->ident_length;
	bcopy(ss->ss_lport->lport_id->ident, strbuf, len);
	strbuf += len;
	*strbuf = '\0';
	kstat_named_setstr(&ks_itl->i_lport_name, strbuf - len);
	strbuf++;

	len = strnlen(lport_alias, MAX_ALIAS);
	(void) strncpy(strbuf, lport_alias, len + 1);
	kstat_named_setstr(&ks_itl->i_lport_alias, strbuf);
	strbuf += len + 1;

	/* clamp unknown protocol ids to PROTOCOL_ANY for the name table */
	id = (ss->ss_lport->lport_id->protocol_id > PROTOCOL_ANY) ?
	    PROTOCOL_ANY : ss->ss_lport->lport_id->protocol_id;
	kstat_named_setstr(&ks_itl->i_protocol, protocol_ident[id]);

	/* LU */
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&strbuf[i * 2], "%02x",
		    itl->itl_ilu->ilu_lu->lu_id->ident[i]);
	}
	kstat_named_setstr(&ks_itl->i_lu_guid, strbuf);
	strbuf += STMF_GUID_INPUT + 1;

	len = strnlen(lu_alias, MAX_ALIAS);
	(void) strncpy(strbuf, lu_alias, len + 1);
	kstat_named_setstr(&ks_itl->i_lu_alias, strbuf);
	strbuf += len + 1;

	ks_itl->i_lu_number.value.ui64 = itl->itl_lun;

	/* Now create the I/O kstats */
	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_tasks_%s",  ks_itl_id);
	if ((itl->itl_kstat_taskq = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		goto itl_kstat_cleanup;
	}

	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lu_%s",  ks_itl_id);
	if ((itl->itl_kstat_lu_xfer = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		goto itl_kstat_cleanup;
	}

	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lport_%s",  ks_itl_id);
	if ((itl->itl_kstat_lport_xfer = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		goto itl_kstat_cleanup;
	}

	/* Install all the kstats */
	kstat_install(itl->itl_kstat_info);
	kstat_install(itl->itl_kstat_taskq);
	kstat_install(itl->itl_kstat_lu_xfer);
	kstat_install(itl->itl_kstat_lport_xfer);

	/* Add new itl_kstat to stmf_itl_kstat_list */
	if (stmf_itl_kstat_create(itl, ks_itl_nm, ss->ss_lport->lport_id,
	    itl->itl_ilu->ilu_lu->lu_id) != NULL)
		return (STMF_SUCCESS);

itl_kstat_cleanup:
	/* unwind whichever kstats were created before the failure */
	if (itl->itl_kstat_taskq)
		kstat_delete(itl->itl_kstat_taskq);
	if (itl->itl_kstat_lu_xfer)
		kstat_delete(itl->itl_kstat_lu_xfer);
	if (itl->itl_kstat_lport_xfer)
		kstat_delete(itl->itl_kstat_lport_xfer);
	if (itl->itl_kstat_info)
		kstat_delete(itl->itl_kstat_info);
	kmem_free(ks_itl, sizeof (*ks_itl));
	kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
	cmn_err(CE_WARN, "STMF: kstat_create itl failed");
	return (STMF_ALLOC_FAILURE);
}
3998
/*
 * Destroy the kstat set tracked by one stmf_i_itl_kstat_t node: the
 * three io kstats, the named-info kstat (whose ks_data we own because
 * it was created KSTAT_FLAG_VIRTUAL) and the shared string buffer.
 * The node itself is freed by the caller.
 */
static void
stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks)
{
	kstat_delete(ks->iitl_kstat_lport_xfer);
	kstat_delete(ks->iitl_kstat_lu_xfer);
	kstat_delete(ks->iitl_kstat_taskq);
	/* virtual kstat: ks_data was allocated by us, so free it here */
	kmem_free(ks->iitl_kstat_info->ks_data, sizeof (stmf_kstat_itl_info_t));
	kstat_delete(ks->iitl_kstat_info);
	kmem_free(ks->iitl_kstat_strbuf, ks->iitl_kstat_strbuflen);
}
4009
/*
 * Final teardown of an ITL nexus entry: unlink it from the LU's itl
 * list, notify the LU provider that the handle is gone (passing the
 * recorded removal reason), and free the itl.  The entry must already
 * be flagged STMF_ITL_BEING_TERMINATED and must be on the LU's list.
 */
void
stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
{
	stmf_itl_data_t **itlpp;
	stmf_i_lu_t *ilu;

	ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	mutex_enter(&ilu->ilu_task_lock);
	/* locate the itl on the LU's singly linked list */
	for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
	    itlpp = &(*itlpp)->itl_next) {
		if ((*itlpp) == itl)
			break;
	}
	ASSERT((*itlpp) != NULL);
	*itlpp = itl->itl_next;
	mutex_exit(&ilu->ilu_task_lock);
	/* tell the LU provider its handle is no longer valid */
	lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
	    (uint32_t)itl->itl_hdlrm_reason);

	kmem_free(itl, sizeof (*itl));
}
4033
/*
 * Attach a provider-supplied ITL handle to the (session, LUN) pair.
 * The session may be given directly (ss) or looked up by session_id.
 * Fails with STMF_NOT_FOUND if the session or LUN mapping does not
 * exist, STMF_ALREADY if the mapping already has an itl attached, or
 * STMF_ALLOC_FAILURE on memory/kstat problems.
 */
stmf_status_t
stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
{
	stmf_itl_data_t *itl;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_ent_t *lun_map_ent;
	stmf_i_lu_t *ilu;
	uint16_t n;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	if (ss == NULL) {
		/* stay_locked=1: iss_lockp is held (writer) on return */
		iss = stmf_session_id_to_issptr(session_id, 1);
		if (iss == NULL)
			return (STMF_NOT_FOUND);
	} else {
		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	}

	/*
	 * Acquire stmf_lock for stmf_itl_kstat_lookup.
	 */
	mutex_enter(&stmf_state.stmf_lock);
	rw_enter(iss->iss_lockp, RW_WRITER);
	/* decode the 14-bit LUN number from the SCSI LUN field */
	n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	lun_map_ent = (stmf_lun_map_ent_t *)
	    stmf_get_ent_from_map(iss->iss_sm, n);
	if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_NOT_FOUND);
	}
	if (lun_map_ent->ent_itl_datap != NULL) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALREADY);
	}

	itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP);
	if (itl == NULL) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALLOC_FAILURE);
	}

	itl->itl_ilu = ilu;
	itl->itl_session = iss;
	itl->itl_counter = 1;		/* caller's reference */
	itl->itl_lun = n;
	itl->itl_handle = itl_handle;

	if (stmf_setup_itl_kstats(itl) != STMF_SUCCESS) {
		kmem_free(itl, sizeof (*itl));
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALLOC_FAILURE);
	}

	/* link the itl onto the LU's list and into the LUN map entry */
	mutex_enter(&ilu->ilu_task_lock);
	itl->itl_next = ilu->ilu_itl_list;
	ilu->ilu_itl_list = itl;
	mutex_exit(&ilu->ilu_task_lock);
	lun_map_ent->ent_itl_datap = itl;
	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
4102
/*
 * Mark an ITL handle as being terminated and drop the registration's
 * reference.  If that was the last reference, the handle is released
 * here; otherwise the final stmf_release_itl_handle() happens when the
 * last task holding a reference is freed (see stmf_task_free()).
 */
void
stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
{
	uint8_t old, new;

	/*
	 * Atomically set STMF_ITL_BEING_TERMINATED.  If another thread
	 * already set it, teardown is in progress -- nothing to do.
	 */
	do {
		old = new = itl->itl_flags;
		if (old & STMF_ITL_BEING_TERMINATED)
			return;
		new |= STMF_ITL_BEING_TERMINATED;
	} while (atomic_cas_8(&itl->itl_flags, old, new) != old);
	itl->itl_hdlrm_reason = hdlrm_reason;

	ASSERT(itl->itl_counter);

	/* Drop our reference; nonzero means tasks still hold references. */
	if (atomic_add_32_nv(&itl->itl_counter, -1))
		return;

	/*
	 * NOTE(review): this settle-delay/re-check of itl_counter after it
	 * reached zero looks racy against a concurrent final decrement in
	 * stmf_task_free() -- confirm the intended semantics upstream
	 * before restructuring.
	 */
	drv_usecwait(10);
	if (itl->itl_counter)
		return;

	stmf_release_itl_handle(lu, itl);
}
4127
/*
 * Deregister every ITL handle associated with the given LU across all
 * local ports and sessions.  Handle pointers are collected (and detached
 * from their lun-map entries) under stmf_lock, then deregistered after
 * all locks are dropped.
 */
stmf_status_t
stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *ent;
	uint32_t nmaps, nu;
	stmf_itl_data_t **itl_list;
	int i;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

dereg_itl_start:;
	/*
	 * Snapshot the LU's reference count to size the collection array.
	 * The KM_SLEEP allocation may block, so re-check the count under
	 * stmf_lock and restart if it changed meanwhile.
	 */
	nmaps = ilu->ilu_ref_cnt;
	if (nmaps == 0)
		return (STMF_NOT_FOUND);
	itl_list = (stmf_itl_data_t **)kmem_zalloc(
	    nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP);
	mutex_enter(&stmf_state.stmf_lock);
	if (nmaps != ilu->ilu_ref_cnt) {
		/* Something changed, start all over */
		mutex_exit(&stmf_state.stmf_lock);
		kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
		goto dereg_itl_start;
	}
	nu = 0;
	/* Walk every port -> session -> lun-map entry looking for this LU. */
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		rw_enter(&ilport->ilport_lock, RW_WRITER);
		for (iss = ilport->ilport_ss_list; iss != NULL;
		    iss = iss->iss_next) {
			lm = iss->iss_sm;
			if (!lm)
				continue;
			for (i = 0; i < lm->lm_nentries; i++) {
				if (lm->lm_plus[i] == NULL)
					continue;
				ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
				if ((ent->ent_lu == lu) &&
				    (ent->ent_itl_datap)) {
					itl_list[nu++] = ent->ent_itl_datap;
					ent->ent_itl_datap = NULL;
					if (nu == nmaps) {
						rw_exit(&ilport->ilport_lock);
						goto dai_scan_done;
					}
				}
			} /* lun table for a session */
		} /* sessions */
		rw_exit(&ilport->ilport_lock);
	} /* ports */

dai_scan_done:
	mutex_exit(&stmf_state.stmf_lock);

	/* Deregister the collected handles outside of any locks. */
	for (i = 0; i < nu; i++) {
		stmf_do_itl_dereg(lu, itl_list[i],
		    STMF_ITL_REASON_DEREG_REQUEST);
	}
	kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));

	return (STMF_SUCCESS);
}
4193
4194 stmf_status_t
stmf_deregister_itl_handle(stmf_lu_t * lu,uint8_t * lun,stmf_scsi_session_t * ss,uint64_t session_id,void * itl_handle)4195 stmf_deregister_itl_handle(stmf_lu_t *lu, uint8_t *lun,
4196 stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
4197 {
4198 stmf_i_scsi_session_t *iss;
4199 stmf_itl_data_t *itl;
4200 stmf_lun_map_ent_t *ent;
4201 stmf_lun_map_t *lm;
4202 int i;
4203 uint16_t n;
4204
4205 if (ss == NULL) {
4206 if (session_id == STMF_SESSION_ID_NONE)
4207 return (STMF_INVALID_ARG);
4208 iss = stmf_session_id_to_issptr(session_id, 1);
4209 if (iss == NULL)
4210 return (STMF_NOT_FOUND);
4211 } else {
4212 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4213 rw_enter(iss->iss_lockp, RW_WRITER);
4214 }
4215 lm = iss->iss_sm;
4216 if (lm == NULL) {
4217 rw_exit(iss->iss_lockp);
4218 return (STMF_NOT_FOUND);
4219 }
4220
4221 if (lun) {
4222 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4223 ent = (stmf_lun_map_ent_t *)
4224 stmf_get_ent_from_map(iss->iss_sm, n);
4225 } else {
4226 if (itl_handle == NULL) {
4227 rw_exit(iss->iss_lockp);
4228 return (STMF_INVALID_ARG);
4229 }
4230 ent = NULL;
4231 for (i = 0; i < lm->lm_nentries; i++) {
4232 if (lm->lm_plus[i] == NULL)
4233 continue;
4234 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4235 if (ent->ent_itl_datap &&
4236 (ent->ent_itl_datap->itl_handle == itl_handle)) {
4237 break;
4238 }
4239 }
4240 }
4241 if ((ent == NULL) || (ent->ent_lu != lu) ||
4242 (ent->ent_itl_datap == NULL)) {
4243 rw_exit(iss->iss_lockp);
4244 return (STMF_NOT_FOUND);
4245 }
4246 itl = ent->ent_itl_datap;
4247 ent->ent_itl_datap = NULL;
4248 rw_exit(iss->iss_lockp);
4249 stmf_do_itl_dereg(lu, itl, STMF_ITL_REASON_DEREG_REQUEST);
4250
4251 return (STMF_SUCCESS);
4252 }
4253
4254 stmf_status_t
stmf_get_itl_handle(stmf_lu_t * lu,uint8_t * lun,stmf_scsi_session_t * ss,uint64_t session_id,void ** itl_handle_retp)4255 stmf_get_itl_handle(stmf_lu_t *lu, uint8_t *lun, stmf_scsi_session_t *ss,
4256 uint64_t session_id, void **itl_handle_retp)
4257 {
4258 stmf_i_scsi_session_t *iss;
4259 stmf_lun_map_ent_t *ent;
4260 stmf_lun_map_t *lm;
4261 stmf_status_t ret;
4262 int i;
4263 uint16_t n;
4264
4265 if (ss == NULL) {
4266 iss = stmf_session_id_to_issptr(session_id, 1);
4267 if (iss == NULL)
4268 return (STMF_NOT_FOUND);
4269 } else {
4270 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4271 rw_enter(iss->iss_lockp, RW_WRITER);
4272 }
4273
4274 ent = NULL;
4275 if (lun == NULL) {
4276 lm = iss->iss_sm;
4277 for (i = 0; i < lm->lm_nentries; i++) {
4278 if (lm->lm_plus[i] == NULL)
4279 continue;
4280 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4281 if (ent->ent_lu == lu)
4282 break;
4283 }
4284 } else {
4285 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4286 ent = (stmf_lun_map_ent_t *)
4287 stmf_get_ent_from_map(iss->iss_sm, n);
4288 if (lu && (ent->ent_lu != lu))
4289 ent = NULL;
4290 }
4291 if (ent && ent->ent_itl_datap) {
4292 *itl_handle_retp = ent->ent_itl_datap->itl_handle;
4293 ret = STMF_SUCCESS;
4294 } else {
4295 ret = STMF_NOT_FOUND;
4296 }
4297
4298 rw_exit(iss->iss_lockp);
4299 return (ret);
4300 }
4301
4302 stmf_data_buf_t *
stmf_alloc_dbuf(scsi_task_t * task,uint32_t size,uint32_t * pminsize,uint32_t flags)4303 stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize,
4304 uint32_t flags)
4305 {
4306 stmf_i_scsi_task_t *itask =
4307 (stmf_i_scsi_task_t *)task->task_stmf_private;
4308 stmf_local_port_t *lport = task->task_lport;
4309 stmf_data_buf_t *dbuf;
4310 uint8_t ndx;
4311
4312 ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4313 if (ndx == 0xff)
4314 return (NULL);
4315 dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf(
4316 task, size, pminsize, flags);
4317 if (dbuf) {
4318 task->task_cur_nbufs++;
4319 itask->itask_allocated_buf_map |= (1 << ndx);
4320 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
4321 dbuf->db_handle = ndx;
4322 return (dbuf);
4323 }
4324
4325 return (NULL);
4326 }
4327
4328 stmf_status_t
stmf_setup_dbuf(scsi_task_t * task,stmf_data_buf_t * dbuf,uint32_t flags)4329 stmf_setup_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t flags)
4330 {
4331 stmf_i_scsi_task_t *itask =
4332 (stmf_i_scsi_task_t *)task->task_stmf_private;
4333 stmf_local_port_t *lport = task->task_lport;
4334 uint8_t ndx;
4335 stmf_status_t ret;
4336
4337 ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4338 ASSERT(lport->lport_ds->ds_setup_dbuf != NULL);
4339 ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4340
4341 if ((task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF) == 0)
4342 return (STMF_FAILURE);
4343 if (lport->lport_ds->ds_setup_dbuf == NULL)
4344 return (STMF_FAILURE);
4345
4346 ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4347 if (ndx == 0xff)
4348 return (STMF_FAILURE);
4349 ret = lport->lport_ds->ds_setup_dbuf(task, dbuf, flags);
4350 if (ret == STMF_FAILURE)
4351 return (STMF_FAILURE);
4352 itask->itask_dbufs[ndx] = dbuf;
4353 task->task_cur_nbufs++;
4354 itask->itask_allocated_buf_map |= (1 << ndx);
4355 dbuf->db_handle = ndx;
4356
4357 return (STMF_SUCCESS);
4358 }
4359
4360 void
stmf_teardown_dbuf(scsi_task_t * task,stmf_data_buf_t * dbuf)4361 stmf_teardown_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4362 {
4363 stmf_i_scsi_task_t *itask =
4364 (stmf_i_scsi_task_t *)task->task_stmf_private;
4365 stmf_local_port_t *lport = task->task_lport;
4366
4367 ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4368 ASSERT(lport->lport_ds->ds_teardown_dbuf != NULL);
4369 ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4370
4371 itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4372 task->task_cur_nbufs--;
4373 lport->lport_ds->ds_teardown_dbuf(lport->lport_ds, dbuf);
4374 }
4375
4376 void
stmf_free_dbuf(scsi_task_t * task,stmf_data_buf_t * dbuf)4377 stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4378 {
4379 stmf_i_scsi_task_t *itask =
4380 (stmf_i_scsi_task_t *)task->task_stmf_private;
4381 stmf_local_port_t *lport = task->task_lport;
4382
4383 itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4384 task->task_cur_nbufs--;
4385 lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf);
4386 }
4387
4388 stmf_data_buf_t *
stmf_handle_to_buf(scsi_task_t * task,uint8_t h)4389 stmf_handle_to_buf(scsi_task_t *task, uint8_t h)
4390 {
4391 stmf_i_scsi_task_t *itask;
4392
4393 itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4394 if (h > 3)
4395 return (NULL);
4396 return (itask->itask_dbufs[h]);
4397 }
4398
/* ARGSUSED */
/*
 * Allocate a scsi_task for a new command arriving on (lport, ss, lun).
 * A cached task is reused from the LU's free list when one with a large
 * enough CDB buffer exists; otherwise a fresh task is allocated and
 * linked onto the LU's task list.  Commands for unmapped LUNs are routed
 * to dlun0 (which reports the error to the initiator).  Returns NULL on
 * allocation failure or while the LU is being reset.
 */
struct scsi_task *
stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss,
    uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id)
{
	stmf_lu_t *lu;
	stmf_i_scsi_session_t *iss;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_task_t *itask;
	stmf_i_scsi_task_t **ppitask;
	scsi_task_t *task;
	uint8_t *l;
	stmf_lun_map_ent_t *lun_map_ent;
	uint16_t cdb_length;
	uint16_t luNbr;
	uint8_t new_task = 0;

	/*
	 * We allocate 7 extra bytes for CDB to provide a cdb pointer which
	 * is guaranteed to be 8 byte aligned. Some LU providers like OSD
	 * depend upon this alignment.
	 */
	if (cdb_length_in >= 16)
		cdb_length = cdb_length_in + 7;
	else
		cdb_length = 16 + 7;
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	/* SAM-2 single-level LUN number from the 8-byte LUN field. */
	luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	rw_enter(iss->iss_lockp, RW_READER);
	lun_map_ent =
	    (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr);
	if (!lun_map_ent) {
		/* Unmapped LUN: route to dlun0 for error reporting. */
		lu = dlun0;
	} else {
		lu = lun_map_ent->ent_lu;
	}
	ilu = lu->lu_stmf_private;
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		return (NULL);
	}
	/*
	 * Try to grab a cached task from the LU's free list whose CDB
	 * buffer is big enough for this command.
	 */
	do {
		if (ilu->ilu_free_tasks == NULL) {
			new_task = 1;
			break;
		}
		mutex_enter(&ilu->ilu_task_lock);
		for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) &&
		    ((*ppitask)->itask_cdb_buf_size < cdb_length);
		    ppitask = &((*ppitask)->itask_lu_free_next))
			;
		if (*ppitask) {
			itask = *ppitask;
			*ppitask = (*ppitask)->itask_lu_free_next;
			ilu->ilu_ntasks_free--;
			if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free)
				ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		} else {
			new_task = 1;
		}
		mutex_exit(&ilu->ilu_task_lock);
	/* CONSTCOND */
	} while (0);

	if (!new_task) {
		/*
		 * Save the task_cdb pointer and zero per cmd fields.
		 * We know the task_cdb_length is large enough by task
		 * selection process above.
		 */
		uint8_t *save_cdb;
		uintptr_t t_start, t_end;

		task = itask->itask_task;
		save_cdb = task->task_cdb; /* save */
		t_start = (uintptr_t)&task->task_flags;
		t_end = (uintptr_t)&task->task_extended_cmd;
		bzero((void *)t_start, (size_t)(t_end - t_start));
		task->task_cdb = save_cdb; /* restore */
		itask->itask_ncmds = 0;
	} else {
		task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK,
		    cdb_length, AF_FORCE_NOSLEEP);
		if (task == NULL) {
			rw_exit(iss->iss_lockp);
			return (NULL);
		}
		task->task_lu = lu;
		l = task->task_lun_no;
		l[0] = lun[0];
		l[1] = lun[1];
		l[2] = lun[2];
		l[3] = lun[3];
		l[4] = lun[4];
		l[5] = lun[5];
		l[6] = lun[6];
		l[7] = lun[7];
		/* Round the CDB pointer up to an 8-byte boundary. */
		task->task_cdb = (uint8_t *)task->task_port_private;
		if ((ulong_t)(task->task_cdb) & 7ul) {
			task->task_cdb = (uint8_t *)(((ulong_t)
			    (task->task_cdb) + 7ul) & ~(7ul));
		}
		itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
		itask->itask_cdb_buf_size = cdb_length;
		mutex_init(&itask->itask_audit_mutex, NULL, MUTEX_DRIVER, NULL);
	}
	/* Per-command initialization common to reused and new tasks. */
	task->task_session = ss;
	task->task_lport = lport;
	task->task_cdb_length = cdb_length_in;
	itask->itask_flags = ITASK_IN_TRANSITION;
	itask->itask_waitq_time = 0;
	itask->itask_lu_read_time = itask->itask_lu_write_time = 0;
	itask->itask_lport_read_time = itask->itask_lport_write_time = 0;
	itask->itask_read_xfer = itask->itask_write_xfer = 0;
	itask->itask_audit_index = 0;

	if (new_task) {
		if (lu->lu_task_alloc(task) != STMF_SUCCESS) {
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		mutex_enter(&ilu->ilu_task_lock);
		/* Re-check reset under the lock; a reset may have begun. */
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			mutex_exit(&ilu->ilu_task_lock);
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		itask->itask_lu_next = ilu->ilu_tasks;
		if (ilu->ilu_tasks)
			ilu->ilu_tasks->itask_lu_prev = itask;
		ilu->ilu_tasks = itask;
		/* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
		ilu->ilu_ntasks++;
		mutex_exit(&ilu->ilu_task_lock);
	}

	itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
	atomic_add_32(itask->itask_ilu_task_cntr, 1);
	itask->itask_start_time = ddi_get_lbolt();

	/* Take a reference on the map entry's ITL handle, if any. */
	if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
	    lun_map_ent->ent_itl_datap) != NULL)) {
		atomic_add_32(&itask->itask_itl_datap->itl_counter, 1);
		task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
	} else {
		itask->itask_itl_datap = NULL;
		task->task_lu_itl_handle = NULL;
	}

	rw_exit(iss->iss_lockp);
	return (task);
}
4553
4554 static void
stmf_task_lu_free(scsi_task_t * task,stmf_i_scsi_session_t * iss)4555 stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
4556 {
4557 stmf_i_scsi_task_t *itask =
4558 (stmf_i_scsi_task_t *)task->task_stmf_private;
4559 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
4560
4561 ASSERT(rw_lock_held(iss->iss_lockp));
4562 itask->itask_flags = ITASK_IN_FREE_LIST;
4563 itask->itask_proxy_msg_id = 0;
4564 mutex_enter(&ilu->ilu_task_lock);
4565 itask->itask_lu_free_next = ilu->ilu_free_tasks;
4566 ilu->ilu_free_tasks = itask;
4567 ilu->ilu_ntasks_free++;
4568 mutex_exit(&ilu->ilu_task_lock);
4569 atomic_add_32(itask->itask_ilu_task_cntr, -1);
4570 }
4571
/*
 * Shrink an LU's cached free-task list.  Releases half of the minimum
 * number of tasks that stayed free since the last scan (tracked in
 * ilu_ntasks_min_free), returning memory while keeping a working set.
 */
void
stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
{
	uint32_t num_to_release, ndx;
	stmf_i_scsi_task_t *itask;
	stmf_lu_t *lu = ilu->ilu_lu;

	ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);

	/* free half of the minimal free of the free tasks */
	num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
	if (!num_to_release) {
		return;
	}
	for (ndx = 0; ndx < num_to_release; ndx++) {
		/* Pop one task off the free list. */
		mutex_enter(&ilu->ilu_task_lock);
		itask = ilu->ilu_free_tasks;
		if (itask == NULL) {
			mutex_exit(&ilu->ilu_task_lock);
			break;
		}
		ilu->ilu_free_tasks = itask->itask_lu_free_next;
		ilu->ilu_ntasks_free--;
		mutex_exit(&ilu->ilu_task_lock);

		/* Let the LU release its per-task state; no locks held. */
		lu->lu_task_free(itask->itask_task);
		/* Unlink from the LU's all-tasks list, then free the task. */
		mutex_enter(&ilu->ilu_task_lock);
		if (itask->itask_lu_next)
			itask->itask_lu_next->itask_lu_prev =
			    itask->itask_lu_prev;
		if (itask->itask_lu_prev)
			itask->itask_lu_prev->itask_lu_next =
			    itask->itask_lu_next;
		else
			ilu->ilu_tasks = itask->itask_lu_next;

		ilu->ilu_ntasks--;
		mutex_exit(&ilu->ilu_task_lock);
		stmf_free(itask->itask_task);
	}
}
4613
/*
 * Called with stmf_lock held
 *
 * Service-thread pass that trims each LU's cached free-task list via
 * stmf_task_lu_check_freelist().  stmf_lock is dropped around the trim;
 * ILU_STALL_DEREGISTER keeps the LU from deregistering while unlocked.
 * The walk is time-bounded to roughly 10ms per invocation.
 */
void
stmf_check_freetask()
{
	stmf_i_lu_t *ilu;
	clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_draining may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) {
		stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		if (!ilu->ilu_ntasks_min_free) {
			/* Nothing stayed free since last pass; reset floor. */
			ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
			continue;
		}
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_task_lu_check_freelist(ilu);
		/*
		 * we do not care about the accuracy of
		 * ilu_ntasks_min_free, so we don't lock here
		 */
		ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}
4645
4646 void
stmf_do_ilu_timeouts(stmf_i_lu_t * ilu)4647 stmf_do_ilu_timeouts(stmf_i_lu_t *ilu)
4648 {
4649 clock_t l = ddi_get_lbolt();
4650 clock_t ps = drv_usectohz(1000000);
4651 stmf_i_scsi_task_t *itask;
4652 scsi_task_t *task;
4653 uint32_t to;
4654
4655 mutex_enter(&ilu->ilu_task_lock);
4656 for (itask = ilu->ilu_tasks; itask != NULL;
4657 itask = itask->itask_lu_next) {
4658 if (itask->itask_flags & (ITASK_IN_FREE_LIST |
4659 ITASK_BEING_ABORTED)) {
4660 continue;
4661 }
4662 task = itask->itask_task;
4663 if (task->task_timeout == 0)
4664 to = stmf_default_task_timeout;
4665 else
4666 to = task->task_timeout;
4667 if ((itask->itask_start_time + (to * ps)) > l)
4668 continue;
4669 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
4670 STMF_TIMEOUT, NULL);
4671 }
4672 mutex_exit(&ilu->ilu_task_lock);
4673 }
4674
/*
 * Called with stmf_lock held
 *
 * Service-thread pass that detects stalled tasks.  Each LU counts
 * outstanding tasks in two alternating counters: if the inactive
 * counter has drained to zero the LU is making progress and we just
 * flip to it; otherwise tasks survived a whole interval, so scan the LU
 * for timed-out tasks.  Time-bounded to roughly 10ms per invocation.
 */
void
stmf_check_ilu_timing()
{
	stmf_i_lu_t *ilu;
	clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_timing may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) {
		stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) {
			if (ilu->ilu_task_cntr2 == 0) {
				/* Other counter drained; switch to it. */
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2;
				continue;
			}
		} else {
			if (ilu->ilu_task_cntr1 == 0) {
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
				continue;
			}
		}
		/*
		 * If we are here then it means that there is some slowdown
		 * in tasks on this lu. We need to check.
		 */
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_do_ilu_timeouts(ilu);
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}
4712
4713 /*
4714 * Kills all tasks on a lu except tm_task
4715 */
4716 void
stmf_task_lu_killall(stmf_lu_t * lu,scsi_task_t * tm_task,stmf_status_t s)4717 stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s)
4718 {
4719 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4720 stmf_i_scsi_task_t *itask;
4721
4722 mutex_enter(&ilu->ilu_task_lock);
4723
4724 for (itask = ilu->ilu_tasks; itask != NULL;
4725 itask = itask->itask_lu_next) {
4726 if (itask->itask_flags & ITASK_IN_FREE_LIST)
4727 continue;
4728 if (itask->itask_task == tm_task)
4729 continue;
4730 stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL);
4731 }
4732 mutex_exit(&ilu->ilu_task_lock);
4733 }
4734
4735 void
stmf_free_task_bufs(stmf_i_scsi_task_t * itask,stmf_local_port_t * lport)4736 stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport)
4737 {
4738 int i;
4739 uint8_t map;
4740
4741 if ((map = itask->itask_allocated_buf_map) == 0)
4742 return;
4743 for (i = 0; i < 4; i++) {
4744 if (map & 1) {
4745 stmf_data_buf_t *dbuf;
4746
4747 dbuf = itask->itask_dbufs[i];
4748 if (dbuf->db_xfer_start_timestamp) {
4749 stmf_lport_xfer_done(itask, dbuf);
4750 }
4751 if (dbuf->db_flags & DB_LU_DATA_BUF) {
4752 /*
4753 * LU needs to clean up buffer.
4754 * LU is required to free the buffer
4755 * in the xfer_done handler.
4756 */
4757 scsi_task_t *task = itask->itask_task;
4758 stmf_lu_t *lu = task->task_lu;
4759
4760 lu->lu_dbuf_free(task, dbuf);
4761 ASSERT(((itask->itask_allocated_buf_map>>i)
4762 & 1) == 0); /* must be gone */
4763 } else {
4764 ASSERT(dbuf->db_lu_private == NULL);
4765 dbuf->db_lu_private = NULL;
4766 lport->lport_ds->ds_free_data_buf(
4767 lport->lport_ds, dbuf);
4768 }
4769 }
4770 map >>= 1;
4771 }
4772 itask->itask_allocated_buf_map = 0;
4773 }
4774
/*
 * Final teardown of a completed task: release any remaining data
 * buffers, drop the task's ITL-handle reference (freeing the handle if
 * it was the last one), return the task to the port provider and then
 * to the LU's free-task cache.  iss_lockp is held across
 * lport_task_free() and stmf_task_lu_free() so the session cannot be
 * torn down underneath us.
 */
void
stmf_task_free(scsi_task_t *task)
{
	stmf_local_port_t *lport = task->task_lport;
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;

	stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL);

	stmf_free_task_bufs(itask, lport);
	stmf_itl_task_done(itask);
	DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
	    hrtime_t,
	    itask->itask_done_timestamp - itask->itask_start_timestamp);
	/* Drop this task's ITL reference; release the handle on last one. */
	if (itask->itask_itl_datap) {
		if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter,
		    -1) == 0) {
			stmf_release_itl_handle(task->task_lu,
			    itask->itask_itl_datap);
		}
	}

	rw_enter(iss->iss_lockp, RW_READER);
	lport->lport_task_free(task);
	if (itask->itask_worker) {
		atomic_add_32(&stmf_cur_ntasks, -1);
		atomic_add_32(&itask->itask_worker->worker_ref_count, -1);
	}
	/*
	 * After calling stmf_task_lu_free, the task pointer can no longer
	 * be trusted.
	 */
	stmf_task_lu_free(task, iss);
	rw_exit(iss->iss_lockp);
}
4812
/*
 * Hand a newly allocated task (plus an optional initial data buffer) to
 * a worker thread for execution.  A worker is chosen round-robin with a
 * one-ahead load comparison; the task is appended to that worker's
 * queue and the worker is signalled if idle.
 */
void
stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int nv;
	uint32_t old, new;
	uint32_t ct;
	stmf_worker_t *w, *w1;
	uint8_t tm;

	/* A task carries at most 4 data buffers (itask_dbufs slots). */
	if (task->task_max_nbufs > 4)
		task->task_max_nbufs = 4;
	task->task_cur_nbufs = 0;
	/* Latest value of currently running tasks */
	ct = atomic_add_32_nv(&stmf_cur_ntasks, 1);

	/* Select the next worker using round robin */
	nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1);
	if (nv >= stmf_nworkers_accepting_cmds) {
		int s = nv;
		do {
			nv -= stmf_nworkers_accepting_cmds;
		} while (nv >= stmf_nworkers_accepting_cmds);
		if (nv < 0)
			nv = 0;
		/* Its ok if this cas fails */
		(void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter,
		    s, nv);
	}
	w = &stmf_workers[nv];

	/*
	 * A worker can be pinned by interrupt. So select the next one
	 * if it has lower load.
	 */
	if ((nv + 1) >= stmf_nworkers_accepting_cmds) {
		w1 = stmf_workers;
	} else {
		w1 = &stmf_workers[nv + 1];
	}
	if (w1->worker_queue_depth < w->worker_queue_depth)
		w = w1;

	mutex_enter(&w->worker_lock);
	if (((w->worker_flags & STMF_WORKER_STARTED) == 0) ||
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		/*
		 * Maybe we are in the middle of a change. Just go to
		 * the 1st worker.
		 */
		mutex_exit(&w->worker_lock);
		w = stmf_workers;
		mutex_enter(&w->worker_lock);
	}
	itask->itask_worker = w;
	/*
	 * Track max system load inside the worker as we already have the
	 * worker lock (no point implementing another lock). The service
	 * thread will do the comparisons and figure out the max overall
	 * system load.
	 */
	if (w->worker_max_sys_qdepth_pu < ct)
		w->worker_max_sys_qdepth_pu = ct;

	/*
	 * Atomically mark the task known to the target port and queued;
	 * target resets and REPORT LUNS get framework default handling.
	 */
	do {
		old = new = itask->itask_flags;
		new |= ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE;
		if (task->task_mgmt_function) {
			tm = task->task_mgmt_function;
			if ((tm == TM_TARGET_RESET) ||
			    (tm == TM_TARGET_COLD_RESET) ||
			    (tm == TM_TARGET_WARM_RESET)) {
				new |= ITASK_DEFAULT_HANDLING;
			}
		} else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
			new |= ITASK_DEFAULT_HANDLING;
		}
		new &= ~ITASK_IN_TRANSITION;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	stmf_itl_task_start(itask);

	/* Append the task to the selected worker's queue. */
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	/* Measure task waitq time */
	itask->itask_waitq_enter_timestamp = gethrtime();
	atomic_add_32(&w->worker_ref_count, 1);
	itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
	itask->itask_ncmds = 1;
	stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
	if (dbuf) {
		itask->itask_allocated_buf_map = 1;
		itask->itask_dbufs[0] = dbuf;
		dbuf->db_handle = 0;
	} else {
		itask->itask_allocated_buf_map = 0;
		itask->itask_dbufs[0] = NULL;
	}

	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) {
		w->worker_signal_timestamp = gethrtime();
		DTRACE_PROBE2(worker__signal, stmf_worker_t *, w,
		    scsi_task_t *, task);
		cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	/*
	 * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE
	 * was set between checking of ILU_RESET_ACTIVE and clearing of the
	 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
	 */
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
	}
}
4939
4940 static void
stmf_task_audit(stmf_i_scsi_task_t * itask,task_audit_event_t te,uint32_t cmd_or_iof,stmf_data_buf_t * dbuf)4941 stmf_task_audit(stmf_i_scsi_task_t *itask,
4942 task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf)
4943 {
4944 stmf_task_audit_rec_t *ar;
4945
4946 mutex_enter(&itask->itask_audit_mutex);
4947 ar = &itask->itask_audit_records[itask->itask_audit_index++];
4948 itask->itask_audit_index &= (ITASK_TASK_AUDIT_DEPTH - 1);
4949 ar->ta_event = te;
4950 ar->ta_cmd_or_iof = cmd_or_iof;
4951 ar->ta_itask_flags = itask->itask_flags;
4952 ar->ta_dbuf = dbuf;
4953 gethrestime(&ar->ta_timestamp);
4954 mutex_exit(&itask->itask_audit_mutex);
4955 }
4956
4957
/*
 * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
 * Once ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU may already have
 * been reset (i.e. cleared before ITASK_BEING_ABORTED was set).  If it
 * was not, it cannot be reset until the LU explicitly calls
 * stmf_task_lu_aborted().  Of course the LU will make that call only if
 * we invoke the LU's abort entry point, and we invoke that entry point
 * only if ITASK_KNOWN_TO_LU was set.
 *
 * The same logic applies to the port (ITASK_KNOWN_TO_TGT_PORT).
 *
 * Also, ITASK_BEING_ABORTED may not be set once both KNOWN_TO_LU
 * and KNOWN_TO_TGT_PORT have been reset.
 * +++++++++++++++++++++++++++++++++++++++++++++++
 */
4973
/*
 * Start a data transfer for a task through its local port.
 * STMF_IOF_LU_DONE indicates the LU is finished with the task;
 * STMF_IOF_STATS_ONLY accounts for the transfer without moving data.
 * Returns STMF_ABORTED if the task is being aborted, otherwise the
 * port provider's transfer status.
 */
stmf_status_t
stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	stmf_status_t ret = STMF_SUCCESS;

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf);

	if (ioflags & STMF_IOF_LU_DONE) {
		/* Atomically clear KNOWN_TO_LU unless an abort is pending. */
		uint32_t new, old;
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}
	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);
#ifdef DEBUG
	/* Fault injection: drop buffers while stmf_drop_buf_counter > 0. */
	if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
		if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) ==
		    1)
			return (STMF_SUCCESS);
	}
#endif

	stmf_update_kstat_lu_io(task, dbuf);
	stmf_update_kstat_lport_io(task, dbuf);
	stmf_lport_xfer_start(itask, dbuf);
	if (ioflags & STMF_IOF_STATS_ONLY) {
		stmf_lport_xfer_done(itask, dbuf);
		return (STMF_SUCCESS);
	}

	dbuf->db_flags |= DB_LPORT_XFER_ACTIVE;
	ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);

	/*
	 * Port provider may have already called the buffer callback in
	 * which case dbuf->db_xfer_start_timestamp will be 0.
	 */
	if (ret != STMF_SUCCESS) {
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
		if (dbuf->db_xfer_start_timestamp != 0)
			stmf_lport_xfer_done(itask, dbuf);
	}

	return (ret);
}
5026
/*
 * Completion callback from the port provider for a finished data
 * transfer.  Updates the task's flags atomically, queues the task back
 * to its worker if the LU still owns it (or just records the completed
 * buffer if it is already queued), and frees the task when neither LU
 * nor port references it any more.
 */
void
stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_local_port_t *ilport;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t update_queue_flags, free_it, queue_it;

	stmf_lport_xfer_done(itask, dbuf);

	stmf_task_audit(itask, TE_XFER_DONE, iof, dbuf);

	/* Guard against unexpected completions from the lport */
	if (dbuf->db_flags & DB_LPORT_XFER_ACTIVE) {
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
	} else {
		/*
		 * This should never happen.
		 */
		ilport = task->task_lport->lport_stmf_private;
		ilport->ilport_unexpected_comp++;
		cmn_err(CE_PANIC, "Unexpected xfer completion task %p dbuf %p",
		    (void *)task, (void *)dbuf);
		return;
	}

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			task->task_completion_status = dbuf->db_xfer_status;
			free_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			update_queue_flags = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				queue_it = 0;
			} else {
				queue_it = 1;
				new |= ITASK_IN_WORKER_QUEUE;
			}
		} else {
			update_queue_flags = 0;
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	if (update_queue_flags) {
		/* Encode the dbuf handle into the worker command byte. */
		uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE;

		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] = cmd;
		if (queue_it) {
			/* Append the task to its worker's queue. */
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
			/* Measure task waitq time */
			itask->itask_waitq_enter_timestamp = gethrtime();
			if (++(w->worker_queue_depth) >
			    w->worker_max_qdepth_pu) {
				w->worker_max_qdepth_pu = w->worker_queue_depth;
			}
			if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
				cv_signal(&w->worker_cv);
		}
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		}
	}
}
5124
/*
 * Sends the final SCSI status for a task to the initiator via the local
 * port.  Optionally (STMF_IOF_LU_DONE) drops the LU's reference first,
 * then computes the over-/under-run residual before handing the task to
 * lport_send_status().
 *
 * Returns STMF_ABORTED if the task is being aborted, STMF_SUCCESS if the
 * target port no longer knows the task, otherwise whatever
 * lport_send_status() returns.
 */
stmf_status_t
stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags)
{
	DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task);

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_SEND_STATUS, ioflags, NULL);

	if (ioflags & STMF_IOF_LU_DONE) {
		uint32_t new, old;
		/* Atomically clear KNOWN_TO_LU unless an abort races in. */
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}

	/* Port already finished with this task: nothing left to send. */
	if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) {
		return (STMF_SUCCESS);
	}

	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);

	/*
	 * Compute residual (over/under run) per the SAM rules: compare
	 * what the CDB asked for against what the initiator expected and
	 * what was actually moved.
	 */
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	} else if (task->task_cmd_xfer_length >
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_OVER;
		task->task_resid = task->task_cmd_xfer_length -
		    task->task_expected_xfer_length;
	} else if (task->task_nbytes_transferred <
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_UNDER;
		task->task_resid = task->task_expected_xfer_length -
		    task->task_nbytes_transferred;
	} else {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	}
	return (task->task_lport->lport_send_status(task, ioflags));
}
5171
/*
 * Completion callback from the local port after the final status has been
 * sent.  Mirrors stmf_data_xfer_done(): drops the port's reference when
 * STMF_IOF_LPORT_DONE is set, queues ITASK_CMD_STATUS_DONE to the worker
 * if the LU still knows the task, and frees the task once nothing
 * references it.  Unlike the xfer path, a status completion while the
 * task is already in the worker queue is a fatal inconsistency (panic).
 */
void
stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t free_it, queue_it;

	stmf_task_audit(itask, TE_SEND_STATUS_DONE, iof, NULL);

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			/* Abort path owns the task; bail out. */
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			free_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			queue_it = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				cmn_err(CE_PANIC, "status completion received"
				    " when task is already in worker queue "
				    " task = %p", (void *)task);
			}
			new |= ITASK_IN_WORKER_QUEUE;
		} else {
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;


	if (queue_it) {
		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] =
		    ITASK_CMD_STATUS_DONE;
		/* Append to the worker's singly-linked task list. */
		itask->itask_worker_next = NULL;
		if (w->worker_task_tail) {
			w->worker_task_tail->itask_worker_next = itask;
		} else {
			w->worker_task_head = itask;
		}
		w->worker_task_tail = itask;
		/* Measure task waitq time */
		itask->itask_waitq_enter_timestamp = gethrtime();
		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
			w->worker_max_qdepth_pu = w->worker_queue_depth;
		}
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		} else {
			/*
			 * Status-done is the port's last word; if the LU
			 * still holds the task here the state machine is
			 * broken, so panic with the flags for diagnosis.
			 */
			cmn_err(CE_PANIC, "LU is done with the task but LPORT "
			    " is not done, itask %p itask_flags %x",
			    (void *)itask, itask->itask_flags);
		}
	}
}
5252
/*
 * Called by an LU to declare it is completely done with a task.  This is
 * expected to be the final release: after clearing KNOWN_TO_LU no other
 * flag may remain set, otherwise the task lifecycle was violated and we
 * panic.  Aborting tasks are left to the abort path.
 */
void
stmf_task_lu_done(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;

	/* Held to serialize against the worker thread manipulating flags. */
	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		if (old & ITASK_IN_WORKER_QUEUE) {
			/* The LU may not finish a task it has queued work on. */
			cmn_err(CE_PANIC, "task_lu_done received"
			    " when task is in worker queue "
			    " task = %p", (void *)task);
		}
		new &= ~ITASK_KNOWN_TO_LU;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	mutex_exit(&w->worker_lock);

	if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
	    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
	    ITASK_BEING_ABORTED)) == 0) {
		stmf_task_free(task);
	} else {
		cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but "
		    " the task is still not done, task = %p", (void *)task);
	}
}
5287
/*
 * Marks a task as being aborted (with completion status s) and, when
 * possible, queues it to its worker so the abort is driven from worker
 * context.  A task that is already aborting, or that neither the LU nor
 * the target port knows about, is left alone.
 */
void
stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w;
	uint32_t old, new;

	stmf_task_audit(itask, TE_TASK_ABORT, CMD_OR_IOF_NA, NULL);

	/* Atomically transition to BEING_ABORTED exactly once. */
	do {
		old = new = itask->itask_flags;
		if ((old & ITASK_BEING_ABORTED) ||
		    ((old & (ITASK_KNOWN_TO_TGT_PORT |
		    ITASK_KNOWN_TO_LU)) == 0)) {
			return;
		}
		new |= ITASK_BEING_ABORTED;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;
	/* Start time anchors the abort timeout checks in stmf_do_task_abort. */
	itask->itask_start_time = ddi_get_lbolt();

	/*
	 * No worker yet, or the task is between queues (IN_TRANSITION):
	 * the abort will be picked up when the task settles.
	 */
	if (((w = itask->itask_worker) == NULL) ||
	    (itask->itask_flags & ITASK_IN_TRANSITION)) {
		return;
	}

	/* Queue it and get out */
	mutex_enter(&w->worker_lock);
	if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
		/* Already queued; the worker will notice BEING_ABORTED. */
		mutex_exit(&w->worker_lock);
		return;
	}
	atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
		cv_signal(&w->worker_cv);
	mutex_exit(&w->worker_lock);
}
5336
/*
 * Public abort entry point.  Dispatches on abort_cmd:
 *  - STMF_QUEUE_ABORT_LU: abort every task on the given LU.
 *  - STMF_QUEUE_TASK_ABORT: abort a single task.
 *  - STMF_REQUEUE_TASK_ABORT_LPORT/_LU: an lport/LU that previously
 *    returned STMF_BUSY from its abort entry asks to have its
 *    *_ABORT_CALLED flag cleared so the abort is retried later.
 */
void
stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg)
{
	stmf_i_scsi_task_t *itask = NULL;
	uint32_t old, new, f, rf;

	DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task,
	    stmf_status_t, s);

	switch (abort_cmd) {
	case STMF_QUEUE_ABORT_LU:
		stmf_task_lu_killall((stmf_lu_t *)arg, task, s);
		return;
	case STMF_QUEUE_TASK_ABORT:
		stmf_queue_task_for_abort(task, s);
		return;
	case STMF_REQUEUE_TASK_ABORT_LPORT:
		rf = ITASK_TGT_PORT_ABORT_CALLED;	/* flag to clear */
		f = ITASK_KNOWN_TO_TGT_PORT;		/* must still hold */
		break;
	case STMF_REQUEUE_TASK_ABORT_LU:
		rf = ITASK_LU_ABORT_CALLED;
		f = ITASK_KNOWN_TO_LU;
		break;
	default:
		return;
	}
	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	/* Only requeue if the abort is still pending and rf was set. */
	f |= ITASK_BEING_ABORTED | rf;
	do {
		old = new = itask->itask_flags;
		if ((old & f) != f) {
			return;
		}
		new &= ~rf;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
}
5374
/*
 * Called when the LU reports the outcome of an abort request.  On success
 * (or "task not found") with STMF_IOF_LU_DONE set, the LU's reference is
 * dropped.  Any other combination is a failed abort and offlines the LU
 * via stmf_abort_task_offline().
 */
void
stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
	unsigned long long st;

	stmf_task_audit(itask, TE_TASK_LU_ABORTED, iof, NULL);

	st = s; /* gcc fix */
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, sizeof (info),
		    "task %p, lu failed to abort ret=%llx", (void *)task, st);
	} else if ((iof & STMF_IOF_LU_DONE) == 0) {
		(void) snprintf(info, sizeof (info),
		    "Task aborted but LU is not finished, task ="
		    "%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LU abort successfully
		 */
		atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU);
		return;
	}

	/* Failed abort: take the LU offline (second arg 1 == LU side). */
	stmf_abort_task_offline(task, 1, info);
}
5402
/*
 * Port-side counterpart of stmf_task_lu_aborted(): processes the local
 * port's abort result.  A successful, finished abort atomically drops
 * KNOWN_TO_TGT_PORT; anything else offlines the port.
 */
void
stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
	unsigned long long st;	/* widened copy of s for %llx formatting */
	uint32_t old, new;

	stmf_task_audit(itask, TE_TASK_LPORT_ABORTED, iof, NULL);

	st = s;
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, sizeof (info),
		    "task %p, tgt port failed to abort ret=%llx", (void *)task,
		    st);
	} else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
		(void) snprintf(info, sizeof (info),
		    "Task aborted but tgt port is not finished, "
		    "task=%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LPORT abort successfully
		 */
		do {
			old = new = itask->itask_flags;
			if (!(old & ITASK_KNOWN_TO_TGT_PORT))
				return;
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
		return;
	}

	/* Failed abort: take the port offline (second arg 0 == port side). */
	stmf_abort_task_offline(task, 0, info);
}
5437
/*
 * Arranges for the worker to call the LU's poll entry for this task after
 * `timeout' milliseconds (ITASK_DEFAULT_POLL_TIMEOUT means "next tick").
 * Idempotent: a poll already pending on the command stack is not queued
 * twice.  Returns STMF_BUSY if the command stack is full.
 */
stmf_status_t
stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	int i;

	ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU);
	mutex_enter(&w->worker_lock);
	if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
		mutex_exit(&w->worker_lock);
		return (STMF_BUSY);
	}
	/* A poll is already pending; don't stack another one. */
	for (i = 0; i < itask->itask_ncmds; i++) {
		if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) {
			mutex_exit(&w->worker_lock);
			return (STMF_SUCCESS);
		}
	}
	itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU;
	if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
		itask->itask_poll_timeout = ddi_get_lbolt() + 1;
	} else {
		clock_t t = drv_usectohz(timeout * 1000);
		if (t == 0)
			t = 1;	/* never a zero wait: at least one tick */
		itask->itask_poll_timeout = ddi_get_lbolt() + t;
	}
	if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
		/* Append to the worker's task list and mark as queued. */
		itask->itask_worker_next = NULL;
		if (w->worker_task_tail) {
			w->worker_task_tail->itask_worker_next = itask;
		} else {
			w->worker_task_head = itask;
		}
		w->worker_task_tail = itask;
		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
			w->worker_max_qdepth_pu = w->worker_queue_depth;
		}
		atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);
	return (STMF_SUCCESS);
}
5485
5486 stmf_status_t
stmf_task_poll_lport(scsi_task_t * task,uint32_t timeout)5487 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout)
5488 {
5489 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
5490 task->task_stmf_private;
5491 stmf_worker_t *w = itask->itask_worker;
5492 int i;
5493
5494 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT);
5495 mutex_enter(&w->worker_lock);
5496 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
5497 mutex_exit(&w->worker_lock);
5498 return (STMF_BUSY);
5499 }
5500 for (i = 0; i < itask->itask_ncmds; i++) {
5501 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) {
5502 mutex_exit(&w->worker_lock);
5503 return (STMF_SUCCESS);
5504 }
5505 }
5506 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT;
5507 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
5508 itask->itask_poll_timeout = ddi_get_lbolt() + 1;
5509 } else {
5510 clock_t t = drv_usectohz(timeout * 1000);
5511 if (t == 0)
5512 t = 1;
5513 itask->itask_poll_timeout = ddi_get_lbolt() + t;
5514 }
5515 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
5516 itask->itask_worker_next = NULL;
5517 if (w->worker_task_tail) {
5518 w->worker_task_tail->itask_worker_next = itask;
5519 } else {
5520 w->worker_task_head = itask;
5521 }
5522 w->worker_task_tail = itask;
5523 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
5524 w->worker_max_qdepth_pu = w->worker_queue_depth;
5525 }
5526 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5527 cv_signal(&w->worker_cv);
5528 }
5529 mutex_exit(&w->worker_lock);
5530 return (STMF_SUCCESS);
5531 }
5532
/*
 * Worker-context abort driver.  For each side (LU, then lport) that still
 * knows the task and has not yet been asked to abort, calls its abort
 * entry exactly once (*_ABORT_CALLED guards the once-only semantics).
 * STMF_BUSY from the provider clears the guard so the call is retried on
 * a later pass; a side that was already called is checked against the
 * abort timeout and offlined if it expires.
 */
void
stmf_do_task_abort(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
	stmf_lu_t *lu;
	stmf_local_port_t *lport;
	unsigned long long ret;
	uint32_t old, new;
	uint8_t call_lu_abort, call_port_abort;
	char info[STMF_CHANGE_INFO_LEN];

	lu = task->task_lu;
	lport = task->task_lport;
	/* Claim the one-shot right to call the LU's abort entry. */
	do {
		old = new = itask->itask_flags;
		if ((old & (ITASK_KNOWN_TO_LU | ITASK_LU_ABORT_CALLED)) ==
		    ITASK_KNOWN_TO_LU) {
			new |= ITASK_LU_ABORT_CALLED;
			call_lu_abort = 1;
		} else {
			call_lu_abort = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	if (call_lu_abort) {
		/* Tasks under default handling are aborted via dlun0. */
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) {
			ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		} else {
			ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		}
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE);
		} else if (ret == STMF_BUSY) {
			/* LU busy: allow a retry on the next pass. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_LU_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, sizeof (info),
			    "Abort failed by LU %p, ret %llx", (void *)lu, ret);
			stmf_abort_task_offline(task, 1, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
		/* Abort already in flight: enforce the LU abort timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lu->lu_abort_timeout?
		    lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, sizeof (info),
			    "lu abort timed out");
			stmf_abort_task_offline(itask->itask_task, 1, info);
		}
	}

	/* Same protocol for the target-port side. */
	do {
		old = new = itask->itask_flags;
		if ((old & (ITASK_KNOWN_TO_TGT_PORT |
		    ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) {
			new |= ITASK_TGT_PORT_ABORT_CALLED;
			call_port_abort = 1;
		} else {
			call_port_abort = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	if (call_port_abort) {
		ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0);
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE);
		} else if (ret == STMF_BUSY) {
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_TGT_PORT_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, sizeof (info),
			    "Abort failed by tgt port %p ret %llx",
			    (void *)lport, ret);
			stmf_abort_task_offline(task, 0, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) {
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lport->lport_abort_timeout?
		    lport->lport_abort_timeout :
		    ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, sizeof (info),
			    "lport abort timed out");
			stmf_abort_task_offline(itask->itask_task, 0, info);
		}
	}
}
5617
/*
 * State-change control entry for LUs and local ports.  `cmd' selects both
 * the object class (STMF_CMD_LU_OP / STMF_CMD_LPORT_OP bit) and the
 * transition; `obj' is the stmf_lu_t/stmf_local_port_t and `arg' a
 * stmf_state_change_info_t (or stmf_change_status_t for *_COMPLETE).
 *
 * ONLINE/OFFLINE requests validate the current state, move it to the
 * transitional *ING state, and hand the work to the svc thread via
 * stmf_svc_queue() — those paths return with stmf_lock dropped and report
 * STMF_SUCCESS.  *_COMPLETE commands finalize the transition, acking the
 * provider with the lock temporarily dropped around the lu_ctl/lport_ctl
 * upcall.
 */
stmf_status_t
stmf_ctl(int cmd, void *obj, void *arg)
{
	stmf_status_t ret;
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg;

	mutex_enter(&stmf_state.stmf_lock);
	ret = STMF_INVALID_ARG;
	if (cmd & STMF_CMD_LU_OP) {
		ilu = stmf_lookup_lu((stmf_lu_t *)obj);
		if (ilu == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lu__state__change,
		    stmf_lu_t *, ilu->ilu_lu,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else if (cmd & STMF_CMD_LPORT_OP) {
		ilport = stmf_lookup_lport((stmf_local_port_t *)obj);
		if (ilport == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lport__state__change,
		    stmf_local_port_t *, ilport->ilport_lport,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else {
		goto stmf_ctl_lock_exit;
	}

	switch (cmd) {
	case STMF_CMD_LU_ONLINE:
		/* Only OFFLINE -> ONLINING is a legal start. */
		switch (ilu->ilu_state) {
		case STMF_STATE_OFFLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_ONLINE:
		case STMF_STATE_ONLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_OFFLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		ilu->ilu_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_ONLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_ONLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_ONLINE;
			/* Drop the lock across the provider's ack upcall. */
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		} else {
			/* XXX: should log a message and record more data */
			ilu->ilu_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LU_OFFLINE:
		switch (ilu->ilu_state) {
		case STMF_STATE_ONLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_OFFLINE:
		case STMF_STATE_OFFLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_ONLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;
		ilu->ilu_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_OFFLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_OFFLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_OFFLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			/* Offline failed: LU stays online and visible. */
			ilu->ilu_state = STMF_STATE_ONLINE;
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	/*
	 * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online.
	 * It's related with hardware disable/enable.
	 */
	case STMF_CMD_LPORT_ONLINE:
		switch (ilport->ilport_state) {
		case STMF_STATE_OFFLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_ONLINE:
		case STMF_STATE_ONLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_OFFLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		/*
		 * Only user request can recover the port from the
		 * FORCED_OFFLINE state
		 */
		if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) {
			if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
				ret = STMF_FAILURE;
				goto stmf_ctl_lock_exit;
			}
		}

		/*
		 * Avoid too frequent request to online
		 */
		if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
			/* User request resets the flap-detection history. */
			ilport->ilport_online_times = 0;
			ilport->ilport_avg_interval = 0;
		}
		if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) &&
		    (ilport->ilport_online_times >= 4)) {
			ret = STMF_FAILURE;
			ilport->ilport_flags |= ILPORT_FORCED_OFFLINE;
			stmf_trace(NULL, "stmf_ctl: too frequent request to "
			    "online the port");
			cmn_err(CE_WARN, "stmf_ctl: too frequent request to "
			    "online the port, set FORCED_OFFLINE now");
			goto stmf_ctl_lock_exit;
		}
		/* Maintain a running average of the online intervals. */
		if (ilport->ilport_online_times > 0) {
			if (ilport->ilport_online_times == 1) {
				ilport->ilport_avg_interval = ddi_get_lbolt() -
				    ilport->ilport_last_online_clock;
			} else {
				ilport->ilport_avg_interval =
				    (ilport->ilport_avg_interval +
				    ddi_get_lbolt() -
				    ilport->ilport_last_online_clock) >> 1;
			}
		}
		ilport->ilport_last_online_clock = ddi_get_lbolt();
		ilport->ilport_online_times++;

		/*
		 * Submit online service request
		 */
		ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE;
		ilport->ilport_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LPORT_ONLINE_COMPLETE:
		if (ilport->ilport_state != STMF_STATE_ONLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilport->ilport_state = STMF_STATE_ONLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_local_port_t *)obj)->lport_ctl(
			    (stmf_local_port_t *)obj,
			    STMF_ACK_LPORT_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			ilport->ilport_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LPORT_OFFLINE:
		switch (ilport->ilport_state) {
		case STMF_STATE_ONLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_OFFLINE:
		case STMF_STATE_OFFLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_ONLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		ilport->ilport_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LPORT_OFFLINE_COMPLETE:
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilport->ilport_state = STMF_STATE_OFFLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_local_port_t *)obj)->lport_ctl(
			    (stmf_local_port_t *)obj,
			    STMF_ACK_LPORT_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			ilport->ilport_state = STMF_STATE_ONLINE;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	default:
		cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd);
		ret = STMF_INVALID_ARG;
		goto stmf_ctl_lock_exit;
	}

	/* Queued/completed paths above already dropped stmf_lock. */
	return (STMF_SUCCESS);

stmf_ctl_lock_exit:;
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}
5883
/* ARGSUSED */
/*
 * Handler for SI_STMF-class info requests.  No framework-level info is
 * currently exported; always reports STMF_NOT_SUPPORTED.
 */
stmf_status_t
stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
    uint32_t *bufsizep)
{
	return (STMF_NOT_SUPPORTED);
}
5891
5892 /* ARGSUSED */
5893 stmf_status_t
stmf_info(uint32_t cmd,void * arg1,void * arg2,uint8_t * buf,uint32_t * bufsizep)5894 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
5895 uint32_t *bufsizep)
5896 {
5897 uint32_t cl = SI_GET_CLASS(cmd);
5898
5899 if (cl == SI_STMF) {
5900 return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep));
5901 }
5902 if (cl == SI_LPORT) {
5903 return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1,
5904 arg2, buf, bufsizep));
5905 } else if (cl == SI_LU) {
5906 return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf,
5907 bufsizep));
5908 }
5909
5910 return (STMF_NOT_SUPPORTED);
5911 }
5912
5913 /*
5914 * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by
5915 * stmf to register local ports. The ident should have 20 bytes in buffer
5916 * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string.
5917 */
5918 void
stmf_wwn_to_devid_desc(scsi_devid_desc_t * sdid,uint8_t * wwn,uint8_t protocol_id)5919 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn,
5920 uint8_t protocol_id)
5921 {
5922 char wwn_str[20+1];
5923
5924 sdid->protocol_id = protocol_id;
5925 sdid->piv = 1;
5926 sdid->code_set = CODE_SET_ASCII;
5927 sdid->association = ID_IS_TARGET_PORT;
5928 sdid->ident_length = 20;
5929 /* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */
5930 (void) snprintf(wwn_str, sizeof (wwn_str),
5931 "wwn.%02X%02X%02X%02X%02X%02X%02X%02X",
5932 wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]);
5933 bcopy(wwn_str, (char *)sdid->ident, 20);
5934 }
5935
5936
/*
 * Builds the REPORT TARGET PORT GROUPS response payload.  Active ports go
 * in one group and, when ilu_alua is set, standby ports in a second; the
 * group numbering flips depending on which ALUA node this is.  Returns a
 * kmem_zalloc'd stmf_xfer_data_t (caller frees via xd->alloc_size) or
 * NULL on allocation failure.
 */
stmf_xfer_data_t *
stmf_prepare_tpgs_data(uint8_t ilu_alua)
{
	stmf_xfer_data_t *xd;
	stmf_i_local_port_t *ilport;
	uint8_t *p;
	uint32_t sz, asz, nports = 0, nports_standby = 0;

	mutex_enter(&stmf_state.stmf_lock);
	/* check if any ports are standby and create second group */
	for (ilport = stmf_state.stmf_ilportlist; ilport;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_standby == 1) {
			nports_standby++;
		} else {
			nports++;
		}
	}

	/* The spec only allows for 255 ports to be reported per group */
	nports = min(nports, 255);
	nports_standby = min(nports_standby, 255);
	/* 12 = 4-byte RTPG header + 8-byte group descriptor header. */
	sz = (nports * 4) + 12;
	if (nports_standby && ilu_alua) {
		sz += (nports_standby * 4) + 8;
	}
	/* xd's buf[] overlaps the last 4 bytes of the struct. */
	asz = sz + sizeof (*xd) - 4;
	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
	if (xd == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (NULL);
	}
	xd->alloc_size = asz;
	xd->size_left = sz;

	p = xd->buf;

	/* Return data length (big-endian), excluding the length field. */
	*((uint32_t *)p) = BE_32(sz - 4);
	p += 4;
	p[0] = 0x80;	/* PREF */
	p[1] = 5;	/* AO_SUP, S_SUP */
	if (stmf_state.stmf_alua_node == 1) {
		p[3] = 1;	/* Group 1 */
	} else {
		p[3] = 0;	/* Group 0 */
	}
	p[7] = nports & 0xff;
	p += 8;
	/* One 4-byte descriptor per active port: rtpid in bytes 2-3. */
	for (ilport = stmf_state.stmf_ilportlist; ilport;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_standby == 1) {
			continue;
		}
		((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
		p += 4;
	}
	if (nports_standby && ilu_alua) {
		p[0] = 0x02;	/* Non PREF, Standby */
		p[1] = 5;	/* AO_SUP, S_SUP */
		if (stmf_state.stmf_alua_node == 1) {
			p[3] = 0;	/* Group 0 */
		} else {
			p[3] = 1;	/* Group 1 */
		}
		p[7] = nports_standby & 0xff;
		p += 8;
		for (ilport = stmf_state.stmf_ilportlist; ilport;
		    ilport = ilport->ilport_next) {
			if (ilport->ilport_standby == 0) {
				continue;
			}
			((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
			p += 4;
		}
	}

	mutex_exit(&stmf_state.stmf_lock);

	return (xd);
}
6017
6018 struct scsi_devid_desc *
stmf_scsilib_get_devid_desc(uint16_t rtpid)6019 stmf_scsilib_get_devid_desc(uint16_t rtpid)
6020 {
6021 scsi_devid_desc_t *devid = NULL;
6022 stmf_i_local_port_t *ilport;
6023
6024 mutex_enter(&stmf_state.stmf_lock);
6025
6026 for (ilport = stmf_state.stmf_ilportlist; ilport;
6027 ilport = ilport->ilport_next) {
6028 if (ilport->ilport_rtpid == rtpid) {
6029 scsi_devid_desc_t *id = ilport->ilport_lport->lport_id;
6030 uint32_t id_sz = sizeof (scsi_devid_desc_t) +
6031 id->ident_length;
6032 devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz,
6033 KM_NOSLEEP);
6034 if (devid != NULL) {
6035 bcopy(id, devid, id_sz);
6036 }
6037 break;
6038 }
6039 }
6040
6041 mutex_exit(&stmf_state.stmf_lock);
6042 return (devid);
6043 }
6044
6045 uint16_t
stmf_scsilib_get_lport_rtid(struct scsi_devid_desc * devid)6046 stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid)
6047 {
6048 stmf_i_local_port_t *ilport;
6049 scsi_devid_desc_t *id;
6050 uint16_t rtpid = 0;
6051
6052 mutex_enter(&stmf_state.stmf_lock);
6053 for (ilport = stmf_state.stmf_ilportlist; ilport;
6054 ilport = ilport->ilport_next) {
6055 id = ilport->ilport_lport->lport_id;
6056 if ((devid->ident_length == id->ident_length) &&
6057 (memcmp(devid->ident, id->ident, id->ident_length) == 0)) {
6058 rtpid = ilport->ilport_rtpid;
6059 break;
6060 }
6061 }
6062 mutex_exit(&stmf_state.stmf_lock);
6063 return (rtpid);
6064 }
6065
/* Monotonic counter mixed into generated LU ids to keep them unique. */
static uint16_t stmf_lu_id_gen_number = 0;

/*
 * Generates a unique LU id for company_id with no explicit host id
 * (the host id is then derived from the MAC address or hostid); see
 * stmf_scsilib_uniq_lu_id2() for the actual construction.
 */
stmf_status_t
stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id)
{
	return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id));
}
6073
/*
 * Fills lu_id (which must have ident_length 0x10) with a NAA-format
 * 16-byte unique identifier built from: the IEEE company id, a 6-byte
 * node part (host_id if given, else the primary MAC address, else the
 * zone hostid), a 4-byte timestamp, and the 16-bit generation counter.
 * Returns STMF_INVALID_ARG if ident_length is not 0x10.
 */
stmf_status_t
stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
    scsi_devid_desc_t *lu_id)
{
	uint8_t *p;
	struct timeval32 timestamp32;
	/*
	 * NOTE(review): aliasing the timeval through a uint32_t pointer;
	 * only the first 4 bytes (tv_sec) end up in the id.
	 */
	uint32_t *t = (uint32_t *)&timestamp32;
	struct ether_addr mac;
	uint8_t *e = (uint8_t *)&mac;
	int hid = (int)host_id;

	if (company_id == COMPANY_ID_NONE)
		company_id = COMPANY_ID_SUN;

	if (lu_id->ident_length != 0x10)
		return (STMF_INVALID_ARG);

	p = (uint8_t *)lu_id;

	atomic_add_16(&stmf_lu_id_gen_number, 1);

	/* Devid header, then NAA type 6 + 24-bit company id in p[4..7]. */
	p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
	p[4] = ((company_id >> 20) & 0xf) | 0x60;
	p[5] = (company_id >> 12) & 0xff;
	p[6] = (company_id >> 4) & 0xff;
	p[7] = (company_id << 4) & 0xf0;
	/* No host id and no MAC address: fall back to the zone hostid. */
	if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) {
		hid = BE_32((int)zone_get_hostid(NULL));
	}
	if (hid != 0) {
		e[0] = (hid >> 24) & 0xff;
		e[1] = (hid >> 16) & 0xff;
		e[2] = (hid >> 8) & 0xff;
		e[3] = hid & 0xff;
		e[4] = e[5] = 0;
	}
	bcopy(e, p+8, 6);
	uniqtime32(&timestamp32);
	*t = BE_32(*t);
	bcopy(t, p+14, 4);
	p[18] = (stmf_lu_id_gen_number >> 8) & 0xff;
	p[19] = stmf_lu_id_gen_number & 0xff;

	return (STMF_SUCCESS);
}
6119
6120 /*
6121 * saa is sense key, ASC, ASCQ
6122 */
6123 void
stmf_scsilib_send_status(scsi_task_t * task,uint8_t st,uint32_t saa)6124 stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa)
6125 {
6126 uint8_t sd[18];
6127 task->task_scsi_status = st;
6128 if (st == 2) {
6129 bzero(sd, 18);
6130 sd[0] = 0x70;
6131 sd[2] = (saa >> 16) & 0xf;
6132 sd[7] = 10;
6133 sd[12] = (saa >> 8) & 0xff;
6134 sd[13] = saa & 0xff;
6135 task->task_sense_data = sd;
6136 task->task_sense_length = 18;
6137 } else {
6138 task->task_sense_data = NULL;
6139 task->task_sense_length = 0;
6140 }
6141 (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
6142 }
6143
/*
 * Build an INQUIRY VPD page 0x83 (device identification) response in
 * "page", which has room for page_len bytes.  byte0 becomes the first
 * byte of the page (peripheral qualifier/device type).  vpd_mask selects
 * which identification descriptors to include (STMF_VPD_* bits).
 *
 * Bookkeeping in the loop below:
 *	m - total size of all selected descriptors (goes into the page
 *	    length field, even if not everything fit),
 *	n - bytes actually written to "page" (the return value),
 *	p/sz - the descriptor staged by the previous iteration, copied
 *	    at the top of the next iteration (possibly truncated).
 *
 * Returns the number of bytes placed in "page", or 0 if page_len < 4.
 */
uint32_t
stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page,
    uint32_t page_len, uint8_t byte0, uint32_t vpd_mask)
{
	uint8_t *p = NULL;
	uint8_t small_buf[32];
	uint32_t sz = 0;
	uint32_t n = 4;		/* write cursor; starts past the header */
	uint32_t m = 0;		/* total descriptor bytes (page length) */
	uint32_t last_bit = 0;	/* mask bit handled on the prior pass */

	if (page_len < 4)
		return (0);
	if (page_len > 65535)
		page_len = 65535;

	page[0] = byte0;
	page[1] = 0x83;		/* device identification VPD page */

	/* CONSTCOND */
	while (1) {
		/* Flush the descriptor staged by the previous iteration */
		m += sz;
		if (sz && (page_len > n)) {
			uint32_t copysz;
			copysz = page_len > (n + sz) ? sz : page_len - n;
			bcopy(p, page + n, copysz);
			n += copysz;
		}
		vpd_mask &= ~last_bit;
		if (vpd_mask == 0)
			break;

		if (vpd_mask & STMF_VPD_LU_ID) {
			last_bit = STMF_VPD_LU_ID;
			/* LU designator: reuse the registered lu_id */
			sz = task->task_lu->lu_id->ident_length + 4;
			p = (uint8_t *)task->task_lu->lu_id;
			continue;
		} else if (vpd_mask & STMF_VPD_TARGET_ID) {
			last_bit = STMF_VPD_TARGET_ID;
			/* Target port designator: reuse the lport_id */
			sz = task->task_lport->lport_id->ident_length + 4;
			p = (uint8_t *)task->task_lport->lport_id;
			continue;
		} else if (vpd_mask & STMF_VPD_TP_GROUP) {
			stmf_i_local_port_t *ilport;
			last_bit = STMF_VPD_TP_GROUP;
			p = small_buf;
			bzero(p, 8);
			p[0] = 1;
			p[1] = 0x15;	/* target port group designator */
			p[3] = 4;
			ilport = (stmf_i_local_port_t *)
			    task->task_lport->lport_stmf_private;
			/*
			 * Report target port group 1 when either:
			 * (a) ALUA is enabled and this is an ALUA
			 *     participating or standby port with a relative
			 *     target port id > 255, or
			 * (b) this is ALUA node 1 and the port is not a
			 *     standby port.
			 * All other ports report group 0.
			 */
			if ((stmf_state.stmf_alua_state &&
			    (ilport->ilport_alua || ilport->ilport_standby) &&
			    ilport->ilport_rtpid > 255) ||
			    (stmf_state.stmf_alua_node == 1 &&
			    ilport->ilport_standby != 1)) {
				p[7] = 1; /* Group 1 */
			}
			sz = 8;
			continue;
		} else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) {
			stmf_i_local_port_t *ilport;

			last_bit = STMF_VPD_RELATIVE_TP_ID;
			p = small_buf;
			bzero(p, 8);
			p[0] = 1;
			p[1] = 0x14;	/* relative target port designator */
			p[3] = 4;
			ilport = (stmf_i_local_port_t *)
			    task->task_lport->lport_stmf_private;
			p[6] = (ilport->ilport_rtpid >> 8) & 0xff;
			p[7] = ilport->ilport_rtpid & 0xff;
			sz = 8;
			continue;
		} else {
			cmn_err(CE_WARN, "Invalid vpd_mask");
			break;
		}
	}

	/* Page length reflects all selected descriptors, not just copied */
	page[2] = (m >> 8) & 0xff;
	page[3] = m & 0xff;

	return (n);
}
6238
/*
 * Default handling of the REPORT TARGET PORT GROUPS command on behalf of
 * an LU.  The response payload is built by stmf_prepare_tpgs_data() and
 * streamed to the remote port through dbuf (a preallocated buffer may be
 * passed in; one is allocated otherwise).
 */
void
stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu =
	    (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	stmf_xfer_data_t *xd;
	uint32_t sz, minsz;

	/* Route subsequent completion callbacks to the default handler */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING;
	/* Allocation length: CDB bytes 6-9, big-endian */
	task->task_cmd_xfer_length =
	    ((((uint32_t)task->task_cdb[6]) << 24) |
	    (((uint32_t)task->task_cdb[7]) << 16) |
	    (((uint32_t)task->task_cdb[8]) << 8) |
	    ((uint32_t)task->task_cdb[9]));

	if (task->task_additional_flags &
	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length =
		    task->task_cmd_xfer_length;
	}

	if (task->task_cmd_xfer_length == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	/* Must at least cover the 4-byte return data header */
	if (task->task_cmd_xfer_length < 4) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	sz = min(task->task_expected_xfer_length,
	    task->task_cmd_xfer_length);

	xd = stmf_prepare_tpgs_data(ilu->ilu_alua);

	if (xd == NULL) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return;
	}

	/* Truncate the response to what the initiator can take */
	sz = min(sz, xd->size_left);
	xd->size_left = sz;
	minsz = min(512, sz);

	if (dbuf == NULL)
		dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
	if (dbuf == NULL) {
		kmem_free(xd, xd->alloc_size);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return;
	}
	/* xd rides along in the dbuf; freed once fully transferred */
	dbuf->db_lu_private = xd;
	stmf_xd_to_dbuf(dbuf, 1);

	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);

}
6302
6303 void
stmf_scsilib_handle_task_mgmt(scsi_task_t * task)6304 stmf_scsilib_handle_task_mgmt(scsi_task_t *task)
6305 {
6306
6307 switch (task->task_mgmt_function) {
6308 /*
6309 * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET
6310 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state
6311 * in these cases. This needs to be changed to abort only the required
6312 * set.
6313 */
6314 case TM_ABORT_TASK:
6315 case TM_ABORT_TASK_SET:
6316 case TM_CLEAR_TASK_SET:
6317 case TM_LUN_RESET:
6318 stmf_handle_lun_reset(task);
6319 /* issue the reset to the proxy node as well */
6320 if (stmf_state.stmf_alua_state == 1) {
6321 (void) stmf_proxy_scsi_cmd(task, NULL);
6322 }
6323 return;
6324 case TM_TARGET_RESET:
6325 case TM_TARGET_COLD_RESET:
6326 case TM_TARGET_WARM_RESET:
6327 stmf_handle_target_reset(task);
6328 return;
6329 default:
6330 /* We dont support this task mgmt function */
6331 stmf_scsilib_send_status(task, STATUS_CHECK,
6332 STMF_SAA_INVALID_FIELD_IN_CMD_IU);
6333 return;
6334 }
6335 }
6336
/*
 * Framework handling of a LUN reset (also used for the abort-style TM
 * functions, see stmf_scsilib_handle_task_mgmt()).  Marks the LU as
 * reset-in-progress, aborts every other task on the LU, and then polls
 * until the aborts have drained.  If a reset is already active the task
 * is completed with OPERATION IN PROGRESS.
 */
void
stmf_handle_lun_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with target reset, grab this lock. The LU is not going
	 * anywhere as there is atleast one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * Mark this task as the one causing LU reset so that we know who
	 * was responsible for setting the ILU_RESET_ACTIVE. In case this
	 * task itself gets aborted, we will clear ILU_RESET_ACTIVE.
	 */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET;

	/* Initiatiate abort on all commands on this LU except this one */
	stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu);

	/* Start polling on this task; poll completion finishes the reset */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}
6379
/*
 * Framework handling of a target reset.  Marks the session as
 * reset-in-progress, verifies that no LU visible through the session
 * already has a reset active (failing with OPERATION IN PROGRESS if so),
 * then marks every mapped LU ILU_RESET_ACTIVE, aborts all their tasks,
 * and polls this task until the aborts drain.
 *
 * Lock order honored here: stmf_state_lock --> iss_lockp (see the
 * comment at the top of the file).
 */
void
stmf_handle_target_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	int i, lf;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with LUN reset, grab this lock. The session is not going
	 * anywhere as there is atleast one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	/* Grab the session lock as a writer to prevent any changes in it */
	rw_enter(iss->iss_lockp, RW_WRITER);

	if (iss->iss_flags & ISS_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE);

	/*
	 * Now go through each LUN in this session and make sure all of them
	 * can be reset.
	 */
	lm = iss->iss_sm;
	for (i = 0, lf = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lf++;	/* count of mapped LUNs */
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			/* Another reset is in flight; back out and fail */
			atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
			rw_exit(iss->iss_lockp);
			mutex_exit(&stmf_state.stmf_lock);
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_OPERATION_IN_PROGRESS);
			return;
		}
	}
	if (lf == 0) {
		/* No luns in this session */
		atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/* ok, start the damage */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING |
	    ITASK_CAUSING_TARGET_RESET;
	/* First mark every mapped LU as having a reset active ... */
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	}

	/* ... then abort everything queued on each of them */
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED,
		    lm_ent->ent_lu);
	}

	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	/* Start polling on this task; poll completion finishes the reset */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}
6471
6472 int
stmf_handle_cmd_during_ic(stmf_i_scsi_task_t * itask)6473 stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask)
6474 {
6475 scsi_task_t *task = itask->itask_task;
6476 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
6477 task->task_session->ss_stmf_private;
6478
6479 rw_enter(iss->iss_lockp, RW_WRITER);
6480 if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) ||
6481 (task->task_cdb[0] == SCMD_INQUIRY)) {
6482 rw_exit(iss->iss_lockp);
6483 return (0);
6484 }
6485 atomic_and_32(&iss->iss_flags,
6486 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
6487 rw_exit(iss->iss_lockp);
6488
6489 if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
6490 return (0);
6491 }
6492 stmf_scsilib_send_status(task, STATUS_CHECK,
6493 STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED);
6494 return (1);
6495 }
6496
6497 void
stmf_worker_init()6498 stmf_worker_init()
6499 {
6500 uint32_t i;
6501
6502 /* Make local copy of global tunables */
6503 stmf_i_max_nworkers = stmf_max_nworkers;
6504 stmf_i_min_nworkers = stmf_min_nworkers;
6505
6506 ASSERT(stmf_workers == NULL);
6507 if (stmf_i_min_nworkers < 4) {
6508 stmf_i_min_nworkers = 4;
6509 }
6510 if (stmf_i_max_nworkers < stmf_i_min_nworkers) {
6511 stmf_i_max_nworkers = stmf_i_min_nworkers;
6512 }
6513 stmf_workers = (stmf_worker_t *)kmem_zalloc(
6514 sizeof (stmf_worker_t) * stmf_i_max_nworkers, KM_SLEEP);
6515 for (i = 0; i < stmf_i_max_nworkers; i++) {
6516 stmf_worker_t *w = &stmf_workers[i];
6517 mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL);
6518 cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL);
6519 }
6520 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
6521 stmf_workers_state = STMF_WORKERS_ENABLED;
6522
6523 /* Workers will be started by stmf_worker_mgmt() */
6524
6525 /* Lets wait for atleast one worker to start */
6526 while (stmf_nworkers_cur == 0)
6527 delay(drv_usectohz(20 * 1000));
6528 stmf_worker_mgmt_delay = drv_usectohz(3 * 1000 * 1000);
6529 }
6530
6531 stmf_status_t
stmf_worker_fini()6532 stmf_worker_fini()
6533 {
6534 int i;
6535 clock_t sb;
6536
6537 if (stmf_workers_state == STMF_WORKERS_DISABLED)
6538 return (STMF_SUCCESS);
6539 ASSERT(stmf_workers);
6540 stmf_workers_state = STMF_WORKERS_DISABLED;
6541 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
6542 cv_signal(&stmf_state.stmf_cv);
6543
6544 sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000);
6545 /* Wait for all the threads to die */
6546 while (stmf_nworkers_cur != 0) {
6547 if (ddi_get_lbolt() > sb) {
6548 stmf_workers_state = STMF_WORKERS_ENABLED;
6549 return (STMF_BUSY);
6550 }
6551 delay(drv_usectohz(100 * 1000));
6552 }
6553 for (i = 0; i < stmf_i_max_nworkers; i++) {
6554 stmf_worker_t *w = &stmf_workers[i];
6555 mutex_destroy(&w->worker_lock);
6556 cv_destroy(&w->worker_cv);
6557 }
6558 kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_i_max_nworkers);
6559 stmf_workers = NULL;
6560
6561 return (STMF_SUCCESS);
6562 }
6563
/*
 * Body of each worker thread.  The worker pulls itasks from its task
 * queue and dispatches the top entry of each task's command stack to the
 * LU (or, for lport polls, to the local port).  Tasks that must be
 * re-examined later -- polls whose timeout has not expired, and aborts
 * still known to the LU/lport -- are parked on a wait queue and moved
 * back onto the task queue when the wait timer fires.  The thread exits
 * once STMF_WORKER_TERMINATE is set and its reference count drains to 0.
 *
 * worker_lock is held throughout except across the provider callbacks.
 */
void
stmf_worker_task(void *arg)
{
	stmf_worker_t *w;
	stmf_i_scsi_session_t *iss;
	scsi_task_t *task;
	stmf_i_scsi_task_t *itask;
	stmf_data_buf_t *dbuf;
	stmf_lu_t *lu;
	clock_t wait_timer = 0;
	clock_t wait_ticks, wait_delta = 0;
	uint32_t old, new;
	uint8_t curcmd;
	uint8_t abort_free;
	uint8_t wait_queue;
	uint8_t dec_qdepth;

	w = (stmf_worker_t *)arg;
	wait_ticks = drv_usectohz(10000);

	DTRACE_PROBE1(worker__create, stmf_worker_t, w);
	mutex_enter(&w->worker_lock);
	w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE;
stmf_worker_loop:;
	/* Exit only when told to terminate and nothing references us */
	if ((w->worker_ref_count == 0) &&
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		w->worker_flags &= ~(STMF_WORKER_STARTED |
		    STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE);
		w->worker_tid = NULL;
		mutex_exit(&w->worker_lock);
		DTRACE_PROBE1(worker__destroy, stmf_worker_t, w);
		thread_exit();
	}
	/* CONSTCOND */
	while (1) {
		dec_qdepth = 0;
		/* Wait timer fired: move parked tasks back to the queue */
		if (wait_timer && (ddi_get_lbolt() >= wait_timer)) {
			wait_timer = 0;
			wait_delta = 0;
			if (w->worker_wait_head) {
				ASSERT(w->worker_wait_tail);
				if (w->worker_task_head == NULL)
					w->worker_task_head =
					    w->worker_wait_head;
				else
					w->worker_task_tail->itask_worker_next =
					    w->worker_wait_head;
				w->worker_task_tail = w->worker_wait_tail;
				w->worker_wait_head = w->worker_wait_tail =
				    NULL;
			}
		}
		/* Nothing queued: break out and go to sleep */
		if ((itask = w->worker_task_head) == NULL) {
			break;
		}
		task = itask->itask_task;
		DTRACE_PROBE2(worker__active, stmf_worker_t, w,
		    scsi_task_t *, task);
		w->worker_task_head = itask->itask_worker_next;
		if (w->worker_task_head == NULL)
			w->worker_task_tail = NULL;

		wait_queue = 0;
		abort_free = 0;
		/* Peek at the top of the task's command stack */
		if (itask->itask_ncmds > 0) {
			curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1];
		} else {
			/* Empty stack only happens for a pending abort */
			ASSERT(itask->itask_flags & ITASK_BEING_ABORTED);
		}
		/*
		 * CAS loop on itask_flags.  A pending abort overrides
		 * whatever command was on the stack; a NEW_TASK gets the
		 * known-to-LU and kstat-runq flags set atomically.
		 */
		do {
			old = itask->itask_flags;
			if (old & ITASK_BEING_ABORTED) {
				itask->itask_ncmds = 1;
				curcmd = itask->itask_cmd_stack[0] =
				    ITASK_CMD_ABORT;
				goto out_itask_flag_loop;
			} else if ((curcmd & ITASK_CMD_MASK) ==
			    ITASK_CMD_NEW_TASK) {
				/*
				 * set ITASK_KSTAT_IN_RUNQ, this flag
				 * will not reset until task completed
				 */
				new = old | ITASK_KNOWN_TO_LU |
				    ITASK_KSTAT_IN_RUNQ;
			} else {
				goto out_itask_flag_loop;
			}
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

out_itask_flag_loop:

		/*
		 * Decide if this task needs to go to a queue and/or if
		 * we can decrement the itask_cmd_stack.
		 */
		if (curcmd == ITASK_CMD_ABORT) {
			if (itask->itask_flags & (ITASK_KNOWN_TO_LU |
			    ITASK_KNOWN_TO_TGT_PORT)) {
				/* Providers still hold it; retry later */
				wait_queue = 1;
			} else {
				/* Nobody else knows the task; free it */
				abort_free = 1;
			}
		} else if ((curcmd & ITASK_CMD_POLL) &&
		    (itask->itask_poll_timeout > ddi_get_lbolt())) {
			/* Poll interval not yet reached; park the task */
			wait_queue = 1;
		}

		if (wait_queue) {
			/* Append to the wait queue and (re)arm the timer */
			itask->itask_worker_next = NULL;
			if (w->worker_wait_tail) {
				w->worker_wait_tail->itask_worker_next = itask;
			} else {
				w->worker_wait_head = itask;
			}
			w->worker_wait_tail = itask;
			if (wait_timer == 0) {
				wait_timer = ddi_get_lbolt() + wait_ticks;
				wait_delta = wait_ticks;
			}
		} else if ((--(itask->itask_ncmds)) != 0) {
			/* More commands stacked: requeue at the tail */
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
		} else {
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_IN_WORKER_QUEUE);
			/*
			 * This is where the queue depth should go down by
			 * one but we delay that on purpose to account for
			 * the call into the provider. The actual decrement
			 * happens after the worker has done its job.
			 */
			dec_qdepth = 1;
			itask->itask_waitq_time +=
			    gethrtime() - itask->itask_waitq_enter_timestamp;
		}

		/* We made it here means we are going to call LU */
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0)
			lu = task->task_lu;
		else
			lu = dlun0;	/* default framework handler */
		dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)];
		/* Drop the lock across the provider callback */
		mutex_exit(&w->worker_lock);
		curcmd &= ITASK_CMD_MASK;
		stmf_task_audit(itask, TE_PROCESS_CMD, curcmd, dbuf);
		switch (curcmd) {
		case ITASK_CMD_NEW_TASK:
			iss = (stmf_i_scsi_session_t *)
			    task->task_session->ss_stmf_private;
			stmf_itl_lu_new_task(itask);
			if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) {
				if (stmf_handle_cmd_during_ic(itask))
					break;
			}
#ifdef DEBUG
			/* Fault injection: silently drop every Nth task */
			if (stmf_drop_task_counter > 0) {
				if (atomic_add_32_nv(
				    (uint32_t *)&stmf_drop_task_counter,
				    -1) == 1) {
					break;
				}
			}
#endif
			DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task);
			lu->lu_new_task(task, dbuf);
			break;
		case ITASK_CMD_DATA_XFER_DONE:
			lu->lu_dbuf_xfer_done(task, dbuf);
			break;
		case ITASK_CMD_STATUS_DONE:
			lu->lu_send_status_done(task);
			break;
		case ITASK_CMD_ABORT:
			if (abort_free) {
				stmf_task_free(task);
			} else {
				stmf_do_task_abort(task);
			}
			break;
		case ITASK_CMD_POLL_LU:
			if (!wait_queue) {
				lu->lu_task_poll(task);
			}
			break;
		case ITASK_CMD_POLL_LPORT:
			if (!wait_queue)
				task->task_lport->lport_task_poll(task);
			break;
		case ITASK_CMD_SEND_STATUS:
		/* case ITASK_CMD_XFER_DATA: */
			break;
		}
		mutex_enter(&w->worker_lock);
		if (dec_qdepth) {
			w->worker_queue_depth--;
		}
	}
	/* Terminating: keep waking up briefly until refs drain */
	if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) {
		if (w->worker_ref_count == 0)
			goto stmf_worker_loop;
		else {
			wait_timer = ddi_get_lbolt() + 1;
			wait_delta = 1;
		}
	}
	w->worker_flags &= ~STMF_WORKER_ACTIVE;
	if (wait_timer) {
		DTRACE_PROBE1(worker__timed__sleep, stmf_worker_t, w);
		(void) cv_reltimedwait(&w->worker_cv, &w->worker_lock,
		    wait_delta, TR_CLOCK_TICK);
	} else {
		DTRACE_PROBE1(worker__sleep, stmf_worker_t, w);
		cv_wait(&w->worker_cv, &w->worker_lock);
	}
	DTRACE_PROBE1(worker__wakeup, stmf_worker_t, w);
	w->worker_flags |= STMF_WORKER_ACTIVE;
	goto stmf_worker_loop;
}
6787
/*
 * Periodic worker pool manager.  Completes in-flight pool size
 * transitions, then decides -- based on the per-worker queue depths
 * observed since the last sampling interval -- whether the pool should
 * grow or shrink, keeping it between stmf_i_min_nworkers and
 * stmf_i_max_nworkers.  Shrinking is deliberately delayed: the load must
 * stay low for stmf_worker_scale_down_delay seconds before workers are
 * asked to terminate.
 */
void
stmf_worker_mgmt()
{
	int i;
	int workers_needed;
	uint32_t qd;
	clock_t tps, d = 0;
	uint32_t cur_max_ntasks = 0;
	stmf_worker_t *w;

	/* Check if we are trying to increase the # of threads */
	for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) {
		if (stmf_workers[i].worker_flags & STMF_WORKER_STARTED) {
			stmf_nworkers_cur++;
			stmf_nworkers_accepting_cmds++;
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are trying to decrease the # of workers */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		if ((stmf_workers[i].worker_flags & STMF_WORKER_STARTED) == 0) {
			stmf_nworkers_cur--;
			/*
			 * stmf_nworkers_accepting_cmds has already been
			 * updated by the request to reduce the # of workers.
			 */
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are being asked to quit */
	if (stmf_workers_state != STMF_WORKERS_ENABLED) {
		if (stmf_nworkers_cur) {
			workers_needed = 0;
			goto worker_mgmt_trigger_change;
		}
		return;
	}
	/* Check if we are starting */
	if (stmf_nworkers_cur < stmf_i_min_nworkers) {
		workers_needed = stmf_i_min_nworkers;
		goto worker_mgmt_trigger_change;
	}

	/* Sample queue depths once per second (tps = ticks per second) */
	tps = drv_usectohz(1 * 1000 * 1000);
	if ((stmf_wm_last != 0) &&
	    ((d = ddi_get_lbolt() - stmf_wm_last) > tps)) {
		qd = 0;
		for (i = 0; i < stmf_nworkers_accepting_cmds; i++) {
			qd += stmf_workers[i].worker_max_qdepth_pu;
			stmf_workers[i].worker_max_qdepth_pu = 0;
			if (stmf_workers[i].worker_max_sys_qdepth_pu >
			    cur_max_ntasks) {
				cur_max_ntasks =
				    stmf_workers[i].worker_max_sys_qdepth_pu;
			}
			stmf_workers[i].worker_max_sys_qdepth_pu = 0;
		}
	}
	stmf_wm_last = ddi_get_lbolt();
	if (d <= tps) {
		/* still ramping up */
		return;
	}
	/*
	 * Past this point d > tps, so the sampling block above ran and
	 * qd is initialized.
	 */
	/* max qdepth cannot be more than max tasks */
	if (qd > cur_max_ntasks)
		qd = cur_max_ntasks;

	/* See if we have more workers */
	if (qd < stmf_nworkers_accepting_cmds) {
		/*
		 * Since we dont reduce the worker count right away, monitor
		 * the highest load during the scale_down_delay.
		 */
		if (qd > stmf_worker_scale_down_qd)
			stmf_worker_scale_down_qd = qd;
		if (stmf_worker_scale_down_timer == 0) {
			/* Arm the scale-down grace timer */
			stmf_worker_scale_down_timer = ddi_get_lbolt() +
			    drv_usectohz(stmf_worker_scale_down_delay *
			    1000 * 1000);
			return;
		}
		if (ddi_get_lbolt() < stmf_worker_scale_down_timer) {
			return;
		}
		/* Its time to reduce the workers */
		if (stmf_worker_scale_down_qd < stmf_i_min_nworkers)
			stmf_worker_scale_down_qd = stmf_i_min_nworkers;
		if (stmf_worker_scale_down_qd > stmf_i_max_nworkers)
			stmf_worker_scale_down_qd = stmf_i_max_nworkers;
		if (stmf_worker_scale_down_qd == stmf_nworkers_cur)
			return;
		workers_needed = stmf_worker_scale_down_qd;
		stmf_worker_scale_down_qd = 0;
		goto worker_mgmt_trigger_change;
	}
	/* Load is at or above pool size: cancel any pending scale-down */
	stmf_worker_scale_down_qd = 0;
	stmf_worker_scale_down_timer = 0;
	/* Grow to the observed demand, clamped to [min, max] */
	if (qd > stmf_i_max_nworkers)
		qd = stmf_i_max_nworkers;
	if (qd < stmf_i_min_nworkers)
		qd = stmf_i_min_nworkers;
	if (qd == stmf_nworkers_cur)
		return;
	workers_needed = qd;
	goto worker_mgmt_trigger_change;

	/* NOTREACHED */
	return;

worker_mgmt_trigger_change:
	ASSERT(workers_needed != stmf_nworkers_cur);
	if (workers_needed > stmf_nworkers_cur) {
		stmf_nworkers_needed = workers_needed;
		for (i = stmf_nworkers_cur; i < workers_needed; i++) {
			w = &stmf_workers[i];
			w->worker_tid = thread_create(NULL, 0, stmf_worker_task,
			    (void *)&stmf_workers[i], 0, &p0, TS_RUN,
			    minclsyspri);
		}
		return;
	}
	/* At this point we know that we are decreasing the # of workers */
	stmf_nworkers_accepting_cmds = workers_needed;
	stmf_nworkers_needed = workers_needed;
	/* Signal the workers that its time to quit */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		w = &stmf_workers[i];
		ASSERT(w && (w->worker_flags & STMF_WORKER_STARTED));
		mutex_enter(&w->worker_lock);
		w->worker_flags |= STMF_WORKER_TERMINATE;
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
		mutex_exit(&w->worker_lock);
	}
}
6927
6928 /*
6929 * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private).
6930 * If all the data has been filled out, frees the xd and makes
6931 * db_lu_private NULL.
6932 */
6933 void
stmf_xd_to_dbuf(stmf_data_buf_t * dbuf,int set_rel_off)6934 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off)
6935 {
6936 stmf_xfer_data_t *xd;
6937 uint8_t *p;
6938 int i;
6939 uint32_t s;
6940
6941 xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
6942 dbuf->db_data_size = 0;
6943 if (set_rel_off)
6944 dbuf->db_relative_offset = xd->size_done;
6945 for (i = 0; i < dbuf->db_sglist_length; i++) {
6946 s = min(xd->size_left, dbuf->db_sglist[i].seg_length);
6947 p = &xd->buf[xd->size_done];
6948 bcopy(p, dbuf->db_sglist[i].seg_addr, s);
6949 xd->size_left -= s;
6950 xd->size_done += s;
6951 dbuf->db_data_size += s;
6952 if (xd->size_left == 0) {
6953 kmem_free(xd, xd->alloc_size);
6954 dbuf->db_lu_private = NULL;
6955 return;
6956 }
6957 }
6958 }
6959
/*
 * lu_task_alloc entry point for dlun0 (the framework's default LU).
 * dlun0 keeps no per-task state, so this always succeeds.
 */
/* ARGSUSED */
stmf_status_t
stmf_dlun0_task_alloc(scsi_task_t *task)
{
	return (STMF_SUCCESS);
}
6966
/*
 * lu_new_task entry point for dlun0.  Handles the commands the framework
 * answers on behalf of LUs: task management functions, standard INQUIRY
 * and REPORT LUNS.  Anything else is failed with INVALID OPCODE.
 */
void
stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
	stmf_i_scsi_session_t *iss;
	uint32_t sz, minsz;
	uint8_t *p;
	stmf_xfer_data_t *xd;
	/* additional length field: total standard inquiry is 36 bytes */
	uint8_t inq_page_length = 31;

	if (task->task_mgmt_function) {
		stmf_scsilib_handle_task_mgmt(task);
		return;
	}

	switch (cdbp[0]) {
	case SCMD_INQUIRY:
		/*
		 * Basic protocol checks. In addition, only reply to
		 * standard inquiry. Otherwise, the LU provider needs
		 * to respond.
		 */

		/* Reject EVPD/page-code/control-byte variants */
		if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		/* Allocation length: CDB bytes 3-4, big-endian */
		task->task_cmd_xfer_length =
		    (((uint32_t)cdbp[3]) << 8) | cdbp[4];

		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length =
			    task->task_cmd_xfer_length;
		}

		sz = min(task->task_expected_xfer_length,
		    min(36, task->task_cmd_xfer_length));
		minsz = 36;

		if (sz == 0) {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			return;
		}

		if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) {
			/*
			 * Ignore any preallocated dbuf if the size is less
			 * than 36. It will be freed during the task_free.
			 */
			dbuf = NULL;
		}
		if (dbuf == NULL)
			dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0);
		if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_lu_private = NULL;

		p = dbuf->db_sglist[0].seg_addr;

		/*
		 * Standard inquiry handling only.
		 */

		bzero(p, inq_page_length + 5);

		p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN;
		p[2] = 5;	/* version -- presumably SPC-3; confirm */
		p[3] = 0x12;	/* NOTE(review): response format bits */
		p[4] = inq_page_length;
		p[6] = 0x80;	/* NOTE(review): capability bit; confirm */

		/*
		 * Fixed-width, space-padded vendor/product/revision
		 * fields; strncpy is deliberate -- no NUL terminators.
		 */
		(void) strncpy((char *)p+8, "SUN     ", 8);
		(void) strncpy((char *)p+16, "COMSTAR	       ", 16);
		(void) strncpy((char *)p+32, "1.0 ", 4);

		dbuf->db_data_size = sz;
		dbuf->db_relative_offset = 0;
		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);

		return;

	case SCMD_REPORT_LUNS:
		/* Allocation length: CDB bytes 6-9, big-endian */
		task->task_cmd_xfer_length =
		    ((((uint32_t)task->task_cdb[6]) << 24) |
		    (((uint32_t)task->task_cdb[7]) << 16) |
		    (((uint32_t)task->task_cdb[8]) << 8) |
		    ((uint32_t)task->task_cdb[9]));

		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length =
			    task->task_cmd_xfer_length;
		}

		sz = min(task->task_expected_xfer_length,
		    task->task_cmd_xfer_length);

		if (sz < 16) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		/* Snapshot the session LUN map under the session lock */
		iss = (stmf_i_scsi_session_t *)
		    task->task_session->ss_stmf_private;
		rw_enter(iss->iss_lockp, RW_WRITER);
		xd = stmf_session_prepare_report_lun_data(iss->iss_sm);
		rw_exit(iss->iss_lockp);

		if (xd == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}

		sz = min(sz, xd->size_left);
		xd->size_left = sz;
		minsz = min(512, sz);

		if (dbuf == NULL)
			dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
		if (dbuf == NULL) {
			kmem_free(xd, xd->alloc_size);
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_lu_private = xd;
		stmf_xd_to_dbuf(dbuf, 1);

		/* Reporting LUNs acknowledges any inventory change */
		atomic_and_32(&iss->iss_flags,
		    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}

	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
}
7113
/*
 * lu_dbuf_xfer_done entry point for dlun0.  Continues a multi-dbuf
 * transfer if more data is pending (db_lu_private still set), otherwise
 * completes the task -- either locally with GOOD status or, for proxy
 * tasks, by notifying pppt and letting it deliver the final status.
 */
void
stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred += dbuf->db_data_size;
	if (dbuf->db_lu_private) {
		/* There is more */
		stmf_xd_to_dbuf(dbuf, 1);
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}

	stmf_free_dbuf(task, dbuf);
	/*
	 * If this is a proxy task, it will need to be completed from the
	 * proxy port provider. This message lets pppt know that the xfer
	 * is complete. When we receive the status from pppt, we will
	 * then relay that status back to the lport.
	 */
	if (itask->itask_flags & ITASK_PROXY_TASK) {
		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
		stmf_status_t ic_ret = STMF_FAILURE;
		uint64_t session_msg_id;
		/* stmf_lock serializes allocation of proxy message ids */
		mutex_enter(&stmf_state.stmf_lock);
		session_msg_id = stmf_proxy_msg_id++;
		mutex_exit(&stmf_state.stmf_lock);
		/* send xfer done status to pppt */
		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
		    itask->itask_proxy_msg_id,
		    task->task_session->ss_session_id,
		    STMF_SUCCESS, session_msg_id);
		if (ic_xfer_done_msg) {
			ic_ret = ic_tx_msg(ic_xfer_done_msg);
			if (ic_ret != STMF_IC_MSG_SUCCESS) {
				cmn_err(CE_WARN, "unable to xmit session msg");
			}
		}
		/* task will be completed from pppt */
		return;
	}
	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
7163
/*
 * lu_send_status_done entry point for dlun0: nothing to do, status
 * completion needs no follow-up for the default LU.
 */
/* ARGSUSED */
void
stmf_dlun0_status_done(scsi_task_t *task)
{
}
7169
/*
 * lu_task_free entry point for dlun0: no per-task resources were
 * allocated (see stmf_dlun0_task_alloc()), so nothing to release.
 */
/* ARGSUSED */
void
stmf_dlun0_task_free(scsi_task_t *task)
{
}
7175
/*
 * lu_abort entry point for dlun0.  If the aborted task is a TM function
 * that initiated a reset, the corresponding RESET_ACTIVE state is rolled
 * back.  For regular tasks, any stmf_xfer_data_t still attached to the
 * task's dbufs is freed.  Always reports STMF_ABORT_SUCCESS.
 */
/* ARGSUSED */
stmf_status_t
stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
{
	scsi_task_t *task = (scsi_task_t *)arg;
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int i;
	uint8_t map;

	/* A reset-causing TM task being aborted must undo its reset state */
	if ((task->task_mgmt_function) && (itask->itask_flags &
	    (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) {
		switch (task->task_mgmt_function) {
		case TM_ABORT_TASK:
		case TM_ABORT_TASK_SET:
		case TM_CLEAR_TASK_SET:
		case TM_LUN_RESET:
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
			break;
		case TM_TARGET_RESET:
		case TM_TARGET_COLD_RESET:
		case TM_TARGET_WARM_RESET:
			stmf_abort_target_reset(task);
			break;
		}
		return (STMF_ABORT_SUCCESS);
	}

	/*
	 * OK so its not a task mgmt. Make sure we free any xd sitting
	 * inside any dbuf.
	 */
	if ((map = itask->itask_allocated_buf_map) != 0) {
		/* One bit per possible dbuf slot */
		for (i = 0; i < 4; i++) {
			if ((map & 1) &&
			    ((itask->itask_dbufs[i])->db_lu_private)) {
				stmf_xfer_data_t *xd;
				stmf_data_buf_t *dbuf;

				dbuf = itask->itask_dbufs[i];
				xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
				dbuf->db_lu_private = NULL;
				kmem_free(xd, xd->alloc_size);
			}
			map >>= 1;
		}
	}
	return (STMF_ABORT_SUCCESS);
}
7226
7227 void
stmf_dlun0_task_poll(struct scsi_task * task)7228 stmf_dlun0_task_poll(struct scsi_task *task)
7229 {
7230 /* Right now we only do this for handling task management functions */
7231 ASSERT(task->task_mgmt_function);
7232
7233 switch (task->task_mgmt_function) {
7234 case TM_ABORT_TASK:
7235 case TM_ABORT_TASK_SET:
7236 case TM_CLEAR_TASK_SET:
7237 case TM_LUN_RESET:
7238 (void) stmf_lun_reset_poll(task->task_lu, task, 0);
7239 return;
7240 case TM_TARGET_RESET:
7241 case TM_TARGET_COLD_RESET:
7242 case TM_TARGET_WARM_RESET:
7243 stmf_target_reset_poll(task);
7244 return;
7245 }
7246 }
7247
/* ARGSUSED */
void
stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg)
{
	/* This function will never be called */
	/* dlun0 has no online/offline states; warn if we ever land here. */
	cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd);
}
7255
void
stmf_dlun_init()
{
	stmf_i_lu_t *ilu;

	/*
	 * Build dlun0, the framework's internal pseudo-LU, and wire up all
	 * of its entry points to the stmf_dlun0_* handlers above.
	 * NOTE(review): the stmf_alloc() result is dereferenced without a
	 * NULL check — presumably it cannot fail in this context during
	 * module init; confirm against stmf_alloc()'s contract.
	 */
	dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0);
	dlun0->lu_task_alloc = stmf_dlun0_task_alloc;
	dlun0->lu_new_task = stmf_dlun0_new_task;
	dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done;
	dlun0->lu_send_status_done = stmf_dlun0_status_done;
	dlun0->lu_task_free = stmf_dlun0_task_free;
	dlun0->lu_abort = stmf_dlun0_abort;
	dlun0->lu_task_poll = stmf_dlun0_task_poll;
	dlun0->lu_ctl = stmf_dlun0_ctl;

	/* Start task counting on counter 1 (counters alternate later). */
	ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
}
7274
stmf_status_t
stmf_dlun_fini()
{
	stmf_i_lu_t *ilu;

	ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;

	/* No task may still be outstanding at teardown time. */
	ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
	if (ilu->ilu_ntasks) {
		stmf_i_scsi_task_t *itask, *nitask;

		/* Walk the LU's cached-task list, freeing every task. */
		nitask = ilu->ilu_tasks;
		do {
			itask = nitask;
			nitask = itask->itask_lu_next;
			dlun0->lu_task_free(itask->itask_task);
			stmf_free(itask->itask_task);
		} while (nitask != NULL);

	}
	stmf_free(dlun0);
	return (STMF_SUCCESS);
}
7298
void
stmf_abort_target_reset(scsi_task_t *task)
{
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	stmf_i_lu_t *ilu;
	int i;

	/*
	 * A target reset marked every LU mapped into this session with
	 * ILU_RESET_ACTIVE.  On abort, clear that flag on each mapped LU,
	 * then clear the session's own reset-active flag.
	 */
	rw_enter(iss->iss_lockp, RW_READER);
	lm = iss->iss_sm;
	for (i = 0; i < lm->lm_nentries; i++) {
		/* LUN map entries may be sparse. */
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
		}
	}
	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
	rw_exit(iss->iss_lockp);
}
7323
7324 /*
7325 * The return value is only used by function managing target reset.
7326 */
7327 stmf_status_t
stmf_lun_reset_poll(stmf_lu_t * lu,struct scsi_task * task,int target_reset)7328 stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset)
7329 {
7330 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7331 int ntasks_pending;
7332
7333 ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free;
7334 /*
7335 * This function is also used during Target reset. The idea is that
7336 * once all the commands are aborted, call the LU's reset entry
7337 * point (abort entry point with a reset flag). But if this Task
7338 * mgmt is running on this LU then all the tasks cannot be aborted.
7339 * one task (this task) will still be running which is OK.
7340 */
7341 if ((ntasks_pending == 0) || ((task->task_lu == lu) &&
7342 (ntasks_pending == 1))) {
7343 stmf_status_t ret;
7344
7345 if ((task->task_mgmt_function == TM_LUN_RESET) ||
7346 (task->task_mgmt_function == TM_TARGET_RESET) ||
7347 (task->task_mgmt_function == TM_TARGET_WARM_RESET) ||
7348 (task->task_mgmt_function == TM_TARGET_COLD_RESET)) {
7349 ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0);
7350 } else {
7351 ret = STMF_SUCCESS;
7352 }
7353 if (ret == STMF_SUCCESS) {
7354 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7355 }
7356 if (target_reset) {
7357 return (ret);
7358 }
7359 if (ret == STMF_SUCCESS) {
7360 stmf_scsilib_send_status(task, STATUS_GOOD, 0);
7361 return (ret);
7362 }
7363 if (ret != STMF_BUSY) {
7364 stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL);
7365 return (ret);
7366 }
7367 }
7368
7369 if (target_reset) {
7370 /* Tell target reset polling code that we are not done */
7371 return (STMF_BUSY);
7372 }
7373
7374 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
7375 != STMF_SUCCESS) {
7376 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7377 STMF_ALLOC_FAILURE, NULL);
7378 return (STMF_SUCCESS);
7379 }
7380
7381 return (STMF_SUCCESS);
7382 }
7383
void
stmf_target_reset_poll(struct scsi_task *task)
{
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	stmf_i_lu_t *ilu;
	stmf_status_t ret;
	int i;
	int not_done = 0;

	ASSERT(iss->iss_flags & ISS_RESET_ACTIVE);

	/*
	 * Poll every LU in the session that still has a reset in progress.
	 * The session lock is dropped around stmf_lun_reset_poll() because
	 * it may call into the LU (lock order: iss_lockp before LU locks).
	 */
	rw_enter(iss->iss_lockp, RW_READER);
	lm = iss->iss_sm;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			rw_exit(iss->iss_lockp);
			ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1);
			rw_enter(iss->iss_lockp, RW_READER);
			if (ret == STMF_SUCCESS)
				continue;
			not_done = 1;
			/* Anything other than BUSY is a hard failure. */
			if (ret != STMF_BUSY) {
				rw_exit(iss->iss_lockp);
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ABORTED, NULL);
				return;
			}
		}
	}
	rw_exit(iss->iss_lockp);

	/* Some LUs still busy: re-arm the poll and try again later. */
	if (not_done) {
		if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
		    != STMF_SUCCESS) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		return;
	}

	/* All LUs done: the target reset is complete. */
	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);

	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
7436
7437 stmf_status_t
stmf_lu_add_event(stmf_lu_t * lu,int eventid)7438 stmf_lu_add_event(stmf_lu_t *lu, int eventid)
7439 {
7440 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7441
7442 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7443 return (STMF_INVALID_ARG);
7444 }
7445
7446 STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid);
7447 return (STMF_SUCCESS);
7448 }
7449
7450 stmf_status_t
stmf_lu_remove_event(stmf_lu_t * lu,int eventid)7451 stmf_lu_remove_event(stmf_lu_t *lu, int eventid)
7452 {
7453 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7454
7455 if (eventid == STMF_EVENT_ALL) {
7456 STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl);
7457 return (STMF_SUCCESS);
7458 }
7459
7460 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7461 return (STMF_INVALID_ARG);
7462 }
7463
7464 STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid);
7465 return (STMF_SUCCESS);
7466 }
7467
7468 stmf_status_t
stmf_lport_add_event(stmf_local_port_t * lport,int eventid)7469 stmf_lport_add_event(stmf_local_port_t *lport, int eventid)
7470 {
7471 stmf_i_local_port_t *ilport =
7472 (stmf_i_local_port_t *)lport->lport_stmf_private;
7473
7474 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7475 return (STMF_INVALID_ARG);
7476 }
7477
7478 STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid);
7479 return (STMF_SUCCESS);
7480 }
7481
7482 stmf_status_t
stmf_lport_remove_event(stmf_local_port_t * lport,int eventid)7483 stmf_lport_remove_event(stmf_local_port_t *lport, int eventid)
7484 {
7485 stmf_i_local_port_t *ilport =
7486 (stmf_i_local_port_t *)lport->lport_stmf_private;
7487
7488 if (eventid == STMF_EVENT_ALL) {
7489 STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl);
7490 return (STMF_SUCCESS);
7491 }
7492
7493 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7494 return (STMF_INVALID_ARG);
7495 }
7496
7497 STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid);
7498 return (STMF_SUCCESS);
7499 }
7500
7501 void
stmf_generate_lu_event(stmf_i_lu_t * ilu,int eventid,void * arg,uint32_t flags)7502 stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags)
7503 {
7504 if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) &&
7505 (ilu->ilu_lu->lu_event_handler != NULL)) {
7506 ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags);
7507 }
7508 }
7509
7510 void
stmf_generate_lport_event(stmf_i_local_port_t * ilport,int eventid,void * arg,uint32_t flags)7511 stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg,
7512 uint32_t flags)
7513 {
7514 if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) &&
7515 (ilport->ilport_lport->lport_event_handler != NULL)) {
7516 ilport->ilport_lport->lport_event_handler(
7517 ilport->ilport_lport, eventid, arg, flags);
7518 }
7519 }
7520
7521 /*
7522 * With the possibility of having multiple itl sessions pointing to the
7523 * same itl_kstat_info, the ilu_kstat_lock mutex is used to synchronize
7524 * the kstat update of the ilu_kstat_io, itl_kstat_taskq and itl_kstat_lu_xfer
7525 * statistics.
7526 */
7527 void
stmf_itl_task_start(stmf_i_scsi_task_t * itask)7528 stmf_itl_task_start(stmf_i_scsi_task_t *itask)
7529 {
7530 stmf_itl_data_t *itl = itask->itask_itl_datap;
7531 scsi_task_t *task = itask->itask_task;
7532 stmf_i_lu_t *ilu;
7533
7534 if (itl == NULL || task->task_lu == dlun0)
7535 return;
7536 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7537 mutex_enter(ilu->ilu_kstat_io->ks_lock);
7538 itask->itask_start_timestamp = gethrtime();
7539 kstat_waitq_enter(KSTAT_IO_PTR(itl->itl_kstat_taskq));
7540 stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_enter);
7541 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7542
7543 stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_enter);
7544 }
7545
void
stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task = itask->itask_task;
	stmf_i_lu_t *ilu;

	/* No stats for tasks without ITL data or for the internal dlun0. */
	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	/* Task left the wait queue and started running: waitq -> runq. */
	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	kstat_waitq_to_runq(KSTAT_IO_PTR(itl->itl_kstat_taskq));
	stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_to_runq);
	mutex_exit(ilu->ilu_kstat_io->ks_lock);

	stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_to_runq);
}
7563
void
stmf_itl_task_done(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task = itask->itask_task;
	kstat_io_t *kip;
	hrtime_t elapsed_time;
	stmf_kstat_itl_info_t *itli;
	stmf_i_lu_t *ilu;

	/* No stats for tasks without ITL data or for the internal dlun0. */
	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	itli = (stmf_kstat_itl_info_t *)KSTAT_NAMED_PTR(itl->itl_kstat_info);
	kip = KSTAT_IO_PTR(itl->itl_kstat_taskq);

	itli->i_task_waitq_elapsed.value.ui64 += itask->itask_waitq_time;

	/* Total wall time from task start to completion. */
	itask->itask_done_timestamp = gethrtime();
	elapsed_time =
	    itask->itask_done_timestamp - itask->itask_start_timestamp;

	/* Fold the task's read-side byte counts and timings into kstats. */
	if (task->task_flags & TF_READ_DATA) {
		kip->reads++;
		kip->nread += itask->itask_read_xfer;
		itli->i_task_read_elapsed.value.ui64 += elapsed_time;
		itli->i_lu_read_elapsed.value.ui64 +=
		    itask->itask_lu_read_time;
		itli->i_lport_read_elapsed.value.ui64 +=
		    itask->itask_lport_read_time;
	}

	/* Same for the write side. */
	if (task->task_flags & TF_WRITE_DATA) {
		kip->writes++;
		kip->nwritten += itask->itask_write_xfer;
		itli->i_task_write_elapsed.value.ui64 += elapsed_time;
		itli->i_lu_write_elapsed.value.ui64 +=
		    itask->itask_lu_write_time;
		itli->i_lport_write_elapsed.value.ui64 +=
		    itask->itask_lport_write_time;
	}

	/*
	 * Exit whichever queue the task is currently accounted in.  The
	 * lport kstat update happens after the ilu lock is dropped.
	 */
	if (itask->itask_flags & ITASK_KSTAT_IN_RUNQ) {
		kstat_runq_exit(kip);
		stmf_update_kstat_lu_q(task, kstat_runq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_runq_exit);
	} else {
		kstat_waitq_exit(kip);
		stmf_update_kstat_lu_q(task, kstat_waitq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_waitq_exit);
	}
}
7620
void
stmf_lu_xfer_start(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask = task->task_stmf_private;
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	kstat_io_t *kip;

	/* No stats for tasks without ITL data or for the internal dlun0. */
	if (itl == NULL || task->task_lu == dlun0)
		return;

	/* Mark an LU-side transfer as in flight in the lu_xfer kstat. */
	kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	kstat_runq_enter(kip);
	mutex_exit(ilu->ilu_kstat_io->ks_lock);
}
7637
void
stmf_lu_xfer_done(scsi_task_t *task, boolean_t read, uint64_t xfer_bytes,
    hrtime_t elapsed_time)
{
	stmf_i_scsi_task_t *itask = task->task_stmf_private;
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	kstat_io_t *kip;

	/* No stats for tasks without ITL data or for the internal dlun0. */
	if (itl == NULL || task->task_lu == dlun0)
		return;

	/*
	 * Accumulate per-task LU transfer time.  Atomics are used because
	 * multiple transfers for one task may complete concurrently.
	 */
	if (read) {
		atomic_add_64((uint64_t *)&itask->itask_lu_read_time,
		    elapsed_time);
	} else {
		atomic_add_64((uint64_t *)&itask->itask_lu_write_time,
		    elapsed_time);
	}

	/* Transfer done: leave the runq and record the byte counts. */
	kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	kstat_runq_exit(kip);
	if (read) {
		kip->reads++;
		kip->nread += xfer_bytes;
	} else {
		kip->writes++;
		kip->nwritten += xfer_bytes;
	}
	mutex_exit(ilu->ilu_kstat_io->ks_lock);
}
7670
7671 static void
stmf_lport_xfer_start(stmf_i_scsi_task_t * itask,stmf_data_buf_t * dbuf)7672 stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
7673 {
7674 stmf_itl_data_t *itl = itask->itask_itl_datap;
7675
7676 if (itl == NULL)
7677 return;
7678
7679 DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, itask->itask_task,
7680 stmf_data_buf_t *, dbuf);
7681
7682 dbuf->db_xfer_start_timestamp = gethrtime();
7683 }
7684
static void
stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task;
	stmf_i_local_port_t *ilp;
	kstat_io_t *kip;
	hrtime_t elapsed_time;
	uint64_t xfer_size;

	/* Nothing to record when the task carries no ITL data. */
	if (itl == NULL)
		return;

	task = (scsi_task_t *)itask->itask_task;
	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	/* Only count bytes for transfers that actually succeeded. */
	xfer_size = (dbuf->db_xfer_status == STMF_SUCCESS) ?
	    dbuf->db_data_size : 0;

	/* Elapsed since stmf_lport_xfer_start() stamped this dbuf. */
	elapsed_time = gethrtime() - dbuf->db_xfer_start_timestamp;
	/* DB_DIRECTION_TO_RPORT means data flowed to the initiator (read). */
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		atomic_add_64((uint64_t *)&itask->itask_lport_read_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_read_xfer,
		    xfer_size);
	} else {
		atomic_add_64((uint64_t *)&itask->itask_lport_write_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_write_xfer,
		    xfer_size);
	}

	DTRACE_PROBE3(scsi__xfer__end, scsi_task_t *, itask->itask_task,
	    stmf_data_buf_t *, dbuf, hrtime_t, elapsed_time);

	/* Per-lport transfer kstat, protected by the lport kstat lock. */
	kip = KSTAT_IO_PTR(itl->itl_kstat_lport_xfer);
	mutex_enter(ilp->ilport_kstat_io->ks_lock);
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		kip->reads++;
		kip->nread += xfer_size;
	} else {
		kip->writes++;
		kip->nwritten += xfer_size;
	}
	mutex_exit(ilp->ilport_kstat_io->ks_lock);

	/* Clear the stamp so a reused dbuf starts clean. */
	dbuf->db_xfer_start_timestamp = 0;
}
7732
void
stmf_svc_init()
{
	/* Already started (stmf_svc() sets STMF_SVC_STARTED once running). */
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
		return;
	/*
	 * Create a single-threaded taskq and dispatch stmf_svc(), the
	 * framework's background service loop, onto it.
	 * NOTE(review): neither the ddi_taskq_create() result nor the
	 * dispatch (DDI_SLEEP, so it blocks rather than fails) is checked
	 * for NULL — presumably safe at module-init time; confirm.
	 */
	stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1,
	    TASKQ_DEFAULTPRI, 0);
	(void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq,
	    stmf_svc, 0, DDI_SLEEP);
}
7743
stmf_status_t
stmf_svc_fini()
{
	uint32_t i;

	/* Ask the service thread to exit and wake it up. */
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) {
		stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE;
		cv_signal(&stmf_state.stmf_cv);
	}
	mutex_exit(&stmf_state.stmf_lock);

	/* Wait for 5 seconds */
	/* Poll every 10ms for the thread to clear STMF_SVC_STARTED. */
	for (i = 0; i < 500; i++) {
		if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
			delay(drv_usectohz(10000));
		else
			break;
	}
	/* Thread did not exit in time; caller must retry later. */
	if (i == 500)
		return (STMF_BUSY);

	ddi_taskq_destroy(stmf_state.stmf_svc_taskq);

	return (STMF_SUCCESS);
}
7770
/* ARGSUSED */
void
stmf_svc(void *arg)
{
	/*
	 * Framework background service thread.  Loops forever (until
	 * STMF_SVC_TERMINATE) doing, in order:
	 *   1. process one request from the active queue,
	 *   2. drain the wait queue of requests that are ready,
	 *   3. periodic housekeeping (ilu timing, free-task reaping,
	 *      worker management, initial-LUN-mapped events),
	 * then sleeps briefly with STMF_SVC_ACTIVE cleared so that
	 * stmf_svc_queue() knows to cv_signal() us.
	 */
	stmf_svc_req_t *req, **preq;
	clock_t td;
	clock_t drain_start, drain_next = 0;
	clock_t timing_start, timing_next = 0;
	clock_t worker_delay = 0;
	int deq;
	stmf_lu_t *lu;
	stmf_i_lu_t *ilu;
	stmf_local_port_t *lport;
	stmf_i_local_port_t *ilport, *next_ilport;
	stmf_i_scsi_session_t *iss;

	/* Sleep quantum between service passes: 20ms. */
	td = drv_usectohz(20000);

	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE;

stmf_svc_loop:
	/* stmf_svc_fini() sets TERMINATE to ask us to exit. */
	if (stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE) {
		stmf_state.stmf_svc_flags &=
		    ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE);
		mutex_exit(&stmf_state.stmf_lock);
		return;
	}

	/* Stage 1: take one request off the active queue. */
	if (stmf_state.stmf_svc_active) {
		int waitq_add = 0;
		req = stmf_state.stmf_svc_active;
		stmf_state.stmf_svc_active = req->svc_next;

		switch (req->svc_cmd) {
		case STMF_CMD_LPORT_ONLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_OFFLINE:
			/* Fallthrough */
		case STMF_CMD_LU_ONLINE:
			/* Nothing to do */
			waitq_add = 1;
			break;

		case STMF_CMD_LU_OFFLINE:
			/* Remove all mappings of this LU */
			stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj);
			/* Kill all the pending I/Os for this LU */
			mutex_exit(&stmf_state.stmf_lock);
			stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL,
			    STMF_ABORTED);
			mutex_enter(&stmf_state.stmf_lock);
			waitq_add = 1;
			break;
		default:
			cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d",
			    req->svc_cmd);
		}

		if (waitq_add) {
			/* Put it in the wait queue */
			req->svc_next = stmf_state.stmf_svc_waiting;
			stmf_state.stmf_svc_waiting = req;
		}
	}

	/* The waiting list is not going to be modified by anybody else */
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * Stage 2: walk the wait queue, invoking each LU/lport ctl entry
	 * point when its request is ready, and dequeue completed requests.
	 */
	for (preq = &stmf_state.stmf_svc_waiting; (*preq) != NULL; ) {
		req = *preq;
		deq = 0;

		switch (req->svc_cmd) {
		case STMF_CMD_LU_ONLINE:
			lu = (stmf_lu_t *)req->svc_obj;
			deq = 1;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;

		case STMF_CMD_LU_OFFLINE:
			lu = (stmf_lu_t *)req->svc_obj;
			ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
			/* Hold the offline until all tasks have drained. */
			if (ilu->ilu_ntasks != ilu->ilu_ntasks_free)
				break;
			deq = 1;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;

		case STMF_CMD_LPORT_OFFLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_ONLINE:
			lport = (stmf_local_port_t *)req->svc_obj;
			deq = 1;
			lport->lport_ctl(lport, req->svc_cmd, &req->svc_info);
			break;
		}
		if (deq) {
			*preq = req->svc_next;
			kmem_free(req, req->svc_req_alloc_size);
		} else {
			preq = &req->svc_next;
		}
	}

	/* Stage 3: housekeeping, but only while the active queue is idle. */
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_svc_active == NULL) {
		/* Do timeouts */
		if (stmf_state.stmf_nlus &&
		    ((!timing_next) || (ddi_get_lbolt() >= timing_next))) {
			if (!stmf_state.stmf_svc_ilu_timing) {
				/* we are starting a new round */
				stmf_state.stmf_svc_ilu_timing =
				    stmf_state.stmf_ilulist;
				timing_start = ddi_get_lbolt();
			}
			stmf_check_ilu_timing();
			if (!stmf_state.stmf_svc_ilu_timing) {
				/* we finished a complete round */
				timing_next =
				    timing_start + drv_usectohz(5*1000*1000);
			} else {
				/* we still have some ilu items to check */
				timing_next =
				    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
			}
			/* New work may have arrived while we scanned. */
			if (stmf_state.stmf_svc_active)
				goto stmf_svc_loop;
		}
		/* Check if there are free tasks to clear */
		if (stmf_state.stmf_nlus &&
		    ((!drain_next) || (ddi_get_lbolt() >= drain_next))) {
			if (!stmf_state.stmf_svc_ilu_draining) {
				/* we are starting a new round */
				stmf_state.stmf_svc_ilu_draining =
				    stmf_state.stmf_ilulist;
				drain_start = ddi_get_lbolt();
			}
			stmf_check_freetask();
			if (!stmf_state.stmf_svc_ilu_draining) {
				/* we finished a complete round */
				drain_next =
				    drain_start + drv_usectohz(10*1000*1000);
			} else {
				/* we still have some ilu items to check */
				drain_next =
				    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
			}
			/* New work may have arrived while we scanned. */
			if (stmf_state.stmf_svc_active)
				goto stmf_svc_loop;
		}

		/* Check if we need to run worker_mgmt */
		if (ddi_get_lbolt() > worker_delay) {
			stmf_worker_mgmt();
			worker_delay = ddi_get_lbolt() +
			    stmf_worker_mgmt_delay;
		}

		/* Check if any active session got its 1st LUN */
		if (stmf_state.stmf_process_initial_luns) {
			int stmf_level = 0;
			int port_level;
			for (ilport = stmf_state.stmf_ilportlist; ilport;
			    ilport = next_ilport) {
				int ilport_lock_held;
				next_ilport = ilport->ilport_next;
				if ((ilport->ilport_flags &
				    ILPORT_SS_GOT_INITIAL_LUNS) == 0) {
					continue;
				}
				port_level = 0;
				rw_enter(&ilport->ilport_lock, RW_READER);
				ilport_lock_held = 1;
				for (iss = ilport->ilport_ss_list; iss;
				    iss = iss->iss_next) {
					if ((iss->iss_flags &
					    ISS_GOT_INITIAL_LUNS) == 0) {
						continue;
					}
					port_level++;
					stmf_level++;
					atomic_and_32(&iss->iss_flags,
					    ~ISS_GOT_INITIAL_LUNS);
					atomic_or_32(&iss->iss_flags,
					    ISS_EVENT_ACTIVE);
					/*
					 * Drop both locks so the event
					 * handler is free to block.
					 */
					rw_exit(&ilport->ilport_lock);
					ilport_lock_held = 0;
					mutex_exit(&stmf_state.stmf_lock);
					stmf_generate_lport_event(ilport,
					    LPORT_EVENT_INITIAL_LUN_MAPPED,
					    iss->iss_ss, 0);
					atomic_and_32(&iss->iss_flags,
					    ~ISS_EVENT_ACTIVE);
					mutex_enter(&stmf_state.stmf_lock);
					/*
					 * scan all the ilports again as the
					 * ilport list might have changed.
					 */
					next_ilport =
					    stmf_state.stmf_ilportlist;
					break;
				}
				if (port_level == 0) {
					atomic_and_32(&ilport->ilport_flags,
					    ~ILPORT_SS_GOT_INITIAL_LUNS);
				}
				/* drop the lock if we are holding it. */
				if (ilport_lock_held == 1)
					rw_exit(&ilport->ilport_lock);

				/* Max 4 session at a time */
				if (stmf_level >= 4) {
					break;
				}
			}
			if (stmf_level == 0) {
				stmf_state.stmf_process_initial_luns = 0;
			}
		}

		/* Idle: sleep until signalled or the 20ms timeout fires. */
		stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
		(void) cv_reltimedwait(&stmf_state.stmf_cv,
		    &stmf_state.stmf_lock, td, TR_CLOCK_TICK);
		stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
	}
	goto stmf_svc_loop;
}
7999
void
stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info)
{
	stmf_svc_req_t *req;
	int s;

	/* Caller must not hold stmf_lock; we acquire it below. */
	ASSERT(!mutex_owned(&stmf_state.stmf_lock));
	/*
	 * Allocate the request and the optional additional-info string in
	 * one contiguous chunk; the string lives immediately after the
	 * request structure and is freed with it (svc_req_alloc_size).
	 */
	s = sizeof (stmf_svc_req_t);
	if (info->st_additional_info) {
		s += strlen(info->st_additional_info) + 1;
	}
	req = kmem_zalloc(s, KM_SLEEP);

	req->svc_cmd = cmd;
	req->svc_obj = obj;
	req->svc_info.st_rflags = info->st_rflags;
	if (info->st_additional_info) {
		/* Point at the trailing space and copy the string there. */
		req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req,
		    sizeof (stmf_svc_req_t)));
		(void) strcpy(req->svc_info.st_additional_info,
		    info->st_additional_info);
	}
	req->svc_req_alloc_size = s;

	/* Push onto the active queue and wake stmf_svc() if it is asleep. */
	mutex_enter(&stmf_state.stmf_lock);
	req->svc_next = stmf_state.stmf_svc_active;
	stmf_state.stmf_svc_active = req;
	if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) {
		cv_signal(&stmf_state.stmf_cv);
	}
	mutex_exit(&stmf_state.stmf_lock);
}
8032
8033 void
stmf_trace(caddr_t ident,const char * fmt,...)8034 stmf_trace(caddr_t ident, const char *fmt, ...)
8035 {
8036 va_list args;
8037 char tbuf[160];
8038 int len;
8039
8040 if (!stmf_trace_on)
8041 return;
8042 len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "",
8043 ddi_get_lbolt());
8044 va_start(args, fmt);
8045 len += vsnprintf(tbuf + len, 158 - len, fmt, args);
8046 va_end(args);
8047
8048 if (len > 158) {
8049 len = 158;
8050 }
8051 tbuf[len++] = '\n';
8052 tbuf[len] = 0;
8053
8054 mutex_enter(&trace_buf_lock);
8055 bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1);
8056 trace_buf_curndx += len;
8057 if (trace_buf_curndx > (trace_buf_size - 320))
8058 trace_buf_curndx = 0;
8059 mutex_exit(&trace_buf_lock);
8060 }
8061
void
stmf_trace_clear()
{
	/*
	 * Reset the trace ring buffer to empty: rewind the write index and
	 * NUL-terminate at the start.  No-op when tracing is disabled.
	 */
	if (!stmf_trace_on)
		return;
	mutex_enter(&trace_buf_lock);
	trace_buf_curndx = 0;
	if (trace_buf_size > 0)
		stmf_trace_buf[0] = 0;
	mutex_exit(&trace_buf_lock);
}
8073
static void
stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info)
{
	/*
	 * An abort went so wrong that the task's LU (offline_lu != 0) or
	 * local port must be taken offline.  Build the state-change info
	 * and hand it to stmf_ctl().
	 */
	stmf_state_change_info_t change_info;
	void *ctl_private;
	uint32_t ctl_cmd;
	int msg = 0;

	stmf_trace("FROM STMF", "abort_task_offline called for %s: %s",
	    offline_lu ? "LU" : "LPORT", info ? info : "no additional info");
	change_info.st_additional_info = info;
	if (offline_lu) {
		change_info.st_rflags = STMF_RFLAG_RESET |
		    STMF_RFLAG_LU_ABORT;
		ctl_private = task->task_lu;
		/* Only log when we are actually transitioning from online. */
		if (((stmf_i_lu_t *)
		    task->task_lu->lu_stmf_private)->ilu_state ==
		    STMF_STATE_ONLINE) {
			msg = 1;
		}
		ctl_cmd = STMF_CMD_LU_OFFLINE;
	} else {
		change_info.st_rflags = STMF_RFLAG_RESET |
		    STMF_RFLAG_LPORT_ABORT;
		ctl_private = task->task_lport;
		/* Only log when we are actually transitioning from online. */
		if (((stmf_i_local_port_t *)
		    task->task_lport->lport_stmf_private)->ilport_state ==
		    STMF_STATE_ONLINE) {
			msg = 1;
		}
		ctl_cmd = STMF_CMD_LPORT_OFFLINE;
	}

	if (msg) {
		stmf_trace(0, "Calling stmf_ctl to offline %s : %s",
		    offline_lu ? "LU" : "LPORT", info ? info :
		    "<no additional info>");
	}
	(void) stmf_ctl(ctl_cmd, ctl_private, &change_info);
}
8114
/*
 * Convert a single ASCII hex digit to its numeric value (0-15).
 * Returns -1 for any character that is not a hex digit.
 */
static char
stmf_ctoi(char c)
{
	if (c >= '0' && c <= '9')
		return (c - '0');
	if (c >= 'A' && c <= 'F')
		return (c - 'A' + 10);
	if (c >= 'a' && c <= 'f')
		return (c - 'a' + 10);
	return (-1);
}
8128
8129 /* Convert from Hex value in ASCII format to the equivalent bytes */
8130 static boolean_t
stmf_base16_str_to_binary(char * c,int dplen,uint8_t * dp)8131 stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp)
8132 {
8133 int ii;
8134
8135 for (ii = 0; ii < dplen; ii++) {
8136 char nibble1, nibble2;
8137 char enc_char = *c++;
8138 nibble1 = stmf_ctoi(enc_char);
8139
8140 enc_char = *c++;
8141 nibble2 = stmf_ctoi(enc_char);
8142 if (nibble1 == -1 || nibble2 == -1)
8143 return (B_FALSE);
8144
8145 dp[ii] = (nibble1 << 4) | nibble2;
8146 }
8147 return (B_TRUE);
8148 }
8149
boolean_t
stmf_scsilib_tptid_validate(scsi_transport_id_t *tptid, uint32_t total_sz,
    uint16_t *tptid_sz)
{
	/*
	 * Validate a SCSI TransportID per SPC-3 and, on success, optionally
	 * return its protocol-specific length through 'tptid_sz'.
	 * Returns B_FALSE if the buffer is too small or any field is
	 * malformed for the claimed protocol.
	 */
	uint16_t tpd_len = SCSI_TPTID_SIZE;

	if (tptid_sz)
		*tptid_sz = 0;
	if (total_sz < sizeof (scsi_transport_id_t))
		return (B_FALSE);

	switch (tptid->protocol_id) {

	case PROTOCOL_FIBRE_CHANNEL:
		/* FC Transport ID validation checks. SPC3 rev23, Table 284 */
		if (total_sz < tpd_len || tptid->format_code != 0)
			return (B_FALSE);
		break;

	case PROTOCOL_iSCSI:
	{
		iscsi_transport_id_t *iscsiid;
		uint16_t adn_len, name_len;

		/* Check for valid format code, SPC3 rev 23 Table 288 */
		if ((total_sz < tpd_len) ||
		    (tptid->format_code != 0 && tptid->format_code != 1))
			return (B_FALSE);

		iscsiid = (iscsi_transport_id_t *)tptid;
		adn_len = READ_SCSI16(iscsiid->add_len, uint16_t);
		/* -1: iscsi_name[] overlaps the struct's last byte. */
		tpd_len = sizeof (iscsi_transport_id_t) + adn_len - 1;

		/*
		 * iSCSI Transport ID validation checks.
		 * As per SPC3 rev 23 Section 7.5.4.6 and Table 289 & Table 290
		 */
		if (adn_len < 20 || (adn_len % 4 != 0))
			return (B_FALSE);

		/* Name must be NUL-terminated within the additional data. */
		name_len = strnlen(iscsiid->iscsi_name, adn_len);
		if (name_len == 0 || name_len >= adn_len)
			return (B_FALSE);

		/* If the format_code is 1 check for ISID separator */
		if ((tptid->format_code == 1) && (strstr(iscsiid->iscsi_name,
		    SCSI_TPTID_ISCSI_ISID_SEPERATOR) == NULL))
			return (B_FALSE);

	}
	break;

	case PROTOCOL_SRP:
		/* SRP Transport ID validation checks. SPC3 rev23, Table 287 */
		if (total_sz < tpd_len || tptid->format_code != 0)
			return (B_FALSE);
		break;

	case PROTOCOL_PARALLEL_SCSI:
	case PROTOCOL_SSA:
	case PROTOCOL_IEEE_1394:
	case PROTOCOL_SAS:
	case PROTOCOL_ADT:
	case PROTOCOL_ATAPI:
	default:
	{
		/* Generic format: fixed header plus a counted identifier. */
		stmf_dflt_scsi_tptid_t *dflttpd;

		tpd_len = sizeof (stmf_dflt_scsi_tptid_t);
		if (total_sz < tpd_len)
			return (B_FALSE);
		dflttpd = (stmf_dflt_scsi_tptid_t *)tptid;
		tpd_len = tpd_len + SCSI_READ16(&dflttpd->ident_len) - 1;
		if (total_sz < tpd_len)
			return (B_FALSE);
	}
	break;
	}
	if (tptid_sz)
		*tptid_sz = tpd_len;
	return (B_TRUE);
}
8232
boolean_t
stmf_scsilib_tptid_compare(scsi_transport_id_t *tpd1,
    scsi_transport_id_t *tpd2)
{
	/*
	 * Compare two SCSI TransportIDs for equality, using the
	 * protocol-specific identity fields.  Both IDs are assumed to
	 * have already passed stmf_scsilib_tptid_validate().
	 */
	if ((tpd1->protocol_id != tpd2->protocol_id) ||
	    (tpd1->format_code != tpd2->format_code))
		return (B_FALSE);

	switch (tpd1->protocol_id) {

	case PROTOCOL_iSCSI:
	{
		iscsi_transport_id_t *iscsitpd1, *iscsitpd2;
		uint16_t len;

		/* Compare lengths first, then the iSCSI names themselves. */
		iscsitpd1 = (iscsi_transport_id_t *)tpd1;
		iscsitpd2 = (iscsi_transport_id_t *)tpd2;
		len = SCSI_READ16(&iscsitpd1->add_len);
		if ((memcmp(iscsitpd1->add_len, iscsitpd2->add_len, 2) != 0) ||
		    (memcmp(iscsitpd1->iscsi_name, iscsitpd2->iscsi_name, len)
		    != 0))
			return (B_FALSE);
	}
	break;

	case PROTOCOL_SRP:
	{
		scsi_srp_transport_id_t *srptpd1, *srptpd2;

		srptpd1 = (scsi_srp_transport_id_t *)tpd1;
		srptpd2 = (scsi_srp_transport_id_t *)tpd2;
		if (memcmp(srptpd1->srp_name, srptpd2->srp_name,
		    sizeof (srptpd1->srp_name)) != 0)
			return (B_FALSE);
	}
	break;

	case PROTOCOL_FIBRE_CHANNEL:
	{
		scsi_fc_transport_id_t *fctpd1, *fctpd2;

		fctpd1 = (scsi_fc_transport_id_t *)tpd1;
		fctpd2 = (scsi_fc_transport_id_t *)tpd2;
		if (memcmp(fctpd1->port_name, fctpd2->port_name,
		    sizeof (fctpd1->port_name)) != 0)
			return (B_FALSE);
	}
	break;

	case PROTOCOL_PARALLEL_SCSI:
	case PROTOCOL_SSA:
	case PROTOCOL_IEEE_1394:
	case PROTOCOL_SAS:
	case PROTOCOL_ADT:
	case PROTOCOL_ATAPI:
	default:
	{
		/* Generic format: compare the counted identifier bytes. */
		stmf_dflt_scsi_tptid_t *dflt1, *dflt2;
		uint16_t len;

		dflt1 = (stmf_dflt_scsi_tptid_t *)tpd1;
		dflt2 = (stmf_dflt_scsi_tptid_t *)tpd2;
		len = SCSI_READ16(&dflt1->ident_len);
		if ((memcmp(dflt1->ident_len, dflt2->ident_len, 2) != 0) ||
		    (memcmp(dflt1->ident, dflt2->ident, len) != 0))
			return (B_FALSE);
	}
	break;
	}
	return (B_TRUE);
}
8304
8305 /*
8306 * Changes devid_desc to corresponding TransportID format
8307 * Returns :- pointer to stmf_remote_port_t
8308 * Note :- Allocates continous memory for stmf_remote_port_t and TransportID,
8309 * This memory need to be freed when this remote_port is no longer
8310 * used.
8311 */
8312 stmf_remote_port_t *
stmf_scsilib_devid_to_remote_port(scsi_devid_desc_t * devid)8313 stmf_scsilib_devid_to_remote_port(scsi_devid_desc_t *devid)
8314 {
8315 struct scsi_fc_transport_id *fc_tpd;
8316 struct iscsi_transport_id *iscsi_tpd;
8317 struct scsi_srp_transport_id *srp_tpd;
8318 struct stmf_dflt_scsi_tptid *dflt_tpd;
8319 uint16_t ident_len, sz = 0;
8320 stmf_remote_port_t *rpt = NULL;
8321
8322 ident_len = devid->ident_length;
8323 ASSERT(ident_len);
8324 switch (devid->protocol_id) {
8325 case PROTOCOL_FIBRE_CHANNEL:
8326 sz = sizeof (scsi_fc_transport_id_t);
8327 rpt = stmf_remote_port_alloc(sz);
8328 rpt->rport_tptid->format_code = 0;
8329 rpt->rport_tptid->protocol_id = devid->protocol_id;
8330 fc_tpd = (scsi_fc_transport_id_t *)rpt->rport_tptid;
8331 /*
8332 * convert from "wwn.xxxxxxxxxxxxxxxx" to 8-byte binary
8333 * skip first 4 byte for "wwn."
8334 */
8335 ASSERT(strncmp("wwn.", (char *)devid->ident, 4) == 0);
8336 if ((ident_len < SCSI_TPTID_FC_PORT_NAME_SIZE * 2 + 4) ||
8337 !stmf_base16_str_to_binary((char *)devid->ident + 4,
8338 SCSI_TPTID_FC_PORT_NAME_SIZE, fc_tpd->port_name))
8339 goto devid_to_remote_port_fail;
8340 break;
8341
8342 case PROTOCOL_iSCSI:
8343 sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (iscsi_transport_id_t) +
8344 ident_len - 1);
8345 rpt = stmf_remote_port_alloc(sz);
8346 rpt->rport_tptid->format_code = 0;
8347 rpt->rport_tptid->protocol_id = devid->protocol_id;
8348 iscsi_tpd = (iscsi_transport_id_t *)rpt->rport_tptid;
8349 SCSI_WRITE16(iscsi_tpd->add_len, ident_len);
8350 (void) memcpy(iscsi_tpd->iscsi_name, devid->ident, ident_len);
8351 break;
8352
8353 case PROTOCOL_SRP:
8354 sz = sizeof (scsi_srp_transport_id_t);
8355 rpt = stmf_remote_port_alloc(sz);
8356 rpt->rport_tptid->format_code = 0;
8357 rpt->rport_tptid->protocol_id = devid->protocol_id;
8358 srp_tpd = (scsi_srp_transport_id_t *)rpt->rport_tptid;
8359 /*
8360 * convert from "eui.xxxxxxxxxxxxxxx" to 8-byte binary
8361 * skip first 4 byte for "eui."
8362 * Assume 8-byte initiator-extension part of srp_name is NOT
8363 * stored in devid and hence will be set as zero
8364 */
8365 ASSERT(strncmp("eui.", (char *)devid->ident, 4) == 0);
8366 if ((ident_len < (SCSI_TPTID_SRP_PORT_NAME_LEN - 8) * 2 + 4) ||
8367 !stmf_base16_str_to_binary((char *)devid->ident+4,
8368 SCSI_TPTID_SRP_PORT_NAME_LEN, srp_tpd->srp_name))
8369 goto devid_to_remote_port_fail;
8370 break;
8371
8372 case PROTOCOL_PARALLEL_SCSI:
8373 case PROTOCOL_SSA:
8374 case PROTOCOL_IEEE_1394:
8375 case PROTOCOL_SAS:
8376 case PROTOCOL_ADT:
8377 case PROTOCOL_ATAPI:
8378 default :
8379 ident_len = devid->ident_length;
8380 sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (stmf_dflt_scsi_tptid_t) +
8381 ident_len - 1);
8382 rpt = stmf_remote_port_alloc(sz);
8383 rpt->rport_tptid->format_code = 0;
8384 rpt->rport_tptid->protocol_id = devid->protocol_id;
8385 dflt_tpd = (stmf_dflt_scsi_tptid_t *)rpt->rport_tptid;
8386 SCSI_WRITE16(dflt_tpd->ident_len, ident_len);
8387 (void) memcpy(dflt_tpd->ident, devid->ident, ident_len);
8388 break;
8389 }
8390 return (rpt);
8391
8392 devid_to_remote_port_fail:
8393 stmf_remote_port_free(rpt);
8394 return (NULL);
8395
8396 }
8397
8398 stmf_remote_port_t *
stmf_remote_port_alloc(uint16_t tptid_sz)8399 stmf_remote_port_alloc(uint16_t tptid_sz) {
8400 stmf_remote_port_t *rpt;
8401 rpt = (stmf_remote_port_t *)kmem_zalloc(
8402 sizeof (stmf_remote_port_t) + tptid_sz, KM_SLEEP);
8403 rpt->rport_tptid_sz = tptid_sz;
8404 rpt->rport_tptid = (scsi_transport_id_t *)(rpt + 1);
8405 return (rpt);
8406 }
8407
8408 void
stmf_remote_port_free(stmf_remote_port_t * rpt)8409 stmf_remote_port_free(stmf_remote_port_t *rpt)
8410 {
8411 /*
8412 * Note: stmf_scsilib_devid_to_remote_port() function allocates
8413 * remote port structures for all transports in the same way, So
8414 * it is safe to deallocate it in a protocol independent manner.
8415 * If any of the allocation method changes, corresponding changes
8416 * need to be made here too.
8417 */
8418 kmem_free(rpt, sizeof (stmf_remote_port_t) + rpt->rport_tptid_sz);
8419 }
8420