xref: /onnv-gate/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c (revision 12128:1f70ce0b33d3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2000 to 2010, LSI Corporation.
28  * All rights reserved.
29  *
30  * Redistribution and use in source and binary forms of all code within
31  * this file that is exclusively owned by LSI, with or without
32  * modification, is permitted provided that, in addition to the CDDL 1.0
33  * License requirements, the following conditions are met:
34  *
35  *    Neither the name of the author nor the names of its contributors may be
36  *    used to endorse or promote products derived from this software without
37  *    specific prior written permission.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
40  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
41  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
42  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
43  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
44  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
45  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
46  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
47  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
48  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
49  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
50  * DAMAGE.
51  */
52 
53 /*
54  * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
55  *
56  */
57 
58 #if defined(lint) || defined(DEBUG)
59 #define	MPTSAS_DEBUG
60 #endif
61 
62 /*
63  * standard header files.
64  */
65 #include <sys/note.h>
66 #include <sys/scsi/scsi.h>
67 #include <sys/pci.h>
68 #include <sys/file.h>
69 #include <sys/policy.h>
70 #include <sys/sysevent.h>
71 #include <sys/sysevent/eventdefs.h>
72 #include <sys/sysevent/dr.h>
73 #include <sys/sata/sata_defs.h>
74 
75 #pragma pack(1)
76 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
77 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
78 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
79 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
84 #pragma pack()
85 
86 /*
87  * private header files.
88  *
89  */
90 #include <sys/scsi/impl/scsi_reset_notify.h>
91 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
92 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
93 #include <sys/raidioctl.h>
94 
95 #include <sys/fs/dv_node.h>	/* devfs_clean */
96 
97 /*
98  * FMA header files
99  */
100 #include <sys/ddifm.h>
101 #include <sys/fm/protocol.h>
102 #include <sys/fm/util.h>
103 #include <sys/fm/io/ddi.h>
104 
105 /*
106  * autoconfiguration data and routines.
107  */
108 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
109 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
110 static int mptsas_power(dev_info_t *dip, int component, int level);
111 
112 /*
113  * cb_ops function
114  */
115 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
116 	cred_t *credp, int *rval);
117 #ifdef __sparc
118 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
119 #else  /* __sparc */
120 static int mptsas_quiesce(dev_info_t *devi);
121 #endif	/* __sparc */
122 
123 /*
124  * Resource initialization for hardware
125  */
126 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
127 static void mptsas_disable_bus_master(mptsas_t *mpt);
128 static void mptsas_hba_fini(mptsas_t *mpt);
129 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
130 static int mptsas_alloc_request_frames(mptsas_t *mpt);
131 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
132 static int mptsas_alloc_free_queue(mptsas_t *mpt);
133 static int mptsas_alloc_post_queue(mptsas_t *mpt);
134 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
135 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
136 
137 /*
138  * SCSA function prototypes
139  */
140 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
141 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
142 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
143 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
144 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
145     int tgtonly);
146 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
147 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
148     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
149 	int tgtlen, int flags, int (*callback)(), caddr_t arg);
150 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
151 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
152     struct scsi_pkt *pkt);
153 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
154     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
155 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
156     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
157 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
158     void (*callback)(caddr_t), caddr_t arg);
159 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
160 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
161 static int mptsas_scsi_quiesce(dev_info_t *dip);
162 static int mptsas_scsi_unquiesce(dev_info_t *dip);
163 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
164     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
165 
166 /*
167  * SMP functions
168  */
169 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
170 
171 /*
172  * internal function prototypes.
173  */
174 static int mptsas_quiesce_bus(mptsas_t *mpt);
175 static int mptsas_unquiesce_bus(mptsas_t *mpt);
176 
177 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
178 static void mptsas_free_handshake_msg(mptsas_t *mpt);
179 
180 static void mptsas_ncmds_checkdrain(void *arg);
181 
182 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
183 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
184 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
185 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
186 
187 static int mptsas_do_detach(dev_info_t *dev);
188 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
189 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
190     struct scsi_pkt *pkt);
191 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
192 
193 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
194 static void mptsas_handle_event(void *args);
195 static int mptsas_handle_event_sync(void *args);
196 static void mptsas_handle_dr(void *args);
197 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
198     dev_info_t *pdip);
199 
200 static void mptsas_restart_cmd(void *);
201 
202 static void mptsas_flush_hba(mptsas_t *mpt);
203 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
204 	uint8_t tasktype);
205 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
206     uchar_t reason, uint_t stat);
207 
208 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
209 static void mptsas_process_intr(mptsas_t *mpt,
210     pMpi2ReplyDescriptorsUnion_t reply_desc_union);
211 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
212     pMpi2ReplyDescriptorsUnion_t reply_desc);
213 static void mptsas_handle_address_reply(mptsas_t *mpt,
214     pMpi2ReplyDescriptorsUnion_t reply_desc);
215 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
216 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
217     uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
218 
219 static void mptsas_watch(void *arg);
220 static void mptsas_watchsubr(mptsas_t *mpt);
221 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
222 
223 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
224 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
225     uint8_t *data, uint32_t request_size, uint32_t reply_size,
226     uint32_t data_size, uint32_t direction, uint8_t *dataout,
227     uint32_t dataout_size, short timeout, int mode);
228 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
229 
230 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
231     uint32_t unique_id);
232 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
233 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
234     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
235 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
236     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
237     uint32_t diag_type);
238 static int mptsas_diag_register(mptsas_t *mpt,
239     mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
240 static int mptsas_diag_unregister(mptsas_t *mpt,
241     mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
242 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
243     uint32_t *return_code);
244 static int mptsas_diag_read_buffer(mptsas_t *mpt,
245     mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
246     uint32_t *return_code, int ioctl_mode);
247 static int mptsas_diag_release(mptsas_t *mpt,
248     mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
249 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
250     uint8_t *diag_action, uint32_t length, uint32_t *return_code,
251     int ioctl_mode);
252 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
253     int mode);
254 
255 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
256     int cmdlen, int tgtlen, int statuslen, int kf);
257 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
258 
259 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
260 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
261 
262 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
263     int kmflags);
264 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
265 
266 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
267     mptsas_cmd_t *cmd);
268 static void mptsas_check_task_mgt(mptsas_t *mpt,
269     pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
270 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
271     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
272     int *resid);
273 
274 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
275 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
276 
277 static void mptsas_restart_hba(mptsas_t *mpt);
278 static void mptsas_restart_waitq(mptsas_t *mpt);
279 
280 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
281 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
282 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
283 
284 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
285 static void mptsas_doneq_empty(mptsas_t *mpt);
286 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
287 
288 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
289 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
290 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
291 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
292 
293 
294 static void mptsas_start_watch_reset_delay();
295 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
296 static void mptsas_watch_reset_delay(void *arg);
297 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
298 
299 /*
300  * helper functions
301  */
302 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
303 
304 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
305 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
306 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
307     int lun);
308 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
309     int lun);
310 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
311 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
312 
313 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
314     int *lun);
315 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
316 
317 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
318     uint8_t phy);
319 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
320     uint64_t wwid);
321 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
322     uint64_t wwid);
323 
324 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
325     uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
326 
327 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
328     uint16_t *handle, mptsas_target_t **pptgt);
329 static void mptsas_update_phymask(mptsas_t *mpt);
330 
331 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
332     uint32_t *status, uint8_t cmd);
333 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
334     mptsas_phymask_t *phymask);
335 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
336     mptsas_phymask_t phymask);
337 static int mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
338     uint32_t slotstatus);
339 
340 
341 /*
342  * Enumeration / DR functions
343  */
344 static void mptsas_config_all(dev_info_t *pdip);
345 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
346     dev_info_t **lundip);
347 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
348     dev_info_t **lundip);
349 
350 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
351 static int mptsas_offline_target(dev_info_t *pdip, char *name);
352 
353 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
354     dev_info_t **dip);
355 
356 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
357 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
358     dev_info_t **dip, mptsas_target_t *ptgt);
359 
360 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
361     dev_info_t **dip, mptsas_target_t *ptgt, int lun);
362 
363 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
364     char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
365 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
366     char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
367     int lun);
368 
369 static void mptsas_offline_missed_luns(dev_info_t *pdip,
370     uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
371 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
372     mdi_pathinfo_t *rpip, uint_t flags);
373 
374 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
375     dev_info_t **smp_dip);
376 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
377     uint_t flags);
378 
379 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
380     int mode, int *rval);
381 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
382     int mode, int *rval);
383 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
384     int mode, int *rval);
385 static void mptsas_record_event(void *args);
386 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
387     int mode);
388 
389 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
390 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
391 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
392 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
393     mptsas_phymask_t key2);
394 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
395     mptsas_phymask_t key2);
396 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
397 
398 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
399     uint32_t, mptsas_phymask_t, uint8_t);
400 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
401     mptsas_smp_t *data);
402 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
403     mptsas_phymask_t phymask);
404 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
405 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
406 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
407     dev_info_t **smp_dip);
408 
409 /*
410  * Power management functions
411  */
412 static void mptsas_idle_pm(void *arg);
413 static int mptsas_init_pm(mptsas_t *mpt);
414 
415 /*
416  * MPT MSI tunable:
417  *
418  * By default MSI is enabled on all supported platforms.
419  */
420 boolean_t mptsas_enable_msi = B_TRUE;
421 
422 static int mptsas_add_intrs(mptsas_t *, int);
423 static void mptsas_rem_intrs(mptsas_t *);
424 
425 /*
426  * FMA Prototypes
427  */
428 static void mptsas_fm_init(mptsas_t *mpt);
429 static void mptsas_fm_fini(mptsas_t *mpt);
430 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
431 
432 extern pri_t minclsyspri, maxclsyspri;
433 
434 /*
435  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).  It is
436  * under this device that the paths to a physical device are created when
437  * MPxIO is used.
438  */
439 extern dev_info_t	*scsi_vhci_dip;
440 
441 /*
442  * Tunable timeout value for Inquiry VPD page 0x83
443  * By default the value is 30 seconds.
444  */
445 int mptsas_inq83_retry_timeout = 30;
446 
/*
 * DMA attributes for message frame storage (request/reply frames and
 * queues), not for data I/O DMA.  All message frames must be stored in
 * the first 4G of physical memory, hence the 32-bit address-high limit.
 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version		*/
	0x0ull,		/* address low - should be 0 (longlong)	*/
	0xffffffffull,	/* address high - 32-bit max range	*/
	0x00ffffffull,	/* count max - max DMA object size	*/
	4,		/* allocation alignment requirements	*/
	0x78,		/* burstsizes - binary encoded values	*/
	1,		/* minxfer - gran. of DMA engine	*/
	0x00ffffffull,	/* maxxfer - gran. of DMA engine	*/
	0xffffffffull,	/* max segment size (DMA boundary)	*/
	MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length	*/
	512,		/* granularity - device transfer size	*/
	0		/* flags, set to 0			*/
};
466 
/*
 * DMA attributes for data I/O DMA memory allocation.  (The full 64-bit
 * physical address range is supported, and relaxed ordering is enabled
 * since I/O buffers have no intra-transfer ordering requirement.)
 */
ddi_dma_attr_t mptsas_dma_attrs64 = {
	DMA_ATTR_V0,	/* attribute layout version		*/
	0x0ull,		/* address low - should be 0 (longlong)	*/
	0xffffffffffffffffull,	/* address high - 64-bit max	*/
	0x00ffffffull,	/* count max - max DMA object size	*/
	4,		/* allocation alignment requirements	*/
	0x78,		/* burstsizes - binary encoded values	*/
	1,		/* minxfer - gran. of DMA engine	*/
	0x00ffffffull,	/* maxxfer - gran. of DMA engine	*/
	0xffffffffull,	/* max segment size (DMA boundary)	*/
	MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length	*/
	512,		/* granularity - device transfer size	*/
	DDI_DMA_RELAXED_ORDERING	/* flags, enable relaxed ordering */
};
485 
/*
 * Device register access attributes: controller registers are
 * little-endian and accesses must be issued in strict program order.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
492 
/*
 * Character device entry points.  Open/close are delegated to the SCSA
 * framework; ioctl is the only entry point implemented by this driver.
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,		/* open */
	scsi_hba_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mptsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* streamtab */
	D_MP,			/* cb_flag */
	CB_REV,			/* rev */
	nodev,			/* aread */
	nodev			/* awrite */
};
513 
/*
 * Autoconfiguration entry points.  SPARC provides a reset(9E) routine;
 * other platforms provide quiesce(9E) instead (used by fast reboot).
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mptsas_attach,		/* attach */
	mptsas_detach,		/* detach */
#ifdef  __sparc
	mptsas_reset,
#else
	nodev,			/* reset */
#endif  /* __sparc */
	&mptsas_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	mptsas_power,		/* power management */
#ifdef	__sparc
	ddi_quiesce_not_needed
#else
	mptsas_quiesce		/* quiesce */
#endif	/* __sparc */
};
536 
537 
/* Human-readable driver identification reported by modinfo(1M). */
#define	MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.23"

/* Linkage for a driver-type loadable module. */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING, /* Name of the module. */
	&mptsas_ops,	/* driver ops */
};

/* Single-module linkage passed to mod_install()/mod_remove(). */
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
549 #define	TARGET_PROP	"target"
550 #define	LUN_PROP	"lun"
551 #define	SAS_PROP	"sas-mpt"
552 #define	MDI_GUID	"wwn"
553 #define	NDI_GUID	"guid"
554 #define	MPTSAS_DEV_GONE	"mptsas_dev_gone"
555 
556 /*
557  * Local static data
558  */
559 #if defined(MPTSAS_DEBUG)
560 uint32_t mptsas_debug_flags = 0;
561 #endif	/* defined(MPTSAS_DEBUG) */
562 uint32_t mptsas_debug_resets = 0;
563 
564 static kmutex_t		mptsas_global_mutex;
565 static void		*mptsas_state;		/* soft	state ptr */
566 static krwlock_t	mptsas_global_rwlock;
567 
568 static kmutex_t		mptsas_log_mutex;
569 static char		mptsas_log_buf[256];
570 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
571 
572 static mptsas_t *mptsas_head, *mptsas_tail;
573 static clock_t mptsas_scsi_watchdog_tick;
574 static clock_t mptsas_tick;
575 static timeout_id_t mptsas_reset_watch;
576 static timeout_id_t mptsas_timeout_id;
577 static int mptsas_timeouts_enabled = 0;
578 /*
579  * warlock directives
580  */
581 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
582 	mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
583 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
584 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
585 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
586 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
587 
588 #ifdef MPTSAS_DEBUG
589 void debug_enter(char *);
590 #endif
591 
592 /*
593  * Notes:
594  *	- scsi_hba_init(9F) initializes SCSI HBA modules
595  *	- must call scsi_hba_fini(9F) if modload() fails
596  */
597 int
598 _init(void)
599 {
600 	int status;
601 	/* CONSTCOND */
602 	ASSERT(NO_COMPETING_THREADS);
603 
604 	NDBG0(("_init"));
605 
606 	status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
607 	    MPTSAS_INITIAL_SOFT_SPACE);
608 	if (status != 0) {
609 		return (status);
610 	}
611 
612 	if ((status = scsi_hba_init(&modlinkage)) != 0) {
613 		ddi_soft_state_fini(&mptsas_state);
614 		return (status);
615 	}
616 
617 	mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
618 	rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
619 	mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
620 
621 	if ((status = mod_install(&modlinkage)) != 0) {
622 		mutex_destroy(&mptsas_log_mutex);
623 		rw_destroy(&mptsas_global_rwlock);
624 		mutex_destroy(&mptsas_global_mutex);
625 		ddi_soft_state_fini(&mptsas_state);
626 		scsi_hba_fini(&modlinkage);
627 	}
628 
629 	return (status);
630 }
631 
632 /*
633  * Notes:
634  *	- scsi_hba_fini(9F) uninitializes SCSI HBA modules
635  */
636 int
637 _fini(void)
638 {
639 	int	status;
640 	/* CONSTCOND */
641 	ASSERT(NO_COMPETING_THREADS);
642 
643 	NDBG0(("_fini"));
644 
645 	if ((status = mod_remove(&modlinkage)) == 0) {
646 		ddi_soft_state_fini(&mptsas_state);
647 		scsi_hba_fini(&modlinkage);
648 		mutex_destroy(&mptsas_global_mutex);
649 		rw_destroy(&mptsas_global_rwlock);
650 		mutex_destroy(&mptsas_log_mutex);
651 	}
652 	return (status);
653 }
654 
655 /*
656  * The loadable-module _info(9E) entry point
657  */
658 int
659 _info(struct modinfo *modinfop)
660 {
661 	/* CONSTCOND */
662 	ASSERT(NO_COMPETING_THREADS);
663 	NDBG0(("mptsas _info"));
664 
665 	return (mod_info(&modlinkage, modinfop));
666 }
667 
668 
669 static int
670 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
671 {
672 	dev_info_t		*pdip;
673 	mptsas_t		*mpt;
674 	scsi_hba_tran_t		*hba_tran;
675 	char			*iport = NULL;
676 	char			phymask[MPTSAS_MAX_PHYS];
677 	mptsas_phymask_t	phy_mask = 0;
678 	int			physport = -1;
679 	int			dynamic_port = 0;
680 	uint32_t		page_address;
681 	char			initiator_wwnstr[MPTSAS_WWN_STRLEN];
682 	int			rval = DDI_FAILURE;
683 	int			i = 0;
684 	uint64_t		wwid = 0;
685 	uint8_t			portwidth = 0;
686 
687 	/* CONSTCOND */
688 	ASSERT(NO_COMPETING_THREADS);
689 
690 	switch (cmd) {
691 	case DDI_ATTACH:
692 		break;
693 
694 	case DDI_RESUME:
695 		/*
696 		 * If this a scsi-iport node, nothing to do here.
697 		 */
698 		return (DDI_SUCCESS);
699 
700 	default:
701 		return (DDI_FAILURE);
702 	}
703 
704 	pdip = ddi_get_parent(dip);
705 
706 	if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
707 	    NULL) {
708 		cmn_err(CE_WARN, "Failed attach iport because fail to "
709 		    "get tran vector for the HBA node");
710 		return (DDI_FAILURE);
711 	}
712 
713 	mpt = TRAN2MPT(hba_tran);
714 	ASSERT(mpt != NULL);
715 	if (mpt == NULL)
716 		return (DDI_FAILURE);
717 
718 	if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
719 	    NULL) {
720 		mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
721 		    "get tran vector for the iport node");
722 		return (DDI_FAILURE);
723 	}
724 
725 	/*
726 	 * Overwrite parent's tran_hba_private to iport's tran vector
727 	 */
728 	hba_tran->tran_hba_private = mpt;
729 
730 	ddi_report_dev(dip);
731 
732 	/*
733 	 * Get SAS address for initiator port according dev_handle
734 	 */
735 	iport = ddi_get_name_addr(dip);
736 	if (iport && strncmp(iport, "v0", 2) == 0) {
737 		return (DDI_SUCCESS);
738 	}
739 
740 	mutex_enter(&mpt->m_mutex);
741 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
742 		bzero(phymask, sizeof (phymask));
743 		(void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
744 		if (strcmp(phymask, iport) == 0) {
745 			break;
746 		}
747 	}
748 
749 	if (i == MPTSAS_MAX_PHYS) {
750 		mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
751 		    "seems not exist", iport);
752 		mutex_exit(&mpt->m_mutex);
753 		return (DDI_FAILURE);
754 	}
755 
756 	phy_mask = mpt->m_phy_info[i].phy_mask;
757 	physport = mpt->m_phy_info[i].port_num;
758 
759 	if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
760 		dynamic_port = 1;
761 	else
762 		dynamic_port = 0;
763 
764 	page_address = (MPI2_SASPORT_PGAD_FORM_PORT_NUM |
765 	    (MPI2_SASPORT_PGAD_PORTNUMBER_MASK & physport));
766 
767 	rval = mptsas_get_sas_port_page0(mpt, page_address, &wwid, &portwidth);
768 	if (rval != DDI_SUCCESS) {
769 		mptsas_log(mpt, CE_WARN, "Failed attach port %s because get"
770 		    "SAS address of initiator failed!", iport);
771 		mutex_exit(&mpt->m_mutex);
772 		return (DDI_FAILURE);
773 	}
774 	mutex_exit(&mpt->m_mutex);
775 
776 	bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
777 	(void) sprintf(initiator_wwnstr, "%016"PRIx64,
778 	    wwid);
779 
780 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
781 	    "initiator-port", initiator_wwnstr) !=
782 	    DDI_PROP_SUCCESS) {
783 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "initiator-port");
784 		return (DDI_FAILURE);
785 	}
786 
787 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
788 	    "phymask", phy_mask) !=
789 	    DDI_PROP_SUCCESS) {
790 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
791 		return (DDI_FAILURE);
792 	}
793 
794 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
795 	    "dynamic-port", dynamic_port) !=
796 	    DDI_PROP_SUCCESS) {
797 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
798 		return (DDI_FAILURE);
799 	}
800 	/*
801 	 * register sas hba iport with mdi (MPxIO/vhci)
802 	 */
803 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
804 	    dip, 0) == MDI_SUCCESS) {
805 		mpt->m_mpxio_enable = TRUE;
806 	}
807 	return (DDI_SUCCESS);
808 }
809 
810 /*
811  * Notes:
812  *	Set up all device state and allocate data structures,
813  *	mutexes, condition variables, etc. for device operation.
814  *	Add interrupts needed.
815  *	Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
816  */
817 static int
818 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
819 {
820 	mptsas_t		*mpt = NULL;
821 	int			instance, i, j;
822 	int			doneq_thread_num;
823 	char			buf[64];
824 	char			intr_added = 0;
825 	char			map_setup = 0;
826 	char			config_setup = 0;
827 	char			hba_attach_setup = 0;
828 	char			smp_attach_setup = 0;
829 	char			mutex_init_done = 0;
830 	char			event_taskq_create = 0;
831 	char			dr_taskq_create = 0;
832 	char			doneq_thread_create = 0;
833 	scsi_hba_tran_t		*hba_tran;
834 	int			intr_types;
835 	uint_t			mem_bar = MEM_SPACE;
836 	mptsas_phymask_t	mask = 0x0;
837 	int			tran_flags = 0;
838 	int			rval = DDI_FAILURE;
839 
840 	/* CONSTCOND */
841 	ASSERT(NO_COMPETING_THREADS);
842 
843 	if (scsi_hba_iport_unit_address(dip)) {
844 		return (mptsas_iport_attach(dip, cmd));
845 	}
846 
847 	switch (cmd) {
848 	case DDI_ATTACH:
849 		break;
850 
851 	case DDI_RESUME:
852 		if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
853 			return (DDI_FAILURE);
854 
855 		mpt = TRAN2MPT(hba_tran);
856 
857 		if (!mpt) {
858 			return (DDI_FAILURE);
859 		}
860 
861 		/*
862 		 * Reset hardware and softc to "no outstanding commands"
863 		 * Note	that a check condition can result on first command
864 		 * to a	target.
865 		 */
866 		mutex_enter(&mpt->m_mutex);
867 
868 		/*
869 		 * raise power.
870 		 */
871 		if (mpt->m_options & MPTSAS_OPT_PM) {
872 			mutex_exit(&mpt->m_mutex);
873 			(void) pm_busy_component(dip, 0);
874 			if (mpt->m_power_level != PM_LEVEL_D0) {
875 				rval = pm_raise_power(dip, 0, PM_LEVEL_D0);
876 			} else {
877 				rval = pm_power_has_changed(dip, 0,
878 				    PM_LEVEL_D0);
879 			}
880 			if (rval == DDI_SUCCESS) {
881 				mutex_enter(&mpt->m_mutex);
882 			} else {
883 				/*
884 				 * The pm_raise_power() call above failed,
885 				 * and that can only occur if we were unable
886 				 * to reset the hardware.  This is probably
887 				 * due to unhealty hardware, and because
888 				 * important filesystems(such as the root
889 				 * filesystem) could be on the attached disks,
890 				 * it would not be a good idea to continue,
891 				 * as we won't be entirely certain we are
892 				 * writing correct data.  So we panic() here
893 				 * to not only prevent possible data corruption,
894 				 * but to give developers or end users a hope
895 				 * of identifying and correcting any problems.
896 				 */
897 				fm_panic("mptsas could not reset hardware "
898 				    "during resume");
899 			}
900 		}
901 
902 		mpt->m_suspended = 0;
903 
904 		/*
905 		 * Reinitialize ioc
906 		 */
907 		if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
908 			mutex_exit(&mpt->m_mutex);
909 			if (mpt->m_options & MPTSAS_OPT_PM) {
910 				(void) pm_idle_component(dip, 0);
911 			}
912 			fm_panic("mptsas init chip fail during resume");
913 		}
914 		/*
915 		 * mptsas_update_driver_data needs interrupts so enable them
916 		 * first.
917 		 */
918 		MPTSAS_ENABLE_INTR(mpt);
919 		mptsas_update_driver_data(mpt);
920 
921 		/* start requests, if possible */
922 		mptsas_restart_hba(mpt);
923 
924 		mutex_exit(&mpt->m_mutex);
925 
926 		/*
927 		 * Restart watch thread
928 		 */
929 		mutex_enter(&mptsas_global_mutex);
930 		if (mptsas_timeout_id == 0) {
931 			mptsas_timeout_id = timeout(mptsas_watch, NULL,
932 			    mptsas_tick);
933 			mptsas_timeouts_enabled = 1;
934 		}
935 		mutex_exit(&mptsas_global_mutex);
936 
937 		/* report idle status to pm framework */
938 		if (mpt->m_options & MPTSAS_OPT_PM) {
939 			(void) pm_idle_component(dip, 0);
940 		}
941 
942 		return (DDI_SUCCESS);
943 
944 	default:
945 		return (DDI_FAILURE);
946 
947 	}
948 
949 	instance = ddi_get_instance(dip);
950 
951 	/*
952 	 * Allocate softc information.
953 	 */
954 	if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
955 		mptsas_log(NULL, CE_WARN,
956 		    "mptsas%d: cannot allocate soft state", instance);
957 		goto fail;
958 	}
959 
960 	mpt = ddi_get_soft_state(mptsas_state, instance);
961 
962 	if (mpt == NULL) {
963 		mptsas_log(NULL, CE_WARN,
964 		    "mptsas%d: cannot get soft state", instance);
965 		goto fail;
966 	}
967 
968 	/* Allocate a transport structure */
969 	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
970 	ASSERT(mpt->m_tran != NULL);
971 
972 	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
973 	scsi_size_clean(dip);
974 
975 	mpt->m_dip = dip;
976 	mpt->m_instance = instance;
977 
978 	/* Make a per-instance copy of the structures */
979 	mpt->m_io_dma_attr = mptsas_dma_attrs64;
980 	mpt->m_msg_dma_attr = mptsas_dma_attrs;
981 	mpt->m_reg_acc_attr = mptsas_dev_attr;
982 	mpt->m_dev_acc_attr = mptsas_dev_attr;
983 
984 	/*
985 	 * Initialize FMA
986 	 */
987 	mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
988 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
989 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
990 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
991 
992 	mptsas_fm_init(mpt);
993 
994 	if (pci_config_setup(mpt->m_dip,
995 	    &mpt->m_config_handle) != DDI_SUCCESS) {
996 		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
997 		goto fail;
998 	}
999 	config_setup++;
1000 
1001 	if (mptsas_alloc_handshake_msg(mpt,
1002 	    sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1003 		mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1004 		goto fail;
1005 	}
1006 
1007 	/*
1008 	 * This is a workaround for a XMITS ASIC bug which does not
1009 	 * drive the CBE upper bits.
1010 	 */
1011 	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
1012 	    PCI_STAT_PERROR) {
1013 		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
1014 		    PCI_STAT_PERROR);
1015 	}
1016 
1017 	/*
1018 	 * Setup configuration space
1019 	 */
1020 	if (mptsas_config_space_init(mpt) == FALSE) {
1021 		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1022 		goto fail;
1023 	}
1024 
1025 	if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1026 	    0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1027 		mptsas_log(mpt, CE_WARN, "map setup failed");
1028 		goto fail;
1029 	}
1030 	map_setup++;
1031 
1032 	/*
1033 	 * A taskq is created for dealing with the event handler
1034 	 */
1035 	if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1036 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1037 		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1038 		goto fail;
1039 	}
1040 	event_taskq_create++;
1041 
1042 	/*
1043 	 * A taskq is created for dealing with dr events
1044 	 */
1045 	if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1046 	    "mptsas_dr_taskq",
1047 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1048 		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1049 		    "failed");
1050 		goto fail;
1051 	}
1052 	dr_taskq_create++;
1053 
1054 	mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1055 	    0, "mptsas_doneq_thread_threshold_prop", 10);
1056 	mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1057 	    0, "mptsas_doneq_length_threshold_prop", 8);
1058 	mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1059 	    0, "mptsas_doneq_thread_n_prop", 8);
1060 
1061 	if (mpt->m_doneq_thread_n) {
1062 		cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1063 		mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1064 
1065 		mutex_enter(&mpt->m_doneq_mutex);
1066 		mpt->m_doneq_thread_id =
1067 		    kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1068 		    * mpt->m_doneq_thread_n, KM_SLEEP);
1069 
1070 		for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1071 			cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1072 			    CV_DRIVER, NULL);
1073 			mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1074 			    MUTEX_DRIVER, NULL);
1075 			mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1076 			mpt->m_doneq_thread_id[j].flag |=
1077 			    MPTSAS_DONEQ_THREAD_ACTIVE;
1078 			mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1079 			mpt->m_doneq_thread_id[j].arg.t = j;
1080 			mpt->m_doneq_thread_id[j].threadp =
1081 			    thread_create(NULL, 0, mptsas_doneq_thread,
1082 			    &mpt->m_doneq_thread_id[j].arg,
1083 			    0, &p0, TS_RUN, minclsyspri);
1084 			mpt->m_doneq_thread_id[j].donetail =
1085 			    &mpt->m_doneq_thread_id[j].doneq;
1086 			mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1087 		}
1088 		mutex_exit(&mpt->m_doneq_mutex);
1089 		doneq_thread_create++;
1090 	}
1091 
1092 	/* Get supported interrupt types */
1093 	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
1094 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
1095 		    "failed\n");
1096 		goto fail;
1097 	}
1098 
1099 	NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
1100 
1101 	if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
1102 		/*
1103 		 * Try MSI, but fall back to FIXED
1104 		 */
1105 		if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
1106 			NDBG0(("Using MSI interrupt type"));
1107 			mpt->m_intr_type = DDI_INTR_TYPE_MSI;
1108 			goto intr_done;
1109 		}
1110 	}
1111 
1112 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1113 
1114 		if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
1115 			NDBG0(("Using FIXED interrupt type"));
1116 			mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
1117 
1118 			goto intr_done;
1119 		}
1120 
1121 		NDBG0(("FIXED interrupt registration failed"));
1122 	}
1123 
1124 	goto fail;
1125 
1126 intr_done:
1127 	intr_added++;
1128 
1129 	/* Initialize mutex used in interrupt handler */
1130 	mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1131 	    DDI_INTR_PRI(mpt->m_intr_pri));
1132 	mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1133 	    DDI_INTR_PRI(mpt->m_intr_pri));
1134 	cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1135 	cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1136 	cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1137 	cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1138 	cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1139 	mutex_init_done++;
1140 
1141 	/*
1142 	 * Disable hardware interrupt since we're not ready to
1143 	 * handle it yet.
1144 	 */
1145 	MPTSAS_DISABLE_INTR(mpt);
1146 
1147 	/*
1148 	 * Enable interrupts
1149 	 */
1150 	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
1151 		/* Call ddi_intr_block_enable() for MSI interrupts */
1152 		(void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
1153 	} else {
1154 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
1155 		for (i = 0; i < mpt->m_intr_cnt; i++) {
1156 			(void) ddi_intr_enable(mpt->m_htable[i]);
1157 		}
1158 	}
1159 
1160 	mutex_enter(&mpt->m_mutex);
1161 	/*
1162 	 * Initialize power management component
1163 	 */
1164 	if (mpt->m_options & MPTSAS_OPT_PM) {
1165 		if (mptsas_init_pm(mpt)) {
1166 			mutex_exit(&mpt->m_mutex);
1167 			mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1168 			    "failed");
1169 			goto fail;
1170 		}
1171 	}
1172 
1173 	/*
1174 	 * Initialize chip
1175 	 */
1176 	if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1177 		mutex_exit(&mpt->m_mutex);
1178 		mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1179 		goto fail;
1180 	}
1181 	mutex_exit(&mpt->m_mutex);
1182 
1183 	/*
1184 	 * initialize SCSI HBA transport structure
1185 	 */
1186 	hba_tran->tran_hba_private	= mpt;
1187 	hba_tran->tran_tgt_private	= NULL;
1188 
1189 	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
1190 	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;
1191 
1192 	hba_tran->tran_start		= mptsas_scsi_start;
1193 	hba_tran->tran_reset		= mptsas_scsi_reset;
1194 	hba_tran->tran_abort		= mptsas_scsi_abort;
1195 	hba_tran->tran_getcap		= mptsas_scsi_getcap;
1196 	hba_tran->tran_setcap		= mptsas_scsi_setcap;
1197 	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
1198 	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;
1199 
1200 	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
1201 	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
1202 	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;
1203 
1204 	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
1205 	hba_tran->tran_get_name		= mptsas_get_name;
1206 
1207 	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
1208 	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
1209 	hba_tran->tran_bus_reset	= NULL;
1210 
1211 	hba_tran->tran_add_eventcall	= NULL;
1212 	hba_tran->tran_get_eventcookie	= NULL;
1213 	hba_tran->tran_post_event	= NULL;
1214 	hba_tran->tran_remove_eventcall	= NULL;
1215 
1216 	hba_tran->tran_bus_config	= mptsas_bus_config;
1217 
1218 	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
1219 
1220 	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
1221 		goto fail;
1222 	}
1223 
1224 	/*
1225 	 * Register the iport for multiple port HBA
1226 	 */
1227 	/*
1228 	 * initial value of mask is 0
1229 	 */
1230 	mutex_enter(&mpt->m_mutex);
1231 	for (i = 0; i < mpt->m_num_phys; i++) {
1232 		mptsas_phymask_t phy_mask = 0x0;
1233 		char phy_mask_name[MPTSAS_MAX_PHYS];
1234 		uint8_t current_port;
1235 
1236 		if (mpt->m_phy_info[i].attached_devhdl == 0)
1237 			continue;
1238 
1239 		bzero(phy_mask_name, sizeof (phy_mask_name));
1240 
1241 		current_port = mpt->m_phy_info[i].port_num;
1242 
1243 		if ((mask & (1 << i)) != 0)
1244 			continue;
1245 
1246 		for (j = 0; j < mpt->m_num_phys; j++) {
1247 			if (mpt->m_phy_info[j].attached_devhdl &&
1248 			    (mpt->m_phy_info[j].port_num == current_port)) {
1249 				phy_mask |= (1 << j);
1250 			}
1251 		}
1252 		mask = mask | phy_mask;
1253 
1254 		for (j = 0; j < mpt->m_num_phys; j++) {
1255 			if ((phy_mask >> j) & 0x01) {
1256 				mpt->m_phy_info[j].phy_mask = phy_mask;
1257 			}
1258 		}
1259 
1260 		(void) sprintf(phy_mask_name, "%x", phy_mask);
1261 
1262 		mutex_exit(&mpt->m_mutex);
1263 		/*
1264 		 * register a iport
1265 		 */
1266 		(void) scsi_hba_iport_register(dip, phy_mask_name);
1267 		mutex_enter(&mpt->m_mutex);
1268 	}
1269 	mutex_exit(&mpt->m_mutex);
1270 	/*
1271 	 * register a virtual port for RAID volume always
1272 	 */
1273 	(void) scsi_hba_iport_register(dip, "v0");
1274 	/*
1275 	 * All children of the HBA are iports. We need tran was cloned.
1276 	 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
1277 	 * inherited to iport's tran vector.
1278 	 */
1279 	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
1280 
1281 	if (scsi_hba_attach_setup(dip, &mpt->m_msg_dma_attr,
1282 	    hba_tran, tran_flags) != DDI_SUCCESS) {
1283 		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
1284 		goto fail;
1285 	}
1286 	hba_attach_setup++;
1287 
1288 	mpt->m_smptran = smp_hba_tran_alloc(dip);
1289 	ASSERT(mpt->m_smptran != NULL);
1290 	mpt->m_smptran->smp_tran_hba_private = mpt;
1291 	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
1292 	if (smp_hba_attach_setup(dip, mpt->m_smptran) != DDI_SUCCESS) {
1293 		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
1294 		goto fail;
1295 	}
1296 	smp_attach_setup++;
1297 
1298 	/*
1299 	 * Initialize smp hash table
1300 	 */
1301 	mptsas_hash_init(&mpt->m_active->m_smptbl);
1302 	mpt->m_smp_devhdl = 0xFFFF;
1303 
1304 	/*
1305 	 * create kmem cache for packets
1306 	 */
1307 	(void) sprintf(buf, "mptsas%d_cache", instance);
1308 	mpt->m_kmem_cache = kmem_cache_create(buf,
1309 	    sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
1310 	    mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
1311 	    NULL, (void *)mpt, NULL, 0);
1312 
1313 	if (mpt->m_kmem_cache == NULL) {
1314 		mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
1315 		goto fail;
1316 	}
1317 
1318 	/*
1319 	 * create kmem cache for extra SGL frames if SGL cannot
1320 	 * be accomodated into main request frame.
1321 	 */
1322 	(void) sprintf(buf, "mptsas%d_cache_frames", instance);
1323 	mpt->m_cache_frames = kmem_cache_create(buf,
1324 	    sizeof (mptsas_cache_frames_t), 8,
1325 	    mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
1326 	    NULL, (void *)mpt, NULL, 0);
1327 
1328 	if (mpt->m_cache_frames == NULL) {
1329 		mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
1330 		goto fail;
1331 	}
1332 
1333 	mpt->m_scsi_reset_delay	= ddi_prop_get_int(DDI_DEV_T_ANY,
1334 	    dip, 0, "scsi-reset-delay",	SCSI_DEFAULT_RESET_DELAY);
1335 	if (mpt->m_scsi_reset_delay == 0) {
1336 		mptsas_log(mpt, CE_NOTE,
1337 		    "scsi_reset_delay of 0 is not recommended,"
1338 		    " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1339 		mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1340 	}
1341 
1342 	/*
1343 	 * Initialize the wait and done FIFO queue
1344 	 */
1345 	mpt->m_donetail = &mpt->m_doneq;
1346 	mpt->m_waitqtail = &mpt->m_waitq;
1347 
1348 	mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1349 	mpt->m_tx_draining = 0;
1350 
1351 	/*
1352 	 * ioc cmd queue initialize
1353 	 */
1354 	mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1355 
1356 	mpt->m_dev_handle = 0xFFFF;
1357 
1358 	MPTSAS_ENABLE_INTR(mpt);
1359 
1360 	/*
1361 	 * enable event notification
1362 	 */
1363 	mutex_enter(&mpt->m_mutex);
1364 	if (mptsas_ioc_enable_event_notification(mpt)) {
1365 		mutex_exit(&mpt->m_mutex);
1366 		goto fail;
1367 	}
1368 	mutex_exit(&mpt->m_mutex);
1369 
1370 
1371 	/* Check all dma handles allocated in attach */
1372 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1373 	    != DDI_SUCCESS) ||
1374 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1375 	    != DDI_SUCCESS) ||
1376 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1377 	    != DDI_SUCCESS) ||
1378 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1379 	    != DDI_SUCCESS) ||
1380 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1381 	    != DDI_SUCCESS)) {
1382 		goto fail;
1383 	}
1384 
1385 	/* Check all acc handles allocated in attach */
1386 	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1387 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1388 	    != DDI_SUCCESS) ||
1389 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1390 	    != DDI_SUCCESS) ||
1391 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1392 	    != DDI_SUCCESS) ||
1393 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1394 	    != DDI_SUCCESS) ||
1395 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1396 	    != DDI_SUCCESS) ||
1397 	    (mptsas_check_acc_handle(mpt->m_config_handle)
1398 	    != DDI_SUCCESS)) {
1399 		goto fail;
1400 	}
1401 
1402 	/*
1403 	 * After this point, we are not going to fail the attach.
1404 	 */
1405 	/*
1406 	 * used for mptsas_watch
1407 	 */
1408 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
1409 	if (mptsas_head == NULL) {
1410 		mptsas_head = mpt;
1411 	} else {
1412 		mptsas_tail->m_next = mpt;
1413 	}
1414 	mptsas_tail = mpt;
1415 	rw_exit(&mptsas_global_rwlock);
1416 
1417 	mutex_enter(&mptsas_global_mutex);
1418 	if (mptsas_timeouts_enabled == 0) {
1419 		mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1420 		    dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1421 
1422 		mptsas_tick = mptsas_scsi_watchdog_tick *
1423 		    drv_usectohz((clock_t)1000000);
1424 
1425 		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1426 		mptsas_timeouts_enabled = 1;
1427 	}
1428 	mutex_exit(&mptsas_global_mutex);
1429 
1430 	/* Print message of HBA present */
1431 	ddi_report_dev(dip);
1432 
1433 	/* report idle status to pm framework */
1434 	if (mpt->m_options & MPTSAS_OPT_PM) {
1435 		(void) pm_idle_component(dip, 0);
1436 	}
1437 
1438 	return (DDI_SUCCESS);
1439 
1440 fail:
1441 	mptsas_log(mpt, CE_WARN, "attach failed");
1442 	mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1443 	ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1444 	if (mpt) {
1445 		mutex_enter(&mptsas_global_mutex);
1446 
1447 		if (mptsas_timeout_id && (mptsas_head == NULL)) {
1448 			timeout_id_t tid = mptsas_timeout_id;
1449 			mptsas_timeouts_enabled = 0;
1450 			mptsas_timeout_id = 0;
1451 			mutex_exit(&mptsas_global_mutex);
1452 			(void) untimeout(tid);
1453 			mutex_enter(&mptsas_global_mutex);
1454 		}
1455 		mutex_exit(&mptsas_global_mutex);
1456 		/* deallocate in reverse order */
1457 		if (mpt->m_cache_frames) {
1458 			kmem_cache_destroy(mpt->m_cache_frames);
1459 		}
1460 		if (mpt->m_kmem_cache) {
1461 			kmem_cache_destroy(mpt->m_kmem_cache);
1462 		}
1463 		if (hba_attach_setup) {
1464 			(void) scsi_hba_detach(dip);
1465 		}
1466 		if (smp_attach_setup) {
1467 			(void) smp_hba_detach(dip);
1468 		}
1469 		if (intr_added) {
1470 			mptsas_rem_intrs(mpt);
1471 		}
1472 		if (doneq_thread_create) {
1473 			mutex_enter(&mpt->m_doneq_mutex);
1474 			doneq_thread_num = mpt->m_doneq_thread_n;
1475 			for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1476 				mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1477 				mpt->m_doneq_thread_id[j].flag &=
1478 				    (~MPTSAS_DONEQ_THREAD_ACTIVE);
1479 				cv_signal(&mpt->m_doneq_thread_id[j].cv);
1480 				mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1481 			}
1482 			while (mpt->m_doneq_thread_n) {
1483 				cv_wait(&mpt->m_doneq_thread_cv,
1484 				    &mpt->m_doneq_mutex);
1485 			}
1486 			for (j = 0; j < doneq_thread_num; j++) {
1487 				cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1488 				mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1489 			}
1490 			kmem_free(mpt->m_doneq_thread_id,
1491 			    sizeof (mptsas_doneq_thread_list_t)
1492 			    * doneq_thread_num);
1493 			mutex_exit(&mpt->m_doneq_mutex);
1494 			cv_destroy(&mpt->m_doneq_thread_cv);
1495 			mutex_destroy(&mpt->m_doneq_mutex);
1496 		}
1497 		if (event_taskq_create) {
1498 			ddi_taskq_destroy(mpt->m_event_taskq);
1499 		}
1500 		if (dr_taskq_create) {
1501 			ddi_taskq_destroy(mpt->m_dr_taskq);
1502 		}
1503 		if (mutex_init_done) {
1504 			mutex_destroy(&mpt->m_tx_waitq_mutex);
1505 			mutex_destroy(&mpt->m_mutex);
1506 			cv_destroy(&mpt->m_cv);
1507 			cv_destroy(&mpt->m_passthru_cv);
1508 			cv_destroy(&mpt->m_fw_cv);
1509 			cv_destroy(&mpt->m_config_cv);
1510 			cv_destroy(&mpt->m_fw_diag_cv);
1511 		}
1512 		mptsas_free_handshake_msg(mpt);
1513 		mptsas_hba_fini(mpt);
1514 		if (map_setup) {
1515 			mptsas_cfg_fini(mpt);
1516 		}
1517 		if (config_setup) {
1518 			pci_config_teardown(&mpt->m_config_handle);
1519 		}
1520 		if (mpt->m_tran) {
1521 			scsi_hba_tran_free(mpt->m_tran);
1522 			mpt->m_tran = NULL;
1523 		}
1524 		if (mpt->m_smptran) {
1525 			smp_hba_tran_free(mpt->m_smptran);
1526 			mpt->m_smptran = NULL;
1527 		}
1528 		mptsas_fm_fini(mpt);
1529 		ddi_soft_state_free(mptsas_state, instance);
1530 		ddi_prop_remove_all(dip);
1531 	}
1532 	return (DDI_FAILURE);
1533 }
1534 
/*
 * DDI_SUSPEND handler: quiesce this instance so the system can power
 * down.  Cancels per-instance timeouts, stops the global watch/reset
 * threads once every instance is suspended, disables interrupts, syncs
 * the IR (RAID) configuration to firmware and drains the taskqs.
 * Returns DDI_SUCCESS in all cases (suspend of an iport node, an
 * unattached node, or an already-suspended instance is a no-op).
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport nodes have no hardware state of their own to suspend */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* nested suspend: just count it, the first one did the work */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	/*
	 * m_mutex is dropped around each untimeout(9F) call because the
	 * timeout handler may itself be blocked waiting for m_mutex;
	 * holding it across untimeout() could deadlock.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_pm_timeid != 0) {
		timeout_id_t tid = mpt->m_pm_timeid;
		mpt->m_pm_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		/*
		 * Report idle status for last ioctl since
		 * calls to pm_busy_component(9F) are stacked.
		 */
		(void) pm_idle_component(mpt->m_dip, 0);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	/* g != NULL after the loop means some instance is still active */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		/* same drop-lock-around-untimeout pattern as above */
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1651 
1652 #ifdef	__sparc
1653 /*ARGSUSED*/
1654 static int
1655 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1656 {
1657 	mptsas_t	*mpt;
1658 	scsi_hba_tran_t *tran;
1659 
1660 	/*
1661 	 * If this call is for iport, just return.
1662 	 */
1663 	if (scsi_hba_iport_unit_address(devi))
1664 		return (DDI_SUCCESS);
1665 
1666 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1667 		return (DDI_SUCCESS);
1668 
1669 	if ((mpt = TRAN2MPT(tran)) == NULL)
1670 		return (DDI_SUCCESS);
1671 
1672 	/*
1673 	 * Send RAID action system shutdown to sync IR.  Disable HBA
1674 	 * interrupts in hardware first.
1675 	 */
1676 	MPTSAS_DISABLE_INTR(mpt);
1677 	mptsas_raid_action_system_shutdown(mpt);
1678 
1679 	return (DDI_SUCCESS);
1680 }
1681 #else /* __sparc */
1682 /*
1683  * quiesce(9E) entry point.
1684  *
1685  * This function is called when the system is single-threaded at high
1686  * PIL with preemption disabled. Therefore, this function must not be
1687  * blocked.
1688  *
1689  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1690  * DDI_FAILURE indicates an error condition and should almost never happen.
1691  */
1692 static int
1693 mptsas_quiesce(dev_info_t *devi)
1694 {
1695 	mptsas_t	*mpt;
1696 	scsi_hba_tran_t *tran;
1697 
1698 	/*
1699 	 * If this call is for iport, just return.
1700 	 */
1701 	if (scsi_hba_iport_unit_address(devi))
1702 		return (DDI_SUCCESS);
1703 
1704 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1705 		return (DDI_SUCCESS);
1706 
1707 	if ((mpt = TRAN2MPT(tran)) == NULL)
1708 		return (DDI_SUCCESS);
1709 
1710 	/* Disable HBA interrupts in hardware */
1711 	MPTSAS_DISABLE_INTR(mpt);
1712 	/* Send RAID action system shutdonw to sync IR */
1713 	mptsas_raid_action_system_shutdown(mpt);
1714 
1715 	return (DDI_SUCCESS);
1716 }
1717 #endif	/* __sparc */
1718 
1719 /*
1720  * detach(9E).	Remove all device allocations and system resources;
1721  * disable device interrupts.
1722  * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1723  */
1724 static int
1725 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1726 {
1727 	/* CONSTCOND */
1728 	ASSERT(NO_COMPETING_THREADS);
1729 	NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1730 
1731 	switch (cmd) {
1732 	case DDI_DETACH:
1733 		return (mptsas_do_detach(devi));
1734 
1735 	case DDI_SUSPEND:
1736 		return (mptsas_suspend(devi));
1737 
1738 	default:
1739 		return (DDI_FAILURE);
1740 	}
1741 	/* NOTREACHED */
1742 }
1743 
1744 static int
1745 mptsas_do_detach(dev_info_t *dip)
1746 {
1747 	mptsas_t	*mpt, *m;
1748 	scsi_hba_tran_t	*tran;
1749 	mptsas_slots_t	*active;
1750 	int		circ = 0;
1751 	int		circ1 = 0;
1752 	mdi_pathinfo_t	*pip = NULL;
1753 	int		i;
1754 	int		doneq_thread_num = 0;
1755 
1756 	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));
1757 
1758 	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
1759 		return (DDI_FAILURE);
1760 
1761 	mpt = TRAN2MPT(tran);
1762 	if (!mpt) {
1763 		return (DDI_FAILURE);
1764 	}
1765 	/*
1766 	 * Still have pathinfo child, should not detach mpt driver
1767 	 */
1768 	if (scsi_hba_iport_unit_address(dip)) {
1769 		if (mpt->m_mpxio_enable) {
1770 			/*
1771 			 * MPxIO enabled for the iport
1772 			 */
1773 			ndi_devi_enter(scsi_vhci_dip, &circ1);
1774 			ndi_devi_enter(dip, &circ);
1775 			while (pip = mdi_get_next_client_path(dip, NULL)) {
1776 				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
1777 					continue;
1778 				}
1779 				ndi_devi_exit(dip, circ);
1780 				ndi_devi_exit(scsi_vhci_dip, circ1);
1781 				NDBG12(("detach failed because of "
1782 				    "outstanding path info"));
1783 				return (DDI_FAILURE);
1784 			}
1785 			ndi_devi_exit(dip, circ);
1786 			ndi_devi_exit(scsi_vhci_dip, circ1);
1787 			(void) mdi_phci_unregister(dip, 0);
1788 		}
1789 
1790 		ddi_prop_remove_all(dip);
1791 
1792 		return (DDI_SUCCESS);
1793 	}
1794 
1795 	/* Make sure power level is D0 before accessing registers */
1796 	if (mpt->m_options & MPTSAS_OPT_PM) {
1797 		(void) pm_busy_component(dip, 0);
1798 		if (mpt->m_power_level != PM_LEVEL_D0) {
1799 			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
1800 			    DDI_SUCCESS) {
1801 				mptsas_log(mpt, CE_WARN,
1802 				    "mptsas%d: Raise power request failed.",
1803 				    mpt->m_instance);
1804 				(void) pm_idle_component(dip, 0);
1805 				return (DDI_FAILURE);
1806 			}
1807 		}
1808 	}
1809 
1810 	mutex_enter(&mpt->m_mutex);
1811 	MPTSAS_DISABLE_INTR(mpt);
1812 	mutex_exit(&mpt->m_mutex);
1813 	mptsas_rem_intrs(mpt);
1814 	ddi_taskq_destroy(mpt->m_event_taskq);
1815 	ddi_taskq_destroy(mpt->m_dr_taskq);
1816 
1817 	if (mpt->m_doneq_thread_n) {
1818 		mutex_enter(&mpt->m_doneq_mutex);
1819 		doneq_thread_num = mpt->m_doneq_thread_n;
1820 		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
1821 			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
1822 			mpt->m_doneq_thread_id[i].flag &=
1823 			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
1824 			cv_signal(&mpt->m_doneq_thread_id[i].cv);
1825 			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
1826 		}
1827 		while (mpt->m_doneq_thread_n) {
1828 			cv_wait(&mpt->m_doneq_thread_cv,
1829 			    &mpt->m_doneq_mutex);
1830 		}
1831 		for (i = 0;  i < doneq_thread_num; i++) {
1832 			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
1833 			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
1834 		}
1835 		kmem_free(mpt->m_doneq_thread_id,
1836 		    sizeof (mptsas_doneq_thread_list_t)
1837 		    * doneq_thread_num);
1838 		mutex_exit(&mpt->m_doneq_mutex);
1839 		cv_destroy(&mpt->m_doneq_thread_cv);
1840 		mutex_destroy(&mpt->m_doneq_mutex);
1841 	}
1842 
1843 	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);
1844 
1845 	/*
1846 	 * Remove device instance from the global linked list
1847 	 */
1848 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
1849 	if (mptsas_head == mpt) {
1850 		m = mptsas_head = mpt->m_next;
1851 	} else {
1852 		for (m = mptsas_head; m != NULL; m = m->m_next) {
1853 			if (m->m_next == mpt) {
1854 				m->m_next = mpt->m_next;
1855 				break;
1856 			}
1857 		}
1858 		if (m == NULL) {
1859 			mptsas_log(mpt, CE_PANIC, "Not in softc list!");
1860 		}
1861 	}
1862 
1863 	if (mptsas_tail == mpt) {
1864 		mptsas_tail = m;
1865 	}
1866 	rw_exit(&mptsas_global_rwlock);
1867 
1868 	/*
1869 	 * Cancel timeout threads for this mpt
1870 	 */
1871 	mutex_enter(&mpt->m_mutex);
1872 	if (mpt->m_quiesce_timeid) {
1873 		timeout_id_t tid = mpt->m_quiesce_timeid;
1874 		mpt->m_quiesce_timeid = 0;
1875 		mutex_exit(&mpt->m_mutex);
1876 		(void) untimeout(tid);
1877 		mutex_enter(&mpt->m_mutex);
1878 	}
1879 
1880 	if (mpt->m_restart_cmd_timeid) {
1881 		timeout_id_t tid = mpt->m_restart_cmd_timeid;
1882 		mpt->m_restart_cmd_timeid = 0;
1883 		mutex_exit(&mpt->m_mutex);
1884 		(void) untimeout(tid);
1885 		mutex_enter(&mpt->m_mutex);
1886 	}
1887 
1888 	if (mpt->m_pm_timeid != 0) {
1889 		timeout_id_t tid = mpt->m_pm_timeid;
1890 		mpt->m_pm_timeid = 0;
1891 		mutex_exit(&mpt->m_mutex);
1892 		(void) untimeout(tid);
1893 		/*
1894 		 * Report idle status for last ioctl since
1895 		 * calls to pm_busy_component(9F) are stacked.
1896 		 */
1897 		(void) pm_idle_component(mpt->m_dip, 0);
1898 		mutex_enter(&mpt->m_mutex);
1899 	}
1900 	mutex_exit(&mpt->m_mutex);
1901 
1902 	/*
1903 	 * last mpt? ... if active, CANCEL watch threads.
1904 	 */
1905 	mutex_enter(&mptsas_global_mutex);
1906 	if (mptsas_head == NULL) {
1907 		timeout_id_t tid;
1908 		/*
1909 		 * Clear mptsas_timeouts_enable so that the watch thread
1910 		 * gets restarted on DDI_ATTACH
1911 		 */
1912 		mptsas_timeouts_enabled = 0;
1913 		if (mptsas_timeout_id) {
1914 			tid = mptsas_timeout_id;
1915 			mptsas_timeout_id = 0;
1916 			mutex_exit(&mptsas_global_mutex);
1917 			(void) untimeout(tid);
1918 			mutex_enter(&mptsas_global_mutex);
1919 		}
1920 		if (mptsas_reset_watch) {
1921 			tid = mptsas_reset_watch;
1922 			mptsas_reset_watch = 0;
1923 			mutex_exit(&mptsas_global_mutex);
1924 			(void) untimeout(tid);
1925 			mutex_enter(&mptsas_global_mutex);
1926 		}
1927 	}
1928 	mutex_exit(&mptsas_global_mutex);
1929 
1930 	/*
1931 	 * Delete nt_active.
1932 	 */
1933 	active = mpt->m_active;
1934 	mutex_enter(&mpt->m_mutex);
1935 	mptsas_hash_uninit(&active->m_smptbl, sizeof (mptsas_smp_t));
1936 	mutex_exit(&mpt->m_mutex);
1937 
1938 	if (active) {
1939 		kmem_free(active, active->m_size);
1940 		mpt->m_active = NULL;
1941 	}
1942 
1943 	/* deallocate everything that was allocated in mptsas_attach */
1944 	mptsas_fm_fini(mpt);
1945 	kmem_cache_destroy(mpt->m_cache_frames);
1946 	kmem_cache_destroy(mpt->m_kmem_cache);
1947 
1948 	(void) scsi_hba_detach(dip);
1949 	(void) smp_hba_detach(dip);
1950 	mptsas_free_handshake_msg(mpt);
1951 	mptsas_hba_fini(mpt);
1952 	mptsas_cfg_fini(mpt);
1953 
1954 	/* Lower the power informing PM Framework */
1955 	if (mpt->m_options & MPTSAS_OPT_PM) {
1956 		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
1957 			mptsas_log(mpt, CE_WARN,
1958 			    "!mptsas%d: Lower power request failed "
1959 			    "during detach, ignoring.",
1960 			    mpt->m_instance);
1961 	}
1962 
1963 	mutex_destroy(&mpt->m_tx_waitq_mutex);
1964 	mutex_destroy(&mpt->m_mutex);
1965 	cv_destroy(&mpt->m_cv);
1966 	cv_destroy(&mpt->m_passthru_cv);
1967 	cv_destroy(&mpt->m_fw_cv);
1968 	cv_destroy(&mpt->m_config_cv);
1969 	cv_destroy(&mpt->m_fw_diag_cv);
1970 
1971 	pci_config_teardown(&mpt->m_config_handle);
1972 	if (mpt->m_tran) {
1973 		scsi_hba_tran_free(mpt->m_tran);
1974 		mpt->m_tran = NULL;
1975 	}
1976 
1977 	if (mpt->m_smptran) {
1978 		smp_hba_tran_free(mpt->m_smptran);
1979 		mpt->m_smptran = NULL;
1980 	}
1981 
1982 	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
1983 	ddi_prop_remove_all(dip);
1984 
1985 	return (DDI_SUCCESS);
1986 }
1987 
1988 static int
1989 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
1990 {
1991 	ddi_dma_attr_t		task_dma_attrs;
1992 	ddi_dma_cookie_t	tmp_dma_cookie;
1993 	size_t			alloc_len;
1994 	uint_t			ncookie;
1995 
1996 	/* allocate Task Management ddi_dma resources */
1997 	task_dma_attrs = mpt->m_msg_dma_attr;
1998 	task_dma_attrs.dma_attr_sgllen = 1;
1999 	task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2000 
2001 	if (ddi_dma_alloc_handle(mpt->m_dip, &task_dma_attrs,
2002 	    DDI_DMA_SLEEP, NULL, &mpt->m_hshk_dma_hdl) != DDI_SUCCESS) {
2003 		mpt->m_hshk_dma_hdl = NULL;
2004 		return (DDI_FAILURE);
2005 	}
2006 
2007 	if (ddi_dma_mem_alloc(mpt->m_hshk_dma_hdl, alloc_size,
2008 	    &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2009 	    &mpt->m_hshk_memp, &alloc_len, &mpt->m_hshk_acc_hdl)
2010 	    != DDI_SUCCESS) {
2011 		ddi_dma_free_handle(&mpt->m_hshk_dma_hdl);
2012 		mpt->m_hshk_dma_hdl = NULL;
2013 		return (DDI_FAILURE);
2014 	}
2015 
2016 	if (ddi_dma_addr_bind_handle(mpt->m_hshk_dma_hdl, NULL,
2017 	    mpt->m_hshk_memp, alloc_len, (DDI_DMA_RDWR | DDI_DMA_CONSISTENT),
2018 	    DDI_DMA_SLEEP, NULL, &tmp_dma_cookie, &ncookie)
2019 	    != DDI_DMA_MAPPED) {
2020 		(void) ddi_dma_mem_free(&mpt->m_hshk_acc_hdl);
2021 		ddi_dma_free_handle(&mpt->m_hshk_dma_hdl);
2022 		mpt->m_hshk_dma_hdl = NULL;
2023 		return (DDI_FAILURE);
2024 	}
2025 	mpt->m_hshk_dma_size = alloc_size;
2026 	return (DDI_SUCCESS);
2027 }
2028 
2029 static void
2030 mptsas_free_handshake_msg(mptsas_t *mpt)
2031 {
2032 	if (mpt->m_hshk_dma_hdl != NULL) {
2033 		(void) ddi_dma_unbind_handle(mpt->m_hshk_dma_hdl);
2034 		(void) ddi_dma_mem_free(&mpt->m_hshk_acc_hdl);
2035 		ddi_dma_free_handle(&mpt->m_hshk_dma_hdl);
2036 		mpt->m_hshk_dma_hdl = NULL;
2037 		mpt->m_hshk_dma_size = 0;
2038 	}
2039 }
2040 
/*
 * power(9E) entry point.
 *
 * Changes the power level of the HBA node (iport children are accepted
 * trivially).  Raising to PM_LEVEL_D0 polls the IOC Doorbell for up to
 * 30 seconds (3000 polls x 10ms) waiting for it to leave the RESET
 * state, then hard resets it if it still is not OPERATIONAL.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	/* Power management applies to the HBA node, not iport children. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}

	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset.
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2112 
2113 /*
2114  * Initialize configuration space and figure out which
2115  * chip and revison of the chip the mpt driver is using.
2116  */
int
mptsas_config_space_init(mptsas_t *mpt)
{
	ushort_t	caps_ptr, cap, cap_count;

	NDBG0(("mptsas_config_space_init"));

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team.  This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	/*
	 * Check if capabilities list is supported and if so,
	 * get initial capabilities pointer and clear bits 0,1.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
	    & PCI_STAT_CAP) {
		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
		    PCI_CONF_CAP_PTR), 4);
	} else {
		caps_ptr = PCI_CAP_NEXT_PTR_NULL;
	}

	/*
	 * Walk capabilities if supported.
	 */
	for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {

		/*
		 * Check that we haven't exceeded the maximum number of
		 * capabilities and that the pointer is in a valid range.
		 * The 48-entry cap bounds the walk so a corrupt (cyclic)
		 * capability chain cannot hang attach.
		 */
		if (++cap_count > 48) {
			mptsas_log(mpt, CE_WARN,
			    "too many device capabilities.\n");
			return (FALSE);
		}
		/*
		 * Capabilities must live above the standard 64-byte
		 * PCI configuration header.
		 */
		if (caps_ptr < 64) {
			mptsas_log(mpt, CE_WARN,
			    "capabilities pointer 0x%x out of range.\n",
			    caps_ptr);
			return (FALSE);
		}

		/*
		 * Get next capability and check that it is valid.
		 * For now, we only support power management.
		 */
		cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
		switch (cap) {
			case PCI_CAP_ID_PM:
				mptsas_log(mpt, CE_NOTE,
				    "?mptsas%d supports power management.\n",
				    mpt->m_instance);
				mpt->m_options |= MPTSAS_OPT_PM;

				/* Save PMCSR offset */
				mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
				break;

			/*
			 * 0x5 is Message signaled interrupts and 0x7
			 * is pci-x capable.  Both are unsupported for now
			 * but supported by the 1030 chip so we don't
			 * need to keep printing out the notice.
			 * 0x10 is PCI-E support (1064E/1068E)
			 * 0x11 is MSIX supported by the 1064/1068
			 */
			case 0x5:
			case 0x7:
			case 0x10:
			case 0x11:
				break;
			default:
				mptsas_log(mpt, CE_NOTE,
				    "?mptsas%d unrecognized capability "
				    "0x%x.\n", mpt->m_instance, cap);
			break;
		}

		/*
		 * Get next capabilities pointer and clear bits 0,1.
		 * (The low two bits of the next pointer are reserved.)
		 */
		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
		    (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
	}

	return (TRUE);
}
2229 
2230 static void
2231 mptsas_setup_cmd_reg(mptsas_t *mpt)
2232 {
2233 	ushort_t	cmdreg;
2234 
2235 	/*
2236 	 * Set the command register to the needed values.
2237 	 */
2238 	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2239 	cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2240 	    PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2241 	cmdreg &= ~PCI_COMM_IO;
2242 	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2243 }
2244 
2245 static void
2246 mptsas_disable_bus_master(mptsas_t *mpt)
2247 {
2248 	ushort_t	cmdreg;
2249 
2250 	/*
2251 	 * Clear the master enable bit in the PCI command register.
2252 	 * This prevents any bus mastering activity like DMA.
2253 	 */
2254 	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2255 	cmdreg &= ~PCI_COMM_ME;
2256 	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2257 }
2258 
2259 int
2260 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2261 {
2262 	ddi_dma_attr_t	attrs;
2263 	uint_t		ncookie;
2264 	size_t		alloc_len;
2265 
2266 	attrs = mpt->m_io_dma_attr;
2267 	attrs.dma_attr_sgllen = 1;
2268 
2269 	ASSERT(dma_statep != NULL);
2270 
2271 	if (ddi_dma_alloc_handle(mpt->m_dip, &attrs,
2272 	    DDI_DMA_SLEEP, NULL, &dma_statep->handle) != DDI_SUCCESS) {
2273 		mptsas_log(mpt, CE_WARN,
2274 		    "unable to allocate dma handle.");
2275 		return (DDI_FAILURE);
2276 	}
2277 
2278 	if (ddi_dma_mem_alloc(dma_statep->handle, dma_statep->size,
2279 	    &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2280 	    &dma_statep->memp, &alloc_len, &dma_statep->accessp) !=
2281 	    DDI_SUCCESS) {
2282 		ddi_dma_free_handle(&dma_statep->handle);
2283 		dma_statep->handle = NULL;
2284 		mptsas_log(mpt, CE_WARN,
2285 		    "unable to allocate memory for dma xfer.");
2286 		return (DDI_FAILURE);
2287 	}
2288 
2289 	if (ddi_dma_addr_bind_handle(dma_statep->handle, NULL, dma_statep->memp,
2290 	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2291 	    NULL, &dma_statep->cookie, &ncookie) != DDI_DMA_MAPPED) {
2292 		ddi_dma_mem_free(&dma_statep->accessp);
2293 		dma_statep->accessp = NULL;
2294 		ddi_dma_free_handle(&dma_statep->handle);
2295 		dma_statep->handle = NULL;
2296 		mptsas_log(mpt, CE_WARN, "unable to bind DMA resources.");
2297 		return (DDI_FAILURE);
2298 	}
2299 	return (DDI_SUCCESS);
2300 }
2301 
2302 void
2303 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2304 {
2305 	ASSERT(dma_statep != NULL);
2306 	if (dma_statep->handle != NULL) {
2307 		(void) ddi_dma_unbind_handle(dma_statep->handle);
2308 		(void) ddi_dma_mem_free(&dma_statep->accessp);
2309 		ddi_dma_free_handle(&dma_statep->handle);
2310 	}
2311 }
2312 
2313 int
2314 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2315 {
2316 	ddi_dma_attr_t		attrs;
2317 	ddi_dma_handle_t	dma_handle;
2318 	caddr_t			memp;
2319 	uint_t			ncookie;
2320 	ddi_dma_cookie_t	cookie;
2321 	ddi_acc_handle_t	accessp;
2322 	size_t			alloc_len;
2323 	int			rval;
2324 
2325 	ASSERT(mutex_owned(&mpt->m_mutex));
2326 
2327 	attrs = mpt->m_msg_dma_attr;
2328 	attrs.dma_attr_sgllen = 1;
2329 	attrs.dma_attr_granular = size;
2330 
2331 	if (ddi_dma_alloc_handle(mpt->m_dip, &attrs,
2332 	    DDI_DMA_SLEEP, NULL, &dma_handle) != DDI_SUCCESS) {
2333 		mptsas_log(mpt, CE_WARN,
2334 		    "unable to allocate dma handle.");
2335 		return (DDI_FAILURE);
2336 	}
2337 
2338 	if (ddi_dma_mem_alloc(dma_handle, size,
2339 	    &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
2340 	    &memp, &alloc_len, &accessp) != DDI_SUCCESS) {
2341 		ddi_dma_free_handle(&dma_handle);
2342 		mptsas_log(mpt, CE_WARN,
2343 		    "unable to allocate request structure.");
2344 		return (DDI_FAILURE);
2345 	}
2346 
2347 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, memp,
2348 	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2349 	    NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2350 		(void) ddi_dma_mem_free(&accessp);
2351 		ddi_dma_free_handle(&dma_handle);
2352 		mptsas_log(mpt, CE_WARN, "unable to bind DMA resources.");
2353 		return (DDI_FAILURE);
2354 	}
2355 
2356 	rval = (*callback) (mpt, memp, var, accessp);
2357 
2358 	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2359 	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2360 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2361 		rval = DDI_FAILURE;
2362 	}
2363 
2364 	if (dma_handle != NULL) {
2365 		(void) ddi_dma_unbind_handle(dma_handle);
2366 		(void) ddi_dma_mem_free(&accessp);
2367 		ddi_dma_free_handle(&dma_handle);
2368 	}
2369 
2370 	return (rval);
2371 
2372 }
2373 
2374 static int
2375 mptsas_alloc_request_frames(mptsas_t *mpt)
2376 {
2377 	ddi_dma_attr_t		frame_dma_attrs;
2378 	caddr_t			memp;
2379 	uint_t			ncookie;
2380 	ddi_dma_cookie_t	cookie;
2381 	size_t			alloc_len;
2382 	size_t			mem_size;
2383 
2384 	/*
2385 	 * The size of the request frame pool is:
2386 	 *   Number of Request Frames * Request Frame Size
2387 	 */
2388 	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2389 
2390 	/*
2391 	 * set the DMA attributes.  System Request Message Frames must be
2392 	 * aligned on a 16-byte boundry.
2393 	 */
2394 	frame_dma_attrs = mpt->m_msg_dma_attr;
2395 	frame_dma_attrs.dma_attr_align = 16;
2396 	frame_dma_attrs.dma_attr_sgllen = 1;
2397 
2398 	/*
2399 	 * allocate the request frame pool.
2400 	 */
2401 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2402 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_req_frame_hdl) != DDI_SUCCESS) {
2403 		mptsas_log(mpt, CE_WARN,
2404 		    "Unable to allocate dma handle.");
2405 		return (DDI_FAILURE);
2406 	}
2407 
2408 	if (ddi_dma_mem_alloc(mpt->m_dma_req_frame_hdl,
2409 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2410 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_req_frame_hdl)
2411 	    != DDI_SUCCESS) {
2412 		ddi_dma_free_handle(&mpt->m_dma_req_frame_hdl);
2413 		mpt->m_dma_req_frame_hdl = NULL;
2414 		mptsas_log(mpt, CE_WARN,
2415 		    "Unable to allocate request frames.");
2416 		return (DDI_FAILURE);
2417 	}
2418 
2419 	if (ddi_dma_addr_bind_handle(mpt->m_dma_req_frame_hdl, NULL,
2420 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2421 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2422 		(void) ddi_dma_mem_free(&mpt->m_acc_req_frame_hdl);
2423 		ddi_dma_free_handle(&mpt->m_dma_req_frame_hdl);
2424 		mpt->m_dma_req_frame_hdl = NULL;
2425 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2426 		return (DDI_FAILURE);
2427 	}
2428 
2429 	/*
2430 	 * Store the request frame memory address.  This chip uses this
2431 	 * address to dma to and from the driver's frame.  The second
2432 	 * address is the address mpt uses to fill in the frame.
2433 	 */
2434 	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2435 	mpt->m_req_frame = memp;
2436 
2437 	/*
2438 	 * Clear the request frame pool.
2439 	 */
2440 	bzero(mpt->m_req_frame, alloc_len);
2441 
2442 	return (DDI_SUCCESS);
2443 }
2444 
2445 static int
2446 mptsas_alloc_reply_frames(mptsas_t *mpt)
2447 {
2448 	ddi_dma_attr_t		frame_dma_attrs;
2449 	caddr_t			memp;
2450 	uint_t			ncookie;
2451 	ddi_dma_cookie_t	cookie;
2452 	size_t			alloc_len;
2453 	size_t			mem_size;
2454 
2455 	/*
2456 	 * The size of the reply frame pool is:
2457 	 *   Number of Reply Frames * Reply Frame Size
2458 	 */
2459 	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2460 
2461 	/*
2462 	 * set the DMA attributes.   System Reply Message Frames must be
2463 	 * aligned on a 4-byte boundry.  This is the default.
2464 	 */
2465 	frame_dma_attrs = mpt->m_msg_dma_attr;
2466 	frame_dma_attrs.dma_attr_sgllen = 1;
2467 
2468 	/*
2469 	 * allocate the reply frame pool
2470 	 */
2471 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2472 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_reply_frame_hdl) != DDI_SUCCESS) {
2473 		mptsas_log(mpt, CE_WARN,
2474 		    "Unable to allocate dma handle.");
2475 		return (DDI_FAILURE);
2476 	}
2477 
2478 	if (ddi_dma_mem_alloc(mpt->m_dma_reply_frame_hdl,
2479 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2480 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_reply_frame_hdl)
2481 	    != DDI_SUCCESS) {
2482 		ddi_dma_free_handle(&mpt->m_dma_reply_frame_hdl);
2483 		mpt->m_dma_reply_frame_hdl = NULL;
2484 		mptsas_log(mpt, CE_WARN,
2485 		    "Unable to allocate reply frames.");
2486 		return (DDI_FAILURE);
2487 	}
2488 
2489 	if (ddi_dma_addr_bind_handle(mpt->m_dma_reply_frame_hdl, NULL,
2490 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2491 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2492 		(void) ddi_dma_mem_free(&mpt->m_acc_reply_frame_hdl);
2493 		ddi_dma_free_handle(&mpt->m_dma_reply_frame_hdl);
2494 		mpt->m_dma_reply_frame_hdl = NULL;
2495 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2496 		return (DDI_FAILURE);
2497 	}
2498 
2499 	/*
2500 	 * Store the reply frame memory address.  This chip uses this
2501 	 * address to dma to and from the driver's frame.  The second
2502 	 * address is the address mpt uses to process the frame.
2503 	 */
2504 	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2505 	mpt->m_reply_frame = memp;
2506 
2507 	/*
2508 	 * Clear the reply frame pool.
2509 	 */
2510 	bzero(mpt->m_reply_frame, alloc_len);
2511 
2512 	return (DDI_SUCCESS);
2513 }
2514 
2515 static int
2516 mptsas_alloc_free_queue(mptsas_t *mpt)
2517 {
2518 	ddi_dma_attr_t		frame_dma_attrs;
2519 	caddr_t			memp;
2520 	uint_t			ncookie;
2521 	ddi_dma_cookie_t	cookie;
2522 	size_t			alloc_len;
2523 	size_t			mem_size;
2524 
2525 	/*
2526 	 * The reply free queue size is:
2527 	 *   Reply Free Queue Depth * 4
2528 	 * The "4" is the size of one 32 bit address (low part of 64-bit
2529 	 *   address)
2530 	 */
2531 	mem_size = mpt->m_free_queue_depth * 4;
2532 
2533 	/*
2534 	 * set the DMA attributes  The Reply Free Queue must be aligned on a
2535 	 * 16-byte boundry.
2536 	 */
2537 	frame_dma_attrs = mpt->m_msg_dma_attr;
2538 	frame_dma_attrs.dma_attr_align = 16;
2539 	frame_dma_attrs.dma_attr_sgllen = 1;
2540 
2541 	/*
2542 	 * allocate the reply free queue
2543 	 */
2544 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2545 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_free_queue_hdl) != DDI_SUCCESS) {
2546 		mptsas_log(mpt, CE_WARN,
2547 		    "Unable to allocate dma handle.");
2548 		return (DDI_FAILURE);
2549 	}
2550 
2551 	if (ddi_dma_mem_alloc(mpt->m_dma_free_queue_hdl,
2552 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2553 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_free_queue_hdl)
2554 	    != DDI_SUCCESS) {
2555 		ddi_dma_free_handle(&mpt->m_dma_free_queue_hdl);
2556 		mpt->m_dma_free_queue_hdl = NULL;
2557 		mptsas_log(mpt, CE_WARN,
2558 		    "Unable to allocate free queue.");
2559 		return (DDI_FAILURE);
2560 	}
2561 
2562 	if (ddi_dma_addr_bind_handle(mpt->m_dma_free_queue_hdl, NULL,
2563 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2564 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2565 		(void) ddi_dma_mem_free(&mpt->m_acc_free_queue_hdl);
2566 		ddi_dma_free_handle(&mpt->m_dma_free_queue_hdl);
2567 		mpt->m_dma_free_queue_hdl = NULL;
2568 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2569 		return (DDI_FAILURE);
2570 	}
2571 
2572 	/*
2573 	 * Store the reply free queue memory address.  This chip uses this
2574 	 * address to read from the reply free queue.  The second address
2575 	 * is the address mpt uses to manage the queue.
2576 	 */
2577 	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2578 	mpt->m_free_queue = memp;
2579 
2580 	/*
2581 	 * Clear the reply free queue memory.
2582 	 */
2583 	bzero(mpt->m_free_queue, alloc_len);
2584 
2585 	return (DDI_SUCCESS);
2586 }
2587 
2588 static int
2589 mptsas_alloc_post_queue(mptsas_t *mpt)
2590 {
2591 	ddi_dma_attr_t		frame_dma_attrs;
2592 	caddr_t			memp;
2593 	uint_t			ncookie;
2594 	ddi_dma_cookie_t	cookie;
2595 	size_t			alloc_len;
2596 	size_t			mem_size;
2597 
2598 	/*
2599 	 * The reply descriptor post queue size is:
2600 	 *   Reply Descriptor Post Queue Depth * 8
2601 	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2602 	 */
2603 	mem_size = mpt->m_post_queue_depth * 8;
2604 
2605 	/*
2606 	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
2607 	 * aligned on a 16-byte boundry.
2608 	 */
2609 	frame_dma_attrs = mpt->m_msg_dma_attr;
2610 	frame_dma_attrs.dma_attr_align = 16;
2611 	frame_dma_attrs.dma_attr_sgllen = 1;
2612 
2613 	/*
2614 	 * allocate the reply post queue
2615 	 */
2616 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attrs,
2617 	    DDI_DMA_SLEEP, NULL, &mpt->m_dma_post_queue_hdl) != DDI_SUCCESS) {
2618 		mptsas_log(mpt, CE_WARN,
2619 		    "Unable to allocate dma handle.");
2620 		return (DDI_FAILURE);
2621 	}
2622 
2623 	if (ddi_dma_mem_alloc(mpt->m_dma_post_queue_hdl,
2624 	    mem_size, &mpt->m_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2625 	    NULL, (caddr_t *)&memp, &alloc_len, &mpt->m_acc_post_queue_hdl)
2626 	    != DDI_SUCCESS) {
2627 		ddi_dma_free_handle(&mpt->m_dma_post_queue_hdl);
2628 		mpt->m_dma_post_queue_hdl = NULL;
2629 		mptsas_log(mpt, CE_WARN,
2630 		    "Unable to allocate post queue.");
2631 		return (DDI_FAILURE);
2632 	}
2633 
2634 	if (ddi_dma_addr_bind_handle(mpt->m_dma_post_queue_hdl, NULL,
2635 	    memp, alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2636 	    DDI_DMA_SLEEP, NULL, &cookie, &ncookie) != DDI_DMA_MAPPED) {
2637 		(void) ddi_dma_mem_free(&mpt->m_acc_post_queue_hdl);
2638 		ddi_dma_free_handle(&mpt->m_dma_post_queue_hdl);
2639 		mpt->m_dma_post_queue_hdl = NULL;
2640 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources.");
2641 		return (DDI_FAILURE);
2642 	}
2643 
2644 	/*
2645 	 * Store the reply descriptor post queue memory address.  This chip
2646 	 * uses this address to write to the reply descriptor post queue.  The
2647 	 * second address is the address mpt uses to manage the queue.
2648 	 */
2649 	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2650 	mpt->m_post_queue = memp;
2651 
2652 	/*
2653 	 * Clear the reply post queue memory.
2654 	 */
2655 	bzero(mpt->m_post_queue, alloc_len);
2656 
2657 	return (DDI_SUCCESS);
2658 }
2659 
2660 static int
2661 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2662 {
2663 	mptsas_cache_frames_t	*frames = NULL;
2664 	if (cmd->cmd_extra_frames == NULL) {
2665 		frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2666 		if (frames == NULL) {
2667 			return (DDI_FAILURE);
2668 		}
2669 		cmd->cmd_extra_frames = frames;
2670 	}
2671 	return (DDI_SUCCESS);
2672 }
2673 
2674 static void
2675 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2676 {
2677 	if (cmd->cmd_extra_frames) {
2678 		kmem_cache_free(mpt->m_cache_frames,
2679 		    (void *)cmd->cmd_extra_frames);
2680 		cmd->cmd_extra_frames = NULL;
2681 	}
2682 }
2683 
/*
 * Release the device register mapping (m_datap) acquired at attach
 * time; counterpart of the register setup done during config init.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2690 
/*
 * Tear down HBA DMA resources: first quiesce the chip's bus mastering,
 * then unbind/free each message pool and queue in turn (request frames,
 * reply frames, reply free queue, reply post queue), and finally free
 * the reply-handler argument array.  Each stanza is guarded so this is
 * safe to call with a partially initialized softc.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Disable any bus mastering ability (i.e: DMA) prior to freeing any
	 * allocated DMA resources.
	 */
	if (mpt->m_config_handle != NULL)
		mptsas_disable_bus_master(mpt);

	/*
	 * Free up any allocated memory
	 */
	if (mpt->m_dma_req_frame_hdl != NULL) {
		(void) ddi_dma_unbind_handle(mpt->m_dma_req_frame_hdl);
		ddi_dma_mem_free(&mpt->m_acc_req_frame_hdl);
		ddi_dma_free_handle(&mpt->m_dma_req_frame_hdl);
		mpt->m_dma_req_frame_hdl = NULL;
	}

	if (mpt->m_dma_reply_frame_hdl != NULL) {
		(void) ddi_dma_unbind_handle(mpt->m_dma_reply_frame_hdl);
		ddi_dma_mem_free(&mpt->m_acc_reply_frame_hdl);
		ddi_dma_free_handle(&mpt->m_dma_reply_frame_hdl);
		mpt->m_dma_reply_frame_hdl = NULL;
	}

	if (mpt->m_dma_free_queue_hdl != NULL) {
		(void) ddi_dma_unbind_handle(mpt->m_dma_free_queue_hdl);
		ddi_dma_mem_free(&mpt->m_acc_free_queue_hdl);
		ddi_dma_free_handle(&mpt->m_dma_free_queue_hdl);
		mpt->m_dma_free_queue_hdl = NULL;
	}

	if (mpt->m_dma_post_queue_hdl != NULL) {
		(void) ddi_dma_unbind_handle(mpt->m_dma_post_queue_hdl);
		ddi_dma_mem_free(&mpt->m_acc_post_queue_hdl);
		ddi_dma_free_handle(&mpt->m_dma_post_queue_hdl);
		mpt->m_dma_post_queue_hdl = NULL;
	}

	/* Free the per-reply handler argument array (one per reply frame). */
	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
2739 
2740 static int
2741 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2742 {
2743 	int		lun = 0;
2744 	char		*sas_wwn = NULL;
2745 	int		phynum = -1;
2746 	int		reallen = 0;
2747 
2748 	/* Get the target num */
2749 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2750 	    LUN_PROP, 0);
2751 
2752 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2753 	    SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn) == DDI_PROP_SUCCESS) {
2754 		/*
2755 		 * Stick in the address of the form "wWWN,LUN"
2756 		 */
2757 		reallen = snprintf(name, len, "w%s,%x", sas_wwn, lun);
2758 		ddi_prop_free(sas_wwn);
2759 	} else if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2760 	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2761 		/*
2762 		 * Stick in the address of form "pPHY,LUN"
2763 		 */
2764 		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2765 	} else {
2766 		return (DDI_FAILURE);
2767 	}
2768 
2769 	ASSERT(reallen < len);
2770 	if (reallen >= len) {
2771 		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2772 		    "length too small, it needs to be %d bytes", reallen + 1);
2773 	}
2774 	return (DDI_SUCCESS);
2775 }
2776 
2777 /*
2778  * tran_tgt_init(9E) - target device instance initialization
2779  */
/*
 * Locate the mptsas_target_t for this scsi_device (by target-port WWN
 * and phymask), attach per-target private data to the tran, and for
 * SATA/ATAPI devices override the inquiry vid/pid/rev properties from
 * VPD page 0x89 (ATA IDENTIFY data).  Drops and reacquires m_mutex
 * around the blocking inquiry.
 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA.  Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t		*mpt;
	int			lun = sd->sd_address.a_lun;
	mdi_pathinfo_t		*pip = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	mptsas_target_t		*ptgt = NULL;
	char			*psas_wwn = NULL;
	int			phymask = 0;
	uint64_t		sas_wwn = 0;
	mpt = SDEV2MPT(sd);

	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * Non-persistent (.conf) nodes are merged into their matching
	 * firmware node and rejected here.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/* mpxio client: address info comes from the pathinfo node. */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs. Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/* Plain child: address info comes from devinfo properties. */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}
	ASSERT((sas_wwn != 0) || (phymask != 0));
	/* Look up the active target entry by WWN + phymask. */
	mutex_enter(&mpt->m_mutex);
	ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	if (hba_tran->tran_tgt_private == NULL) {
		/* Freed again in mptsas_scsi_tgt_free(). */
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* mpxio clients skip the SATA inquiry-property overrides below. */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		/* The inquiry below blocks; drop the mutex around it. */
		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			/* Inquiry failure is non-fatal: keep defaults. */
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		/* IDENTIFY data begins at byte 60 of VPD page 0x89. */
		sid = (void *)(&inq89[60]);

		/* ATA IDENTIFY strings are byte-swapped; swab to fix. */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into into vid/pid
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA     ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
2966 /*
2967  * tran_tgt_free(9E) - target device instance deallocation
2968  */
2969 static void
2970 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2971     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2972 {
2973 #ifndef __lock_lint
2974 	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
2975 #endif
2976 
2977 	mptsas_tgt_private_t	*tgt_private = hba_tran->tran_tgt_private;
2978 
2979 	if (tgt_private != NULL) {
2980 		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
2981 		hba_tran->tran_tgt_private = NULL;
2982 	}
2983 }
2984 
2985 /*
2986  * scsi_pkt handling
2987  *
2988  * Visible to the external world via the transport structure.
2989  */
2990 
2991 /*
2992  * Notes:
2993  *	- transport the command to the addressed SCSI target/lun device
2994  *	- normal operation is to schedule the command to be transported,
2995  *	  and return TRAN_ACCEPT if this is successful.
2996  *	- if NO_INTR, tran_start must poll device for command completion
2997  */
/*
 * tran_start(9E) - transport a command to the addressed target/lun.
 *
 * Returns TRAN_ACCEPT on success, TRAN_BUSY to ask the caller to retry,
 * or TRAN_FATAL_ERROR when the target is gone/offline.
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by other thread, don't spin to wait
	 * for it. Instead, queue the cmd and next time when the instance lock
	 * is not held, accept all the queued cmd. A extra tx_waitq is
	 * introduced to protect the queue.
	 *
	 * The polled cmd will not be queud and accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq.  An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst be processed.
	 *
	 * we enable this feature only when the helper threads are enabled,
	 * at which we think the loads are heavy.
	 *
	 * per instance mutex m_tx_waitq_mutex is introduced to protect the
	 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
	 */

	/* Helper threads running: use the tx_waitq scheme described above. */
	if (mpt->m_doneq_thread_n) {
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			/* Instance lock uncontended: submit directly. */
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* Polled cmds are never queued; block for the lock. */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * to stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/* Another thread will drain us; just append. */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3140 
3141 /*
3142  * Accept all the queued cmds(if any) before accept the current one.
3143  */
3144 static int
3145 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3146 {
3147 	int rval;
3148 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3149 
3150 	ASSERT(mutex_owned(&mpt->m_mutex));
3151 	/*
3152 	 * The call to mptsas_accept_tx_waitq() must always be performed
3153 	 * because that is where mpt->m_tx_draining is cleared.
3154 	 */
3155 	mutex_enter(&mpt->m_tx_waitq_mutex);
3156 	mptsas_accept_tx_waitq(mpt);
3157 	mutex_exit(&mpt->m_tx_waitq_mutex);
3158 	/*
3159 	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3160 	 * in this case, m_mutex is acquired.
3161 	 */
3162 	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3163 		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3164 			/*
3165 			 * The command should be allowed to retry by returning
3166 			 * TRAN_BUSY to stall the I/O's which come from
3167 			 * scsi_vhci since the device/path is in unstable state
3168 			 * now.
3169 			 */
3170 			return (TRAN_BUSY);
3171 		} else {
3172 			/*
3173 			 * The device is offline, just fail the command by
3174 			 * return TRAN_FATAL_ERROR.
3175 			 */
3176 			return (TRAN_FATAL_ERROR);
3177 		}
3178 	}
3179 	rval = mptsas_accept_pkt(mpt, cmd);
3180 
3181 	return (rval);
3182 }
3183 
/*
 * Accept one prepared command while holding m_mutex: either start it on
 * the HBA immediately, park it on the wait queue, or complete/fail it when
 * the target's DevHandle is invalid.  Returns TRAN_ACCEPT, TRAN_BUSY or
 * TRAN_FATAL_ERROR.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Cmds resubmitted from the tx_waitq may still need preparing. */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/*
			 * tx_waitq cmds cannot be bounced back to the caller;
			 * complete them through the done queue instead.
			 */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If device handle has already been invalidated, just
	 * fail the command. In theory, command from scsi_vhci
	 * client is impossible send down command with invalid
	 * devhdl since devhdl is set after path offline, target
	 * driver is not suppose to select a offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* Same as above: complete queued cmds via the doneq. */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free SMID slot right now; queue for later. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3287 
/*
 * Claim a free request slot (SMID) for the command and account for it.
 * Returns TRUE when a slot was claimed, FALSE when every usable slot is
 * occupied (caller then queues the command).  Called with m_mutex held.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t	*slots;
	int		slot;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	slots = mpt->m_active;

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));

	/*
	 * m_tags is equivalent to the SMID when sending requests.  Since the
	 * SMID cannot be 0, start out at one if rolling over past the size
	 * of the request queue depth.  Also, don't use the last SMID, which is
	 * reserved for TM requests.
	 */
	slot = (slots->m_tags)++;
	if (slots->m_tags > slots->m_n_slots) {
		slots->m_tags = 1;
	}

alloc_tag:
	/* Validate tag, should never fail. */
	if (slots->m_slot[slot] == NULL) {
		/*
		 * Make sure SMID is not using reserved value of 0
		 * and the TM request slot.
		 */
		ASSERT((slot > 0) && (slot <= slots->m_n_slots));
		cmd->cmd_slot = slot;
		slots->m_slot[slot] = cmd;
		mpt->m_ncmds++;

		/*
		 * only increment per target ncmds if this is not a
		 * command that has no target associated with it (i.e. a
		 * event acknoledgment)
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
			ptgt->m_t_ncmds++;
		}
		cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;

		/*
		 * If initial timout is less than or equal to one tick, bump
		 * the timeout by a tick so that command doesn't timeout before
		 * its allotted time.
		 */
		if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
			cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
		}
		return (TRUE);
	} else {
		int i;

		/*
		 * If slot in use, scan until a free one is found. Don't use 0
		 * or final slot, which is reserved for TM requests.
		 */
		for (i = 0; i < slots->m_n_slots; i++) {
			slot = slots->m_tags;
			if (++(slots->m_tags) > slots->m_n_slots) {
				slots->m_tags = 1;
			}
			if (slots->m_slot[slot] == NULL) {
				NDBG22(("found free slot %d", slot));
				goto alloc_tag;
			}
		}
	}
	return (FALSE);
}
3365 
3366 /*
3367  * prepare the pkt:
3368  * the pkt may have been resubmitted or just reused so
3369  * initialize some fields and do some checks.
3370  */
3371 static int
3372 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3373 {
3374 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
3375 
3376 	NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3377 
3378 	/*
3379 	 * Reinitialize some fields that need it; the packet may
3380 	 * have been resubmitted
3381 	 */
3382 	pkt->pkt_reason = CMD_CMPLT;
3383 	pkt->pkt_state = 0;
3384 	pkt->pkt_statistics = 0;
3385 	pkt->pkt_resid = 0;
3386 	cmd->cmd_age = 0;
3387 	cmd->cmd_pkt_flags = pkt->pkt_flags;
3388 
3389 	/*
3390 	 * zero status byte.
3391 	 */
3392 	*(pkt->pkt_scbp) = 0;
3393 
3394 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
3395 		pkt->pkt_resid = cmd->cmd_dmacount;
3396 
3397 		/*
3398 		 * consistent packets need to be sync'ed first
3399 		 * (only for data going out)
3400 		 */
3401 		if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3402 		    (cmd->cmd_flags & CFLAG_DMASEND)) {
3403 			(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3404 			    DDI_DMA_SYNC_FORDEV);
3405 		}
3406 	}
3407 
3408 	cmd->cmd_flags =
3409 	    (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3410 	    CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3411 
3412 	return (TRAN_ACCEPT);
3413 }
3414 
3415 /*
3416  * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3417  *
3418  * One of three possibilities:
3419  *	- allocate scsi_pkt
3420  *	- allocate scsi_pkt and DMA resources
3421  *	- allocate DMA resources to an already-allocated pkt
3422  */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t		*cmd, *new_cmd;
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			failure = 1;
	uint_t			oldcookiec;
	mptsas_target_t		*ptgt = NULL;
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			kf;

	/* Sleeping allocations are allowed only when the caller can block. */
	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t	save_dma_handle;
		ddi_dma_handle_t	save_arq_dma_handle;
		struct buf		*save_arq_bp;
		ddi_dma_cookie_t	save_arqcookie;

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);

		if (cmd) {
			/*
			 * Preserve the cache-constructed DMA/ARQ resources
			 * across the bzero of the cmd + embedded scsi_pkt.
			 */
			save_dma_handle = cmd->cmd_dmahandle;
			save_arq_dma_handle = cmd->cmd_arqhandle;
			save_arq_bp = cmd->cmd_arq_buf;
			save_arqcookie = cmd->cmd_arqcookie;
			bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
			cmd->cmd_dmahandle = save_dma_handle;
			cmd->cmd_arqhandle = save_arq_dma_handle;
			cmd->cmd_arq_buf = save_arq_bp;
			cmd->cmd_arqcookie = save_arqcookie;

			/* The scsi_pkt lives immediately after the cmd. */
			pkt = (void *)((uchar_t *)cmd +
			    sizeof (struct mptsas_cmd));
			pkt->pkt_ha_private = (opaque_t)cmd;
			pkt->pkt_address = *ap;
			pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
			pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
			pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
			cmd->cmd_pkt = (struct scsi_pkt *)pkt;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen = statuslen;
			cmd->cmd_rqslen = SENSE_LENGTH;
			cmd->cmd_tgt_addr = ptgt;
			failure = 0;
		}

		/* Oversized cdb/private/status areas need external buffers. */
		if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (failure == 0) {
				/*
				 * if extern alloc fails, all will be
				 * deallocated, including cmd
				 */
				failure = mptsas_pkt_alloc_extern(mpt, cmd,
				    cmdlen, tgtlen, statuslen, kf);
			}
			if (failure) {
				/*
				 * if extern allocation fails, it will
				 * deallocate the new pkt as well
				 */
				return (NULL);
			}
		}
		new_cmd = cmd;

	} else {
		cmd = PKT2CMD(pkt);
		new_cmd = NULL;
	}


	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we havn't gone past the the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation.  This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* Partial mapping: set up the first DMA window. */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* Map the bind failure onto an errno for bioerror. */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */

		if (oldcookiec != cmd->cmd_cookiec) {
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
		/*
		 * if we have an ENOMEM condition we need to behave
		 * the same way as the rest of this routine
		 */

				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used).  totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
	}
	return (pkt);
}
3765 
3766 /*
3767  * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3768  *
3769  * Notes:
3770  *	- also frees DMA resources if allocated
3771  *	- implicit DMA synchonization
3772  */
3773 static void
3774 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3775 {
3776 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
3777 	mptsas_t	*mpt = ADDR2MPT(ap);
3778 
3779 	NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3780 	    ap->a_target, (void *)pkt));
3781 
3782 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
3783 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3784 		cmd->cmd_flags &= ~CFLAG_DMAVALID;
3785 	}
3786 
3787 	if (cmd->cmd_sg) {
3788 		kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3789 		cmd->cmd_sg = NULL;
3790 	}
3791 
3792 	mptsas_free_extra_sgl_frame(mpt, cmd);
3793 
3794 	if ((cmd->cmd_flags &
3795 	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3796 	    CFLAG_SCBEXTERN)) == 0) {
3797 		cmd->cmd_flags = CFLAG_FREE;
3798 		kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3799 	} else {
3800 		mptsas_pkt_destroy_extern(mpt, cmd);
3801 	}
3802 }
3803 
3804 /*
3805  * kmem cache constructor and destructor:
3806  * When constructing, we bzero the cmd and allocate the dma handle
3807  * When destructing, just free the dma handle
3808  */
3809 static int
3810 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3811 {
3812 	mptsas_cmd_t		*cmd = buf;
3813 	mptsas_t		*mpt  = cdrarg;
3814 	struct scsi_address	ap;
3815 	uint_t			cookiec;
3816 	ddi_dma_attr_t		arq_dma_attr;
3817 	int			(*callback)(caddr_t);
3818 
3819 	callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3820 
3821 	NDBG4(("mptsas_kmem_cache_constructor"));
3822 
3823 	ap.a_hba_tran = mpt->m_tran;
3824 	ap.a_target = 0;
3825 	ap.a_lun = 0;
3826 
3827 	/*
3828 	 * allocate a dma handle
3829 	 */
3830 	if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3831 	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3832 		cmd->cmd_dmahandle = NULL;
3833 		return (-1);
3834 	}
3835 
3836 	cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3837 	    SENSE_LENGTH, B_READ, callback, NULL);
3838 	if (cmd->cmd_arq_buf == NULL) {
3839 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
3840 		cmd->cmd_dmahandle = NULL;
3841 		return (-1);
3842 	}
3843 
3844 	/*
3845 	 * allocate a arq handle
3846 	 */
3847 	arq_dma_attr = mpt->m_msg_dma_attr;
3848 	arq_dma_attr.dma_attr_sgllen = 1;
3849 	if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3850 	    NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3851 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
3852 		scsi_free_consistent_buf(cmd->cmd_arq_buf);
3853 		cmd->cmd_dmahandle = NULL;
3854 		cmd->cmd_arqhandle = NULL;
3855 		return (-1);
3856 	}
3857 
3858 	if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3859 	    cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3860 	    callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3861 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
3862 		ddi_dma_free_handle(&cmd->cmd_arqhandle);
3863 		scsi_free_consistent_buf(cmd->cmd_arq_buf);
3864 		cmd->cmd_dmahandle = NULL;
3865 		cmd->cmd_arqhandle = NULL;
3866 		cmd->cmd_arq_buf = NULL;
3867 		return (-1);
3868 	}
3869 
3870 	return (0);
3871 }
3872 
3873 static void
3874 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3875 {
3876 #ifndef __lock_lint
3877 	_NOTE(ARGUNUSED(cdrarg))
3878 #endif
3879 	mptsas_cmd_t	*cmd = buf;
3880 
3881 	NDBG4(("mptsas_kmem_cache_destructor"));
3882 
3883 	if (cmd->cmd_arqhandle) {
3884 		(void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3885 		ddi_dma_free_handle(&cmd->cmd_arqhandle);
3886 		cmd->cmd_arqhandle = NULL;
3887 	}
3888 	if (cmd->cmd_arq_buf) {
3889 		scsi_free_consistent_buf(cmd->cmd_arq_buf);
3890 		cmd->cmd_arq_buf = NULL;
3891 	}
3892 	if (cmd->cmd_dmahandle) {
3893 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
3894 		cmd->cmd_dmahandle = NULL;
3895 	}
3896 }
3897 
/*
 * kmem cache constructor for extra-SGL frame buffers.  Allocates one
 * contiguous, 16-byte aligned, single-cookie DMA buffer sized to hold
 * (m_max_request_frames - 1) request frames' worth of chained SGL
 * elements, binds it, and records both the kernel virtual address
 * (m_frames_addr) and the DMA cookie address (m_phys_addr).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after releasing any partially
 * acquired resources; on failure m_dma_hdl is left NULL, which is the
 * cue the destructor uses to know there is nothing to tear down.
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	/* Honor the caller's kmem flags when waiting for DMA resources. */
	int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/* Frames must be 16-byte aligned and fit in a single cookie. */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address.  This chip uses this
	 * address to dma to and from the driver.  The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}
3953 
3954 static void
3955 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3956 {
3957 #ifndef __lock_lint
3958 	_NOTE(ARGUNUSED(cdrarg))
3959 #endif
3960 	mptsas_cache_frames_t	*p = buf;
3961 	if (p->m_dma_hdl != NULL) {
3962 		(void) ddi_dma_unbind_handle(p->m_dma_hdl);
3963 		(void) ddi_dma_mem_free(&p->m_acc_hdl);
3964 		ddi_dma_free_handle(&p->m_dma_hdl);
3965 		p->m_phys_addr = NULL;
3966 		p->m_frames_addr = NULL;
3967 		p->m_dma_hdl = NULL;
3968 		p->m_acc_hdl = NULL;
3969 	}
3970 
3971 }
3972 
3973 /*
3974  * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
3975  * for non-standard length cdb, pkt_private, status areas
3976  * if allocation fails, then deallocate all external space and the pkt
3977  */
3978 /* ARGSUSED */
3979 static int
3980 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
3981     int cmdlen, int tgtlen, int statuslen, int kf)
3982 {
3983 	caddr_t			cdbp, scbp, tgt;
3984 	int			(*callback)(caddr_t) = (kf == KM_SLEEP) ?
3985 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
3986 	struct scsi_address	ap;
3987 	size_t			senselength;
3988 	ddi_dma_attr_t		ext_arq_dma_attr;
3989 	uint_t			cookiec;
3990 
3991 	NDBG3(("mptsas_pkt_alloc_extern: "
3992 	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
3993 	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));
3994 
3995 	tgt = cdbp = scbp = NULL;
3996 	cmd->cmd_scblen		= statuslen;
3997 	cmd->cmd_privlen	= (uchar_t)tgtlen;
3998 
3999 	if (cmdlen > sizeof (cmd->cmd_cdb)) {
4000 		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
4001 			goto fail;
4002 		}
4003 		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
4004 		cmd->cmd_flags |= CFLAG_CDBEXTERN;
4005 	}
4006 	if (tgtlen > PKT_PRIV_LEN) {
4007 		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
4008 			goto fail;
4009 		}
4010 		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
4011 		cmd->cmd_pkt->pkt_private = tgt;
4012 	}
4013 	if (statuslen > EXTCMDS_STATUS_SIZE) {
4014 		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
4015 			goto fail;
4016 		}
4017 		cmd->cmd_flags |= CFLAG_SCBEXTERN;
4018 		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
4019 
4020 		/* allocate sense data buf for DMA */
4021 
4022 		senselength = statuslen - MPTSAS_GET_ITEM_OFF(
4023 		    struct scsi_arq_status, sts_sensedata);
4024 		cmd->cmd_rqslen = (uchar_t)senselength;
4025 
4026 		ap.a_hba_tran = mpt->m_tran;
4027 		ap.a_target = 0;
4028 		ap.a_lun = 0;
4029 
4030 		cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
4031 		    (struct buf *)NULL, senselength, B_READ,
4032 		    callback, NULL);
4033 
4034 		if (cmd->cmd_ext_arq_buf == NULL) {
4035 			goto fail;
4036 		}
4037 		/*
4038 		 * allocate a extern arq handle and bind the buf
4039 		 */
4040 		ext_arq_dma_attr = mpt->m_msg_dma_attr;
4041 		ext_arq_dma_attr.dma_attr_sgllen = 1;
4042 		if ((ddi_dma_alloc_handle(mpt->m_dip,
4043 		    &ext_arq_dma_attr, callback,
4044 		    NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
4045 			goto fail;
4046 		}
4047 
4048 		if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
4049 		    cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
4050 		    callback, NULL, &cmd->cmd_ext_arqcookie,
4051 		    &cookiec)
4052 		    != DDI_SUCCESS) {
4053 			goto fail;
4054 		}
4055 		cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
4056 	}
4057 	return (0);
4058 fail:
4059 	mptsas_pkt_destroy_extern(mpt, cmd);
4060 	return (1);
4061 }
4062 
4063 /*
4064  * deallocate external pkt space and deallocate the pkt
4065  */
4066 static void
4067 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4068 {
4069 	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4070 
4071 	if (cmd->cmd_flags & CFLAG_FREE) {
4072 		mptsas_log(mpt, CE_PANIC,
4073 		    "mptsas_pkt_destroy_extern: freeing free packet");
4074 		_NOTE(NOT_REACHED)
4075 		/* NOTREACHED */
4076 	}
4077 	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4078 		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4079 	}
4080 	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4081 		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4082 		if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4083 			(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4084 		}
4085 		if (cmd->cmd_ext_arqhandle) {
4086 			ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4087 			cmd->cmd_ext_arqhandle = NULL;
4088 		}
4089 		if (cmd->cmd_ext_arq_buf)
4090 			scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4091 	}
4092 	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4093 		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4094 	}
4095 	cmd->cmd_flags = CFLAG_FREE;
4096 	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4097 }
4098 
4099 /*
4100  * tran_sync_pkt(9E) - explicit DMA synchronization
4101  */
4102 /*ARGSUSED*/
4103 static void
4104 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4105 {
4106 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4107 
4108 	NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4109 	    ap->a_target, (void *)pkt));
4110 
4111 	if (cmd->cmd_dmahandle) {
4112 		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4113 		    (cmd->cmd_flags & CFLAG_DMASEND) ?
4114 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4115 	}
4116 }
4117 
4118 /*
4119  * tran_dmafree(9E) - deallocate DMA resources allocated for command
4120  */
4121 /*ARGSUSED*/
4122 static void
4123 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4124 {
4125 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4126 	mptsas_t	*mpt = ADDR2MPT(ap);
4127 
4128 	NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4129 	    ap->a_target, (void *)pkt));
4130 
4131 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
4132 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4133 		cmd->cmd_flags &= ~CFLAG_DMAVALID;
4134 	}
4135 
4136 	if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4137 		(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4138 		cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4139 	}
4140 
4141 	mptsas_free_extra_sgl_frame(mpt, cmd);
4142 }
4143 
4144 static void
4145 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4146 {
4147 	if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4148 	    (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4149 		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4150 		    DDI_DMA_SYNC_FORCPU);
4151 	}
4152 	(*pkt->pkt_comp)(pkt);
4153 }
4154 
/*
 * Build the MPI2 scatter/gather list for a SCSI IO request using
 * 64-bit simple and chain elements.  Sets the read/write direction in
 * *control, fills frame->DataLength from the bound DMA cookies in
 * cmd->cmd_sg, and either places all SGEs in the main request frame
 * or, when they do not fit, chains into the command's pre-allocated
 * extra-SGL frame buffer (cmd->cmd_extra_frames).  Caller must have
 * already bound DMA resources (CFLAG_DMAVALID).
 */
static void
mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
	pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	uint_t			cookiec;
	mptti_t			*dmap;
	uint32_t		flags;
	pMpi2SGESimple64_t	sge;
	pMpi2SGEChain64_t	sgechain;
	ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));

	/*
	 * Set read/write bit in control.
	 */
	if (cmd->cmd_flags & CFLAG_DMASEND) {
		*control |= MPI2_SCSIIO_CONTROL_WRITE;
	} else {
		*control |= MPI2_SCSIIO_CONTROL_READ;
	}

	/* Total transfer length for this IO, from the DMA bind. */
	ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);

	/*
	 * We have 2 cases here.  First where we can fit all the
	 * SG elements into the main frame, and the case
	 * where we can't.
	 * If we have more cookies than we can attach to a frame
	 * we will need to use a chain element to point
	 * a location of memory where the rest of the S/G
	 * elements reside.
	 */
	if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);
		while (cookiec--) {
			ddi_put32(acc_hdl,
			    &sge->Address.Low, dmap->addr.address64.Low);
			ddi_put32(acc_hdl,
			    &sge->Address.High, dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength,
			    dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)
			    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last cookie, we set the flags
			 * to indicate so
			 */
			if (cookiec == 0) {
				flags |=
				    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
				    | MPI2_SGE_FLAGS_END_OF_BUFFER
				    | MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	} else {
		/*
		 * Hereby we start to deal with multiple frames.
		 * The process is as follows:
		 * 1. Determine how many frames are needed for SGL element
		 *    storage; Note that all frames are stored in contiguous
		 *    memory space and in 64-bit DMA mode each element is
		 *    3 double-words (12 bytes) long.
		 * 2. Fill up the main frame. We need to do this separately
		 *    since it contains the SCSI IO request header and needs
		 *    dedicated processing. Note that the last 4 double-words
		 *    of the SCSI IO header is for SGL element storage
		 *    (MPI2_SGE_IO_UNION).
		 * 3. Fill the chain element in the main frame, so the DMA
		 *    engine can use the following frames.
		 * 4. Enter a loop to fill the remaining frames. Note that the
		 *    last frame contains no chain element.  The remaining
		 *    frames go into the mpt SGL buffer allocated on the fly,
		 *    not immediately following the main message frame, as in
		 *    Gen1.
		 * Some restrictions:
		 * 1. For 64-bit DMA, the simple element and chain element
		 *    are both of 3 double-words (12 bytes) in size, even
		 *    though all frames are stored in the first 4G of mem
		 *    range and the higher 32-bits of the address are always 0.
		 * 2. On some controllers (like the 1064/1068), a frame can
		 *    hold SGL elements with the last 1 or 2 double-words
		 *    (4 or 8 bytes) un-used. On these controllers, we should
		 *    recognize that there's not enough room for another SGL
		 *    element and move the sge pointer to the next frame.
		 */
		int		i, j, k, l, frames, sgemax;
		int		temp;
		uint8_t		chainflags;
		uint16_t	chainlength;
		mptsas_cache_frames_t *p;

		/*
		 * Sgemax is the number of SGE's that will fit
		 * each extra frame and frames is total
		 * number of frames we'll need.  1 sge entry per
		 * frame is reseverd for the chain element thus the -1 below.
		 */
		sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
		    - 1);
		temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

		/*
		 * A little check to see if we need to round up the number
		 * of frames we need
		 */
		if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
		    sgemax) > 1) {
			frames = (temp + 1);
		} else {
			frames = temp;
		}
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);

		/*
		 * First fill in the main frame
		 */
		for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
			ddi_put32(acc_hdl, &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(acc_hdl, &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last SGE of this frame
			 * we set the end of list flag
			 */
			if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}

		/*
		 * Fill in the chain element in the main frame.
		 * About calculation on ChainOffset:
		 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
		 *    in the end reserved for SGL element storage
		 *    (MPI2_SGE_IO_UNION); we should count it in our
		 *    calculation.  See its definition in the header file.
		 * 2. Constant j is the counter of the current SGL element
		 *    that will be processed, and (j - 1) is the number of
		 *    SGL elements that have been processed (stored in the
		 *    main frame).
		 * 3. ChainOffset value should be in units of double-words (4
		 *    bytes) so the last value should be divided by 4.
		 */
		ddi_put8(acc_hdl, &frame->ChainOffset,
		    (sizeof (MPI2_SCSI_IO_REQUEST) -
		    sizeof (MPI2_SGE_IO_UNION) +
		    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
		sgechain = (pMpi2SGEChain64_t)sge;
		chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
		ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

		/*
		 * The size of the next frame is the accurate size of space
		 * (in bytes) used to store the SGL elements. j is the counter
		 * of SGL elements. (j - 1) is the number of SGL elements that
		 * have been processed (stored in frames).
		 */
		if (frames >= 2) {
			chainlength = mpt->m_req_frame_size /
			    sizeof (MPI2_SGE_SIMPLE64) *
			    sizeof (MPI2_SGE_SIMPLE64);
		} else {
			chainlength = ((cookiec - (j - 1)) *
			    sizeof (MPI2_SGE_SIMPLE64));
		}

		p = cmd->cmd_extra_frames;

		ddi_put16(acc_hdl, &sgechain->Length, chainlength);
		ddi_put32(acc_hdl, &sgechain->Address.Low,
		    p->m_phys_addr);
		/* SGL is allocated in the first 4G mem range */
		ddi_put32(acc_hdl, &sgechain->Address.High, 0);

		/*
		 * If there are more than 2 frames left we have to
		 * fill in the next chain offset to the location of
		 * the chain element in the next frame.
		 * sgemax is the number of simple elements in an extra
		 * frame. Note that the value NextChainOffset should be
		 * in double-words (4 bytes).
		 */
		if (frames >= 2) {
			ddi_put8(acc_hdl, &sgechain->NextChainOffset,
			    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
		} else {
			ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
		}

		/*
		 * Jump to next frame;
		 * Starting here, chain buffers go into the per command SGL.
		 * This buffer is allocated when chain buffers are needed.
		 */
		sge = (pMpi2SGESimple64_t)p->m_frames_addr;
		i = cookiec;

		/*
		 * Start filling in frames with SGE's.  If we
		 * reach the end of frame and still have SGE's
		 * to fill we need to add a chain element and
		 * use another frame.  j will be our counter
		 * for what cookie we are at and i will be
		 * the total cookiec. k is the current frame
		 */
		for (k = 1; k <= frames; k++) {
			for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

				/*
				 * If we have reached the end of frame
				 * and we have more SGE's to fill in
				 * we have to fill the final entry
				 * with a chain element and then
				 * continue to the next frame
				 */
				if ((l == (sgemax + 1)) && (k != frames)) {
					sgechain = (pMpi2SGEChain64_t)sge;
					/*
					 * The chain element consumes this
					 * slot, so back up j to revisit the
					 * current cookie in the next frame.
					 */
					j--;
					chainflags = (
					    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
					    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
					    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
					ddi_put8(p->m_acc_hdl,
					    &sgechain->Flags, chainflags);
					/*
					 * k is the frame counter and (k + 1)
					 * is the number of the next frame.
					 * Note that frames are in contiguous
					 * memory space.
					 */
					ddi_put32(p->m_acc_hdl,
					    &sgechain->Address.Low,
					    (p->m_phys_addr +
					    (mpt->m_req_frame_size * k)));
					ddi_put32(p->m_acc_hdl,
					    &sgechain->Address.High, 0);

					/*
					 * If there are more than 2 frames left
					 * we have to next chain offset to
					 * the location of the chain element
					 * in the next frame and fill in the
					 * length of the next chain
					 */
					if ((frames - k) >= 2) {
						ddi_put8(p->m_acc_hdl,
						    &sgechain->NextChainOffset,
						    (sgemax *
						    sizeof (MPI2_SGE_SIMPLE64))
						    >> 2);
						ddi_put16(p->m_acc_hdl,
						    &sgechain->Length,
						    mpt->m_req_frame_size /
						    sizeof (MPI2_SGE_SIMPLE64) *
						    sizeof (MPI2_SGE_SIMPLE64));
					} else {
						/*
						 * This is the last frame. Set
						 * the NextChainOffset to 0 and
						 * Length is the total size of
						 * all remaining simple elements
						 */
						ddi_put8(p->m_acc_hdl,
						    &sgechain->NextChainOffset,
						    0);
						ddi_put16(p->m_acc_hdl,
						    &sgechain->Length,
						    (cookiec - j) *
						    sizeof (MPI2_SGE_SIMPLE64));
					}

					/* Jump to the next frame */
					sge = (pMpi2SGESimple64_t)
					    ((char *)p->m_frames_addr +
					    (int)mpt->m_req_frame_size * k);

					continue;
				}

				ddi_put32(p->m_acc_hdl,
				    &sge->Address.Low,
				    dmap->addr.address64.Low);
				ddi_put32(p->m_acc_hdl,
				    &sge->Address.High,
				    dmap->addr.address64.High);
				ddi_put32(p->m_acc_hdl,
				    &sge->FlagsLength, dmap->count);
				flags = ddi_get32(p->m_acc_hdl,
				    &sge->FlagsLength);
				flags |= ((uint32_t)(
				    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
				    MPI2_SGE_FLAGS_SHIFT);

				/*
				 * If we are at the end of the frame and
				 * there is another frame to fill in
				 * we set the last simple element as last
				 * element
				 */
				if ((l == sgemax) && (k != frames)) {
					flags |= ((uint32_t)
					    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
					    MPI2_SGE_FLAGS_SHIFT);
				}

				/*
				 * If this is the final cookie we
				 * indicate it by setting the flags
				 */
				if (j == i) {
					flags |= ((uint32_t)
					    (MPI2_SGE_FLAGS_LAST_ELEMENT |
					    MPI2_SGE_FLAGS_END_OF_BUFFER |
					    MPI2_SGE_FLAGS_END_OF_LIST) <<
					    MPI2_SGE_FLAGS_SHIFT);
				}
				if (cmd->cmd_flags & CFLAG_DMASEND) {
					flags |=
					    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
					    MPI2_SGE_FLAGS_SHIFT);
				} else {
					flags |=
					    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
					    MPI2_SGE_FLAGS_SHIFT);
				}
				ddi_put32(p->m_acc_hdl,
				    &sge->FlagsLength, flags);
				dmap++;
				sge++;
			}
		}

		/*
		 * Sync DMA with the chain buffers that were just created
		 */
		(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	}
}
4544 
4545 /*
4546  * Interrupt handling
4547  * Utility routine.  Poll for status of a command sent to HBA
4548  * without interrupts (a FLAG_NOINTR command).
4549  */
4550 int
4551 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4552 {
4553 	int	rval = TRUE;
4554 
4555 	NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4556 
4557 	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4558 		mptsas_restart_hba(mpt);
4559 	}
4560 
4561 	/*
4562 	 * Wait, using drv_usecwait(), long enough for the command to
4563 	 * reasonably return from the target if the target isn't
4564 	 * "dead".  A polled command may well be sent from scsi_poll, and
4565 	 * there are retries built in to scsi_poll if the transport
4566 	 * accepted the packet (TRAN_ACCEPT).  scsi_poll waits 1 second
4567 	 * and retries the transport up to scsi_poll_busycnt times
4568 	 * (currently 60) if
4569 	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4570 	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4571 	 *
4572 	 * limit the waiting to avoid a hang in the event that the
4573 	 * cmd never gets started but we are still receiving interrupts
4574 	 */
4575 	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4576 		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4577 			NDBG5(("mptsas_poll: command incomplete"));
4578 			rval = FALSE;
4579 			break;
4580 		}
4581 	}
4582 
4583 	if (rval == FALSE) {
4584 
4585 		/*
4586 		 * this isn't supposed to happen, the hba must be wedged
4587 		 * Mark this cmd as a timeout.
4588 		 */
4589 		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4590 		    (STAT_TIMEOUT|STAT_ABORTED));
4591 
4592 		if (poll_cmd->cmd_queued == FALSE) {
4593 
4594 			NDBG5(("mptsas_poll: not on waitq"));
4595 
4596 			poll_cmd->cmd_pkt->pkt_state |=
4597 			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4598 		} else {
4599 
4600 			/* find and remove it from the waitq */
4601 			NDBG5(("mptsas_poll: delete from waitq"));
4602 			mptsas_waitq_delete(mpt, poll_cmd);
4603 		}
4604 
4605 	}
4606 	mptsas_fma_check(mpt, poll_cmd);
4607 	NDBG5(("mptsas_poll: done"));
4608 	return (rval);
4609 }
4610 
4611 /*
4612  * Used for polling cmds and TM function
4613  */
4614 static int
4615 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4616 {
4617 	int				cnt;
4618 	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
4619 	uint32_t			int_mask;
4620 
4621 	NDBG5(("mptsas_wait_intr"));
4622 
4623 	mpt->m_polled_intr = 1;
4624 
4625 	/*
4626 	 * Get the current interrupt mask and disable interrupts.  When
4627 	 * re-enabling ints, set mask to saved value.
4628 	 */
4629 	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
4630 	MPTSAS_DISABLE_INTR(mpt);
4631 
4632 	/*
4633 	 * Keep polling for at least (polltime * 1000) seconds
4634 	 */
4635 	for (cnt = 0; cnt < polltime; cnt++) {
4636 		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
4637 		    DDI_DMA_SYNC_FORCPU);
4638 
4639 		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
4640 		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
4641 
4642 		if (ddi_get32(mpt->m_acc_post_queue_hdl,
4643 		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
4644 		    ddi_get32(mpt->m_acc_post_queue_hdl,
4645 		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
4646 			drv_usecwait(1000);
4647 			continue;
4648 		}
4649 
4650 		/*
4651 		 * The reply is valid, process it according to its
4652 		 * type.
4653 		 */
4654 		mptsas_process_intr(mpt, reply_desc_union);
4655 
4656 		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
4657 			mpt->m_post_index = 0;
4658 		}
4659 
4660 		/*
4661 		 * Update the global reply index
4662 		 */
4663 		ddi_put32(mpt->m_datap,
4664 		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
4665 		mpt->m_polled_intr = 0;
4666 
4667 		/*
4668 		 * Re-enable interrupts and quit.
4669 		 */
4670 		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
4671 		    int_mask);
4672 		return (TRUE);
4673 
4674 	}
4675 
4676 	/*
4677 	 * Clear polling flag, re-enable interrupts and quit.
4678 	 */
4679 	mpt->m_polled_intr = 0;
4680 	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
4681 	return (FALSE);
4682 }
4683 
/*
 * Handle a SCSI IO success reply descriptor: look up the command by
 * SMID in the active slot table, mark its packet state, and either
 * wake a waiting passthrough thread or complete the command via the
 * done queue.  Called with m_mutex held.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
	uint16_t				SMID;
	mptsas_slots_t				*slots = mpt->m_active;
	mptsas_cmd_t				*cmd = NULL;
	struct scsi_pkt				*pkt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_slots)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	/* Successful completion: record full state and zero residual. */
	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/* Passthrough commands are completed by waking their waiter. */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
4753 
4754 static void
4755 mptsas_handle_address_reply(mptsas_t *mpt,
4756     pMpi2ReplyDescriptorsUnion_t reply_desc)
4757 {
4758 	pMpi2AddressReplyDescriptor_t	address_reply;
4759 	pMPI2DefaultReply_t		reply;
4760 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
4761 	uint32_t			reply_addr;
4762 	uint16_t			SMID, iocstatus;
4763 	mptsas_slots_t			*slots = mpt->m_active;
4764 	mptsas_cmd_t			*cmd = NULL;
4765 	uint8_t				function, buffer_type;
4766 	m_replyh_arg_t			*args;
4767 	int				reply_frame_no;
4768 
4769 	ASSERT(mutex_owned(&mpt->m_mutex));
4770 
4771 	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
4772 	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
4773 	    &address_reply->ReplyFrameAddress);
4774 	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
4775 
4776 	/*
4777 	 * If reply frame is not in the proper range we should ignore this
4778 	 * message and exit the interrupt handler.
4779 	 */
4780 	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
4781 	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
4782 	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
4783 	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
4784 	    mpt->m_reply_frame_size != 0)) {
4785 		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
4786 		    "address 0x%x\n", reply_addr);
4787 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4788 		return;
4789 	}
4790 
4791 	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
4792 	    DDI_DMA_SYNC_FORCPU);
4793 	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
4794 	    mpt->m_reply_frame_dma_addr));
4795 	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
4796 
4797 	/*
4798 	 * don't get slot information and command for events since these values
4799 	 * don't exist
4800 	 */
4801 	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
4802 	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
4803 		/*
4804 		 * This could be a TM reply, which use the last allocated SMID,
4805 		 * so allow for that.
4806 		 */
4807 		if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
4808 			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
4809 			    "%d\n", SMID);
4810 			ddi_fm_service_impact(mpt->m_dip,
4811 			    DDI_SERVICE_UNAFFECTED);
4812 			return;
4813 		}
4814 
4815 		cmd = slots->m_slot[SMID];
4816 
4817 		/*
4818 		 * print warning and return if the slot is empty
4819 		 */
4820 		if (cmd == NULL) {
4821 			mptsas_log(mpt, CE_WARN, "?NULL command for address "
4822 			    "reply in slot %d", SMID);
4823 			return;
4824 		}
4825 		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
4826 		    (cmd->cmd_flags & CFLAG_CONFIG) ||
4827 		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
4828 			cmd->cmd_rfm = reply_addr;
4829 			cmd->cmd_flags |= CFLAG_FINISHED;
4830 			cv_broadcast(&mpt->m_passthru_cv);
4831 			cv_broadcast(&mpt->m_config_cv);
4832 			cv_broadcast(&mpt->m_fw_diag_cv);
4833 			return;
4834 		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
4835 			mptsas_remove_cmd(mpt, cmd);
4836 		}
4837 		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
4838 	}
4839 	/*
4840 	 * Depending on the function, we need to handle
4841 	 * the reply frame (and cmd) differently.
4842 	 */
4843 	switch (function) {
4844 	case MPI2_FUNCTION_SCSI_IO_REQUEST:
4845 		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
4846 		break;
4847 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
4848 		cmd->cmd_rfm = reply_addr;
4849 		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
4850 		    cmd);
4851 		break;
4852 	case MPI2_FUNCTION_FW_DOWNLOAD:
4853 		cmd->cmd_flags |= CFLAG_FINISHED;
4854 		cv_signal(&mpt->m_fw_cv);
4855 		break;
4856 	case MPI2_FUNCTION_EVENT_NOTIFICATION:
4857 		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
4858 		    mpt->m_reply_frame_size;
4859 		args = &mpt->m_replyh_args[reply_frame_no];
4860 		args->mpt = (void *)mpt;
4861 		args->rfm = reply_addr;
4862 
4863 		/*
4864 		 * Record the event if its type is enabled in
4865 		 * this mpt instance by ioctl.
4866 		 */
4867 		mptsas_record_event(args);
4868 
4869 		/*
4870 		 * Handle time critical events
4871 		 * NOT_RESPONDING/ADDED only now
4872 		 */
4873 		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
4874 			/*
4875 			 * Would not return main process,
4876 			 * just let taskq resolve ack action
4877 			 * and ack would be sent in taskq thread
4878 			 */
4879 			NDBG20(("send mptsas_handle_event_sync success"));
4880 		}
4881 		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
4882 		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
4883 			mptsas_log(mpt, CE_WARN, "No memory available"
4884 			"for dispatch taskq");
4885 			/*
4886 			 * Return the reply frame to the free queue.
4887 			 */
4888 			ddi_put32(mpt->m_acc_free_queue_hdl,
4889 			    &((uint32_t *)(void *)
4890 			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
4891 			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
4892 			    DDI_DMA_SYNC_FORDEV);
4893 			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
4894 				mpt->m_free_index = 0;
4895 			}
4896 
4897 			ddi_put32(mpt->m_datap,
4898 			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
4899 		}
4900 		return;
4901 	case MPI2_FUNCTION_DIAG_BUFFER_POST:
4902 		/*
4903 		 * If SMID is 0, this implies that the reply is due to a
4904 		 * release function with a status that the buffer has been
4905 		 * released.  Set the buffer flags accordingly.
4906 		 */
4907 		if (SMID == 0) {
4908 			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
4909 			    &reply->IOCStatus);
4910 			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
4911 			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
4912 			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
4913 				pBuffer =
4914 				    &mpt->m_fw_diag_buffer_list[buffer_type];
4915 				pBuffer->valid_data = TRUE;
4916 				pBuffer->owned_by_firmware = FALSE;
4917 				pBuffer->immediate = FALSE;
4918 			}
4919 		} else {
4920 			/*
4921 			 * Normal handling of diag post reply with SMID.
4922 			 */
4923 			cmd = slots->m_slot[SMID];
4924 
4925 			/*
4926 			 * print warning and return if the slot is empty
4927 			 */
4928 			if (cmd == NULL) {
4929 				mptsas_log(mpt, CE_WARN, "?NULL command for "
4930 				    "address reply in slot %d", SMID);
4931 				return;
4932 			}
4933 			cmd->cmd_rfm = reply_addr;
4934 			cmd->cmd_flags |= CFLAG_FINISHED;
4935 			cv_broadcast(&mpt->m_fw_diag_cv);
4936 		}
4937 		return;
4938 	default:
4939 		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
4940 		break;
4941 	}
4942 
4943 	/*
4944 	 * Return the reply frame to the free queue.
4945 	 */
4946 	ddi_put32(mpt->m_acc_free_queue_hdl,
4947 	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
4948 	    reply_addr);
4949 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
4950 	    DDI_DMA_SYNC_FORDEV);
4951 	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
4952 		mpt->m_free_index = 0;
4953 	}
4954 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
4955 	    mpt->m_free_index);
4956 
4957 	if (cmd->cmd_flags & CFLAG_FW_CMD)
4958 		return;
4959 
4960 	if (cmd->cmd_flags & CFLAG_RETRY) {
4961 		/*
4962 		 * The target returned QFULL or busy, do not add tihs
4963 		 * pkt to the doneq since the hba will retry
4964 		 * this cmd.
4965 		 *
4966 		 * The pkt has already been resubmitted in
4967 		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4968 		 * Remove this cmd_flag here.
4969 		 */
4970 		cmd->cmd_flags &= ~CFLAG_RETRY;
4971 	} else {
4972 		mptsas_doneq_add(mpt, cmd);
4973 	}
4974 }
4975 
/*
 * Translate the MPI2 SCSI IO reply frame for a completed command into
 * SCSA terms: set pkt_scbp (SCSI status byte), pkt_reason, pkt_state and
 * pkt_resid on the command's scsi_pkt, and build the auto-request-sense
 * (ARQ) status when the target returned CHECK CONDITION.
 *
 *   mpt	- per-instance soft state
 *   reply	- IOC reply frame (read via the reply-frame access handle)
 *   cmd	- the command this reply completes
 *
 * Also reacts to certain conditions as a side effect: drains or holds
 * target throttles, retries on IOC BUSY/INSUFFICIENT_RESOURCES, and
 * dispatches a DR taskq job when sense data indicates a LUN-level
 * configuration change.
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t			scsi_status, scsi_state;
	uint16_t		ioc_status;
	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt		*pkt;
	struct scsi_arq_status	*arqstat;
	struct buf		*bp;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint8_t			*sensedata = NULL;

	/*
	 * Pick the buffer that holds the sense bytes: the external ARQ
	 * buffer when both flags say one was allocated and filled,
	 * otherwise the command's built-in ARQ buffer.
	 */
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		bp = cmd->cmd_ext_arq_buf;
	} else {
		bp = cmd->cmd_arq_buf;
	}

	/*
	 * Fetch all reply fields through the DMA access handle (the frame
	 * lives in IOC-written DMA memory).
	 */
	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progressing, the command need retry later.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	/*
	 * Device gone with no SCSI status at all: mark the command
	 * incomplete and start draining the target so outstanding
	 * commands complete before offline handling.
	 */
	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		return;
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* only the low byte of ResponseInfo is the response code */
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		/*
		 * Build a scsi_arq_status in pkt_scbp: inner request-sense
		 * packet status/state/statistics plus the sense bytes
		 * copied from the ARQ buffer.
		 */
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state  = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;

		/* copy no more sense bytes than the ARQ buffer can hold */
		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
		    cmd->cmd_rqslen));
		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			mptsas_topo_change_list_t *topo_node = NULL;

			/*
			 * LUN configuration changed under us; queue a DR
			 * taskq job to re-enumerate the target's LUNs.
			 * KM_NOSLEEP because we may be in interrupt
			 * context here.
			 */
			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		/* GOOD SCSI status: the IOC status decides the outcome */
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_BUS_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			/*
			 * retry command
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			(void) mptsas_accept_pkt(mpt, cmd);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5227 
5228 static void
5229 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5230 	mptsas_cmd_t *cmd)
5231 {
5232 	uint8_t		task_type;
5233 	uint16_t	ioc_status;
5234 	uint32_t	log_info;
5235 	uint16_t	dev_handle;
5236 	struct scsi_pkt *pkt = CMD2PKT(cmd);
5237 
5238 	task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5239 	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5240 	log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5241 	dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5242 
5243 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5244 		mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5245 		    "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5246 		    task_type, ioc_status, log_info, dev_handle);
5247 		pkt->pkt_reason = CMD_INCOMPLETE;
5248 		return;
5249 	}
5250 
5251 	switch (task_type) {
5252 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5253 	case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5254 	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5255 	case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5256 	case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5257 	case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5258 		break;
5259 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5260 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5261 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5262 		mptsas_flush_target(mpt, dev_handle, Lun(cmd), task_type);
5263 		break;
5264 	default:
5265 		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5266 		    task_type);
5267 		mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5268 		break;
5269 	}
5270 }
5271 
/*
 * Body of one done-queue helper thread.  Each helper owns one entry of
 * m_doneq_thread_id and drains completed commands from its private
 * done queue, calling the SCSA completion routine outside the list
 * mutex.  The thread exits when MPTSAS_DONEQ_THREAD_ACTIVE is cleared,
 * decrementing m_doneq_thread_n and waking anyone waiting for teardown.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;	/* this thread's index */
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/* sleep until work is queued (or we are told to exit) */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list mutex across the completion callback;
		 * target-driver completion code may take other locks or
		 * resubmit I/O.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* announce our exit to the thread-management code */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5303 
5304 
/*
 * mpt interrupt handler.
 *
 * Drains the reply post queue: each valid reply descriptor is handed to
 * mptsas_process_intr(), then the host's reply post index register is
 * updated once at the end.  Returns DDI_INTR_CLAIMED if this instance
 * had work pending, DDI_INTR_UNCLAIMED otherwise (shared interrupt not
 * ours, chip powered down, or polled mode in progress).
 */
static uint_t
mptsas_intr(caddr_t arg1, caddr_t arg2)
{
	mptsas_t			*mpt = (void *)arg1;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	uchar_t				did_reply = FALSE;

	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));

	mutex_enter(&mpt->m_mutex);

	/*
	 * If interrupts are shared by two channels then check whether this
	 * interrupt is genuinely for this channel by making sure first the
	 * chip is in high power state.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * If polling, interrupt was triggered by some shared interrupt because
	 * IOC interrupts are disabled during polling, so polling routine will
	 * handle any replies.  Considering this, if polling is happening,
	 * return with interrupt unclaimed.
	 */
	if (mpt->m_polled_intr) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Read the istat register.
	 */
	if ((INTPENDING(mpt)) != 0) {
		/*
		 * read fifo until empty.
		 */
#ifndef __lock_lint
		_NOTE(CONSTCOND)
#endif
		while (TRUE) {
			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
			    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

			/*
			 * An all-ones descriptor is the "unused" pattern
			 * (see mptsas_process_intr, which resets consumed
			 * descriptors to 0xFF..FF): queue is empty.
			 */
			if (ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
			    ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
				break;
			}

			/*
			 * The reply is valid, process it according to its
			 * type.  Also, set a flag for updating the reply index
			 * after they've all been processed.
			 */
			did_reply = TRUE;

			mptsas_process_intr(mpt, reply_desc_union);

			/*
			 * Increment post index and roll over if needed.
			 */
			if (++mpt->m_post_index == mpt->m_post_queue_depth) {
				mpt->m_post_index = 0;
			}
		}

		/*
		 * Update the global reply index if at least one reply was
		 * processed.
		 */
		if (did_reply) {
			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}
	NDBG1(("mptsas_intr complete"));

	/*
	 * If no helper threads are created, process the doneq in ISR. If
	 * helpers are created, use the doneq length as a metric to measure the
	 * load on the interrupt CPU. If it is long enough, which indicates the
	 * load is heavy, then we deliver the IO completions to the helpers.
	 * This measurement has some limitations, although it is simple and
	 * straightforward and works well for most of the cases at present.
	 */
	if (!mpt->m_doneq_thread_n ||
	    (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
		mptsas_doneq_empty(mpt);
	} else {
		mptsas_deliver_doneq_thread(mpt);
	}

	/*
	 * If there are queued cmd, start them now.
	 */
	if (mpt->m_waitq != NULL) {
		mptsas_restart_waitq(mpt);
	}

	mutex_exit(&mpt->m_mutex);
	return (DDI_INTR_CLAIMED);
}
5421 
5422 static void
5423 mptsas_process_intr(mptsas_t *mpt,
5424     pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5425 {
5426 	uint8_t	reply_type;
5427 
5428 	ASSERT(mutex_owned(&mpt->m_mutex));
5429 
5430 	/*
5431 	 * The reply is valid, process it according to its
5432 	 * type.  Also, set a flag for updated the reply index
5433 	 * after they've all been processed.
5434 	 */
5435 	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5436 	    &reply_desc_union->Default.ReplyFlags);
5437 	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5438 	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5439 		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5440 	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5441 		mptsas_handle_address_reply(mpt, reply_desc_union);
5442 	} else {
5443 		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5444 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5445 	}
5446 
5447 	/*
5448 	 * Clear the reply descriptor for re-use and increment
5449 	 * index.
5450 	 */
5451 	ddi_put64(mpt->m_acc_post_queue_hdl,
5452 	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5453 	    0xFFFFFFFFFFFFFFFF);
5454 	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5455 	    DDI_DMA_SYNC_FORDEV);
5456 }
5457 
5458 /*
5459  * handle qfull condition
5460  */
5461 static void
5462 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5463 {
5464 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
5465 
5466 	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5467 	    (ptgt->m_qfull_retries == 0)) {
5468 		/*
5469 		 * We have exhausted the retries on QFULL, or,
5470 		 * the target driver has indicated that it
5471 		 * wants to handle QFULL itself by setting
5472 		 * qfull-retries capability to 0. In either case
5473 		 * we want the target driver's QFULL handling
5474 		 * to kick in. We do this by having pkt_reason
5475 		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5476 		 */
5477 		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5478 	} else {
5479 		if (ptgt->m_reset_delay == 0) {
5480 			ptgt->m_t_throttle =
5481 			    max((ptgt->m_t_ncmds - 2), 0);
5482 		}
5483 
5484 		cmd->cmd_pkt_flags |= FLAG_HEAD;
5485 		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5486 		cmd->cmd_flags |= CFLAG_RETRY;
5487 
5488 		(void) mptsas_accept_pkt(mpt, cmd);
5489 
5490 		/*
5491 		 * when target gives queue full status with no commands
5492 		 * outstanding (m_t_ncmds == 0), throttle is set to 0
5493 		 * (HOLD_THROTTLE), and the queue full handling start
5494 		 * (see psarc/1994/313); if there are commands outstanding,
5495 		 * throttle is set to (m_t_ncmds - 2)
5496 		 */
5497 		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5498 			/*
5499 			 * By setting throttle to QFULL_THROTTLE, we
5500 			 * avoid submitting new commands and in
5501 			 * mptsas_restart_cmd find out slots which need
5502 			 * their throttles to be cleared.
5503 			 */
5504 			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5505 			if (mpt->m_restart_cmd_timeid == 0) {
5506 				mpt->m_restart_cmd_timeid =
5507 				    timeout(mptsas_restart_cmd, mpt,
5508 				    ptgt->m_qfull_retry_interval);
5509 			}
5510 		}
5511 	}
5512 }
5513 
5514 mptsas_phymask_t
5515 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5516 {
5517 	mptsas_phymask_t	phy_mask = 0;
5518 	uint8_t			i = 0;
5519 
5520 	NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5521 
5522 	ASSERT(mutex_owned(&mpt->m_mutex));
5523 
5524 	/*
5525 	 * If physport is 0xFF, this is a RAID volume.  Use phymask of 0.
5526 	 */
5527 	if (physport == 0xFF) {
5528 		return (0);
5529 	}
5530 
5531 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5532 		if (mpt->m_phy_info[i].attached_devhdl &&
5533 		    (mpt->m_phy_info[i].phy_mask != 0) &&
5534 		    (mpt->m_phy_info[i].port_num == physport)) {
5535 			phy_mask = mpt->m_phy_info[i].phy_mask;
5536 			break;
5537 		}
5538 	}
5539 	NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5540 	    mpt->m_instance, physport, phy_mask));
5541 	return (phy_mask);
5542 }
5543 
5544 /*
5545  * mpt free device handle after device gone, by use of passthrough
5546  */
5547 static int
5548 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5549 {
5550 	Mpi2SasIoUnitControlRequest_t	req;
5551 	Mpi2SasIoUnitControlReply_t	rep;
5552 	int				ret;
5553 
5554 	ASSERT(mutex_owned(&mpt->m_mutex));
5555 
5556 	/*
5557 	 * Need to compose a SAS IO Unit Control request message
5558 	 * and call mptsas_do_passthru() function
5559 	 */
5560 	bzero(&req, sizeof (req));
5561 	bzero(&rep, sizeof (rep));
5562 
5563 	req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5564 	req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5565 	req.DevHandle = LE_16(devhdl);
5566 
5567 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5568 	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5569 	if (ret != 0) {
5570 		cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5571 		    "Control error %d", ret);
5572 		return (DDI_FAILURE);
5573 	}
5574 
5575 	/* do passthrough success, check the ioc status */
5576 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5577 		cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5578 		    "Control IOCStatus %d", LE_16(rep.IOCStatus));
5579 		return (DDI_FAILURE);
5580 	}
5581 
5582 	return (DDI_SUCCESS);
5583 }
5584 
5585 static void
5586 mptsas_update_phymask(mptsas_t *mpt)
5587 {
5588 	mptsas_phymask_t mask = 0, phy_mask;
5589 	char		*phy_mask_name;
5590 	uint8_t		current_port;
5591 	int		i, j;
5592 
5593 	NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5594 
5595 	ASSERT(mutex_owned(&mpt->m_mutex));
5596 
5597 	(void) mptsas_get_sas_io_unit_page(mpt);
5598 
5599 	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5600 
5601 	for (i = 0; i < mpt->m_num_phys; i++) {
5602 		phy_mask = 0x00;
5603 
5604 		if (mpt->m_phy_info[i].attached_devhdl == 0)
5605 			continue;
5606 
5607 		bzero(phy_mask_name, sizeof (phy_mask_name));
5608 
5609 		current_port = mpt->m_phy_info[i].port_num;
5610 
5611 		if ((mask & (1 << i)) != 0)
5612 			continue;
5613 
5614 		for (j = 0; j < mpt->m_num_phys; j++) {
5615 			if (mpt->m_phy_info[j].attached_devhdl &&
5616 			    (mpt->m_phy_info[j].port_num == current_port)) {
5617 				phy_mask |= (1 << j);
5618 			}
5619 		}
5620 		mask = mask | phy_mask;
5621 
5622 		for (j = 0; j < mpt->m_num_phys; j++) {
5623 			if ((phy_mask >> j) & 0x01) {
5624 				mpt->m_phy_info[j].phy_mask = phy_mask;
5625 			}
5626 		}
5627 
5628 		(void) sprintf(phy_mask_name, "%x", phy_mask);
5629 
5630 		mutex_exit(&mpt->m_mutex);
5631 		/*
5632 		 * register a iport, if the port has already been existed
5633 		 * SCSA will do nothing and just return.
5634 		 */
5635 		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5636 		mutex_enter(&mpt->m_mutex);
5637 	}
5638 	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5639 	NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5640 }
5641 
/*
 * mptsas_handle_dr is a task handler for DR, the DR action includes:
 * 1. Directly attached Device Added/Removed.
 * 2. Expander Device Added/Removed.
 * 3. Indirectly Attached Device Added/Expander.
 * 4. LUNs of an existing device status change.
 * 5. RAID volume created/deleted.
 * 6. Member of RAID volume is released because of RAID deletion.
 * 7. Physical disks are removed because of RAID creation.
 *
 * Runs in taskq context.  Walks the topo change list headed by `args',
 * resolving each node to its parent iport dip and handing it to
 * mptsas_handle_topo_change(); list nodes are freed as they are
 * consumed.  m_mutex is taken and dropped around the phases that need
 * it (it must NOT be held on entry).
 */
static void
mptsas_handle_dr(void *args) {
	mptsas_topo_change_list_t	*topo_node = NULL;
	mptsas_topo_change_list_t	*save_node = NULL;
	mptsas_t			*mpt;
	dev_info_t			*parent = NULL;
	mptsas_phymask_t		phymask = 0;
	char				*phy_mask_name;
	uint8_t				flags = 0, physport = 0xff;
	uint8_t				port_update = 0;
	uint_t				event;

	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	/*
	 * Decide (from the head node only) whether this batch of events
	 * can change the initiator port configuration.
	 */
	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system, it must be the head
		 * of topo_change_list_t
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases port_update == 1 may cause initiator port form change
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of initiator
		 * port have changed to online. So when expander added or
		 * directly attached device online event come, we force to
		 * update port information by issuing SAS IO Unit Page and
		 * update PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * Walk the list.  `parent' carries over between iterations for
	 * nodes that share the same iport (expander-attached devices);
	 * it is reset to NULL whenever the parent may differ.
	 */
	while (topo_node) {
		phymask = 0;
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				/* handle removal needs no parent dip */
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no any field in IR_CONFIG_CHANGE
				 * event indicate physport/phynum, let's get
				 * parent after SAS Device Page0 request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If the direct attached device added or a
				 * phys disk is being unhidden, argument
				 * physport actually is PHY#, so we have to get
				 * phymask according PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
			/*
			 * For RAID topology change node, write the iport name
			 * as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can be 0 if the drive has been
				 * pulled by the time an add event is
				 * processed.  If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);

		mptsas_handle_topo_change(topo_node, parent);
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If direct attached device associated, make sure
			 * reset the parent before start the next one. But
			 * all devices associated with expander shares the
			 * parent.  Also, reset parent if this is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
}
5831 
/*
 * Process one queued topology-change node: online/offline a target or SMP
 * (expander) device, or just release a bare device handle.
 *
 * topo_node - the change-list entry to act on (event, devhdl, flags).
 * parent    - iport dev_info node under which bus config/unconfig runs.
 *
 * Called from the DR taskq (via mptsas_handle_dr) with mpt->m_mutex held.
 * The mutex is dropped around ndi_devi_enter()/bus-config calls and
 * reacquired before return on the paths that continue; the early-return
 * paths leave the mutex in whatever state they reached (callers expect
 * m_mutex still held -- note the RECONFIG paths that return after
 * mutex_exit-free failures keep it held).
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t		phy, flags;
	char		*addr = NULL;
	dev_info_t	*lundip;
	int		circ = 0, circ1 = 0;

	NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = mptsas_search_by_devhdl(
			    &mpt->m_active->m_tgttbl, topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			/* Non-RAID nodes may carry the target directly. */
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK then there is nothing
			 * else to do, just leave.
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		/* Drop m_mutex across the bus-configuration calls below. */
		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/*
			 * For a released physical disk, the iport must be
			 * looked up from the target's phymask since no
			 * parent was resolved by the caller.
			 */
			phymask = ptgt->m_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
		devhdl = topo_node->devhdl;
		ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_sas_wwn;
		phy = ptgt->m_phynum;

		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		/* Build the unit address: WWN form if known, else PHY form. */
		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
			    rval));
		}

		/* Drop m_mutex across the offline/unconfigure calls. */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		mutex_enter(&mpt->m_mutex);
		if (rval == DDI_SUCCESS) {
			/* Offline succeeded: release the target structure. */
			mptsas_tgt_free(&mpt->m_active->m_tgttbl,
			    ptgt->m_sas_wwn, ptgt->m_phymask);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		flags = topo_node->flags;
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}
		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		/*
		 * DevHandle was never in the target hash (e.g. a drive
		 * pulled before its add event was processed): reset the
		 * target and release the controller's handle only.
		 */
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;

		devhdl = topo_node->devhdl;

		/* Read expander page 0 to populate the new smp entry. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(smptbl, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);
		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
		devhdl = topo_node->devhdl;
		psmp = mptsas_search_by_devhdl(smptbl, devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
		ndi_devi_exit(parent, circ1);
		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			mptsas_smp_free(smptbl, psmp->m_sasaddr,
			    psmp->m_phymask);
		} else {
			/* Offline failed: keep the entry, mark handle stale. */
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}
		break;
	}
	default:
		return;
	}
}
6121 
6122 /*
6123  * Record the event if its type is enabled in mpt instance by ioctl.
6124  */
6125 static void
6126 mptsas_record_event(void *args)
6127 {
6128 	m_replyh_arg_t			*replyh_arg;
6129 	pMpi2EventNotificationReply_t	eventreply;
6130 	uint32_t			event, rfm;
6131 	mptsas_t			*mpt;
6132 	int				i, j;
6133 	uint16_t			event_data_len;
6134 	boolean_t			sendAEN = FALSE;
6135 
6136 	replyh_arg = (m_replyh_arg_t *)args;
6137 	rfm = replyh_arg->rfm;
6138 	mpt = replyh_arg->mpt;
6139 
6140 	eventreply = (pMpi2EventNotificationReply_t)
6141 	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6142 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6143 
6144 
6145 	/*
6146 	 * Generate a system event to let anyone who cares know that a
6147 	 * LOG_ENTRY_ADDED event has occurred.  This is sent no matter what the
6148 	 * event mask is set to.
6149 	 */
6150 	if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6151 		sendAEN = TRUE;
6152 	}
6153 
6154 	/*
6155 	 * Record the event only if it is not masked.  Determine which dword
6156 	 * and bit of event mask to test.
6157 	 */
6158 	i = (uint8_t)(event / 32);
6159 	j = (uint8_t)(event % 32);
6160 	if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6161 		i = mpt->m_event_index;
6162 		mpt->m_events[i].Type = event;
6163 		mpt->m_events[i].Number = ++mpt->m_event_number;
6164 		bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6165 		event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6166 		    &eventreply->EventDataLength);
6167 
6168 		if (event_data_len > 0) {
6169 			/*
6170 			 * Limit data to size in m_event entry
6171 			 */
6172 			if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6173 				event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6174 			}
6175 			for (j = 0; j < event_data_len; j++) {
6176 				mpt->m_events[i].Data[j] =
6177 				    ddi_get32(mpt->m_acc_reply_frame_hdl,
6178 				    &(eventreply->EventData[j]));
6179 			}
6180 
6181 			/*
6182 			 * check for index wrap-around
6183 			 */
6184 			if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6185 				i = 0;
6186 			}
6187 			mpt->m_event_index = (uint8_t)i;
6188 
6189 			/*
6190 			 * Set flag to send the event.
6191 			 */
6192 			sendAEN = TRUE;
6193 		}
6194 	}
6195 
6196 	/*
6197 	 * Generate a system event if flag is set to let anyone who cares know
6198 	 * that an event has occurred.
6199 	 */
6200 	if (sendAEN) {
6201 		(void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6202 		    "SAS", NULL, NULL, DDI_NOSLEEP);
6203 	}
6204 }
6205 
6206 #define	SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6207 /*
6208  * handle sync events from ioc in interrupt
6209  * return value:
6210  * DDI_SUCCESS: The event is handled by this func
6211  * DDI_FAILURE: Event is not handled
6212  */
6213 static int
6214 mptsas_handle_event_sync(void *args)
6215 {
6216 	m_replyh_arg_t			*replyh_arg;
6217 	pMpi2EventNotificationReply_t	eventreply;
6218 	uint32_t			event, rfm;
6219 	mptsas_t			*mpt;
6220 	uint_t				iocstatus;
6221 
6222 	replyh_arg = (m_replyh_arg_t *)args;
6223 	rfm = replyh_arg->rfm;
6224 	mpt = replyh_arg->mpt;
6225 
6226 	ASSERT(mutex_owned(&mpt->m_mutex));
6227 
6228 	eventreply = (pMpi2EventNotificationReply_t)
6229 	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6230 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6231 
6232 	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6233 	    &eventreply->IOCStatus)) {
6234 		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6235 			mptsas_log(mpt, CE_WARN,
6236 			    "!mptsas_handle_event_sync: IOCStatus=0x%x, "
6237 			    "IOCLogInfo=0x%x", iocstatus,
6238 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
6239 			    &eventreply->IOCLogInfo));
6240 		} else {
6241 			mptsas_log(mpt, CE_WARN,
6242 			    "mptsas_handle_event_sync: IOCStatus=0x%x, "
6243 			    "IOCLogInfo=0x%x", iocstatus,
6244 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
6245 			    &eventreply->IOCLogInfo));
6246 		}
6247 	}
6248 
6249 	/*
6250 	 * figure out what kind of event we got and handle accordingly
6251 	 */
6252 	switch (event) {
6253 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6254 	{
6255 		pMpi2EventDataSasTopologyChangeList_t	sas_topo_change_list;
6256 		uint8_t				num_entries, expstatus, phy;
6257 		uint8_t				phystatus, physport, state, i;
6258 		uint8_t				start_phy_num, link_rate;
6259 		uint16_t			dev_handle, reason_code;
6260 		uint16_t			enc_handle, expd_handle;
6261 		char				string[80], curr[80], prev[80];
6262 		mptsas_topo_change_list_t	*topo_head = NULL;
6263 		mptsas_topo_change_list_t	*topo_tail = NULL;
6264 		mptsas_topo_change_list_t	*topo_node = NULL;
6265 		mptsas_target_t			*ptgt;
6266 		mptsas_smp_t			*psmp;
6267 		mptsas_hash_table_t		*tgttbl, *smptbl;
6268 		uint8_t				flags = 0, exp_flag;
6269 
6270 		NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6271 
6272 		tgttbl = &mpt->m_active->m_tgttbl;
6273 		smptbl = &mpt->m_active->m_smptbl;
6274 
6275 		sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6276 		    eventreply->EventData;
6277 
6278 		enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6279 		    &sas_topo_change_list->EnclosureHandle);
6280 		expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6281 		    &sas_topo_change_list->ExpanderDevHandle);
6282 		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6283 		    &sas_topo_change_list->NumEntries);
6284 		start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6285 		    &sas_topo_change_list->StartPhyNum);
6286 		expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6287 		    &sas_topo_change_list->ExpStatus);
6288 		physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6289 		    &sas_topo_change_list->PhysicalPort);
6290 
6291 		string[0] = 0;
6292 		if (expd_handle) {
6293 			flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6294 			switch (expstatus) {
6295 			case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6296 				(void) sprintf(string, " added");
6297 				/*
6298 				 * New expander device added
6299 				 */
6300 				mpt->m_port_chng = 1;
6301 				topo_node = kmem_zalloc(
6302 				    sizeof (mptsas_topo_change_list_t),
6303 				    KM_SLEEP);
6304 				topo_node->mpt = mpt;
6305 				topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6306 				topo_node->un.physport = physport;
6307 				topo_node->devhdl = expd_handle;
6308 				topo_node->flags = flags;
6309 				topo_node->object = NULL;
6310 				if (topo_head == NULL) {
6311 					topo_head = topo_tail = topo_node;
6312 				} else {
6313 					topo_tail->next = topo_node;
6314 					topo_tail = topo_node;
6315 				}
6316 				break;
6317 			case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6318 				(void) sprintf(string, " not responding, "
6319 				    "removed");
6320 				psmp = mptsas_search_by_devhdl(smptbl,
6321 				    expd_handle);
6322 				if (psmp == NULL)
6323 					break;
6324 
6325 				topo_node = kmem_zalloc(
6326 				    sizeof (mptsas_topo_change_list_t),
6327 				    KM_SLEEP);
6328 				topo_node->mpt = mpt;
6329 				topo_node->un.phymask = psmp->m_phymask;
6330 				topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6331 				topo_node->devhdl = expd_handle;
6332 				topo_node->flags = flags;
6333 				topo_node->object = NULL;
6334 				if (topo_head == NULL) {
6335 					topo_head = topo_tail = topo_node;
6336 				} else {
6337 					topo_tail->next = topo_node;
6338 					topo_tail = topo_node;
6339 				}
6340 				break;
6341 			case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6342 				break;
6343 			case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6344 				(void) sprintf(string, " not responding, "
6345 				    "delaying removal");
6346 				break;
6347 			default:
6348 				break;
6349 			}
6350 		} else {
6351 			flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6352 		}
6353 
6354 		NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6355 		    enc_handle, expd_handle, string));
6356 		for (i = 0; i < num_entries; i++) {
6357 			phy = i + start_phy_num;
6358 			phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6359 			    &sas_topo_change_list->PHY[i].PhyStatus);
6360 			dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6361 			    &sas_topo_change_list->PHY[i].AttachedDevHandle);
6362 			reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6363 			/*
6364 			 * Filter out processing of Phy Vacant Status unless
6365 			 * the reason code is "Not Responding".  Process all
6366 			 * other combinations of Phy Status and Reason Codes.
6367 			 */
6368 			if ((phystatus &
6369 			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6370 			    (reason_code !=
6371 			    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6372 				continue;
6373 			}
6374 			curr[0] = 0;
6375 			prev[0] = 0;
6376 			string[0] = 0;
6377 			switch (reason_code) {
6378 			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6379 			{
6380 				NDBG20(("mptsas%d phy %d physical_port %d "
6381 				    "dev_handle %d added", mpt->m_instance, phy,
6382 				    physport, dev_handle));
6383 				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6384 				    &sas_topo_change_list->PHY[i].LinkRate);
6385 				state = (link_rate &
6386 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6387 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6388 				switch (state) {
6389 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6390 					(void) sprintf(curr, "is disabled");
6391 					break;
6392 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6393 					(void) sprintf(curr, "is offline, "
6394 					    "failed speed negotiation");
6395 					break;
6396 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6397 					(void) sprintf(curr, "SATA OOB "
6398 					    "complete");
6399 					break;
6400 				case SMP_RESET_IN_PROGRESS:
6401 					(void) sprintf(curr, "SMP reset in "
6402 					    "progress");
6403 					break;
6404 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6405 					(void) sprintf(curr, "is online at "
6406 					    "1.5 Gbps");
6407 					break;
6408 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6409 					(void) sprintf(curr, "is online at 3.0 "
6410 					    "Gbps");
6411 					break;
6412 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6413 					(void) sprintf(curr, "is online at 6.0 "
6414 					    "Gbps");
6415 					break;
6416 				default:
6417 					(void) sprintf(curr, "state is "
6418 					    "unknown");
6419 					break;
6420 				}
6421 				/*
6422 				 * New target device added into the system.
6423 				 * Set association flag according to if an
6424 				 * expander is used or not.
6425 				 */
6426 				exp_flag =
6427 				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6428 				if (flags ==
6429 				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6430 					flags = exp_flag;
6431 				}
6432 				topo_node = kmem_zalloc(
6433 				    sizeof (mptsas_topo_change_list_t),
6434 				    KM_SLEEP);
6435 				topo_node->mpt = mpt;
6436 				topo_node->event =
6437 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
6438 				if (expd_handle == 0) {
6439 					/*
6440 					 * Per MPI 2, if expander dev handle
6441 					 * is 0, it's a directly attached
6442 					 * device. So driver use PHY to decide
6443 					 * which iport is associated
6444 					 */
6445 					physport = phy;
6446 					mpt->m_port_chng = 1;
6447 				}
6448 				topo_node->un.physport = physport;
6449 				topo_node->devhdl = dev_handle;
6450 				topo_node->flags = flags;
6451 				topo_node->object = NULL;
6452 				if (topo_head == NULL) {
6453 					topo_head = topo_tail = topo_node;
6454 				} else {
6455 					topo_tail->next = topo_node;
6456 					topo_tail = topo_node;
6457 				}
6458 				break;
6459 			}
6460 			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6461 			{
6462 				NDBG20(("mptsas%d phy %d physical_port %d "
6463 				    "dev_handle %d removed", mpt->m_instance,
6464 				    phy, physport, dev_handle));
6465 				/*
6466 				 * Set association flag according to if an
6467 				 * expander is used or not.
6468 				 */
6469 				exp_flag =
6470 				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6471 				if (flags ==
6472 				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6473 					flags = exp_flag;
6474 				}
6475 				/*
6476 				 * Target device is removed from the system
6477 				 * Before the device is really offline from
6478 				 * from system.
6479 				 */
6480 				ptgt = mptsas_search_by_devhdl(tgttbl,
6481 				    dev_handle);
6482 				/*
6483 				 * If ptgt is NULL here, it means that the
6484 				 * DevHandle is not in the hash table.  This is
6485 				 * reasonable sometimes.  For example, if a
6486 				 * disk was pulled, then added, then pulled
6487 				 * again, the disk will not have been put into
6488 				 * the hash table because the add event will
6489 				 * have an invalid phymask.  BUT, this does not
6490 				 * mean that the DevHandle is invalid.  The
6491 				 * controller will still have a valid DevHandle
6492 				 * that must be removed.  To do this, use the
6493 				 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
6494 				 */
6495 				if (ptgt == NULL) {
6496 					topo_node = kmem_zalloc(
6497 					    sizeof (mptsas_topo_change_list_t),
6498 					    KM_SLEEP);
6499 					topo_node->mpt = mpt;
6500 					topo_node->un.phymask = 0;
6501 					topo_node->event =
6502 					    MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
6503 					topo_node->devhdl = dev_handle;
6504 					topo_node->flags = flags;
6505 					topo_node->object = NULL;
6506 					if (topo_head == NULL) {
6507 						topo_head = topo_tail =
6508 						    topo_node;
6509 					} else {
6510 						topo_tail->next = topo_node;
6511 						topo_tail = topo_node;
6512 					}
6513 					break;
6514 				}
6515 
6516 				/*
6517 				 * Update DR flag immediately avoid I/O failure
6518 				 * before failover finish. Pay attention to the
6519 				 * mutex protect, we need grab m_tx_waitq_mutex
6520 				 * during set m_dr_flag because we won't add
6521 				 * the following command into waitq, instead,
6522 				 * we need return TRAN_BUSY in the tran_start
6523 				 * context.
6524 				 */
6525 				mutex_enter(&mpt->m_tx_waitq_mutex);
6526 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6527 				mutex_exit(&mpt->m_tx_waitq_mutex);
6528 
6529 				topo_node = kmem_zalloc(
6530 				    sizeof (mptsas_topo_change_list_t),
6531 				    KM_SLEEP);
6532 				topo_node->mpt = mpt;
6533 				topo_node->un.phymask = ptgt->m_phymask;
6534 				topo_node->event =
6535 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
6536 				topo_node->devhdl = dev_handle;
6537 				topo_node->flags = flags;
6538 				topo_node->object = NULL;
6539 				if (topo_head == NULL) {
6540 					topo_head = topo_tail = topo_node;
6541 				} else {
6542 					topo_tail->next = topo_node;
6543 					topo_tail = topo_node;
6544 				}
6545 
6546 				break;
6547 			}
6548 			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6549 				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6550 				    &sas_topo_change_list->PHY[i].LinkRate);
6551 				state = (link_rate &
6552 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6553 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6554 				switch (state) {
6555 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6556 					(void) sprintf(curr, "is disabled");
6557 					break;
6558 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6559 					(void) sprintf(curr, "is offline, "
6560 					    "failed speed negotiation");
6561 					break;
6562 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6563 					(void) sprintf(curr, "SATA OOB "
6564 					    "complete");
6565 					break;
6566 				case SMP_RESET_IN_PROGRESS:
6567 					(void) sprintf(curr, "SMP reset in "
6568 					    "progress");
6569 					break;
6570 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6571 					(void) sprintf(curr, "is online at "
6572 					    "1.5 Gbps");
6573 					if ((expd_handle == 0) &&
6574 					    (enc_handle == 1)) {
6575 						mpt->m_port_chng = 1;
6576 					}
6577 					break;
6578 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6579 					(void) sprintf(curr, "is online at 3.0 "
6580 					    "Gbps");
6581 					if ((expd_handle == 0) &&
6582 					    (enc_handle == 1)) {
6583 						mpt->m_port_chng = 1;
6584 					}
6585 					break;
6586 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6587 					(void) sprintf(curr, "is online at "
6588 					    "6.0 Gbps");
6589 					if ((expd_handle == 0) &&
6590 					    (enc_handle == 1)) {
6591 						mpt->m_port_chng = 1;
6592 					}
6593 					break;
6594 				default:
6595 					(void) sprintf(curr, "state is "
6596 					    "unknown");
6597 					break;
6598 				}
6599 
6600 				state = (link_rate &
6601 				    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
6602 				    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
6603 				switch (state) {
6604 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6605 					(void) sprintf(prev, ", was disabled");
6606 					break;
6607 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6608 					(void) sprintf(prev, ", was offline, "
6609 					    "failed speed negotiation");
6610 					break;
6611 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6612 					(void) sprintf(prev, ", was SATA OOB "
6613 					    "complete");
6614 					break;
6615 				case SMP_RESET_IN_PROGRESS:
6616 					(void) sprintf(prev, ", was SMP reset "
6617 					    "in progress");
6618 					break;
6619 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6620 					(void) sprintf(prev, ", was online at "
6621 					    "1.5 Gbps");
6622 					break;
6623 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6624 					(void) sprintf(prev, ", was online at "
6625 					    "3.0 Gbps");
6626 					break;
6627 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6628 					(void) sprintf(prev, ", was online at "
6629 					    "6.0 Gbps");
6630 					break;
6631 				default:
6632 				break;
6633 				}
6634 				(void) sprintf(&string[strlen(string)], "link "
6635 				    "changed, ");
6636 				break;
6637 			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6638 				continue;
6639 			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6640 				(void) sprintf(&string[strlen(string)],
6641 				    "target not responding, delaying "
6642 				    "removal");
6643 				break;
6644 			}
6645 			NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
6646 			    mpt->m_instance, phy, dev_handle, string, curr,
6647 			    prev));
6648 		}
6649 		if (topo_head != NULL) {
6650 			/*
6651 			 * Launch DR taskq to handle topology change
6652 			 */
6653 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
6654 			    mptsas_handle_dr, (void *)topo_head,
6655 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
6656 				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
6657 				    "for handle SAS DR event failed. \n");
6658 			}
6659 		}
6660 		break;
6661 	}
6662 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
6663 	{
6664 		Mpi2EventDataIrConfigChangeList_t	*irChangeList;
6665 		mptsas_topo_change_list_t		*topo_head = NULL;
6666 		mptsas_topo_change_list_t		*topo_tail = NULL;
6667 		mptsas_topo_change_list_t		*topo_node = NULL;
6668 		mptsas_target_t				*ptgt;
6669 		mptsas_hash_table_t			*tgttbl;
6670 		uint8_t					num_entries, i, reason;
6671 		uint16_t				volhandle, diskhandle;
6672 
6673 		irChangeList = (pMpi2EventDataIrConfigChangeList_t)
6674 		    eventreply->EventData;
6675 		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6676 		    &irChangeList->NumElements);
6677 
6678 		tgttbl = &mpt->m_active->m_tgttbl;
6679 
6680 		NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
6681 		    mpt->m_instance));
6682 
6683 		for (i = 0; i < num_entries; i++) {
6684 			reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
6685 			    &irChangeList->ConfigElement[i].ReasonCode);
6686 			volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6687 			    &irChangeList->ConfigElement[i].VolDevHandle);
6688 			diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6689 			    &irChangeList->ConfigElement[i].PhysDiskDevHandle);
6690 
6691 			switch (reason) {
6692 			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
6693 			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
6694 			{
6695 				NDBG20(("mptsas %d volume added\n",
6696 				    mpt->m_instance));
6697 
6698 				topo_node = kmem_zalloc(
6699 				    sizeof (mptsas_topo_change_list_t),
6700 				    KM_SLEEP);
6701 
6702 				topo_node->mpt = mpt;
6703 				topo_node->event =
6704 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
6705 				topo_node->un.physport = 0xff;
6706 				topo_node->devhdl = volhandle;
6707 				topo_node->flags =
6708 				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6709 				topo_node->object = NULL;
6710 				if (topo_head == NULL) {
6711 					topo_head = topo_tail = topo_node;
6712 				} else {
6713 					topo_tail->next = topo_node;
6714 					topo_tail = topo_node;
6715 				}
6716 				break;
6717 			}
6718 			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
6719 			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
6720 			{
6721 				NDBG20(("mptsas %d volume deleted\n",
6722 				    mpt->m_instance));
6723 				ptgt = mptsas_search_by_devhdl(tgttbl,
6724 				    volhandle);
6725 				if (ptgt == NULL)
6726 					break;
6727 
6728 				/*
6729 				 * Clear any flags related to volume
6730 				 */
6731 				(void) mptsas_delete_volume(mpt, volhandle);
6732 
6733 				/*
6734 				 * Update DR flag immediately avoid I/O failure
6735 				 */
6736 				mutex_enter(&mpt->m_tx_waitq_mutex);
6737 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6738 				mutex_exit(&mpt->m_tx_waitq_mutex);
6739 
6740 				topo_node = kmem_zalloc(
6741 				    sizeof (mptsas_topo_change_list_t),
6742 				    KM_SLEEP);
6743 				topo_node->mpt = mpt;
6744 				topo_node->un.phymask = ptgt->m_phymask;
6745 				topo_node->event =
6746 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
6747 				topo_node->devhdl = volhandle;
6748 				topo_node->flags =
6749 				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6750 				topo_node->object = (void *)ptgt;
6751 				if (topo_head == NULL) {
6752 					topo_head = topo_tail = topo_node;
6753 				} else {
6754 					topo_tail->next = topo_node;
6755 					topo_tail = topo_node;
6756 				}
6757 				break;
6758 			}
6759 			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
6760 			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
6761 			{
6762 				ptgt = mptsas_search_by_devhdl(tgttbl,
6763 				    diskhandle);
6764 				if (ptgt == NULL)
6765 					break;
6766 
6767 				/*
6768 				 * Update DR flag immediately avoid I/O failure
6769 				 */
6770 				mutex_enter(&mpt->m_tx_waitq_mutex);
6771 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6772 				mutex_exit(&mpt->m_tx_waitq_mutex);
6773 
6774 				topo_node = kmem_zalloc(
6775 				    sizeof (mptsas_topo_change_list_t),
6776 				    KM_SLEEP);
6777 				topo_node->mpt = mpt;
6778 				topo_node->un.phymask = ptgt->m_phymask;
6779 				topo_node->event =
6780 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
6781 				topo_node->devhdl = diskhandle;
6782 				topo_node->flags =
6783 				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
6784 				topo_node->object = (void *)ptgt;
6785 				if (topo_head == NULL) {
6786 					topo_head = topo_tail = topo_node;
6787 				} else {
6788 					topo_tail->next = topo_node;
6789 					topo_tail = topo_node;
6790 				}
6791 				break;
6792 			}
6793 			case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
6794 			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
6795 			{
				/*
				 * The physical drive has been released by an
				 * IR volume.  The event data does not carry
				 * the physport or phynum, so those can only
				 * be obtained via a SAS Device Page0 request
				 * for the devhdl.
				 */
6803 				topo_node = kmem_zalloc(
6804 				    sizeof (mptsas_topo_change_list_t),
6805 				    KM_SLEEP);
6806 				topo_node->mpt = mpt;
6807 				topo_node->un.phymask = 0;
6808 				topo_node->event =
6809 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
6810 				topo_node->devhdl = diskhandle;
6811 				topo_node->flags =
6812 				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
6813 				topo_node->object = NULL;
6814 				mpt->m_port_chng = 1;
6815 				if (topo_head == NULL) {
6816 					topo_head = topo_tail = topo_node;
6817 				} else {
6818 					topo_tail->next = topo_node;
6819 					topo_tail = topo_node;
6820 				}
6821 				break;
6822 			}
6823 			default:
6824 				break;
6825 			}
6826 		}
6827 
6828 		if (topo_head != NULL) {
6829 			/*
6830 			 * Launch DR taskq to handle topology change
6831 			 */
6832 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
6833 			    mptsas_handle_dr, (void *)topo_head,
6834 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
6835 				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
6836 				    "for handle SAS DR event failed. \n");
6837 			}
6838 		}
6839 		break;
6840 	}
6841 	default:
6842 		return (DDI_FAILURE);
6843 	}
6844 
6845 	return (DDI_SUCCESS);
6846 }
6847 
6848 /*
6849  * handle events from ioc
6850  */
6851 static void
6852 mptsas_handle_event(void *args)
6853 {
6854 	m_replyh_arg_t			*replyh_arg;
6855 	pMpi2EventNotificationReply_t	eventreply;
6856 	uint32_t			event, iocloginfo, rfm;
6857 	uint32_t			status;
6858 	uint8_t				port;
6859 	mptsas_t			*mpt;
6860 	uint_t				iocstatus;
6861 
6862 	replyh_arg = (m_replyh_arg_t *)args;
6863 	rfm = replyh_arg->rfm;
6864 	mpt = replyh_arg->mpt;
6865 
6866 	mutex_enter(&mpt->m_mutex);
6867 
6868 	eventreply = (pMpi2EventNotificationReply_t)
6869 	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6870 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6871 
6872 	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6873 	    &eventreply->IOCStatus)) {
6874 		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6875 			mptsas_log(mpt, CE_WARN,
6876 			    "!mptsas_handle_event: IOCStatus=0x%x, "
6877 			    "IOCLogInfo=0x%x", iocstatus,
6878 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
6879 			    &eventreply->IOCLogInfo));
6880 		} else {
6881 			mptsas_log(mpt, CE_WARN,
6882 			    "mptsas_handle_event: IOCStatus=0x%x, "
6883 			    "IOCLogInfo=0x%x", iocstatus,
6884 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
6885 			    &eventreply->IOCLogInfo));
6886 		}
6887 	}
6888 
6889 	/*
6890 	 * figure out what kind of event we got and handle accordingly
6891 	 */
6892 	switch (event) {
6893 	case MPI2_EVENT_LOG_ENTRY_ADDED:
6894 		break;
6895 	case MPI2_EVENT_LOG_DATA:
6896 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
6897 		    &eventreply->IOCLogInfo);
6898 		NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
6899 		    iocloginfo));
6900 		break;
6901 	case MPI2_EVENT_STATE_CHANGE:
6902 		NDBG20(("mptsas%d state change.", mpt->m_instance));
6903 		break;
6904 	case MPI2_EVENT_HARD_RESET_RECEIVED:
6905 		NDBG20(("mptsas%d event change.", mpt->m_instance));
6906 		break;
6907 	case MPI2_EVENT_SAS_DISCOVERY:
6908 	{
6909 		MPI2_EVENT_DATA_SAS_DISCOVERY	*sasdiscovery;
6910 		char				string[80];
6911 		uint8_t				rc;
6912 
6913 		sasdiscovery =
6914 		    (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
6915 
6916 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
6917 		    &sasdiscovery->ReasonCode);
6918 		port = ddi_get8(mpt->m_acc_reply_frame_hdl,
6919 		    &sasdiscovery->PhysicalPort);
6920 		status = ddi_get32(mpt->m_acc_reply_frame_hdl,
6921 		    &sasdiscovery->DiscoveryStatus);
6922 
6923 		string[0] = 0;
6924 		switch (rc) {
6925 		case MPI2_EVENT_SAS_DISC_RC_STARTED:
6926 			(void) sprintf(string, "STARTING");
6927 			break;
6928 		case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
6929 			(void) sprintf(string, "COMPLETED");
6930 			break;
6931 		default:
6932 			(void) sprintf(string, "UNKNOWN");
6933 			break;
6934 		}
6935 
6936 		NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
6937 		    port, status));
6938 
6939 		break;
6940 	}
6941 	case MPI2_EVENT_EVENT_CHANGE:
6942 		NDBG20(("mptsas%d event change.", mpt->m_instance));
6943 		break;
6944 	case MPI2_EVENT_TASK_SET_FULL:
6945 	{
6946 		pMpi2EventDataTaskSetFull_t	taskfull;
6947 
6948 		taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
6949 
6950 		NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
6951 		    mpt->m_instance,  ddi_get16(mpt->m_acc_reply_frame_hdl,
6952 		    &taskfull->CurrentDepth)));
6953 		break;
6954 	}
6955 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6956 	{
6957 		/*
6958 		 * SAS TOPOLOGY CHANGE LIST Event has already been handled
6959 		 * in mptsas_handle_event_sync() of interrupt context
6960 		 */
6961 		break;
6962 	}
6963 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
6964 	{
6965 		pMpi2EventDataSasEnclDevStatusChange_t	encstatus;
6966 		uint8_t					rc;
6967 		char					string[80];
6968 
6969 		encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
6970 		    eventreply->EventData;
6971 
6972 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
6973 		    &encstatus->ReasonCode);
6974 		switch (rc) {
6975 		case MPI2_EVENT_SAS_ENCL_RC_ADDED:
6976 			(void) sprintf(string, "added");
6977 			break;
6978 		case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
6979 			(void) sprintf(string, ", not responding");
6980 			break;
6981 		default:
6982 		break;
6983 		}
6984 		NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
6985 		    mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
6986 		    &encstatus->EnclosureHandle), string));
6987 		break;
6988 	}
6989 
6990 	/*
6991 	 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
6992 	 * mptsas_handle_event_sync,in here just send ack message.
6993 	 */
6994 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
6995 	{
6996 		pMpi2EventDataSasDeviceStatusChange_t	statuschange;
6997 		uint8_t					rc;
6998 		uint16_t				devhdl;
6999 		uint64_t				wwn = 0;
7000 		uint32_t				wwn_lo, wwn_hi;
7001 
7002 		statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7003 		    eventreply->EventData;
7004 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7005 		    &statuschange->ReasonCode);
7006 		wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7007 		    (uint32_t *)(void *)&statuschange->SASAddress);
7008 		wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7009 		    (uint32_t *)(void *)&statuschange->SASAddress + 1);
7010 		wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7011 		devhdl =  ddi_get16(mpt->m_acc_reply_frame_hdl,
7012 		    &statuschange->DevHandle);
7013 
7014 		NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7015 		    wwn));
7016 
7017 		switch (rc) {
7018 		case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7019 			NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7020 			    ddi_get8(mpt->m_acc_reply_frame_hdl,
7021 			    &statuschange->ASC),
7022 			    ddi_get8(mpt->m_acc_reply_frame_hdl,
7023 			    &statuschange->ASCQ)));
7024 			break;
7025 
7026 		case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7027 			NDBG20(("Device not supported"));
7028 			break;
7029 
7030 		case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7031 			NDBG20(("IOC internally generated the Target Reset "
7032 			    "for devhdl:%x", devhdl));
7033 			break;
7034 
7035 		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7036 			NDBG20(("IOC's internally generated Target Reset "
7037 			    "completed for devhdl:%x", devhdl));
7038 			break;
7039 
7040 		case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7041 			NDBG20(("IOC internally generated Abort Task"));
7042 			break;
7043 
7044 		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7045 			NDBG20(("IOC's internally generated Abort Task "
7046 			    "completed"));
7047 			break;
7048 
7049 		case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7050 			NDBG20(("IOC internally generated Abort Task Set"));
7051 			break;
7052 
7053 		case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7054 			NDBG20(("IOC internally generated Clear Task Set"));
7055 			break;
7056 
7057 		case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7058 			NDBG20(("IOC internally generated Query Task"));
7059 			break;
7060 
7061 		case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7062 			NDBG20(("Device sent an Asynchronous Notification"));
7063 			break;
7064 
7065 		default:
7066 			break;
7067 		}
7068 		break;
7069 	}
7070 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7071 	{
7072 		/*
7073 		 * IR TOPOLOGY CHANGE LIST Event has already been handled
7074 		 * in mpt_handle_event_sync() of interrupt context
7075 		 */
7076 		break;
7077 	}
7078 	case MPI2_EVENT_IR_OPERATION_STATUS:
7079 	{
7080 		Mpi2EventDataIrOperationStatus_t	*irOpStatus;
7081 		char					reason_str[80];
7082 		uint8_t					rc, percent;
7083 		uint16_t				handle;
7084 
7085 		irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7086 		    eventreply->EventData;
7087 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7088 		    &irOpStatus->RAIDOperation);
7089 		percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7090 		    &irOpStatus->PercentComplete);
7091 		handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7092 		    &irOpStatus->VolDevHandle);
7093 
7094 		switch (rc) {
7095 			case MPI2_EVENT_IR_RAIDOP_RESYNC:
7096 				(void) sprintf(reason_str, "resync");
7097 				break;
7098 			case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7099 				(void) sprintf(reason_str, "online capacity "
7100 				    "expansion");
7101 				break;
7102 			case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7103 				(void) sprintf(reason_str, "consistency check");
7104 				break;
7105 			default:
7106 				(void) sprintf(reason_str, "unknown reason %x",
7107 				    rc);
7108 		}
7109 
7110 		NDBG20(("mptsas%d raid operational status: (%s)"
7111 		    "\thandle(0x%04x), percent complete(%d)\n",
7112 		    mpt->m_instance, reason_str, handle, percent));
7113 		break;
7114 	}
7115 	case MPI2_EVENT_IR_VOLUME:
7116 	{
7117 		Mpi2EventDataIrVolume_t		*irVolume;
7118 		uint16_t			devhandle;
7119 		uint32_t			state;
7120 		int				config, vol;
7121 		mptsas_slots_t			*slots = mpt->m_active;
7122 		uint8_t				found = FALSE;
7123 
7124 		irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7125 		state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7126 		    &irVolume->NewValue);
7127 		devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7128 		    &irVolume->VolDevHandle);
7129 
7130 		NDBG20(("EVENT_IR_VOLUME event is received"));
7131 
7132 		/*
7133 		 * Get latest RAID info and then find the DevHandle for this
7134 		 * event in the configuration.  If the DevHandle is not found
7135 		 * just exit the event.
7136 		 */
7137 		(void) mptsas_get_raid_info(mpt);
7138 		for (config = 0; (config < slots->m_num_raid_configs) &&
7139 		    (!found); config++) {
7140 			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7141 				if (slots->m_raidconfig[config].m_raidvol[vol].
7142 				    m_raidhandle == devhandle) {
7143 					found = TRUE;
7144 					break;
7145 				}
7146 			}
7147 		}
7148 		if (!found) {
7149 			break;
7150 		}
7151 
7152 		switch (irVolume->ReasonCode) {
7153 		case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7154 		{
7155 			uint32_t i;
7156 			slots->m_raidconfig[config].m_raidvol[vol].m_settings =
7157 			    state;
7158 
7159 			i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7160 			mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7161 			    ", auto-config of hot-swap drives is %s"
7162 			    ", write caching is %s"
7163 			    ", hot-spare pool mask is %02x\n",
7164 			    vol, state &
7165 			    MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7166 			    ? "disabled" : "enabled",
7167 			    i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7168 			    ? "controlled by member disks" :
7169 			    i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7170 			    ? "disabled" :
7171 			    i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7172 			    ? "enabled" :
7173 			    "incorrectly set",
7174 			    (state >> 16) & 0xff);
7175 				break;
7176 		}
7177 		case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7178 		{
7179 			slots->m_raidconfig[config].m_raidvol[vol].m_state =
7180 			    (uint8_t)state;
7181 
7182 			mptsas_log(mpt, CE_NOTE,
7183 			    "Volume %d is now %s\n", vol,
7184 			    state == MPI2_RAID_VOL_STATE_OPTIMAL
7185 			    ? "optimal" :
7186 			    state == MPI2_RAID_VOL_STATE_DEGRADED
7187 			    ? "degraded" :
7188 			    state == MPI2_RAID_VOL_STATE_ONLINE
7189 			    ? "online" :
7190 			    state == MPI2_RAID_VOL_STATE_INITIALIZING
7191 			    ? "initializing" :
7192 			    state == MPI2_RAID_VOL_STATE_FAILED
7193 			    ? "failed" :
7194 			    state == MPI2_RAID_VOL_STATE_MISSING
7195 			    ? "missing" :
7196 			    "state unknown");
7197 			break;
7198 		}
7199 		case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7200 		{
7201 			slots->m_raidconfig[config].m_raidvol[vol].
7202 			    m_statusflags = state;
7203 
7204 			mptsas_log(mpt, CE_NOTE,
7205 			    " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7206 			    vol,
7207 			    state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7208 			    ? ", enabled" : ", disabled",
7209 			    state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7210 			    ? ", quiesced" : "",
7211 			    state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7212 			    ? ", inactive" : ", active",
7213 			    state &
7214 			    MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7215 			    ? ", bad block table is full" : "",
7216 			    state &
7217 			    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7218 			    ? ", resync in progress" : "",
7219 			    state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7220 			    ? ", background initialization in progress" : "",
7221 			    state &
7222 			    MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7223 			    ? ", capacity expansion in progress" : "",
7224 			    state &
7225 			    MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7226 			    ? ", consistency check in progress" : "",
7227 			    state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7228 			    ? ", data scrub in progress" : "");
7229 			break;
7230 		}
7231 		default:
7232 			break;
7233 		}
7234 		break;
7235 	}
7236 	case MPI2_EVENT_IR_PHYSICAL_DISK:
7237 	{
7238 		Mpi2EventDataIrPhysicalDisk_t	*irPhysDisk;
7239 		uint16_t			devhandle, enchandle, slot;
7240 		uint32_t			status, state;
7241 		uint8_t				physdisknum, reason;
7242 
7243 		irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7244 		    eventreply->EventData;
7245 		physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7246 		    &irPhysDisk->PhysDiskNum);
7247 		devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7248 		    &irPhysDisk->PhysDiskDevHandle);
7249 		enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7250 		    &irPhysDisk->EnclosureHandle);
7251 		slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7252 		    &irPhysDisk->Slot);
7253 		state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7254 		    &irPhysDisk->NewValue);
7255 		reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7256 		    &irPhysDisk->ReasonCode);
7257 
7258 		NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7259 
7260 		switch (reason) {
7261 		case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7262 			mptsas_log(mpt, CE_NOTE,
7263 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7264 			    "for enclosure with handle 0x%x is now in hot "
7265 			    "spare pool %d",
7266 			    physdisknum, devhandle, slot, enchandle,
7267 			    (state >> 16) & 0xff);
7268 			break;
7269 
7270 		case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7271 			status = state;
7272 			mptsas_log(mpt, CE_NOTE,
7273 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7274 			    "for enclosure with handle 0x%x is now "
7275 			    "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7276 			    enchandle,
7277 			    status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7278 			    ? ", inactive" : ", active",
7279 			    status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7280 			    ? ", out of sync" : "",
7281 			    status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7282 			    ? ", quiesced" : "",
7283 			    status &
7284 			    MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7285 			    ? ", write cache enabled" : "",
7286 			    status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7287 			    ? ", capacity expansion target" : "");
7288 			break;
7289 
7290 		case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7291 			mptsas_log(mpt, CE_NOTE,
7292 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7293 			    "for enclosure with handle 0x%x is now %s\n",
7294 			    physdisknum, devhandle, slot, enchandle,
7295 			    state == MPI2_RAID_PD_STATE_OPTIMAL
7296 			    ? "optimal" :
7297 			    state == MPI2_RAID_PD_STATE_REBUILDING
7298 			    ? "rebuilding" :
7299 			    state == MPI2_RAID_PD_STATE_DEGRADED
7300 			    ? "degraded" :
7301 			    state == MPI2_RAID_PD_STATE_HOT_SPARE
7302 			    ? "a hot spare" :
7303 			    state == MPI2_RAID_PD_STATE_ONLINE
7304 			    ? "online" :
7305 			    state == MPI2_RAID_PD_STATE_OFFLINE
7306 			    ? "offline" :
7307 			    state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7308 			    ? "not compatible" :
7309 			    state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7310 			    ? "not configured" :
7311 			    "state unknown");
7312 			break;
7313 		}
7314 		break;
7315 	}
7316 	default:
7317 		NDBG20(("mptsas%d: unknown event %x received",
7318 		    mpt->m_instance, event));
7319 		break;
7320 	}
7321 
7322 	/*
7323 	 * Return the reply frame to the free queue.
7324 	 */
7325 	ddi_put32(mpt->m_acc_free_queue_hdl,
7326 	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7327 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7328 	    DDI_DMA_SYNC_FORDEV);
7329 	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7330 		mpt->m_free_index = 0;
7331 	}
7332 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7333 	    mpt->m_free_index);
7334 	mutex_exit(&mpt->m_mutex);
7335 }
7336 
7337 /*
7338  * invoked from timeout() to restart qfull cmds with throttle == 0
7339  */
7340 static void
7341 mptsas_restart_cmd(void *arg)
7342 {
7343 	mptsas_t	*mpt = arg;
7344 	mptsas_target_t	*ptgt = NULL;
7345 
7346 	mutex_enter(&mpt->m_mutex);
7347 
7348 	mpt->m_restart_cmd_timeid = 0;
7349 
7350 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7351 	    MPTSAS_HASH_FIRST);
7352 	while (ptgt != NULL) {
7353 		if (ptgt->m_reset_delay == 0) {
7354 			if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7355 				mptsas_set_throttle(mpt, ptgt,
7356 				    MAX_THROTTLE);
7357 			}
7358 		}
7359 
7360 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7361 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7362 	}
7363 	mptsas_restart_hba(mpt);
7364 	mutex_exit(&mpt->m_mutex);
7365 }
7366 
7367 void
7368 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7369 {
7370 	int		slot;
7371 	mptsas_slots_t	*slots = mpt->m_active;
7372 	int		t;
7373 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
7374 
7375 	ASSERT(cmd != NULL);
7376 	ASSERT(cmd->cmd_queued == FALSE);
7377 
7378 	/*
7379 	 * Task Management cmds are removed in their own routines.  Also,
7380 	 * we don't want to modify timeout based on TM cmds.
7381 	 */
7382 	if (cmd->cmd_flags & CFLAG_TM_CMD) {
7383 		return;
7384 	}
7385 
7386 	t = Tgt(cmd);
7387 	slot = cmd->cmd_slot;
7388 
7389 	/*
7390 	 * remove the cmd.
7391 	 */
7392 	if (cmd == slots->m_slot[slot]) {
7393 		NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
7394 		slots->m_slot[slot] = NULL;
7395 		mpt->m_ncmds--;
7396 
7397 		/*
7398 		 * only decrement per target ncmds if command
7399 		 * has a target associated with it.
7400 		 */
7401 		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
7402 			ptgt->m_t_ncmds--;
7403 			/*
7404 			 * reset throttle if we just ran an untagged command
7405 			 * to a tagged target
7406 			 */
7407 			if ((ptgt->m_t_ncmds == 0) &&
7408 			    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
7409 				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7410 			}
7411 		}
7412 
7413 	}
7414 
7415 	/*
7416 	 * This is all we need to do for ioc commands.
7417 	 */
7418 	if (cmd->cmd_flags & CFLAG_CMDIOC) {
7419 		mptsas_return_to_pool(mpt, cmd);
7420 		return;
7421 	}
7422 
7423 	/*
7424 	 * Figure out what to set tag Q timeout for...
7425 	 *
7426 	 * Optimize: If we have duplicate's of same timeout
7427 	 * we're using, then we'll use it again until we run
7428 	 * out of duplicates.  This should be the normal case
7429 	 * for block and raw I/O.
7430 	 * If no duplicates, we have to scan through tag que and
7431 	 * find the longest timeout value and use it.  This is
7432 	 * going to take a while...
7433 	 * Add 1 to m_n_slots to account for TM request.
7434 	 */
7435 	if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
7436 		if (--(ptgt->m_dups) == 0) {
7437 			if (ptgt->m_t_ncmds) {
7438 				mptsas_cmd_t *ssp;
7439 				uint_t n = 0;
7440 				ushort_t nslots = (slots->m_n_slots + 1);
7441 				ushort_t i;
7442 				/*
7443 				 * This crude check assumes we don't do
7444 				 * this too often which seems reasonable
7445 				 * for block and raw I/O.
7446 				 */
7447 				for (i = 0; i < nslots; i++) {
7448 					ssp = slots->m_slot[i];
7449 					if (ssp && (Tgt(ssp) == t) &&
7450 					    (ssp->cmd_pkt->pkt_time > n)) {
7451 						n = ssp->cmd_pkt->pkt_time;
7452 						ptgt->m_dups = 1;
7453 					} else if (ssp && (Tgt(ssp) == t) &&
7454 					    (ssp->cmd_pkt->pkt_time == n)) {
7455 						ptgt->m_dups++;
7456 					}
7457 				}
7458 				ptgt->m_timebase = n;
7459 			} else {
7460 				ptgt->m_dups = 0;
7461 				ptgt->m_timebase = 0;
7462 			}
7463 		}
7464 	}
7465 	ptgt->m_timeout = ptgt->m_timebase;
7466 
7467 	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
7468 }
7469 
/*
 * Accept any commands pending on the tx_waitq, then start fresh requests
 * from the top of the device queue.
 *
 * Commands are usually queued on the tx_waitq and only rarely on the
 * instance waitq, so this function should not be invoked from the ISR;
 * the ISR invokes mptsas_restart_waitq() instead.  Otherwise the load
 * that belongs on the I/O dispatch CPUs would be shifted onto the
 * interrupt CPU.
 */
7478  */
7479 static void
7480 mptsas_restart_hba(mptsas_t *mpt)
7481 {
7482 	ASSERT(mutex_owned(&mpt->m_mutex));
7483 
7484 	mutex_enter(&mpt->m_tx_waitq_mutex);
7485 	if (mpt->m_tx_waitq) {
7486 		mptsas_accept_tx_waitq(mpt);
7487 	}
7488 	mutex_exit(&mpt->m_tx_waitq_mutex);
7489 	mptsas_restart_waitq(mpt);
7490 }
7491 
7492 /*
7493  * start a fresh request from the top of the device queue
7494  */
7495 static void
7496 mptsas_restart_waitq(mptsas_t *mpt)
7497 {
7498 	mptsas_cmd_t	*cmd, *next_cmd;
7499 	mptsas_target_t *ptgt = NULL;
7500 
7501 	NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
7502 
7503 	ASSERT(mutex_owned(&mpt->m_mutex));
7504 
7505 	/*
7506 	 * If there is a reset delay, don't start any cmds.  Otherwise, start
7507 	 * as many cmds as possible.
7508 	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
7509 	 * commands is m_max_requests - 2.
7510 	 */
7511 	cmd = mpt->m_waitq;
7512 
7513 	while (cmd != NULL) {
7514 		next_cmd = cmd->cmd_linkp;
7515 		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
7516 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7517 				/*
7518 				 * passthru command get slot need
7519 				 * set CFLAG_PREPARED.
7520 				 */
7521 				cmd->cmd_flags |= CFLAG_PREPARED;
7522 				mptsas_waitq_delete(mpt, cmd);
7523 				mptsas_start_passthru(mpt, cmd);
7524 			}
7525 			cmd = next_cmd;
7526 			continue;
7527 		}
7528 		if (cmd->cmd_flags & CFLAG_CONFIG) {
7529 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7530 				/*
7531 				 * Send the config page request and delete it
7532 				 * from the waitq.
7533 				 */
7534 				cmd->cmd_flags |= CFLAG_PREPARED;
7535 				mptsas_waitq_delete(mpt, cmd);
7536 				mptsas_start_config_page_access(mpt, cmd);
7537 			}
7538 			cmd = next_cmd;
7539 			continue;
7540 		}
7541 		if (cmd->cmd_flags & CFLAG_FW_DIAG) {
7542 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7543 				/*
7544 				 * Send the FW Diag request and delete if from
7545 				 * the waitq.
7546 				 */
7547 				cmd->cmd_flags |= CFLAG_PREPARED;
7548 				mptsas_waitq_delete(mpt, cmd);
7549 				mptsas_start_diag(mpt, cmd);
7550 			}
7551 			cmd = next_cmd;
7552 			continue;
7553 		}
7554 
7555 		ptgt = cmd->cmd_tgt_addr;
7556 		if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
7557 		    (ptgt->m_t_ncmds == 0)) {
7558 			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7559 		}
7560 		if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
7561 		    (ptgt && (ptgt->m_reset_delay == 0)) &&
7562 		    (ptgt && (ptgt->m_t_ncmds <
7563 		    ptgt->m_t_throttle))) {
7564 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7565 				mptsas_waitq_delete(mpt, cmd);
7566 				(void) mptsas_start_cmd(mpt, cmd);
7567 			}
7568 		}
7569 		cmd = next_cmd;
7570 	}
7571 }
/*
 * Commands are queued on the tx_waitq when tran_start() cannot acquire
 * m_mutex without waiting.  Accept all of those queued commands before a
 * new command is accepted so that commands are sent in order.
 */
7577 static void
7578 mptsas_accept_tx_waitq(mptsas_t *mpt)
7579 {
7580 	mptsas_cmd_t *cmd;
7581 
7582 	ASSERT(mutex_owned(&mpt->m_mutex));
7583 	ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
7584 
7585 	/*
7586 	 * A Bus Reset could occur at any time and flush the tx_waitq,
7587 	 * so we cannot count on the tx_waitq to contain even one cmd.
7588 	 * And when the m_tx_waitq_mutex is released and run
7589 	 * mptsas_accept_pkt(), the tx_waitq may be flushed.
7590 	 */
7591 	cmd = mpt->m_tx_waitq;
7592 	for (;;) {
7593 		if ((cmd = mpt->m_tx_waitq) == NULL) {
7594 			mpt->m_tx_draining = 0;
7595 			break;
7596 		}
7597 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7598 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
7599 		}
7600 		cmd->cmd_linkp = NULL;
7601 		mutex_exit(&mpt->m_tx_waitq_mutex);
7602 		if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7603 			cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7604 			    "to accept cmd on queue\n");
7605 		mutex_enter(&mpt->m_tx_waitq_mutex);
7606 	}
7607 }
7608 
7609 
/*
 * mpt tag type lookup
 *
 * Indexed by ((cmd_pkt_flags & FLAG_TAGMASK) >> 12) in mptsas_start_cmd()
 * to translate the SCSA tag-queueing packet flag into the corresponding
 * SCSI queue-tag message code; unused index values map to 0 (no tag).
 */
static char mptsas_tag_lookup[] =
	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
7615 
/*
 * mptsas_start_cmd - hand a slotted command to the IOC.
 *
 * Builds the MPI2 SCSI IO request frame for 'cmd' in the request-frame
 * pool entry matching the command's slot (its SMID), sets tag-queueing
 * control bits, copies in the CDB, sets up the SGL and sense (ARQ)
 * buffer address, then posts the request descriptor to the hardware.
 * Also maintains the per-target timeout bookkeeping
 * (m_timebase/m_timeout/m_dups).
 *
 * Returns DDI_SUCCESS when the request was posted, DDI_FAILURE when the
 * command was requeued to drain untagged I/O or an FMA handle check
 * failed.  Callers in this file invoke it with m_mutex held and the
 * command already occupying a slot.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	int			n;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;

	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));

	/*
	 * The SMID is the command's pre-assigned slot number.  SMID 0 is
	 * invalid, so valid slots/SMIDs begin at 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* Free the slot and requeue at the waitq head. */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/*
		 * Untagged command (other than REQUEST SENSE): throttle the
		 * target down to a single outstanding command.
		 */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
				ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* Build the SCSI IO request frame in this SMID's pool entry. */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data transfer: a single empty simple SGL element. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=0x%p, with cmd=0x%p",
	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));

	/* Flush the request frame to memory before ringing the doorbell. */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	request_desc_high = ptgt->m_devhdl << 16;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

	/*
	 * Start timeout.
	 */
#ifdef MPTSAS_TEST
	/*
	 * Temporarily set timebase = 0;  needed for
	 * timeout torture test.
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 0;
	}
#endif
	n = pkt->pkt_time - ptgt->m_timebase;

	if (n == 0) {
		/* Same timeout as the current base: one more duplicate. */
		(ptgt->m_dups)++;
		ptgt->m_timeout = ptgt->m_timebase;
	} else if (n > 0) {
		/* Longer timeout: it becomes the new base. */
		ptgt->m_timeout =
		    ptgt->m_timebase = pkt->pkt_time;
		ptgt->m_dups = 1;
	} else if (n < 0) {
		/* Shorter timeout: keep the existing (longer) base. */
		ptgt->m_timeout = ptgt->m_timebase;
	}
#ifdef MPTSAS_TEST
	/*
	 * Set back to a number higher than
	 * mptsas_scsi_watchdog_tick
	 * so timeouts will happen in mptsas_watchsubr
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 60;
	}
#endif

	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7797 
7798 /*
7799  * Select a helper thread to handle current doneq
7800  */
7801 static void
7802 mptsas_deliver_doneq_thread(mptsas_t *mpt)
7803 {
7804 	uint64_t			t, i;
7805 	uint32_t			min = 0xffffffff;
7806 	mptsas_doneq_thread_list_t	*item;
7807 
7808 	for (i = 0; i < mpt->m_doneq_thread_n; i++) {
7809 		item = &mpt->m_doneq_thread_id[i];
7810 		/*
7811 		 * If the completed command on help thread[i] less than
7812 		 * doneq_thread_threshold, then pick the thread[i]. Otherwise
7813 		 * pick a thread which has least completed command.
7814 		 */
7815 
7816 		mutex_enter(&item->mutex);
7817 		if (item->len < mpt->m_doneq_thread_threshold) {
7818 			t = i;
7819 			mutex_exit(&item->mutex);
7820 			break;
7821 		}
7822 		if (item->len < min) {
7823 			min = item->len;
7824 			t = i;
7825 		}
7826 		mutex_exit(&item->mutex);
7827 	}
7828 	mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
7829 	mptsas_doneq_mv(mpt, t);
7830 	cv_signal(&mpt->m_doneq_thread_id[t].cv);
7831 	mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
7832 }
7833 
7834 /*
7835  * move the current global doneq to the doneq of thead[t]
7836  */
7837 static void
7838 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
7839 {
7840 	mptsas_cmd_t			*cmd;
7841 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
7842 
7843 	ASSERT(mutex_owned(&item->mutex));
7844 	while ((cmd = mpt->m_doneq) != NULL) {
7845 		if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
7846 			mpt->m_donetail = &mpt->m_doneq;
7847 		}
7848 		cmd->cmd_linkp = NULL;
7849 		*item->donetail = cmd;
7850 		item->donetail = &cmd->cmd_linkp;
7851 		mpt->m_doneq_len--;
7852 		item->len++;
7853 	}
7854 }
7855 
/*
 * Fault-management sweep run as a command completes: verify every access
 * and DMA handle the command may have touched.  Any failing handle posts
 * DDI_SERVICE_UNAFFECTED against the HBA and fails the packet with
 * CMD_TRAN_ERR and zeroed pkt_statistics.
 */
void
mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt	*pkt = CMD2PKT(cmd);

	/* Check all acc and dma handles */
	/* Per-instance access handles: registers, frames, queues, config. */
	if ((mptsas_check_acc_handle(mpt->m_datap) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		/* clear the FM error state recorded on the config handle */
		ddi_fm_acc_err_clear(mpt->m_config_handle,
		    DDI_FME_VER0);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Per-instance DMA handles: request/reply frames, queues, hshk. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Per-command data DMA handle, if the command has one. */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Extra SGL frame handles, when the command allocated them. */
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Auto-request-sense DMA handles (normal and extended). */
	if (cmd->cmd_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_ext_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
}
7926 
7927 /*
7928  * These routines manipulate the queue of commands that
7929  * are waiting for their completion routines to be called.
7930  * The queue is usually in FIFO order but on an MP system
7931  * it's possible for the completion routines to get out
7932  * of order. If that's a problem you need to add a global
7933  * mutex around the code that calls the completion routine
7934  * in the interrupt handler.
7935  */
7936 static void
7937 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
7938 {
7939 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
7940 
7941 	NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
7942 
7943 	ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
7944 	cmd->cmd_linkp = NULL;
7945 	cmd->cmd_flags |= CFLAG_FINISHED;
7946 	cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
7947 
7948 	mptsas_fma_check(mpt, cmd);
7949 
7950 	/*
7951 	 * only add scsi pkts that have completion routines to
7952 	 * the doneq.  no intr cmds do not have callbacks.
7953 	 */
7954 	if (pkt && (pkt->pkt_comp)) {
7955 		*mpt->m_donetail = cmd;
7956 		mpt->m_donetail = &cmd->cmd_linkp;
7957 		mpt->m_doneq_len++;
7958 	}
7959 }
7960 
7961 static mptsas_cmd_t *
7962 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
7963 {
7964 	mptsas_cmd_t			*cmd;
7965 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
7966 
7967 	/* pop one off the done queue */
7968 	if ((cmd = item->doneq) != NULL) {
7969 		/* if the queue is now empty fix the tail pointer */
7970 		NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
7971 		if ((item->doneq = cmd->cmd_linkp) == NULL) {
7972 			item->donetail = &item->doneq;
7973 		}
7974 		cmd->cmd_linkp = NULL;
7975 		item->len--;
7976 	}
7977 	return (cmd);
7978 }
7979 
/*
 * Drain the adapter-global done queue, invoking each completed command's
 * completion routine.  Caller holds m_mutex; it is dropped while the
 * callbacks run and re-taken before returning.  m_in_callback prevents
 * re-entry while the mutex is released.
 */
static void
mptsas_doneq_empty(mptsas_t *mpt)
{
	if (mpt->m_doneq && !mpt->m_in_callback) {
		mptsas_cmd_t	*cmd, *next;
		struct scsi_pkt *pkt;

		mpt->m_in_callback = 1;
		/* detach the whole queue so new completions can accumulate */
		cmd = mpt->m_doneq;
		mpt->m_doneq = NULL;
		mpt->m_donetail = &mpt->m_doneq;
		mpt->m_doneq_len = 0;

		mutex_exit(&mpt->m_mutex);
		/*
		 * run the completion routines of all the
		 * completed commands
		 */
		while (cmd != NULL) {
			next = cmd->cmd_linkp;
			cmd->cmd_linkp = NULL;
			/* run this command's completion routine */
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
			mptsas_pkt_comp(pkt, cmd);
			cmd = next;
		}
		mutex_enter(&mpt->m_mutex);
		mpt->m_in_callback = 0;
	}
}
8011 
8012 /*
8013  * These routines manipulate the target's queue of pending requests
8014  */
8015 void
8016 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8017 {
8018 	NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8019 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8020 	cmd->cmd_queued = TRUE;
8021 	if (ptgt)
8022 		ptgt->m_t_nwait++;
8023 	if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8024 		if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8025 			mpt->m_waitqtail = &cmd->cmd_linkp;
8026 		}
8027 		mpt->m_waitq = cmd;
8028 	} else {
8029 		cmd->cmd_linkp = NULL;
8030 		*(mpt->m_waitqtail) = cmd;
8031 		mpt->m_waitqtail = &cmd->cmd_linkp;
8032 	}
8033 }
8034 
8035 static mptsas_cmd_t *
8036 mptsas_waitq_rm(mptsas_t *mpt)
8037 {
8038 	mptsas_cmd_t	*cmd;
8039 	mptsas_target_t *ptgt;
8040 	NDBG7(("mptsas_waitq_rm"));
8041 
8042 	MPTSAS_WAITQ_RM(mpt, cmd);
8043 
8044 	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8045 	if (cmd) {
8046 		ptgt = cmd->cmd_tgt_addr;
8047 		if (ptgt) {
8048 			ptgt->m_t_nwait--;
8049 			ASSERT(ptgt->m_t_nwait >= 0);
8050 		}
8051 	}
8052 	return (cmd);
8053 }
8054 
8055 /*
8056  * remove specified cmd from the middle of the wait queue.
8057  */
8058 static void
8059 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8060 {
8061 	mptsas_cmd_t	*prevp = mpt->m_waitq;
8062 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8063 
8064 	NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8065 	    (void *)mpt, (void *)cmd));
8066 	if (ptgt) {
8067 		ptgt->m_t_nwait--;
8068 		ASSERT(ptgt->m_t_nwait >= 0);
8069 	}
8070 
8071 	if (prevp == cmd) {
8072 		if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8073 			mpt->m_waitqtail = &mpt->m_waitq;
8074 
8075 		cmd->cmd_linkp = NULL;
8076 		cmd->cmd_queued = FALSE;
8077 		NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8078 		    (void *)mpt, (void *)cmd));
8079 		return;
8080 	}
8081 
8082 	while (prevp != NULL) {
8083 		if (prevp->cmd_linkp == cmd) {
8084 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8085 				mpt->m_waitqtail = &prevp->cmd_linkp;
8086 
8087 			cmd->cmd_linkp = NULL;
8088 			cmd->cmd_queued = FALSE;
8089 			NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8090 			    (void *)mpt, (void *)cmd));
8091 			return;
8092 		}
8093 		prevp = prevp->cmd_linkp;
8094 	}
8095 	cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8096 }
8097 
8098 static mptsas_cmd_t *
8099 mptsas_tx_waitq_rm(mptsas_t *mpt)
8100 {
8101 	mptsas_cmd_t *cmd;
8102 	NDBG7(("mptsas_tx_waitq_rm"));
8103 
8104 	MPTSAS_TX_WAITQ_RM(mpt, cmd);
8105 
8106 	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8107 
8108 	return (cmd);
8109 }
8110 
8111 /*
8112  * remove specified cmd from the middle of the tx_waitq.
8113  */
8114 static void
8115 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8116 {
8117 	mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8118 
8119 	NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8120 	    (void *)mpt, (void *)cmd));
8121 
8122 	if (prevp == cmd) {
8123 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8124 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8125 
8126 		cmd->cmd_linkp = NULL;
8127 		cmd->cmd_queued = FALSE;
8128 		NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8129 		    (void *)mpt, (void *)cmd));
8130 		return;
8131 	}
8132 
8133 	while (prevp != NULL) {
8134 		if (prevp->cmd_linkp == cmd) {
8135 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8136 				mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8137 
8138 			cmd->cmd_linkp = NULL;
8139 			cmd->cmd_queued = FALSE;
8140 			NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8141 			    (void *)mpt, (void *)cmd));
8142 			return;
8143 		}
8144 		prevp = prevp->cmd_linkp;
8145 	}
8146 	cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8147 }
8148 
8149 /*
8150  * device and bus reset handling
8151  *
8152  * Notes:
8153  *	- RESET_ALL:	reset the controller
8154  *	- RESET_TARGET:	reset the target specified in scsi_address
8155  */
8156 static int
8157 mptsas_scsi_reset(struct scsi_address *ap, int level)
8158 {
8159 	mptsas_t		*mpt = ADDR2MPT(ap);
8160 	int			rval;
8161 	mptsas_tgt_private_t	*tgt_private;
8162 	mptsas_target_t		*ptgt = NULL;
8163 
8164 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8165 	ptgt = tgt_private->t_private;
8166 	if (ptgt == NULL) {
8167 		return (FALSE);
8168 	}
8169 	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8170 	    level));
8171 
8172 	mutex_enter(&mpt->m_mutex);
8173 	/*
8174 	 * if we are not in panic set up a reset delay for this target
8175 	 */
8176 	if (!ddi_in_panic()) {
8177 		mptsas_setup_bus_reset_delay(mpt);
8178 	} else {
8179 		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8180 	}
8181 	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8182 	mutex_exit(&mpt->m_mutex);
8183 
8184 	/*
8185 	 * The transport layer expect to only see TRUE and
8186 	 * FALSE. Therefore, we will adjust the return value
8187 	 * if mptsas_do_scsi_reset returns FAILED.
8188 	 */
8189 	if (rval == FAILED)
8190 		rval = FALSE;
8191 	return (rval);
8192 }
8193 
8194 static int
8195 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8196 {
8197 	int		rval = FALSE;
8198 	uint8_t		config, disk;
8199 	mptsas_slots_t	*slots = mpt->m_active;
8200 
8201 	ASSERT(mutex_owned(&mpt->m_mutex));
8202 
8203 	if (mptsas_debug_resets) {
8204 		mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8205 		    devhdl);
8206 	}
8207 
8208 	/*
8209 	 * Issue a Target Reset message to the target specified but not to a
8210 	 * disk making up a raid volume.  Just look through the RAID config
8211 	 * Phys Disk list of DevHandles.  If the target's DevHandle is in this
8212 	 * list, then don't reset this target.
8213 	 */
8214 	for (config = 0; config < slots->m_num_raid_configs; config++) {
8215 		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8216 			if (devhdl == slots->m_raidconfig[config].
8217 			    m_physdisk_devhdl[disk]) {
8218 				return (TRUE);
8219 			}
8220 		}
8221 	}
8222 
8223 	rval = mptsas_ioc_task_management(mpt,
8224 	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8225 
8226 	mptsas_doneq_empty(mpt);
8227 	return (rval);
8228 }
8229 
8230 static int
8231 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
8232 	void (*callback)(caddr_t), caddr_t arg)
8233 {
8234 	mptsas_t	*mpt = ADDR2MPT(ap);
8235 
8236 	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
8237 
8238 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
8239 	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
8240 }
8241 
8242 static int
8243 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8244 {
8245 	dev_info_t	*lun_dip = NULL;
8246 
8247 	ASSERT(sd != NULL);
8248 	ASSERT(name != NULL);
8249 	lun_dip = sd->sd_dev;
8250 	ASSERT(lun_dip != NULL);
8251 
8252 	if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8253 		return (1);
8254 	} else {
8255 		return (0);
8256 	}
8257 }
8258 
/*
 * Return the bus-address string for a LUN; here it is the same string
 * that mptsas_get_name() produces, so simply delegate.
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
8264 
8265 void
8266 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8267 {
8268 
8269 	NDBG25(("mptsas_set_throttle: throttle=%x", what));
8270 
8271 	/*
8272 	 * if the bus is draining/quiesced, no changes to the throttles
8273 	 * are allowed. Not allowing change of throttles during draining
8274 	 * limits error recovery but will reduce draining time
8275 	 *
8276 	 * all throttles should have been set to HOLD_THROTTLE
8277 	 */
8278 	if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8279 		return;
8280 	}
8281 
8282 	if (what == HOLD_THROTTLE) {
8283 		ptgt->m_t_throttle = HOLD_THROTTLE;
8284 	} else if (ptgt->m_reset_delay == 0) {
8285 		ptgt->m_t_throttle = what;
8286 	}
8287 }
8288 
8289 /*
8290  * Clean up from a device reset.
8291  * For the case of target reset, this function clears the waitq of all
8292  * commands for a particular target.   For the case of abort task set, this
8293  * function clears the waitq of all commonds for a particular target/lun.
8294  */
8295 static void
8296 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
8297 {
8298 	mptsas_slots_t	*slots = mpt->m_active;
8299 	mptsas_cmd_t	*cmd, *next_cmd;
8300 	int		slot;
8301 	uchar_t		reason;
8302 	uint_t		stat;
8303 
8304 	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
8305 
8306 	/*
8307 	 * Make sure the I/O Controller has flushed all cmds
8308 	 * that are associated with this target for a target reset
8309 	 * and target/lun for abort task set.
8310 	 * Account for TM requests, which use the last SMID.
8311 	 */
8312 	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
8313 		if ((cmd = slots->m_slot[slot]) == NULL)
8314 			continue;
8315 		reason = CMD_RESET;
8316 		stat = STAT_DEV_RESET;
8317 		switch (tasktype) {
8318 		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8319 			if (Tgt(cmd) == target) {
8320 				NDBG25(("mptsas_flush_target discovered non-"
8321 				    "NULL cmd in slot %d, tasktype 0x%x", slot,
8322 				    tasktype));
8323 				mptsas_dump_cmd(mpt, cmd);
8324 				mptsas_remove_cmd(mpt, cmd);
8325 				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
8326 				mptsas_doneq_add(mpt, cmd);
8327 			}
8328 			break;
8329 		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8330 			reason = CMD_ABORTED;
8331 			stat = STAT_ABORTED;
8332 			/*FALLTHROUGH*/
8333 		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8334 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8335 
8336 				NDBG25(("mptsas_flush_target discovered non-"
8337 				    "NULL cmd in slot %d, tasktype 0x%x", slot,
8338 				    tasktype));
8339 				mptsas_dump_cmd(mpt, cmd);
8340 				mptsas_remove_cmd(mpt, cmd);
8341 				mptsas_set_pkt_reason(mpt, cmd, reason,
8342 				    stat);
8343 				mptsas_doneq_add(mpt, cmd);
8344 			}
8345 			break;
8346 		default:
8347 			break;
8348 		}
8349 	}
8350 
8351 	/*
8352 	 * Flush the waitq and tx_waitq of this target's cmds
8353 	 */
8354 	cmd = mpt->m_waitq;
8355 
8356 	reason = CMD_RESET;
8357 	stat = STAT_DEV_RESET;
8358 
8359 	switch (tasktype) {
8360 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8361 		while (cmd != NULL) {
8362 			next_cmd = cmd->cmd_linkp;
8363 			if (Tgt(cmd) == target) {
8364 				mptsas_waitq_delete(mpt, cmd);
8365 				mptsas_set_pkt_reason(mpt, cmd,
8366 				    reason, stat);
8367 				mptsas_doneq_add(mpt, cmd);
8368 			}
8369 			cmd = next_cmd;
8370 		}
8371 		mutex_enter(&mpt->m_tx_waitq_mutex);
8372 		cmd = mpt->m_tx_waitq;
8373 		while (cmd != NULL) {
8374 			next_cmd = cmd->cmd_linkp;
8375 			if (Tgt(cmd) == target) {
8376 				mptsas_tx_waitq_delete(mpt, cmd);
8377 				mutex_exit(&mpt->m_tx_waitq_mutex);
8378 				mptsas_set_pkt_reason(mpt, cmd,
8379 				    reason, stat);
8380 				mptsas_doneq_add(mpt, cmd);
8381 				mutex_enter(&mpt->m_tx_waitq_mutex);
8382 			}
8383 			cmd = next_cmd;
8384 		}
8385 		mutex_exit(&mpt->m_tx_waitq_mutex);
8386 		break;
8387 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8388 		reason = CMD_ABORTED;
8389 		stat =  STAT_ABORTED;
8390 		/*FALLTHROUGH*/
8391 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8392 		while (cmd != NULL) {
8393 			next_cmd = cmd->cmd_linkp;
8394 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8395 				mptsas_waitq_delete(mpt, cmd);
8396 				mptsas_set_pkt_reason(mpt, cmd,
8397 				    reason, stat);
8398 				mptsas_doneq_add(mpt, cmd);
8399 			}
8400 			cmd = next_cmd;
8401 		}
8402 		mutex_enter(&mpt->m_tx_waitq_mutex);
8403 		cmd = mpt->m_tx_waitq;
8404 		while (cmd != NULL) {
8405 			next_cmd = cmd->cmd_linkp;
8406 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8407 				mptsas_tx_waitq_delete(mpt, cmd);
8408 				mutex_exit(&mpt->m_tx_waitq_mutex);
8409 				mptsas_set_pkt_reason(mpt, cmd,
8410 				    reason, stat);
8411 				mptsas_doneq_add(mpt, cmd);
8412 				mutex_enter(&mpt->m_tx_waitq_mutex);
8413 			}
8414 			cmd = next_cmd;
8415 		}
8416 		mutex_exit(&mpt->m_tx_waitq_mutex);
8417 		break;
8418 	default:
8419 		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
8420 		    tasktype);
8421 		break;
8422 	}
8423 }
8424 
8425 /*
8426  * Clean up hba state, abort all outstanding command and commands in waitq
8427  * reset timeout of all targets.
8428  */
8429 static void
8430 mptsas_flush_hba(mptsas_t *mpt)
8431 {
8432 	mptsas_slots_t	*slots = mpt->m_active;
8433 	mptsas_cmd_t	*cmd;
8434 	int		slot;
8435 
8436 	NDBG25(("mptsas_flush_hba"));
8437 
8438 	/*
8439 	 * The I/O Controller should have already sent back
8440 	 * all commands via the scsi I/O reply frame.  Make
8441 	 * sure all commands have been flushed.
8442 	 * Account for TM request, which use the last SMID.
8443 	 */
8444 	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
8445 		if ((cmd = slots->m_slot[slot]) == NULL)
8446 			continue;
8447 
8448 		if (cmd->cmd_flags & CFLAG_CMDIOC) {
8449 			/*
8450 			 * Need to make sure to tell everyone that might be
8451 			 * waiting on this command that it's going to fail.  If
8452 			 * we get here, this command will never timeout because
8453 			 * the active command table is going to be re-allocated,
8454 			 * so there will be nothing to check against a time out.
8455 			 * Instead, mark the command as failed due to reset.
8456 			 */
8457 			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
8458 			    STAT_BUS_RESET);
8459 			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8460 			    (cmd->cmd_flags & CFLAG_CONFIG) ||
8461 			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8462 				cmd->cmd_flags |= CFLAG_FINISHED;
8463 				cv_broadcast(&mpt->m_passthru_cv);
8464 				cv_broadcast(&mpt->m_config_cv);
8465 				cv_broadcast(&mpt->m_fw_diag_cv);
8466 			}
8467 			continue;
8468 		}
8469 
8470 		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
8471 		    slot));
8472 		mptsas_dump_cmd(mpt, cmd);
8473 
8474 		mptsas_remove_cmd(mpt, cmd);
8475 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8476 		mptsas_doneq_add(mpt, cmd);
8477 	}
8478 
8479 	/*
8480 	 * Flush the waitq.
8481 	 */
8482 	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
8483 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8484 		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8485 		    (cmd->cmd_flags & CFLAG_CONFIG) ||
8486 		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8487 			cmd->cmd_flags |= CFLAG_FINISHED;
8488 			cv_broadcast(&mpt->m_passthru_cv);
8489 			cv_broadcast(&mpt->m_config_cv);
8490 			cv_broadcast(&mpt->m_fw_diag_cv);
8491 		} else {
8492 			mptsas_doneq_add(mpt, cmd);
8493 		}
8494 	}
8495 
8496 	/*
8497 	 * Flush the tx_waitq
8498 	 */
8499 	mutex_enter(&mpt->m_tx_waitq_mutex);
8500 	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
8501 		mutex_exit(&mpt->m_tx_waitq_mutex);
8502 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8503 		mptsas_doneq_add(mpt, cmd);
8504 		mutex_enter(&mpt->m_tx_waitq_mutex);
8505 	}
8506 	mutex_exit(&mpt->m_tx_waitq_mutex);
8507 }
8508 
8509 /*
8510  * set pkt_reason and OR in pkt_statistics flag
8511  */
8512 static void
8513 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8514     uint_t stat)
8515 {
8516 #ifndef __lock_lint
8517 	_NOTE(ARGUNUSED(mpt))
8518 #endif
8519 
8520 	NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8521 	    (void *)cmd, reason, stat));
8522 
8523 	if (cmd) {
8524 		if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8525 			cmd->cmd_pkt->pkt_reason = reason;
8526 		}
8527 		cmd->cmd_pkt->pkt_statistics |= stat;
8528 	}
8529 }
8530 
8531 static void
8532 mptsas_start_watch_reset_delay()
8533 {
8534 	NDBG22(("mptsas_start_watch_reset_delay"));
8535 
8536 	mutex_enter(&mptsas_global_mutex);
8537 	if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
8538 		mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
8539 		    drv_usectohz((clock_t)
8540 		    MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
8541 		ASSERT(mptsas_reset_watch != NULL);
8542 	}
8543 	mutex_exit(&mptsas_global_mutex);
8544 }
8545 
8546 static void
8547 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
8548 {
8549 	mptsas_target_t	*ptgt = NULL;
8550 
8551 	NDBG22(("mptsas_setup_bus_reset_delay"));
8552 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8553 	    MPTSAS_HASH_FIRST);
8554 	while (ptgt != NULL) {
8555 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
8556 		ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
8557 
8558 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8559 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8560 	}
8561 
8562 	mptsas_start_watch_reset_delay();
8563 }
8564 
8565 /*
8566  * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8567  * mpt instance for active reset delays
8568  */
8569 static void
8570 mptsas_watch_reset_delay(void *arg)
8571 {
8572 #ifndef __lock_lint
8573 	_NOTE(ARGUNUSED(arg))
8574 #endif
8575 
8576 	mptsas_t	*mpt;
8577 	int		not_done = 0;
8578 
8579 	NDBG22(("mptsas_watch_reset_delay"));
8580 
8581 	mutex_enter(&mptsas_global_mutex);
8582 	mptsas_reset_watch = 0;
8583 	mutex_exit(&mptsas_global_mutex);
8584 	rw_enter(&mptsas_global_rwlock, RW_READER);
8585 	for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
8586 		if (mpt->m_tran == 0) {
8587 			continue;
8588 		}
8589 		mutex_enter(&mpt->m_mutex);
8590 		not_done += mptsas_watch_reset_delay_subr(mpt);
8591 		mutex_exit(&mpt->m_mutex);
8592 	}
8593 	rw_exit(&mptsas_global_rwlock);
8594 
8595 	if (not_done) {
8596 		mptsas_start_watch_reset_delay();
8597 	}
8598 }
8599 
8600 static int
8601 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
8602 {
8603 	int		done = 0;
8604 	int		restart = 0;
8605 	mptsas_target_t	*ptgt = NULL;
8606 
8607 	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
8608 
8609 	ASSERT(mutex_owned(&mpt->m_mutex));
8610 
8611 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8612 	    MPTSAS_HASH_FIRST);
8613 	while (ptgt != NULL) {
8614 		if (ptgt->m_reset_delay != 0) {
8615 			ptgt->m_reset_delay -=
8616 			    MPTSAS_WATCH_RESET_DELAY_TICK;
8617 			if (ptgt->m_reset_delay <= 0) {
8618 				ptgt->m_reset_delay = 0;
8619 				mptsas_set_throttle(mpt, ptgt,
8620 				    MAX_THROTTLE);
8621 				restart++;
8622 			} else {
8623 				done = -1;
8624 			}
8625 		}
8626 
8627 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8628 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8629 	}
8630 
8631 	if (restart > 0) {
8632 		mptsas_restart_hba(mpt);
8633 	}
8634 	return (done);
8635 }
8636 
#ifdef MPTSAS_TEST
/*
 * Test hook: when the global mptsas_rtest matches this target, issue a
 * target reset and, on success, disarm the hook by setting it back to -1.
 * (The unused local `ptgt` in the original has been removed.)
 */
static void
mptsas_test_reset(mptsas_t *mpt, int target)
{
	if (mptsas_rtest == target) {
		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
			mptsas_rtest = -1;
		}
		if (mptsas_rtest == -1) {
			NDBG22(("mptsas_test_reset success"));
		}
	}
}
#endif
8653 
8654 /*
8655  * abort handling:
8656  *
8657  * Notes:
8658  *	- if pkt is not NULL, abort just that command
8659  *	- if pkt is NULL, abort all outstanding commands for target
8660  */
8661 static int
8662 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
8663 {
8664 	mptsas_t		*mpt = ADDR2MPT(ap);
8665 	int			rval;
8666 	mptsas_tgt_private_t	*tgt_private;
8667 	int			target, lun;
8668 
8669 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
8670 	    tran_tgt_private;
8671 	ASSERT(tgt_private != NULL);
8672 	target = tgt_private->t_private->m_devhdl;
8673 	lun = tgt_private->t_lun;
8674 
8675 	NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
8676 
8677 	mutex_enter(&mpt->m_mutex);
8678 	rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
8679 	mutex_exit(&mpt->m_mutex);
8680 	return (rval);
8681 }
8682 
/*
 * Core of tran_abort processing; m_mutex must be held.
 * If pkt is non-NULL, abort just that command: a command still on the
 * wait queue is completed locally with CMD_ABORTED; one occupying an
 * active slot is aborted via an ABORT_TASK task-management request to
 * the IOC.  If pkt is NULL, an ABRT_TASK_SET is issued for the whole
 * target/lun.  Returns TRUE/FALSE (FAILED is folded into FALSE).
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/* still on the waitq: no IOC interaction needed */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* run completion processing for anything queued above */
	mptsas_doneq_empty(mpt);
	return (rval);
}
8762 
8763 /*
8764  * capability handling:
8765  * (*tran_getcap).  Get the capability named, and return its value.
8766  */
8767 static int
8768 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
8769 {
8770 	mptsas_t	*mpt = ADDR2MPT(ap);
8771 	int		ckey;
8772 	int		rval = FALSE;
8773 
8774 	NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
8775 	    ap->a_target, cap, tgtonly));
8776 
8777 	mutex_enter(&mpt->m_mutex);
8778 
8779 	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
8780 		mutex_exit(&mpt->m_mutex);
8781 		return (UNDEFINED);
8782 	}
8783 
8784 	switch (ckey) {
8785 	case SCSI_CAP_DMA_MAX:
8786 		rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
8787 		break;
8788 	case SCSI_CAP_ARQ:
8789 		rval = TRUE;
8790 		break;
8791 	case SCSI_CAP_MSG_OUT:
8792 	case SCSI_CAP_PARITY:
8793 	case SCSI_CAP_UNTAGGED_QING:
8794 		rval = TRUE;
8795 		break;
8796 	case SCSI_CAP_TAGGED_QING:
8797 		rval = TRUE;
8798 		break;
8799 	case SCSI_CAP_RESET_NOTIFICATION:
8800 		rval = TRUE;
8801 		break;
8802 	case SCSI_CAP_LINKED_CMDS:
8803 		rval = FALSE;
8804 		break;
8805 	case SCSI_CAP_QFULL_RETRIES:
8806 		rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
8807 		    tran_tgt_private))->t_private->m_qfull_retries;
8808 		break;
8809 	case SCSI_CAP_QFULL_RETRY_INTERVAL:
8810 		rval = drv_hztousec(((mptsas_tgt_private_t *)
8811 		    (ap->a_hba_tran->tran_tgt_private))->
8812 		    t_private->m_qfull_retry_interval) / 1000;
8813 		break;
8814 	case SCSI_CAP_CDB_LEN:
8815 		rval = CDB_GROUP4;
8816 		break;
8817 	case SCSI_CAP_INTERCONNECT_TYPE:
8818 		rval = INTERCONNECT_SAS;
8819 		break;
8820 	case SCSI_CAP_TRAN_LAYER_RETRIES:
8821 		if (mpt->m_ioc_capabilities &
8822 		    MPI2_IOCFACTS_CAPABILITY_TLR)
8823 			rval = TRUE;
8824 		else
8825 			rval = FALSE;
8826 		break;
8827 	default:
8828 		rval = UNDEFINED;
8829 		break;
8830 	}
8831 
8832 	NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
8833 
8834 	mutex_exit(&mpt->m_mutex);
8835 	return (rval);
8836 }
8837 
8838 /*
8839  * (*tran_setcap).  Set the capability named to the value given.
8840  */
8841 static int
8842 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
8843 {
8844 	mptsas_t	*mpt = ADDR2MPT(ap);
8845 	int		ckey;
8846 	int		rval = FALSE;
8847 
8848 	NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
8849 	    ap->a_target, cap, value, tgtonly));
8850 
8851 	if (!tgtonly) {
8852 		return (rval);
8853 	}
8854 
8855 	mutex_enter(&mpt->m_mutex);
8856 
8857 	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
8858 		mutex_exit(&mpt->m_mutex);
8859 		return (UNDEFINED);
8860 	}
8861 
8862 	switch (ckey) {
8863 	case SCSI_CAP_DMA_MAX:
8864 	case SCSI_CAP_MSG_OUT:
8865 	case SCSI_CAP_PARITY:
8866 	case SCSI_CAP_INITIATOR_ID:
8867 	case SCSI_CAP_LINKED_CMDS:
8868 	case SCSI_CAP_UNTAGGED_QING:
8869 	case SCSI_CAP_RESET_NOTIFICATION:
8870 		/*
8871 		 * None of these are settable via
8872 		 * the capability interface.
8873 		 */
8874 		break;
8875 	case SCSI_CAP_ARQ:
8876 		/*
8877 		 * We cannot turn off arq so return false if asked to
8878 		 */
8879 		if (value) {
8880 			rval = TRUE;
8881 		} else {
8882 			rval = FALSE;
8883 		}
8884 		break;
8885 	case SCSI_CAP_TAGGED_QING:
8886 		mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
8887 		    (ap->a_hba_tran->tran_tgt_private))->t_private,
8888 		    MAX_THROTTLE);
8889 		rval = TRUE;
8890 		break;
8891 	case SCSI_CAP_QFULL_RETRIES:
8892 		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
8893 		    t_private->m_qfull_retries = (uchar_t)value;
8894 		rval = TRUE;
8895 		break;
8896 	case SCSI_CAP_QFULL_RETRY_INTERVAL:
8897 		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
8898 		    t_private->m_qfull_retry_interval =
8899 		    drv_usectohz(value * 1000);
8900 		rval = TRUE;
8901 		break;
8902 	default:
8903 		rval = UNDEFINED;
8904 		break;
8905 	}
8906 	mutex_exit(&mpt->m_mutex);
8907 	return (rval);
8908 }
8909 
8910 /*
8911  * Utility routine for mptsas_ifsetcap/ifgetcap
8912  */
8913 /*ARGSUSED*/
8914 static int
8915 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
8916 {
8917 	NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
8918 
8919 	if (!cap)
8920 		return (FALSE);
8921 
8922 	*cidxp = scsi_hba_lookup_capstr(cap);
8923 	return (TRUE);
8924 }
8925 
8926 static int
8927 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
8928 {
8929 	mptsas_slots_t	*old_active = mpt->m_active;
8930 	mptsas_slots_t	*new_active;
8931 	size_t		size;
8932 	int		rval = -1;
8933 
8934 	if (mpt->m_ncmds) {
8935 		NDBG9(("cannot change size of active slots array"));
8936 		return (rval);
8937 	}
8938 
8939 	size = MPTSAS_SLOTS_SIZE(mpt);
8940 	new_active = kmem_zalloc(size, flag);
8941 	if (new_active == NULL) {
8942 		NDBG1(("new active alloc failed"));
8943 	} else {
8944 		/*
8945 		 * Since SMID 0 is reserved and the TM slot is reserved, the
8946 		 * number of slots that can be used at any one time is
8947 		 * m_max_requests - 2.
8948 		 */
8949 		mpt->m_active = new_active;
8950 		mpt->m_active->m_n_slots = (mpt->m_max_requests - 2);
8951 		mpt->m_active->m_size = size;
8952 		mpt->m_active->m_tags = 1;
8953 		if (old_active) {
8954 			kmem_free(old_active, old_active->m_size);
8955 		}
8956 		rval = 0;
8957 	}
8958 
8959 	return (rval);
8960 }
8961 
8962 /*
8963  * Error logging, printing, and debug print routines.
8964  */
8965 static char *mptsas_label = "mpt_sas";
8966 
8967 /*PRINTFLIKE3*/
8968 void
8969 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
8970 {
8971 	dev_info_t	*dev;
8972 	va_list		ap;
8973 
8974 	if (mpt) {
8975 		dev = mpt->m_dip;
8976 	} else {
8977 		dev = 0;
8978 	}
8979 
8980 	mutex_enter(&mptsas_log_mutex);
8981 
8982 	va_start(ap, fmt);
8983 	(void) vsprintf(mptsas_log_buf, fmt, ap);
8984 	va_end(ap);
8985 
8986 	if (level == CE_CONT) {
8987 		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
8988 	} else {
8989 		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
8990 	}
8991 
8992 	mutex_exit(&mptsas_log_mutex);
8993 }
8994 
8995 #ifdef MPTSAS_DEBUG
8996 /*PRINTFLIKE1*/
8997 void
8998 mptsas_printf(char *fmt, ...)
8999 {
9000 	dev_info_t	*dev = 0;
9001 	va_list		ap;
9002 
9003 	mutex_enter(&mptsas_log_mutex);
9004 
9005 	va_start(ap, fmt);
9006 	(void) vsprintf(mptsas_log_buf, fmt, ap);
9007 	va_end(ap);
9008 
9009 #ifdef PROM_PRINTF
9010 	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
9011 #else
9012 	scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
9013 #endif
9014 	mutex_exit(&mptsas_log_mutex);
9015 }
9016 #endif
9017 
9018 /*
9019  * timeout handling
9020  */
9021 static void
9022 mptsas_watch(void *arg)
9023 {
9024 #ifndef __lock_lint
9025 	_NOTE(ARGUNUSED(arg))
9026 #endif
9027 
9028 	mptsas_t	*mpt;
9029 	uint32_t	doorbell;
9030 
9031 	NDBG30(("mptsas_watch"));
9032 
9033 	rw_enter(&mptsas_global_rwlock, RW_READER);
9034 	for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9035 
9036 		mutex_enter(&mpt->m_mutex);
9037 
9038 		/* Skip device if not powered on */
9039 		if (mpt->m_options & MPTSAS_OPT_PM) {
9040 			if (mpt->m_power_level == PM_LEVEL_D0) {
9041 				(void) pm_busy_component(mpt->m_dip, 0);
9042 				mpt->m_busy = 1;
9043 			} else {
9044 				mutex_exit(&mpt->m_mutex);
9045 				continue;
9046 			}
9047 		}
9048 
9049 		/*
9050 		 * Check if controller is in a FAULT state. If so, reset it.
9051 		 */
9052 		doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9053 		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9054 			doorbell &= MPI2_DOORBELL_DATA_MASK;
9055 			mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9056 			    "code: %04x", doorbell);
9057 			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9058 				mptsas_log(mpt, CE_WARN, "Reset failed"
9059 				    "after fault was detected");
9060 			}
9061 		}
9062 
9063 		/*
9064 		 * For now, always call mptsas_watchsubr.
9065 		 */
9066 		mptsas_watchsubr(mpt);
9067 
9068 		if (mpt->m_options & MPTSAS_OPT_PM) {
9069 			mpt->m_busy = 0;
9070 			(void) pm_idle_component(mpt->m_dip, 0);
9071 		}
9072 
9073 		mutex_exit(&mpt->m_mutex);
9074 	}
9075 	rw_exit(&mptsas_global_rwlock);
9076 
9077 	mutex_enter(&mptsas_global_mutex);
9078 	if (mptsas_timeouts_enabled)
9079 		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9080 	mutex_exit(&mptsas_global_mutex);
9081 }
9082 
/*
 * Per-instance watchdog work, called from mptsas_watch() with
 * mpt->m_mutex held.  Scans the active slot array for commands whose
 * timeout has expired and walks the target table to apply per-target
 * timeout/throttle bookkeeping.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			/*
			 * Regular (non-IOC) commands: age the command and
			 * drain the target's throttle if it looks stuck.
			 */
			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot.  Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				}
			}
			/*
			 * Passthrough/config/FW-diag commands: on expiry,
			 * flag them finished+timed-out and wake all
			 * threads that may be waiting on them.
			 */
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	/* Walk every known target and update its timeout bookkeeping. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			/*
			 * NOTE(review): while m_timebase is still at or
			 * below one watchdog tick it is ramped up and the
			 * target is skipped — presumably a grace period
			 * before timeout accounting starts; confirm
			 * against m_timebase's initialization.
			 */
			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			/* Timed out: attempt target-level recovery. */
			if (ptgt->m_timeout < 0) {
				mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			/* About to time out: stop feeding it new I/O. */
			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
}
9183 
9184 /*
9185  * timeout recovery
9186  */
9187 static void
9188 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
9189 {
9190 
9191 	NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9192 	mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9193 	    "Target %d", devhdl);
9194 
9195 	/*
9196 	 * If the current target is not the target passed in,
9197 	 * try to reset that target.
9198 	 */
9199 	NDBG29(("mptsas_cmd_timeout: device reset"));
9200 	if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9201 		mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9202 		    "recovery failed!", devhdl);
9203 	}
9204 }
9205 
9206 /*
9207  * Device / Hotplug control
9208  */
9209 static int
9210 mptsas_scsi_quiesce(dev_info_t *dip)
9211 {
9212 	mptsas_t	*mpt;
9213 	scsi_hba_tran_t	*tran;
9214 
9215 	tran = ddi_get_driver_private(dip);
9216 	if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9217 		return (-1);
9218 
9219 	return (mptsas_quiesce_bus(mpt));
9220 }
9221 
9222 static int
9223 mptsas_scsi_unquiesce(dev_info_t *dip)
9224 {
9225 	mptsas_t		*mpt;
9226 	scsi_hba_tran_t	*tran;
9227 
9228 	tran = ddi_get_driver_private(dip);
9229 	if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9230 		return (-1);
9231 
9232 	return (mptsas_unquiesce_bus(mpt));
9233 }
9234 
/*
 * Quiesce the bus: hold every target's throttle so no new commands are
 * started, then wait (interruptibly) for outstanding commands to drain.
 * Returns 0 once quiesced, -1 if the wait was interrupted by a signal.
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		/*
		 * Arm the drain-check timer, then sleep until either
		 * mptsas_ncmds_checkdrain() signals m_cv (drained) or a
		 * signal interrupts the wait.
		 */
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Restore full throttle and restart I/O. */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel the pending drain-check timeout; the
			 * mutex must be dropped before untimeout(9F) to
			 * avoid deadlocking with a running callback.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
9295 
9296 static int
9297 mptsas_unquiesce_bus(mptsas_t *mpt)
9298 {
9299 	mptsas_target_t	*ptgt = NULL;
9300 
9301 	NDBG28(("mptsas_unquiesce_bus"));
9302 	mutex_enter(&mpt->m_mutex);
9303 	mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9304 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9305 	    MPTSAS_HASH_FIRST);
9306 	while (ptgt != NULL) {
9307 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9308 
9309 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9310 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9311 	}
9312 	mptsas_restart_hba(mpt);
9313 	mutex_exit(&mpt->m_mutex);
9314 	return (0);
9315 }
9316 
9317 static void
9318 mptsas_ncmds_checkdrain(void *arg)
9319 {
9320 	mptsas_t	*mpt = arg;
9321 	mptsas_target_t	*ptgt = NULL;
9322 
9323 	mutex_enter(&mpt->m_mutex);
9324 	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
9325 		mpt->m_quiesce_timeid = 0;
9326 		if (mpt->m_ncmds == 0) {
9327 			/* Command queue has been drained */
9328 			cv_signal(&mpt->m_cv);
9329 		} else {
9330 			/*
9331 			 * The throttle may have been reset because
9332 			 * of a SCSI bus reset
9333 			 */
9334 			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9335 			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
9336 			while (ptgt != NULL) {
9337 				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9338 
9339 				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9340 				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9341 			}
9342 
9343 			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9344 			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
9345 			    drv_usectohz(1000000)));
9346 		}
9347 	}
9348 	mutex_exit(&mpt->m_mutex);
9349 }
9350 
9351 /*ARGSUSED*/
9352 static void
9353 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9354 {
9355 	int	i;
9356 	uint8_t	*cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9357 	char	buf[128];
9358 
9359 	buf[0] = '\0';
9360 	NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9361 	    Tgt(cmd), Lun(cmd)));
9362 	(void) sprintf(&buf[0], "\tcdb=[");
9363 	for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9364 		(void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9365 	}
9366 	(void) sprintf(&buf[strlen(buf)], " ]");
9367 	NDBG25(("?%s\n", buf));
9368 	NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
9369 	    cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
9370 	    cmd->cmd_pkt->pkt_state));
9371 	NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", *(cmd->cmd_pkt->pkt_scbp),
9372 	    cmd->cmd_flags));
9373 }
9374 
9375 static void
9376 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
9377 {
9378 	caddr_t			memp;
9379 	pMPI2RequestHeader_t	request_hdrp;
9380 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
9381 	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
9382 	uint32_t		request_size, data_size, dataout_size;
9383 	uint32_t		direction;
9384 	ddi_dma_cookie_t	data_cookie;
9385 	ddi_dma_cookie_t	dataout_cookie;
9386 	uint32_t		request_desc_low, request_desc_high = 0;
9387 	uint32_t		i, sense_bufp;
9388 	uint8_t			desc_type;
9389 	uint8_t			*request, function;
9390 	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
9391 	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
9392 
9393 	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
9394 
9395 	request = pt->request;
9396 	direction = pt->direction;
9397 	request_size = pt->request_size;
9398 	data_size = pt->data_size;
9399 	dataout_size = pt->dataout_size;
9400 	data_cookie = pt->data_cookie;
9401 	dataout_cookie = pt->dataout_cookie;
9402 
9403 	/*
9404 	 * Store the passthrough message in memory location
9405 	 * corresponding to our slot number
9406 	 */
9407 	memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
9408 	request_hdrp = (pMPI2RequestHeader_t)memp;
9409 	bzero(memp, mpt->m_req_frame_size);
9410 
9411 	for (i = 0; i < request_size; i++) {
9412 		bcopy(request + i, memp + i, 1);
9413 	}
9414 
9415 	if (data_size || dataout_size) {
9416 		pMpi2SGESimple64_t	sgep;
9417 		uint32_t		sge_flags;
9418 
9419 		sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
9420 		    request_size);
9421 		if (dataout_size) {
9422 
9423 			sge_flags = dataout_size |
9424 			    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9425 			    MPI2_SGE_FLAGS_END_OF_BUFFER |
9426 			    MPI2_SGE_FLAGS_HOST_TO_IOC |
9427 			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9428 			    MPI2_SGE_FLAGS_SHIFT);
9429 			ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
9430 			ddi_put32(acc_hdl, &sgep->Address.Low,
9431 			    (uint32_t)(dataout_cookie.dmac_laddress &
9432 			    0xffffffffull));
9433 			ddi_put32(acc_hdl, &sgep->Address.High,
9434 			    (uint32_t)(dataout_cookie.dmac_laddress
9435 			    >> 32));
9436 			sgep++;
9437 		}
9438 		sge_flags = data_size;
9439 		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9440 		    MPI2_SGE_FLAGS_LAST_ELEMENT |
9441 		    MPI2_SGE_FLAGS_END_OF_BUFFER |
9442 		    MPI2_SGE_FLAGS_END_OF_LIST |
9443 		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9444 		    MPI2_SGE_FLAGS_SHIFT);
9445 		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9446 			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
9447 			    MPI2_SGE_FLAGS_SHIFT);
9448 		} else {
9449 			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
9450 			    MPI2_SGE_FLAGS_SHIFT);
9451 		}
9452 		ddi_put32(acc_hdl, &sgep->FlagsLength,
9453 		    sge_flags);
9454 		ddi_put32(acc_hdl, &sgep->Address.Low,
9455 		    (uint32_t)(data_cookie.dmac_laddress &
9456 		    0xffffffffull));
9457 		ddi_put32(acc_hdl, &sgep->Address.High,
9458 		    (uint32_t)(data_cookie.dmac_laddress >> 32));
9459 	}
9460 
9461 	function = request_hdrp->Function;
9462 	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9463 	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9464 		pMpi2SCSIIORequest_t	scsi_io_req;
9465 
9466 		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
9467 		/*
9468 		 * Put SGE for data and data_out buffer at the end of
9469 		 * scsi_io_request message header.(64 bytes in total)
9470 		 * Following above SGEs, the residual space will be
9471 		 * used by sense data.
9472 		 */
9473 		ddi_put8(acc_hdl,
9474 		    &scsi_io_req->SenseBufferLength,
9475 		    (uint8_t)(request_size - 64));
9476 
9477 		sense_bufp = mpt->m_req_frame_dma_addr +
9478 		    (mpt->m_req_frame_size * cmd->cmd_slot);
9479 		sense_bufp += 64;
9480 		ddi_put32(acc_hdl,
9481 		    &scsi_io_req->SenseBufferLowAddress, sense_bufp);
9482 
9483 		/*
9484 		 * Set SGLOffset0 value
9485 		 */
9486 		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
9487 		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
9488 
9489 		/*
9490 		 * Setup descriptor info.  RAID passthrough must use the
9491 		 * default request descriptor which is already set, so if this
9492 		 * is a SCSI IO request, change the descriptor to SCSI IO.
9493 		 */
9494 		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
9495 			desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
9496 		}
9497 		request_desc_high = (ddi_get16(acc_hdl,
9498 		    &scsi_io_req->DevHandle) << 16);
9499 	}
9500 
9501 	/*
9502 	 * We must wait till the message has been completed before
9503 	 * beginning the next message so we wait for this one to
9504 	 * finish.
9505 	 */
9506 	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
9507 	request_desc_low = (cmd->cmd_slot << 16) + desc_type;
9508 	cmd->cmd_rfm = NULL;
9509 	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
9510 	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
9511 	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
9512 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9513 	}
9514 }
9515 
9516 
9517 
/*
 * Core passthrough engine: copies the caller's MPI request into kernel
 * memory, sets up optional data-in/data-out DMA buffers, runs the
 * command through a pool slot (or the TM path for task management
 * requests), waits for completion, then copies the reply, sense data
 * and read data back to the caller.  Returns 0 or an errno.
 *
 * Entered with mpt->m_mutex held; the mutex is dropped and reacquired
 * around every ddi_copyin()/ddi_copyout().  Cleanup is driven by the
 * pt_flags bits accumulated on the way down.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t		pt;
	mptsas_dma_alloc_state_t	data_dma_state;
	mptsas_dma_alloc_state_t	dataout_dma_state;
	caddr_t				memp;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	uint32_t			reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t		request_hdrp;
	pMPI2RequestHeader_t		request_msg;
	pMPI2DefaultReply_t		reply_msg;
	Mpi2SCSIIOReply_t		rep_msg;
	int				i, status = 0, pt_flags = 0, rv = 0;
	int				rvalue;
	uint8_t				function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	mutex_enter(&mpt->m_mutex);

	/*
	 * Task management requests are handled synchronously by the
	 * IOC TM interface rather than via the normal slot machinery.
	 */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t	task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/* Allocate and (for writes) fill the data-in DMA buffer. */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* Allocate and fill the data-out DMA buffer. */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	}

	/* Grab a cmd/pkt pair from the IOC command pool. */
	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/* Passthrough parameters consumed by mptsas_start_passthru(). */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp		= (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp		= (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private	= (opaque_t)&pt;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= timeout;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Wait for completion (or timeout) signalled on m_passthru_cv. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	/* Locate the request frame this command occupied, if any. */
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    mpt->m_reply_frame_dma_addr));
	}

	/* Map transport-level failures to errnos. */
	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	/* Copy the reply (and for SCSI IO, trailing sense data) out. */
	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = reply_size - reply_len;
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		/* Sense data lives 64 bytes into the request frame. */
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	/* For reads, copy the DMA'd data back to the caller. */
	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i,  1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd() also returns the cmd to the pool. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* A timed-out passthrough leaves the IOC suspect; restart it. */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);

	return (status);
}
9823 
9824 static int
9825 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
9826 {
9827 	/*
9828 	 * If timeout is 0, set timeout to default of 60 seconds.
9829 	 */
9830 	if (data->Timeout == 0) {
9831 		data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
9832 	}
9833 
9834 	if (((data->DataSize == 0) &&
9835 	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
9836 	    ((data->DataSize != 0) &&
9837 	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
9838 	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
9839 	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
9840 	    (data->DataOutSize != 0))))) {
9841 		if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
9842 			data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
9843 		} else {
9844 			data->DataOutSize = 0;
9845 		}
9846 		/*
9847 		 * Send passthru request messages
9848 		 */
9849 		return (mptsas_do_passthru(mpt,
9850 		    (uint8_t *)((uintptr_t)data->PtrRequest),
9851 		    (uint8_t *)((uintptr_t)data->PtrReply),
9852 		    (uint8_t *)((uintptr_t)data->PtrData),
9853 		    data->RequestSize, data->ReplySize,
9854 		    data->DataSize, data->DataDirection,
9855 		    (uint8_t *)((uintptr_t)data->PtrDataOut),
9856 		    data->DataOutSize, data->Timeout, mode));
9857 	} else {
9858 		return (EINVAL);
9859 	}
9860 }
9861 
9862 static uint8_t
9863 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
9864 {
9865 	uint8_t	index;
9866 
9867 	for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
9868 		if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
9869 			return (index);
9870 		}
9871 	}
9872 
9873 	return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
9874 }
9875 
/*
 * Build a firmware diagnostic request (buffer POST or RELEASE) in the
 * request frame belonging to the command's slot and hand it to the IOC.
 * The request parameters come from the mptsas_diag_request_t hung off
 * pkt->pkt_ha_private.  Called with the driver mutex held; the reply is
 * delivered asynchronously through the normal reply path.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
	struct scsi_pkt			*pkt = cmd->cmd_pkt;
	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
	uint32_t			request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		/* Locate the request frame for this command's slot. */
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* Copy the product-specific words one DWORD at a time. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/*
		 * Split the 64-bit DMA address of the diag buffer into the
		 * low/high halves expected by the message.
		 */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* Anything else is built as a DIAG_RELEASE request. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	/*
	 * The request descriptor carries the slot number in its upper 16
	 * bits and the default descriptor type in the low bits.
	 */
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	/* Report (but do not fail on) any FMA handle problems. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
9947 
/*
 * Post a FW diagnostic buffer to the IOC (MPI2 DIAG_BUFFER_POST) and wait
 * for the reply.  On success the buffer is marked valid and owned by
 * firmware.  Called with the driver mutex held; blocks on m_fw_diag_cv
 * until the command finishes or times out.  Returns DDI_SUCCESS or
 * DDI_FAILURE and sets *return_code to a MPTSAS_FW_DIAG_ERROR_* value.
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	/* Track what needs undoing in the out: path. */
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/*
	 * diag lives on this stack frame; that is safe because this
	 * function does not return until the command completes.
	 */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private	= (opaque_t)&diag;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= 60;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		/* No free slot: queue it; it starts when a slot frees up. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Completion (or timeout) signals m_fw_diag_cv. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * mptsas_remove_cmd() also returns the command to the pool, so
	 * clear the pool flag to avoid a double return below.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
10097 
/*
 * Ask the IOC to release a FW diagnostic buffer (MPI2 DIAG_RELEASE) and
 * wait for the reply.  diag_type distinguishes a plain RELEASE from an
 * UNREGISTER; on a successful UNREGISTER the buffer's unique ID is
 * cleared.  Called with the driver mutex held; blocks on m_fw_diag_cv.
 * Returns DDI_SUCCESS or DDI_FAILURE and sets *return_code to a
 * MPTSAS_FW_DIAG_ERROR_* value.
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t	diag;
	int			status, slot_num, rel_flags = 0;
	mptsas_cmd_t		*cmd = NULL;
	struct scsi_pkt		*pkt;
	pMpi2DiagReleaseReply_t	reply;
	uint16_t		iocstatus;
	uint32_t		iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	/* Track what needs undoing in the out: path. */
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/*
	 * diag lives on this stack frame; that is safe because this
	 * function does not return until the command completes.
	 */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private	= (opaque_t)&diag;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= 60;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		/* No free slot: queue it; it starts when a slot frees up. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Completion (or timeout) signals m_fw_diag_cv. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * mptsas_remove_cmd() also returns the command to the pool, so
	 * clear the pool flag to avoid a double return below.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
10256 
10257 static int
10258 mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
10259     uint32_t *return_code)
10260 {
10261 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
10262 	uint8_t				extended_type, buffer_type, i;
10263 	uint32_t			buffer_size;
10264 	uint32_t			unique_id;
10265 	int				status;
10266 
10267 	ASSERT(mutex_owned(&mpt->m_mutex));
10268 
10269 	extended_type = diag_register->ExtendedType;
10270 	buffer_type = diag_register->BufferType;
10271 	buffer_size = diag_register->RequestedBufferSize;
10272 	unique_id = diag_register->UniqueId;
10273 
10274 	/*
10275 	 * Check for valid buffer type
10276 	 */
10277 	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
10278 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10279 		return (DDI_FAILURE);
10280 	}
10281 
10282 	/*
10283 	 * Get the current buffer and look up the unique ID.  The unique ID
10284 	 * should not be found.  If it is, the ID is already in use.
10285 	 */
10286 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10287 	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
10288 	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10289 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10290 		return (DDI_FAILURE);
10291 	}
10292 
10293 	/*
10294 	 * The buffer's unique ID should not be registered yet, and the given
10295 	 * unique ID cannot be 0.
10296 	 */
10297 	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
10298 	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10299 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10300 		return (DDI_FAILURE);
10301 	}
10302 
10303 	/*
10304 	 * If this buffer is already posted as immediate, just change owner.
10305 	 */
10306 	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
10307 	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10308 		pBuffer->immediate = FALSE;
10309 		pBuffer->unique_id = unique_id;
10310 		return (DDI_SUCCESS);
10311 	}
10312 
10313 	/*
10314 	 * Post a new buffer after checking if it's enabled.  The DMA buffer
10315 	 * that is allocated will be contiguous (sgl_len = 1).
10316 	 */
10317 	if (!pBuffer->enabled) {
10318 		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10319 		return (DDI_FAILURE);
10320 	}
10321 	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
10322 	pBuffer->buffer_data.size = buffer_size;
10323 	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
10324 		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
10325 		    "diag buffer: size = %d bytes", buffer_size);
10326 		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10327 		return (DDI_FAILURE);
10328 	}
10329 
10330 	/*
10331 	 * Copy the given info to the diag buffer and post the buffer.
10332 	 */
10333 	pBuffer->buffer_type = buffer_type;
10334 	pBuffer->immediate = FALSE;
10335 	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
10336 		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
10337 		    i++) {
10338 			pBuffer->product_specific[i] =
10339 			    diag_register->ProductSpecific[i];
10340 		}
10341 	}
10342 	pBuffer->extended_type = extended_type;
10343 	pBuffer->unique_id = unique_id;
10344 	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);
10345 
10346 	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10347 	    DDI_SUCCESS) {
10348 		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
10349 		    "mptsas_diag_register.");
10350 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10351 			status = DDI_FAILURE;
10352 	}
10353 
10354 	/*
10355 	 * In case there was a failure, free the DMA buffer.
10356 	 */
10357 	if (status == DDI_FAILURE) {
10358 		mptsas_dma_free(&pBuffer->buffer_data);
10359 	}
10360 
10361 	return (status);
10362 }
10363 
10364 static int
10365 mptsas_diag_unregister(mptsas_t *mpt,
10366     mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
10367 {
10368 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
10369 	uint8_t				i;
10370 	uint32_t			unique_id;
10371 	int				status;
10372 
10373 	ASSERT(mutex_owned(&mpt->m_mutex));
10374 
10375 	unique_id = diag_unregister->UniqueId;
10376 
10377 	/*
10378 	 * Get the current buffer and look up the unique ID.  The unique ID
10379 	 * should be there.
10380 	 */
10381 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10382 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10383 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10384 		return (DDI_FAILURE);
10385 	}
10386 
10387 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
10388 
10389 	/*
10390 	 * Try to release the buffer from FW before freeing it.  If release
10391 	 * fails, don't free the DMA buffer in case FW tries to access it
10392 	 * later.  If buffer is not owned by firmware, can't release it.
10393 	 */
10394 	if (!pBuffer->owned_by_firmware) {
10395 		status = DDI_SUCCESS;
10396 	} else {
10397 		status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
10398 		    return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
10399 	}
10400 
10401 	/*
10402 	 * At this point, return the current status no matter what happens with
10403 	 * the DMA buffer.
10404 	 */
10405 	pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10406 	if (status == DDI_SUCCESS) {
10407 		if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10408 		    DDI_SUCCESS) {
10409 			mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
10410 			    "in mptsas_diag_unregister.");
10411 			ddi_fm_service_impact(mpt->m_dip,
10412 			    DDI_SERVICE_UNAFFECTED);
10413 		}
10414 		mptsas_dma_free(&pBuffer->buffer_data);
10415 	}
10416 
10417 	return (status);
10418 }
10419 
10420 static int
10421 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
10422     uint32_t *return_code)
10423 {
10424 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
10425 	uint8_t				i;
10426 	uint32_t			unique_id;
10427 
10428 	ASSERT(mutex_owned(&mpt->m_mutex));
10429 
10430 	unique_id = diag_query->UniqueId;
10431 
10432 	/*
10433 	 * If ID is valid, query on ID.
10434 	 * If ID is invalid, query on buffer type.
10435 	 */
10436 	if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
10437 		i = diag_query->BufferType;
10438 		if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
10439 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10440 			return (DDI_FAILURE);
10441 		}
10442 	} else {
10443 		i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10444 		if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10445 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10446 			return (DDI_FAILURE);
10447 		}
10448 	}
10449 
10450 	/*
10451 	 * Fill query structure with the diag buffer info.
10452 	 */
10453 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
10454 	diag_query->BufferType = pBuffer->buffer_type;
10455 	diag_query->ExtendedType = pBuffer->extended_type;
10456 	if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
10457 		for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
10458 		    i++) {
10459 			diag_query->ProductSpecific[i] =
10460 			    pBuffer->product_specific[i];
10461 		}
10462 	}
10463 	diag_query->TotalBufferSize = pBuffer->buffer_data.size;
10464 	diag_query->DriverAddedBufferSize = 0;
10465 	diag_query->UniqueId = pBuffer->unique_id;
10466 	diag_query->ApplicationFlags = 0;
10467 	diag_query->DiagnosticFlags = 0;
10468 
10469 	/*
10470 	 * Set/Clear application flags
10471 	 */
10472 	if (pBuffer->immediate) {
10473 		diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10474 	} else {
10475 		diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10476 	}
10477 	if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
10478 		diag_query->ApplicationFlags |=
10479 		    MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10480 	} else {
10481 		diag_query->ApplicationFlags &=
10482 		    ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10483 	}
10484 	if (pBuffer->owned_by_firmware) {
10485 		diag_query->ApplicationFlags |=
10486 		    MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10487 	} else {
10488 		diag_query->ApplicationFlags &=
10489 		    ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10490 	}
10491 
10492 	return (DDI_SUCCESS);
10493 }
10494 
/*
 * Handle the FW diag READ_BUFFER action: copy BytesToRead bytes starting
 * at StartingOffset from the diag DMA buffer out to the user's buffer,
 * reflect the Force Release state, and optionally re-post the buffer to
 * firmware when the REREGISTER flag is set.  Called with the driver mutex
 * held.  Returns DDI_SUCCESS or DDI_FAILURE; *return_code carries the
 * MPTSAS_FW_DIAG_ERROR_* detail.
 */
static int
mptsas_diag_read_buffer(mptsas_t *mpt,
    mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
    uint32_t *return_code, int ioctl_mode)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				i, *pData;
	uint32_t			unique_id, byte;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	unique_id = diag_read_buffer->UniqueId;

	/*
	 * Get the current buffer and look up the unique ID.  The unique ID
	 * should be there.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	pBuffer = &mpt->m_fw_diag_buffer_list[i];

	/*
	 * Make sure requested read is within limits.
	 * NOTE(review): StartingOffset + BytesToRead is an unsigned sum; a
	 * very large BytesToRead could wrap and slip past this check —
	 * TODO confirm the field widths/callers bound these values.
	 */
	if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
	    pBuffer->buffer_data.size) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the requested data from DMA to the diag_read_buffer.  The DMA
	 * buffer that was allocated is one contiguous buffer.
	 */
	pData = (uint8_t *)(pBuffer->buffer_data.memp +
	    diag_read_buffer->StartingOffset);
	/* Make firmware's writes visible to the CPU before copying out. */
	(void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	/* Copied out one byte at a time; any fault aborts the read. */
	for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
		if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
		    != 0) {
			return (DDI_FAILURE);
		}
	}
	diag_read_buffer->Status = 0;

	/*
	 * Set or clear the Force Release flag.
	 */
	if (pBuffer->force_release) {
		diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
	} else {
		diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
	}

	/*
	 * If buffer is to be reregistered, make sure it's not already owned by
	 * firmware first.
	 */
	status = DDI_SUCCESS;
	if (!pBuffer->owned_by_firmware) {
		if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
			status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
			    return_code);
		}
	}

	return (status);
}
10569 
10570 static int
10571 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
10572     uint32_t *return_code)
10573 {
10574 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
10575 	uint8_t				i;
10576 	uint32_t			unique_id;
10577 	int				status;
10578 
10579 	ASSERT(mutex_owned(&mpt->m_mutex));
10580 
10581 	unique_id = diag_release->UniqueId;
10582 
10583 	/*
10584 	 * Get the current buffer and look up the unique ID.  The unique ID
10585 	 * should be there.
10586 	 */
10587 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10588 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10589 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10590 		return (DDI_FAILURE);
10591 	}
10592 
10593 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
10594 
10595 	/*
10596 	 * If buffer is not owned by firmware, it's already been released.
10597 	 */
10598 	if (!pBuffer->owned_by_firmware) {
10599 		*return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
10600 		return (DDI_FAILURE);
10601 	}
10602 
10603 	/*
10604 	 * Release the buffer.
10605 	 */
10606 	status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
10607 	    MPTSAS_FW_DIAG_TYPE_RELEASE);
10608 	return (status);
10609 }
10610 
10611 static int
10612 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
10613     uint32_t length, uint32_t *return_code, int ioctl_mode)
10614 {
10615 	mptsas_fw_diag_register_t	diag_register;
10616 	mptsas_fw_diag_unregister_t	diag_unregister;
10617 	mptsas_fw_diag_query_t		diag_query;
10618 	mptsas_diag_read_buffer_t	diag_read_buffer;
10619 	mptsas_fw_diag_release_t	diag_release;
10620 	int				status = DDI_SUCCESS;
10621 	uint32_t			original_return_code, read_buf_len;
10622 
10623 	ASSERT(mutex_owned(&mpt->m_mutex));
10624 
10625 	original_return_code = *return_code;
10626 	*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10627 
10628 	switch (action) {
10629 		case MPTSAS_FW_DIAG_TYPE_REGISTER:
10630 			if (!length) {
10631 				*return_code =
10632 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10633 				status = DDI_FAILURE;
10634 				break;
10635 			}
10636 			if (ddi_copyin(diag_action, &diag_register,
10637 			    sizeof (diag_register), ioctl_mode) != 0) {
10638 				return (DDI_FAILURE);
10639 			}
10640 			status = mptsas_diag_register(mpt, &diag_register,
10641 			    return_code);
10642 			break;
10643 
10644 		case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
10645 			if (length < sizeof (diag_unregister)) {
10646 				*return_code =
10647 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10648 				status = DDI_FAILURE;
10649 				break;
10650 			}
10651 			if (ddi_copyin(diag_action, &diag_unregister,
10652 			    sizeof (diag_unregister), ioctl_mode) != 0) {
10653 				return (DDI_FAILURE);
10654 			}
10655 			status = mptsas_diag_unregister(mpt, &diag_unregister,
10656 			    return_code);
10657 			break;
10658 
10659 		case MPTSAS_FW_DIAG_TYPE_QUERY:
10660 			if (length < sizeof (diag_query)) {
10661 				*return_code =
10662 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10663 				status = DDI_FAILURE;
10664 				break;
10665 			}
10666 			if (ddi_copyin(diag_action, &diag_query,
10667 			    sizeof (diag_query), ioctl_mode) != 0) {
10668 				return (DDI_FAILURE);
10669 			}
10670 			status = mptsas_diag_query(mpt, &diag_query,
10671 			    return_code);
10672 			if (status == DDI_SUCCESS) {
10673 				if (ddi_copyout(&diag_query, diag_action,
10674 				    sizeof (diag_query), ioctl_mode) != 0) {
10675 					return (DDI_FAILURE);
10676 				}
10677 			}
10678 			break;
10679 
10680 		case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
10681 			if (ddi_copyin(diag_action, &diag_read_buffer,
10682 			    sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
10683 				return (DDI_FAILURE);
10684 			}
10685 			read_buf_len = sizeof (diag_read_buffer) -
10686 			    sizeof (diag_read_buffer.DataBuffer) +
10687 			    diag_read_buffer.BytesToRead;
10688 			if (length < read_buf_len) {
10689 				*return_code =
10690 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10691 				status = DDI_FAILURE;
10692 				break;
10693 			}
10694 			status = mptsas_diag_read_buffer(mpt,
10695 			    &diag_read_buffer, diag_action +
10696 			    sizeof (diag_read_buffer) - 4, return_code,
10697 			    ioctl_mode);
10698 			if (status == DDI_SUCCESS) {
10699 				if (ddi_copyout(&diag_read_buffer, diag_action,
10700 				    sizeof (diag_read_buffer) - 4, ioctl_mode)
10701 				    != 0) {
10702 					return (DDI_FAILURE);
10703 				}
10704 			}
10705 			break;
10706 
10707 		case MPTSAS_FW_DIAG_TYPE_RELEASE:
10708 			if (length < sizeof (diag_release)) {
10709 				*return_code =
10710 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10711 				status = DDI_FAILURE;
10712 				break;
10713 			}
10714 			if (ddi_copyin(diag_action, &diag_release,
10715 			    sizeof (diag_release), ioctl_mode) != 0) {
10716 				return (DDI_FAILURE);
10717 			}
10718 			status = mptsas_diag_release(mpt, &diag_release,
10719 			    return_code);
10720 			break;
10721 
10722 		default:
10723 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10724 			status = DDI_FAILURE;
10725 			break;
10726 	}
10727 
10728 	if ((status == DDI_FAILURE) &&
10729 	    (original_return_code == MPTSAS_FW_DIAG_NEW) &&
10730 	    (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
10731 		status = DDI_SUCCESS;
10732 	}
10733 
10734 	return (status);
10735 }
10736 
10737 static int
10738 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
10739 {
10740 	int			status;
10741 	mptsas_diag_action_t	driver_data;
10742 
10743 	ASSERT(mutex_owned(&mpt->m_mutex));
10744 
10745 	/*
10746 	 * Copy the user data to a driver data buffer.
10747 	 */
10748 	if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
10749 	    mode) == 0) {
10750 		/*
10751 		 * Send diag action request if Action is valid
10752 		 */
10753 		if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
10754 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
10755 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
10756 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
10757 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
10758 			status = mptsas_do_diag_action(mpt, driver_data.Action,
10759 			    (void *)(uintptr_t)driver_data.PtrDiagAction,
10760 			    driver_data.Length, &driver_data.ReturnCode,
10761 			    mode);
10762 			if (status == DDI_SUCCESS) {
10763 				if (ddi_copyout(&driver_data.ReturnCode,
10764 				    &user_data->ReturnCode,
10765 				    sizeof (user_data->ReturnCode), mode)
10766 				    != 0) {
10767 					status = EFAULT;
10768 				} else {
10769 					status = 0;
10770 				}
10771 			} else {
10772 				status = EIO;
10773 			}
10774 		} else {
10775 			status = EINVAL;
10776 		}
10777 	} else {
10778 		status = EFAULT;
10779 	}
10780 
10781 	return (status);
10782 }
10783 
10784 /*
10785  * This routine handles the "event query" ioctl.
10786  */
10787 static int
10788 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
10789     int *rval)
10790 {
10791 	int			status;
10792 	mptsas_event_query_t	driverdata;
10793 	uint8_t			i;
10794 
10795 	driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
10796 
10797 	mutex_enter(&mpt->m_mutex);
10798 	for (i = 0; i < 4; i++) {
10799 		driverdata.Types[i] = mpt->m_event_mask[i];
10800 	}
10801 	mutex_exit(&mpt->m_mutex);
10802 
10803 	if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
10804 		status = EFAULT;
10805 	} else {
10806 		*rval = MPTIOCTL_STATUS_GOOD;
10807 		status = 0;
10808 	}
10809 
10810 	return (status);
10811 }
10812 
10813 /*
10814  * This routine handles the "event enable" ioctl.
10815  */
10816 static int
10817 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
10818     int *rval)
10819 {
10820 	int			status;
10821 	mptsas_event_enable_t	driverdata;
10822 	uint8_t			i;
10823 
10824 	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
10825 		mutex_enter(&mpt->m_mutex);
10826 		for (i = 0; i < 4; i++) {
10827 			mpt->m_event_mask[i] = driverdata.Types[i];
10828 		}
10829 		mutex_exit(&mpt->m_mutex);
10830 
10831 		*rval = MPTIOCTL_STATUS_GOOD;
10832 		status = 0;
10833 	} else {
10834 		status = EFAULT;
10835 	}
10836 	return (status);
10837 }
10838 
10839 /*
10840  * This routine handles the "event report" ioctl.
10841  */
static int
mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
    int *rval)
{
	int			status;
	mptsas_event_report_t	driverdata;

	/*
	 * The whole copyin/copyout sequence runs under m_mutex so the
	 * driver's event log (mpt->m_events) cannot change while it is
	 * being copied out.
	 * NOTE(review): this holds m_mutex across user-space copies; a
	 * fault on the user buffer stalls with the mutex held — confirm
	 * this is acceptable for this lock.
	 */
	mutex_enter(&mpt->m_mutex);

	/* Only the caller-supplied buffer size is read in, not the data. */
	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
	    mode) == 0) {
		if (driverdata.Size >= sizeof (mpt->m_events)) {
			/* Buffer is big enough: copy the full event log. */
			if (ddi_copyout(mpt->m_events, data->Events,
			    sizeof (mpt->m_events), mode) != 0) {
				status = EFAULT;
			} else {
				/*
				 * If the user's buffer was strictly larger,
				 * write back the actual size consumed.  When
				 * the sizes match exactly, Size is left
				 * untouched in user space.
				 */
				if (driverdata.Size > sizeof (mpt->m_events)) {
					driverdata.Size =
					    sizeof (mpt->m_events);
					if (ddi_copyout(&driverdata.Size,
					    &data->Size,
					    sizeof (driverdata.Size),
					    mode) != 0) {
						status = EFAULT;
					} else {
						*rval = MPTIOCTL_STATUS_GOOD;
						status = 0;
					}
				} else {
					*rval = MPTIOCTL_STATUS_GOOD;
					status = 0;
				}
			}
		} else {
			/*
			 * Undersized buffer: report via *rval, but the
			 * ioctl itself still succeeds (status 0).
			 */
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			status = 0;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
10886 
10887 static void
10888 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
10889 {
10890 	int	*reg_data;
10891 	uint_t	reglen;
10892 
10893 	/*
10894 	 * Lookup the 'reg' property and extract the other data
10895 	 */
10896 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
10897 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
10898 	    DDI_PROP_SUCCESS) {
10899 		/*
10900 		 * Extract the PCI data from the 'reg' property first DWORD.
10901 		 * The entry looks like the following:
10902 		 * First DWORD:
10903 		 * Bits 0 - 7 8-bit Register number
10904 		 * Bits 8 - 10 3-bit Function number
10905 		 * Bits 11 - 15 5-bit Device number
10906 		 * Bits 16 - 23 8-bit Bus number
10907 		 * Bits 24 - 25 2-bit Address Space type identifier
10908 		 *
10909 		 */
10910 		adapter_data->PciInformation.u.bits.BusNumber =
10911 		    (reg_data[0] & 0x00FF0000) >> 16;
10912 		adapter_data->PciInformation.u.bits.DeviceNumber =
10913 		    (reg_data[0] & 0x0000F800) >> 11;
10914 		adapter_data->PciInformation.u.bits.FunctionNumber =
10915 		    (reg_data[0] & 0x00000700) >> 8;
10916 		ddi_prop_free((void *)reg_data);
10917 	} else {
10918 		/*
10919 		 * If we can't determine the PCI data then we fill in FF's for
10920 		 * the data to indicate this.
10921 		 */
10922 		adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
10923 		adapter_data->MpiPortNumber = 0xFFFFFFFF;
10924 		adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
10925 	}
10926 
10927 	/*
10928 	 * Saved in the mpt->m_fwversion
10929 	 */
10930 	adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
10931 }
10932 
10933 static void
10934 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
10935 {
10936 	char	*driver_verstr = MPTSAS_MOD_STRING;
10937 
10938 	mptsas_lookup_pci_data(mpt, adapter_data);
10939 	adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
10940 	adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
10941 	adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
10942 	adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
10943 	adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
10944 	(void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
10945 	adapter_data->BiosVersion = 0;
10946 	(void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
10947 }
10948 
10949 static void
10950 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
10951 {
10952 	int	*reg_data, i;
10953 	uint_t	reglen;
10954 
10955 	/*
10956 	 * Lookup the 'reg' property and extract the other data
10957 	 */
10958 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
10959 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
10960 	    DDI_PROP_SUCCESS) {
10961 		/*
10962 		 * Extract the PCI data from the 'reg' property first DWORD.
10963 		 * The entry looks like the following:
10964 		 * First DWORD:
10965 		 * Bits 8 - 10 3-bit Function number
10966 		 * Bits 11 - 15 5-bit Device number
10967 		 * Bits 16 - 23 8-bit Bus number
10968 		 */
10969 		pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
10970 		pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
10971 		pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
10972 		ddi_prop_free((void *)reg_data);
10973 	} else {
10974 		/*
10975 		 * If we can't determine the PCI info then we fill in FF's for
10976 		 * the data to indicate this.
10977 		 */
10978 		pci_info->BusNumber = 0xFFFFFFFF;
10979 		pci_info->DeviceNumber = 0xFF;
10980 		pci_info->FunctionNumber = 0xFF;
10981 	}
10982 
10983 	/*
10984 	 * Now get the interrupt vector and the pci header.  The vector can
10985 	 * only be 0 right now.  The header is the first 256 bytes of config
10986 	 * space.
10987 	 */
10988 	pci_info->InterruptVector = 0;
10989 	for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
10990 		pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
10991 		    i);
10992 	}
10993 }
10994 
10995 static int
10996 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
10997 {
10998 	int			status = 0;
10999 	mptsas_reg_access_t	driverdata;
11000 
11001 	mutex_enter(&mpt->m_mutex);
11002 	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11003 		switch (driverdata.Command) {
11004 			/*
11005 			 * IO access is not supported.
11006 			 */
11007 			case REG_IO_READ:
11008 			case REG_IO_WRITE:
11009 				mptsas_log(mpt, CE_WARN, "IO access is not "
11010 				    "supported.  Use memory access.");
11011 				status = EINVAL;
11012 				break;
11013 
11014 			case REG_MEM_READ:
11015 				driverdata.RegData = ddi_get32(mpt->m_datap,
11016 				    (uint32_t *)(void *)mpt->m_reg +
11017 				    driverdata.RegOffset);
11018 				if (ddi_copyout(&driverdata.RegData,
11019 				    &data->RegData,
11020 				    sizeof (driverdata.RegData), mode) != 0) {
11021 					mptsas_log(mpt, CE_WARN, "Register "
11022 					    "Read Failed");
11023 					status = EFAULT;
11024 				}
11025 				break;
11026 
11027 			case REG_MEM_WRITE:
11028 				ddi_put32(mpt->m_datap,
11029 				    (uint32_t *)(void *)mpt->m_reg +
11030 				    driverdata.RegOffset,
11031 				    driverdata.RegData);
11032 				break;
11033 
11034 			default:
11035 				status = EINVAL;
11036 				break;
11037 		}
11038 	} else {
11039 		status = EFAULT;
11040 	}
11041 
11042 	mutex_exit(&mpt->m_mutex);
11043 	return (status);
11044 }
11045 
11046 static int
11047 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
11048     int *rval)
11049 {
11050 	int			status = 0;
11051 	mptsas_t		*mpt;
11052 	mptsas_update_flash_t	flashdata;
11053 	mptsas_pass_thru_t	passthru_data;
11054 	mptsas_adapter_data_t   adapter_data;
11055 	mptsas_pci_info_t	pci_info;
11056 	int			copylen;
11057 
11058 	int			iport_flag = 0;
11059 	dev_info_t		*dip = NULL;
11060 	mptsas_phymask_t	phymask = 0;
11061 	struct devctl_iocdata	*dcp = NULL;
11062 	uint32_t		slotstatus = 0;
11063 	char			*addr = NULL;
11064 	mptsas_target_t		*ptgt = NULL;
11065 
11066 	*rval = MPTIOCTL_STATUS_GOOD;
11067 	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
11068 		return (EPERM);
11069 	}
11070 
11071 	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
11072 	if (mpt == NULL) {
11073 		/*
11074 		 * Called from iport node, get the states
11075 		 */
11076 		iport_flag = 1;
11077 		dip = mptsas_get_dip_from_dev(dev, &phymask);
11078 		if (dip == NULL) {
11079 			return (ENXIO);
11080 		}
11081 		mpt = DIP2MPT(dip);
11082 	}
11083 	/* Make sure power level is D0 before accessing registers */
11084 	mutex_enter(&mpt->m_mutex);
11085 	if (mpt->m_options & MPTSAS_OPT_PM) {
11086 		(void) pm_busy_component(mpt->m_dip, 0);
11087 		if (mpt->m_power_level != PM_LEVEL_D0) {
11088 			mutex_exit(&mpt->m_mutex);
11089 			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
11090 			    DDI_SUCCESS) {
11091 				mptsas_log(mpt, CE_WARN,
11092 				    "mptsas%d: mptsas_ioctl: Raise power "
11093 				    "request failed.", mpt->m_instance);
11094 				(void) pm_idle_component(mpt->m_dip, 0);
11095 				return (ENXIO);
11096 			}
11097 		} else {
11098 			mutex_exit(&mpt->m_mutex);
11099 		}
11100 	} else {
11101 		mutex_exit(&mpt->m_mutex);
11102 	}
11103 
11104 	if (iport_flag) {
11105 		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
11106 		if (status != 0) {
11107 			goto out;
11108 		}
11109 		/*
11110 		 * The following code control the OK2RM LED, it doesn't affect
11111 		 * the ioctl return status.
11112 		 */
11113 		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
11114 		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
11115 			if (ndi_dc_allochdl((void *)data, &dcp) !=
11116 			    NDI_SUCCESS) {
11117 				goto out;
11118 			}
11119 			addr = ndi_dc_getaddr(dcp);
11120 			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
11121 			if (ptgt == NULL) {
11122 				NDBG14(("mptsas_ioctl led control: tgt %s not "
11123 				    "found", addr));
11124 				ndi_dc_freehdl(dcp);
11125 				goto out;
11126 			}
11127 			mutex_enter(&mpt->m_mutex);
11128 			if (cmd == DEVCTL_DEVICE_ONLINE) {
11129 				ptgt->m_tgt_unconfigured = 0;
11130 			} else if (cmd == DEVCTL_DEVICE_OFFLINE) {
11131 				ptgt->m_tgt_unconfigured = 1;
11132 			}
11133 			slotstatus = 0;
11134 #ifdef MPTSAS_GET_LED
11135 			/*
11136 			 * The get led status can't get a valid/reasonable
11137 			 * state, so ignore the get led status, and write the
11138 			 * required value directly
11139 			 */
11140 			if (mptsas_get_led_status(mpt, ptgt, &slotstatus) !=
11141 			    DDI_SUCCESS) {
11142 				NDBG14(("mptsas_ioctl: get LED for tgt %s "
11143 				    "failed %x", addr, slotstatus));
11144 				slotstatus = 0;
11145 			}
11146 			NDBG14(("mptsas_ioctl: LED status %x for %s",
11147 			    slotstatus, addr));
11148 #endif
11149 			if (cmd == DEVCTL_DEVICE_OFFLINE) {
11150 				slotstatus |=
11151 				    MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
11152 			} else {
11153 				slotstatus &=
11154 				    ~MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
11155 			}
11156 			if (mptsas_set_led_status(mpt, ptgt, slotstatus) !=
11157 			    DDI_SUCCESS) {
11158 				NDBG14(("mptsas_ioctl: set LED for tgt %s "
11159 				    "failed %x", addr, slotstatus));
11160 			}
11161 			mutex_exit(&mpt->m_mutex);
11162 			ndi_dc_freehdl(dcp);
11163 		}
11164 		goto out;
11165 	}
11166 	switch (cmd) {
11167 		case MPTIOCTL_UPDATE_FLASH:
11168 			if (ddi_copyin((void *)data, &flashdata,
11169 				sizeof (struct mptsas_update_flash), mode)) {
11170 				status = EFAULT;
11171 				break;
11172 			}
11173 
11174 			mutex_enter(&mpt->m_mutex);
11175 			if (mptsas_update_flash(mpt,
11176 			    (caddr_t)(long)flashdata.PtrBuffer,
11177 			    flashdata.ImageSize, flashdata.ImageType, mode)) {
11178 				status = EFAULT;
11179 			}
11180 
11181 			/*
11182 			 * Reset the chip to start using the new
11183 			 * firmware.  Reset if failed also.
11184 			 */
11185 			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
11186 				status = EFAULT;
11187 			}
11188 			mutex_exit(&mpt->m_mutex);
11189 			break;
11190 		case MPTIOCTL_PASS_THRU:
11191 			/*
11192 			 * The user has requested to pass through a command to
11193 			 * be executed by the MPT firmware.  Call our routine
11194 			 * which does this.  Only allow one passthru IOCTL at
11195 			 * one time.
11196 			 */
11197 			if (ddi_copyin((void *)data, &passthru_data,
11198 			    sizeof (mptsas_pass_thru_t), mode)) {
11199 				status = EFAULT;
11200 				break;
11201 			}
11202 			mutex_enter(&mpt->m_mutex);
11203 			if (mpt->m_passthru_in_progress) {
11204 				mutex_exit(&mpt->m_mutex);
11205 				return (EBUSY);
11206 			}
11207 			mpt->m_passthru_in_progress = 1;
11208 			status = mptsas_pass_thru(mpt, &passthru_data, mode);
11209 			mpt->m_passthru_in_progress = 0;
11210 			mutex_exit(&mpt->m_mutex);
11211 
11212 			break;
11213 		case MPTIOCTL_GET_ADAPTER_DATA:
11214 			/*
11215 			 * The user has requested to read adapter data.  Call
11216 			 * our routine which does this.
11217 			 */
11218 			bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
11219 			if (ddi_copyin((void *)data, (void *)&adapter_data,
11220 			    sizeof (mptsas_adapter_data_t), mode)) {
11221 				status = EFAULT;
11222 				break;
11223 			}
11224 			if (adapter_data.StructureLength >=
11225 			    sizeof (mptsas_adapter_data_t)) {
11226 				adapter_data.StructureLength = (uint32_t)
11227 				    sizeof (mptsas_adapter_data_t);
11228 				copylen = sizeof (mptsas_adapter_data_t);
11229 				mutex_enter(&mpt->m_mutex);
11230 				mptsas_read_adapter_data(mpt, &adapter_data);
11231 				mutex_exit(&mpt->m_mutex);
11232 			} else {
11233 				adapter_data.StructureLength = (uint32_t)
11234 				    sizeof (mptsas_adapter_data_t);
11235 				copylen = sizeof (adapter_data.StructureLength);
11236 				*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11237 			}
11238 			if (ddi_copyout((void *)(&adapter_data), (void *)data,
11239 			    copylen, mode) != 0) {
11240 				status = EFAULT;
11241 			}
11242 			break;
11243 		case MPTIOCTL_GET_PCI_INFO:
11244 			/*
11245 			 * The user has requested to read pci info.  Call
11246 			 * our routine which does this.
11247 			 */
11248 			bzero(&pci_info, sizeof (mptsas_pci_info_t));
11249 			mutex_enter(&mpt->m_mutex);
11250 			mptsas_read_pci_info(mpt, &pci_info);
11251 			mutex_exit(&mpt->m_mutex);
11252 			if (ddi_copyout((void *)(&pci_info), (void *)data,
11253 			    sizeof (mptsas_pci_info_t), mode) != 0) {
11254 				status = EFAULT;
11255 			}
11256 			break;
11257 		case MPTIOCTL_RESET_ADAPTER:
11258 			mutex_enter(&mpt->m_mutex);
11259 			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
11260 				mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
11261 				    "failed");
11262 				status = EFAULT;
11263 			}
11264 			mutex_exit(&mpt->m_mutex);
11265 			break;
11266 		case MPTIOCTL_DIAG_ACTION:
11267 			/*
11268 			 * The user has done a diag buffer action.  Call our
11269 			 * routine which does this.  Only allow one diag action
11270 			 * at one time.
11271 			 */
11272 			mutex_enter(&mpt->m_mutex);
11273 			if (mpt->m_diag_action_in_progress) {
11274 				mutex_exit(&mpt->m_mutex);
11275 				return (EBUSY);
11276 			}
11277 			mpt->m_diag_action_in_progress = 1;
11278 			status = mptsas_diag_action(mpt,
11279 			    (mptsas_diag_action_t *)data, mode);
11280 			mpt->m_diag_action_in_progress = 0;
11281 			mutex_exit(&mpt->m_mutex);
11282 			break;
11283 		case MPTIOCTL_EVENT_QUERY:
11284 			/*
11285 			 * The user has done an event query. Call our routine
11286 			 * which does this.
11287 			 */
11288 			status = mptsas_event_query(mpt,
11289 			    (mptsas_event_query_t *)data, mode, rval);
11290 			break;
11291 		case MPTIOCTL_EVENT_ENABLE:
11292 			/*
11293 			 * The user has done an event enable. Call our routine
11294 			 * which does this.
11295 			 */
11296 			status = mptsas_event_enable(mpt,
11297 			    (mptsas_event_enable_t *)data, mode, rval);
11298 			break;
11299 		case MPTIOCTL_EVENT_REPORT:
11300 			/*
11301 			 * The user has done an event report. Call our routine
11302 			 * which does this.
11303 			 */
11304 			status = mptsas_event_report(mpt,
11305 			    (mptsas_event_report_t *)data, mode, rval);
11306 			break;
11307 		case MPTIOCTL_REG_ACCESS:
11308 			/*
11309 			 * The user has requested register access.  Call our
11310 			 * routine which does this.
11311 			 */
11312 			status = mptsas_reg_access(mpt,
11313 			    (mptsas_reg_access_t *)data, mode);
11314 			break;
11315 		default:
11316 			status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
11317 			    rval);
11318 			break;
11319 	}
11320 
11321 out:
11322 	/*
11323 	 * Report idle status to pm after grace period because
11324 	 * multiple ioctls may be queued and raising power
11325 	 * for every ioctl is time consuming.  If a timeout is
11326 	 * pending for the previous ioctl, cancel the timeout and
11327 	 * report idle status to pm because calls to pm_busy_component(9F)
11328 	 * are stacked.
11329 	 */
11330 	mutex_enter(&mpt->m_mutex);
11331 	if (mpt->m_options & MPTSAS_OPT_PM) {
11332 		if (mpt->m_pm_timeid != 0) {
11333 			timeout_id_t tid = mpt->m_pm_timeid;
11334 			mpt->m_pm_timeid = 0;
11335 			mutex_exit(&mpt->m_mutex);
11336 			(void) untimeout(tid);
11337 			/*
11338 			 * Report idle status for previous ioctl since
11339 			 * calls to pm_busy_component(9F) are stacked.
11340 			 */
11341 			(void) pm_idle_component(mpt->m_dip, 0);
11342 			mutex_enter(&mpt->m_mutex);
11343 		}
11344 		mpt->m_pm_timeid = timeout(mptsas_idle_pm, mpt,
11345 		    drv_usectohz((clock_t)mpt->m_pm_idle_delay * 1000000));
11346 	}
11347 	mutex_exit(&mpt->m_mutex);
11348 
11349 	return (status);
11350 }
11351 
/*
 * Reset and re-initialize the IOC while preserving driver state.
 * Called with m_mutex held (asserted below), e.g. after a firmware
 * update or from the RESET_ADAPTER ioctl.  Returns DDI_SUCCESS or
 * DDI_FAILURE; on failure an FMA ereport is posted and service is
 * marked lost, but throttles and the reset flag are still restored so
 * queued I/O can complete (with errors) rather than hang.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int		rval = DDI_SUCCESS;
	mptsas_target_t	*ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.  FALSE = not the first-time init path, so
	 * mptsas_init_chip() will also tear down and re-allocate the
	 * facts-dependent buffers.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* Drain the done queue and restart queued commands. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
11438 
/*
 * Bring the IOC from reset to operational state.
 *
 * first_time == TRUE on the attach path; FALSE on any subsequent reset,
 * in which case facts-dependent buffers are torn down and re-allocated,
 * config space is re-programmed, and the active-slots structure is
 * rebuilt (preserving target/SMP tables and RAID config).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  On failure no partial teardown
 * is performed here; callers handle cleanup.
 */
int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t	cookie;
	uint32_t		i;
	mptsas_slots_t		*new_active;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	if (mptsas_ioc_reset(mpt) == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	if (first_time == FALSE) {
		/*
		 * De-allocate buffers before re-allocating them using the
		 * latest IOC facts.
		 */
		mptsas_hba_fini(mpt);

		/*
		 * Setup configuration space
		 */
		if (mptsas_config_space_init(mpt) == FALSE) {
			mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
			    "failed!");
			goto fail;
		}
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}
	/*
	 * Re-allocate active slots here if not the first reset.  Since
	 * m_active could have a different number of slots allocated after a
	 * reset, just de-allocate the old m_active structure and re-allocate a
	 * new one.  Save the tables and IR info from the old m_active.
	 */
	if (first_time == FALSE) {
		new_active = kmem_zalloc(MPTSAS_SLOTS_SIZE(mpt), KM_SLEEP);
		/*
		 * NOTE(review): kmem_zalloc(..., KM_SLEEP) never returns
		 * NULL on Solaris, so this check appears to be dead code
		 * kept as defensive programming.
		 */
		if (new_active == NULL) {
			mptsas_log(mpt, CE_WARN, "Re-alloc of active slots "
			    "failed!");
			goto fail;
		} else {
			/* Carry target/SMP tables and RAID state forward. */
			new_active->m_n_slots = (mpt->m_max_requests - 2);
			new_active->m_size = MPTSAS_SLOTS_SIZE(mpt);
			new_active->m_tags = 1;
			new_active->m_tgttbl = mpt->m_active->m_tgttbl;
			new_active->m_smptbl = mpt->m_active->m_smptbl;
			new_active->m_num_raid_configs =
			    mpt->m_active->m_num_raid_configs;
			for (i = 0; i < new_active->m_num_raid_configs; i++) {
				new_active->m_raidconfig[i] =
				    mpt->m_active->m_raidconfig[i];
			}
			kmem_free(mpt->m_active, mpt->m_active->m_size);
			mpt->m_active = new_active;
		}
	}

	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	/* Per-reply-descriptor argument area for the interrupt path. */
	mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
	    mpt->m_max_replies, KM_SLEEP);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 * All-ones marks a descriptor slot as unused.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * Fill in the phy_info structure and get the base WWID
	 * (attach-time only; these do not change across resets).
	 */

	if (first_time == TRUE) {
		if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_get_manufacture_page5 failed!");
			goto fail;
		}

		if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_get_sas_io_unit_page_hndshk failed!");
			goto fail;
		}
	}

	/*
	 * enable events
	 * NOTE(review): only done on the reset path (first_time == FALSE);
	 * presumably the attach path enables events elsewhere — confirm.
	 */
	if (first_time == FALSE) {
		if (mptsas_ioc_enable_event_notification(mpt)) {
			goto fail;
		}
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
11665 
11666 static int
11667 mptsas_init_pm(mptsas_t *mpt)
11668 {
11669 	char		pmc_name[16];
11670 	char		*pmc[] = {
11671 				NULL,
11672 				"0=Off (PCI D3 State)",
11673 				"3=On (PCI D0 State)",
11674 				NULL
11675 			};
11676 	uint16_t	pmcsr_stat;
11677 
11678 	/*
11679 	 * If power management is supported by this chip, create
11680 	 * pm-components property for the power management framework
11681 	 */
11682 	(void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
11683 	pmc[0] = pmc_name;
11684 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
11685 	    "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
11686 		mpt->m_options &= ~MPTSAS_OPT_PM;
11687 		mptsas_log(mpt, CE_WARN,
11688 		    "mptsas%d: pm-component property creation failed.",
11689 		    mpt->m_instance);
11690 		return (DDI_FAILURE);
11691 	}
11692 
11693 	/*
11694 	 * Power on device.
11695 	 */
11696 	(void) pm_busy_component(mpt->m_dip, 0);
11697 	pmcsr_stat = pci_config_get16(mpt->m_config_handle,
11698 	    mpt->m_pmcsr_offset);
11699 	if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
11700 		mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
11701 		    mpt->m_instance);
11702 		pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
11703 		    PCI_PMCSR_D0);
11704 	}
11705 	if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
11706 		mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
11707 		return (DDI_FAILURE);
11708 	}
11709 	mpt->m_power_level = PM_LEVEL_D0;
11710 	/*
11711 	 * Set pm idle delay.
11712 	 */
11713 	mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
11714 	    mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
11715 
11716 	return (DDI_SUCCESS);
11717 }
11718 
11719 /*
11720  * mptsas_add_intrs:
11721  *
11722  * Register FIXED or MSI interrupts.
11723  */
11724 static int
11725 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
11726 {
11727 	dev_info_t	*dip = mpt->m_dip;
11728 	int		avail, actual, count = 0;
11729 	int		i, flag, ret;
11730 
11731 	NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
11732 
11733 	/* Get number of interrupts */
11734 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
11735 	if ((ret != DDI_SUCCESS) || (count <= 0)) {
11736 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
11737 		    "ret %d count %d\n", ret, count);
11738 
11739 		return (DDI_FAILURE);
11740 	}
11741 
11742 	/* Get number of available interrupts */
11743 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
11744 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
11745 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
11746 		    "ret %d avail %d\n", ret, avail);
11747 
11748 		return (DDI_FAILURE);
11749 	}
11750 
11751 	if (avail < count) {
11752 		mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
11753 		    "navail() returned %d", count, avail);
11754 	}
11755 
11756 	/* Mpt only have one interrupt routine */
11757 	if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
11758 		count = 1;
11759 	}
11760 
11761 	/* Allocate an array of interrupt handles */
11762 	mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
11763 	mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
11764 
11765 	flag = DDI_INTR_ALLOC_NORMAL;
11766 
11767 	/* call ddi_intr_alloc() */
11768 	ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
11769 	    count, &actual, flag);
11770 
11771 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
11772 		mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
11773 		    ret);
11774 		kmem_free(mpt->m_htable, mpt->m_intr_size);
11775 		return (DDI_FAILURE);
11776 	}
11777 
11778 	/* use interrupt count returned or abort? */
11779 	if (actual < count) {
11780 		mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
11781 		    count, actual);
11782 	}
11783 
11784 	mpt->m_intr_cnt = actual;
11785 
11786 	/*
11787 	 * Get priority for first msi, assume remaining are all the same
11788 	 */
11789 	if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
11790 	    &mpt->m_intr_pri)) != DDI_SUCCESS) {
11791 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
11792 
11793 		/* Free already allocated intr */
11794 		for (i = 0; i < actual; i++) {
11795 			(void) ddi_intr_free(mpt->m_htable[i]);
11796 		}
11797 
11798 		kmem_free(mpt->m_htable, mpt->m_intr_size);
11799 		return (DDI_FAILURE);
11800 	}
11801 
11802 	/* Test for high level mutex */
11803 	if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
11804 		mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
11805 		    "Hi level interrupt not supported\n");
11806 
11807 		/* Free already allocated intr */
11808 		for (i = 0; i < actual; i++) {
11809 			(void) ddi_intr_free(mpt->m_htable[i]);
11810 		}
11811 
11812 		kmem_free(mpt->m_htable, mpt->m_intr_size);
11813 		return (DDI_FAILURE);
11814 	}
11815 
11816 	/* Call ddi_intr_add_handler() */
11817 	for (i = 0; i < actual; i++) {
11818 		if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
11819 		    (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
11820 			mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
11821 			    "failed %d\n", ret);
11822 
11823 			/* Free already allocated intr */
11824 			for (i = 0; i < actual; i++) {
11825 				(void) ddi_intr_free(mpt->m_htable[i]);
11826 			}
11827 
11828 			kmem_free(mpt->m_htable, mpt->m_intr_size);
11829 			return (DDI_FAILURE);
11830 		}
11831 	}
11832 
11833 	if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
11834 	    != DDI_SUCCESS) {
11835 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
11836 
11837 		/* Free already allocated intr */
11838 		for (i = 0; i < actual; i++) {
11839 			(void) ddi_intr_free(mpt->m_htable[i]);
11840 		}
11841 
11842 		kmem_free(mpt->m_htable, mpt->m_intr_size);
11843 		return (DDI_FAILURE);
11844 	}
11845 
11846 	return (DDI_SUCCESS);
11847 }
11848 
11849 /*
11850  * mptsas_rem_intrs:
11851  *
11852  * Unregister FIXED or MSI interrupts
11853  */
11854 static void
11855 mptsas_rem_intrs(mptsas_t *mpt)
11856 {
11857 	int	i;
11858 
11859 	NDBG6(("mptsas_rem_intrs"));
11860 
11861 	/* Disable all interrupts */
11862 	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
11863 		/* Call ddi_intr_block_disable() */
11864 		(void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
11865 	} else {
11866 		for (i = 0; i < mpt->m_intr_cnt; i++) {
11867 			(void) ddi_intr_disable(mpt->m_htable[i]);
11868 		}
11869 	}
11870 
11871 	/* Call ddi_intr_remove_handler() */
11872 	for (i = 0; i < mpt->m_intr_cnt; i++) {
11873 		(void) ddi_intr_remove_handler(mpt->m_htable[i]);
11874 		(void) ddi_intr_free(mpt->m_htable[i]);
11875 	}
11876 
11877 	kmem_free(mpt->m_htable, mpt->m_intr_size);
11878 }
11879 
11880 /*
11881  * The IO fault service error handling callback function
11882  */
11883 /*ARGSUSED*/
11884 static int
11885 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
11886 {
11887 	/*
11888 	 * as the driver can always deal with an error in any dma or
11889 	 * access handle, we can just return the fme_status value.
11890 	 */
11891 	pci_ereport_post(dip, err, NULL);
11892 	return (err->fme_status);
11893 }
11894 
11895 /*
11896  * mptsas_fm_init - initialize fma capabilities and register with IO
11897  *               fault services.
11898  */
11899 static void
11900 mptsas_fm_init(mptsas_t *mpt)
11901 {
11902 	/*
11903 	 * Need to change iblock to priority for new MSI intr
11904 	 */
11905 	ddi_iblock_cookie_t	fm_ibc;
11906 
11907 	/* Only register with IO Fault Services if we have some capability */
11908 	if (mpt->m_fm_capabilities) {
11909 		/* Adjust access and dma attributes for FMA */
11910 		mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11911 		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
11912 		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
11913 
11914 		/*
11915 		 * Register capabilities with IO Fault Services.
11916 		 * mpt->m_fm_capabilities will be updated to indicate
11917 		 * capabilities actually supported (not requested.)
11918 		 */
11919 		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
11920 
11921 		/*
11922 		 * Initialize pci ereport capabilities if ereport
11923 		 * capable (should always be.)
11924 		 */
11925 		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
11926 		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
11927 			pci_ereport_setup(mpt->m_dip);
11928 		}
11929 
11930 		/*
11931 		 * Register error callback if error callback capable.
11932 		 */
11933 		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
11934 			ddi_fm_handler_register(mpt->m_dip,
11935 			    mptsas_fm_error_cb, (void *) mpt);
11936 		}
11937 	}
11938 }
11939 
11940 /*
11941  * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
11942  *               fault services.
11943  *
11944  */
11945 static void
11946 mptsas_fm_fini(mptsas_t *mpt)
11947 {
11948 	/* Only unregister FMA capabilities if registered */
11949 	if (mpt->m_fm_capabilities) {
11950 
11951 		/*
11952 		 * Un-register error callback if error callback capable.
11953 		 */
11954 
11955 		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
11956 			ddi_fm_handler_unregister(mpt->m_dip);
11957 		}
11958 
11959 		/*
11960 		 * Release any resources allocated by pci_ereport_setup()
11961 		 */
11962 
11963 		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
11964 		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
11965 			pci_ereport_teardown(mpt->m_dip);
11966 		}
11967 
11968 		/* Unregister from IO Fault Services */
11969 		ddi_fm_fini(mpt->m_dip);
11970 
11971 		/* Adjust access and dma attributes for FMA */
11972 		mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
11973 		mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11974 		mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11975 
11976 	}
11977 }
11978 
11979 int
11980 mptsas_check_acc_handle(ddi_acc_handle_t handle)
11981 {
11982 	ddi_fm_error_t	de;
11983 
11984 	if (handle == NULL)
11985 		return (DDI_FAILURE);
11986 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
11987 	return (de.fme_status);
11988 }
11989 
11990 int
11991 mptsas_check_dma_handle(ddi_dma_handle_t handle)
11992 {
11993 	ddi_fm_error_t	de;
11994 
11995 	if (handle == NULL)
11996 		return (DDI_FAILURE);
11997 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
11998 	return (de.fme_status);
11999 }
12000 
12001 void
12002 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
12003 {
12004 	uint64_t	ena;
12005 	char		buf[FM_MAX_CLASS];
12006 
12007 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12008 	ena = fm_ena_generate(0, FM_ENA_FMT1);
12009 	if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
12010 		ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
12011 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12012 	}
12013 }
12014 
12015 static int
12016 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
12017     uint16_t *dev_handle, mptsas_target_t **pptgt)
12018 {
12019 	int		rval;
12020 	uint32_t	dev_info;
12021 	uint64_t	sas_wwn;
12022 	mptsas_phymask_t phymask;
12023 	uint8_t		physport, phynum, config, disk;
12024 	mptsas_slots_t	*slots = mpt->m_active;
12025 	uint64_t	devicename;
12026 	mptsas_target_t	*tmp_tgt = NULL;
12027 	uint16_t	bay_num, enclosure;
12028 
12029 	ASSERT(*pptgt == NULL);
12030 
12031 	rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
12032 	    &sas_wwn, &dev_info, &physport, &phynum, &bay_num, &enclosure);
12033 	if (rval != DDI_SUCCESS) {
12034 		rval = DEV_INFO_FAIL_PAGE0;
12035 		return (rval);
12036 	}
12037 
12038 	if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
12039 	    MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12040 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
12041 		rval = DEV_INFO_WRONG_DEVICE_TYPE;
12042 		return (rval);
12043 	}
12044 
12045 	/*
12046 	 * Get SATA Device Name from SAS device page0 for
12047 	 * sata device, if device name doesn't exist, set m_sas_wwn to
12048 	 * 0 for direct attached SATA. For the device behind the expander
12049 	 * we still can use STP address assigned by expander.
12050 	 */
12051 	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12052 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12053 		mutex_exit(&mpt->m_mutex);
12054 		/* alloc a tmp_tgt to send the cmd */
12055 		tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
12056 		    KM_SLEEP);
12057 		tmp_tgt->m_devhdl = *dev_handle;
12058 		tmp_tgt->m_deviceinfo = dev_info;
12059 		tmp_tgt->m_qfull_retries = QFULL_RETRIES;
12060 		tmp_tgt->m_qfull_retry_interval =
12061 		    drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
12062 		tmp_tgt->m_t_throttle = MAX_THROTTLE;
12063 		devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
12064 		kmem_free(tmp_tgt, sizeof (struct mptsas_target));
12065 		mutex_enter(&mpt->m_mutex);
12066 		if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
12067 			sas_wwn = devicename;
12068 		} else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
12069 			sas_wwn = 0;
12070 		}
12071 	}
12072 
12073 	/*
12074 	 * Check if the dev handle is for a Phys Disk. If so, set return value
12075 	 * and exit.  Don't add Phys Disks to hash.
12076 	 */
12077 	for (config = 0; config < slots->m_num_raid_configs; config++) {
12078 		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
12079 			if (*dev_handle == slots->m_raidconfig[config].
12080 			    m_physdisk_devhdl[disk]) {
12081 				rval = DEV_INFO_PHYS_DISK;
12082 				return (rval);
12083 			}
12084 		}
12085 	}
12086 
12087 	phymask = mptsas_physport_to_phymask(mpt, physport);
12088 	*pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
12089 	    dev_info, phymask, phynum);
12090 	if (*pptgt == NULL) {
12091 		mptsas_log(mpt, CE_WARN, "Failed to allocated target"
12092 		    "structure!");
12093 		rval = DEV_INFO_FAIL_ALLOC;
12094 		return (rval);
12095 	}
12096 	(*pptgt)->m_enclosure = enclosure;
12097 	(*pptgt)->m_slot_num = bay_num;
12098 	return (DEV_INFO_SUCCESS);
12099 }
12100 
12101 uint64_t
12102 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
12103 {
12104 	uint64_t	sata_guid = 0, *pwwn = NULL;
12105 	int		target = ptgt->m_devhdl;
12106 	uchar_t		*inq83 = NULL;
12107 	int		inq83_len = 0xFF;
12108 	uchar_t		*dblk = NULL;
12109 	int		inq83_retry = 3;
12110 	int		rval = DDI_FAILURE;
12111 
12112 	inq83	= kmem_zalloc(inq83_len, KM_SLEEP);
12113 
12114 inq83_retry:
12115 	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
12116 	    inq83_len, NULL, 1);
12117 	if (rval != DDI_SUCCESS) {
12118 		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
12119 		    "0x83 for target:%x, lun:%x failed!", target, lun);
12120 		goto out;
12121 	}
12122 	/* According to SAT2, the first descriptor is logic unit name */
12123 	dblk = &inq83[4];
12124 	if ((dblk[1] & 0x30) != 0) {
12125 		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
12126 		goto out;
12127 	}
12128 	pwwn = (uint64_t *)(void *)(&dblk[4]);
12129 	if ((dblk[4] & 0xf0) == 0x50) {
12130 		sata_guid = BE_64(*pwwn);
12131 		goto out;
12132 	} else if (dblk[4] == 'A') {
12133 		NDBG20(("SATA drive has no NAA format GUID."));
12134 		goto out;
12135 	} else {
12136 		/* The data is not ready, wait and retry */
12137 		inq83_retry--;
12138 		if (inq83_retry <= 0) {
12139 			goto out;
12140 		}
12141 		NDBG20(("The GUID is not ready, retry..."));
12142 		delay(1 * drv_usectohz(1000000));
12143 		goto inq83_retry;
12144 	}
12145 out:
12146 	kmem_free(inq83, inq83_len);
12147 	return (sata_guid);
12148 }
12149 
12150 static int
12151 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
12152     unsigned char *buf, int len, int *reallen, uchar_t evpd)
12153 {
12154 	uchar_t			cdb[CDB_GROUP0];
12155 	struct scsi_address	ap;
12156 	struct buf		*data_bp = NULL;
12157 	int			resid = 0;
12158 	int			ret = DDI_FAILURE;
12159 
12160 	ASSERT(len <= 0xffff);
12161 
12162 	ap.a_target = MPTSAS_INVALID_DEVHDL;
12163 	ap.a_lun = (uchar_t)(lun);
12164 	ap.a_hba_tran = mpt->m_tran;
12165 
12166 	data_bp = scsi_alloc_consistent_buf(&ap,
12167 	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
12168 	if (data_bp == NULL) {
12169 		return (ret);
12170 	}
12171 	bzero(cdb, CDB_GROUP0);
12172 	cdb[0] = SCMD_INQUIRY;
12173 	cdb[1] = evpd;
12174 	cdb[2] = page;
12175 	cdb[3] = (len & 0xff00) >> 8;
12176 	cdb[4] = (len & 0x00ff);
12177 	cdb[5] = 0;
12178 
12179 	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
12180 	    &resid);
12181 	if (ret == DDI_SUCCESS) {
12182 		if (reallen) {
12183 			*reallen = len - resid;
12184 		}
12185 		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
12186 	}
12187 	if (data_bp) {
12188 		scsi_free_consistent_buf(data_bp);
12189 	}
12190 	return (ret);
12191 }
12192 
/*
 * mptsas_send_scsi_cmd:
 *
 * Build and poll a SCSI packet for the given CDB against ptgt.  The
 * caller's scsi_address is rewired to a clone of the HBA tran whose
 * tran_tgt_private carries ptgt and the lun, so scsi_init_pkt() can
 * resolve the command without a child devinfo node (simulating a
 * command from sd).
 *
 * Returns DDI_SUCCESS when the command completed without a check
 * condition; *resid (if non-NULL) receives the untransferred byte
 * count.  NOTE(review): ap->a_hba_tran is left pointing at the freed
 * clone on return -- current callers pass a throwaway ap or reset
 * a_hba_tran before reuse; confirm before reusing ap afterwards.
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt		*pktp = NULL;
	scsi_hba_tran_t		*tran_clone = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	int			ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	/* Point the clone at our target/lun so the HBA can address it. */
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	/* Execute synchronously (polled completion). */
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* A check condition counts as failure. */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* Unwind in reverse order; each pointer is NULL if never set. */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
12256 static int
12257 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
12258 {
12259 	char	*cp = NULL;
12260 	char	*ptr = NULL;
12261 	size_t	s = 0;
12262 	char	*wwid_str = NULL;
12263 	char	*lun_str = NULL;
12264 	long	lunnum;
12265 	long	phyid = -1;
12266 	int	rc = DDI_FAILURE;
12267 
12268 	ptr = name;
12269 	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
12270 	ptr++;
12271 	if ((cp = strchr(ptr, ',')) == NULL) {
12272 		return (DDI_FAILURE);
12273 	}
12274 
12275 	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12276 	s = (uintptr_t)cp - (uintptr_t)ptr;
12277 
12278 	bcopy(ptr, wwid_str, s);
12279 	wwid_str[s] = '\0';
12280 
12281 	ptr = ++cp;
12282 
12283 	if ((cp = strchr(ptr, '\0')) == NULL) {
12284 		goto out;
12285 	}
12286 	lun_str =  kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12287 	s = (uintptr_t)cp - (uintptr_t)ptr;
12288 
12289 	bcopy(ptr, lun_str, s);
12290 	lun_str[s] = '\0';
12291 
12292 	if (name[0] == 'p') {
12293 		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
12294 	} else {
12295 		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
12296 	}
12297 	if (rc != DDI_SUCCESS)
12298 		goto out;
12299 
12300 	if (phyid != -1) {
12301 		ASSERT(phyid < MPTSAS_MAX_PHYS);
12302 		*phy = (uint8_t)phyid;
12303 	}
12304 	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
12305 	if (rc != 0)
12306 		goto out;
12307 
12308 	*lun = (int)lunnum;
12309 	rc = DDI_SUCCESS;
12310 out:
12311 	if (wwid_str)
12312 		kmem_free(wwid_str, SCSI_MAXNAMELEN);
12313 	if (lun_str)
12314 		kmem_free(lun_str, SCSI_MAXNAMELEN);
12315 
12316 	return (rc);
12317 }
12318 
12319 /*
12320  * mptsas_parse_smp_name() is to parse sas wwn string
12321  * which format is "wWWN"
12322  */
12323 static int
12324 mptsas_parse_smp_name(char *name, uint64_t *wwn)
12325 {
12326 	char	*ptr = name;
12327 
12328 	if (*ptr != 'w') {
12329 		return (DDI_FAILURE);
12330 	}
12331 
12332 	ptr++;
12333 	if (scsi_wwnstr_to_wwn(ptr, wwn)) {
12334 		return (DDI_FAILURE);
12335 	}
12336 	return (DDI_SUCCESS);
12337 }
12338 
/*
 * mptsas_bus_config:
 *
 * Bus config entry point for an iport node.  BUS_CONFIG_ONE decodes
 * the child name ("smp@wWWN", "...@wWWID,LUN" or "...@pPHY,LUN") and
 * configures just that device; BUS_CONFIG_DRIVER/BUS_CONFIG_ALL
 * enumerate everything below the iport.  The vHCI and the iport are
 * both held (ndi_devi_enter) across the operation.  Note the mix of
 * NDI_* and DDI_* codes in ret: both success values are compared
 * against NDI_SUCCESS before calling ndi_busop_bus_config().
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;	/* canonicalized copy, if needed */
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;

	/* Only iport nodes (which carry a unit address) are configurable. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}

	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				/* Append ",0" and re-point ptr into copy. */
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/* Hand off to the framework only when our part succeeded. */
	if (ret == NDI_SUCCESS) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
12443 
12444 static int
12445 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
12446     mptsas_target_t *ptgt)
12447 {
12448 	int			rval = DDI_FAILURE;
12449 	struct scsi_inquiry	*sd_inq = NULL;
12450 	mptsas_t		*mpt = DIP2MPT(pdip);
12451 
12452 	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
12453 
12454 	rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
12455 	    SUN_INQSIZE, 0, (uchar_t)0);
12456 
12457 	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
12458 		rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
12459 	} else {
12460 		rval = DDI_FAILURE;
12461 	}
12462 
12463 	kmem_free(sd_inq, SUN_INQSIZE);
12464 	return (rval);
12465 }
12466 
12467 static int
12468 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
12469     dev_info_t **lundip)
12470 {
12471 	int		rval;
12472 	mptsas_t		*mpt = DIP2MPT(pdip);
12473 	int		phymask;
12474 	mptsas_target_t	*ptgt = NULL;
12475 
12476 	/*
12477 	 * Get the physical port associated to the iport
12478 	 */
12479 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12480 	    "phymask", 0);
12481 
12482 	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
12483 	if (ptgt == NULL) {
12484 		/*
12485 		 * didn't match any device by searching
12486 		 */
12487 		return (DDI_FAILURE);
12488 	}
12489 	/*
12490 	 * If the LUN already exists and the status is online,
12491 	 * we just return the pointer to dev_info_t directly.
12492 	 * For the mdi_pathinfo node, we'll handle it in
12493 	 * mptsas_create_virt_lun()
12494 	 * TODO should be also in mptsas_handle_dr
12495 	 */
12496 
12497 	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
12498 	if (*lundip != NULL) {
12499 		/*
12500 		 * TODO Another senario is, we hotplug the same disk
12501 		 * on the same slot, the devhdl changed, is this
12502 		 * possible?
12503 		 * tgt_private->t_private != ptgt
12504 		 */
12505 		if (sasaddr != ptgt->m_sas_wwn) {
12506 			/*
12507 			 * The device has changed although the devhdl is the
12508 			 * same (Enclosure mapping mode, change drive on the
12509 			 * same slot)
12510 			 */
12511 			return (DDI_FAILURE);
12512 		}
12513 		return (DDI_SUCCESS);
12514 	}
12515 
12516 	if (phymask == 0) {
12517 		/*
12518 		 * Configure IR volume
12519 		 */
12520 		rval =  mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
12521 		return (rval);
12522 	}
12523 	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
12524 
12525 	return (rval);
12526 }
12527 
12528 static int
12529 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
12530     dev_info_t **lundip)
12531 {
12532 	int		rval;
12533 	mptsas_t	*mpt = DIP2MPT(pdip);
12534 	int		phymask;
12535 	mptsas_target_t	*ptgt = NULL;
12536 
12537 	/*
12538 	 * Get the physical port associated to the iport
12539 	 */
12540 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12541 	    "phymask", 0);
12542 
12543 	ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
12544 	if (ptgt == NULL) {
12545 		/*
12546 		 * didn't match any device by searching
12547 		 */
12548 		return (DDI_FAILURE);
12549 	}
12550 
12551 	/*
12552 	 * If the LUN already exists and the status is online,
12553 	 * we just return the pointer to dev_info_t directly.
12554 	 * For the mdi_pathinfo node, we'll handle it in
12555 	 * mptsas_create_virt_lun().
12556 	 */
12557 
12558 	*lundip = mptsas_find_child_phy(pdip, phy);
12559 	if (*lundip != NULL) {
12560 		return (DDI_SUCCESS);
12561 	}
12562 
12563 	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
12564 
12565 	return (rval);
12566 }
12567 
12568 static int
12569 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
12570     uint8_t *lun_addr_type)
12571 {
12572 	uint32_t	lun_idx = 0;
12573 
12574 	ASSERT(lun_num != NULL);
12575 	ASSERT(lun_addr_type != NULL);
12576 
12577 	lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
12578 	/* determine report luns addressing type */
12579 	switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
12580 		/*
12581 		 * Vendors in the field have been found to be concatenating
12582 		 * bus/target/lun to equal the complete lun value instead
12583 		 * of switching to flat space addressing
12584 		 */
12585 		/* 00b - peripheral device addressing method */
12586 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
12587 		/* FALLTHRU */
12588 		/* 10b - logical unit addressing method */
12589 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
12590 		/* FALLTHRU */
12591 		/* 01b - flat space addressing method */
12592 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
12593 		/* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
12594 		*lun_addr_type = (buf[lun_idx] &
12595 		    MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
12596 		*lun_num = (buf[lun_idx] & 0x3F) << 8;
12597 		*lun_num |= buf[lun_idx + 1];
12598 		return (DDI_SUCCESS);
12599 	default:
12600 		return (DDI_FAILURE);
12601 	}
12602 }
12603 
/*
 * mptsas_config_luns:
 *
 * Enumerate the luns of one target: issue REPORT LUNS (growing the
 * buffer and reissuing until the whole list fits, with at most three
 * failed attempts), probe/configure each reported lun, and offline
 * child nodes for luns no longer reported.  SATA targets are treated
 * as single-lun and handled up front without a REPORT LUNS.
 */
static int
mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
{
	struct buf		*repluns_bp = NULL;
	struct scsi_address	ap;
	uchar_t			cdb[CDB_GROUP5];
	int			ret = DDI_FAILURE;
	int			retry = 0;
	int			lun_list_len = 0;
	uint16_t		lun_num = 0;
	uint8_t			lun_addr_type = 0;
	uint32_t		lun_cnt = 0;
	uint32_t		lun_total = 0;
	dev_info_t		*cdip = NULL;
	uint16_t		*saved_repluns = NULL;
	char			*buffer = NULL;
	int			buf_len = 128;	/* initial response size */
	mptsas_t		*mpt = DIP2MPT(pdip);
	uint64_t		sas_wwn = 0;
	uint8_t			phy = 0xFF;
	uint32_t		dev_info = 0;

	/* Snapshot target identity under the instance mutex. */
	mutex_enter(&mpt->m_mutex);
	sas_wwn = ptgt->m_sas_wwn;
	phy = ptgt->m_phynum;
	dev_info = ptgt->m_deviceinfo;
	mutex_exit(&mpt->m_mutex);

	if (sas_wwn == 0) {
		/*
		 * It's a SATA without Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_phy(pdip, phy)) {
			return (DDI_SUCCESS);
		} else {
			/*
			 * need configure and create node
			 */
			return (DDI_FAILURE);
		}
	}

	/*
	 * WWN (SAS address or Device Name exist)
	 */
	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		/*
		 * SATA device with Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
			return (DDI_SUCCESS);
		} else {
			return (DDI_FAILURE);
		}
	}

	/*
	 * REPORT LUNS loop: on truncation the buffer is regrown to the
	 * size the device reported and the command reissued; only
	 * allocation/command failures count against the retry limit.
	 * On the successful break, repluns_bp is still allocated and
	 * owned by the code below.
	 */
	do {
		ap.a_target = MPTSAS_INVALID_DEVHDL;
		ap.a_lun = 0;
		ap.a_hba_tran = mpt->m_tran;
		repluns_bp = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
		if (repluns_bp == NULL) {
			retry++;
			continue;
		}
		bzero(cdb, CDB_GROUP5);
		cdb[0] = SCMD_REPORT_LUNS;
		cdb[6] = (buf_len & 0xff000000) >> 24;
		cdb[7] = (buf_len & 0x00ff0000) >> 16;
		cdb[8] = (buf_len & 0x0000ff00) >> 8;
		cdb[9] = (buf_len & 0x000000ff);

		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
		    repluns_bp, NULL);
		if (ret != DDI_SUCCESS) {
			scsi_free_consistent_buf(repluns_bp);
			retry++;
			continue;
		}
		/* First 4 bytes of the response are the list length (BE). */
		lun_list_len = BE_32(*(int *)((void *)(
		    repluns_bp->b_un.b_addr)));
		if (buf_len >= lun_list_len + 8) {
			ret = DDI_SUCCESS;
			break;
		}
		scsi_free_consistent_buf(repluns_bp);
		buf_len = lun_list_len + 8;

	} while (retry < 3);

	if (ret != DDI_SUCCESS)
		return (ret);
	buffer = (char *)repluns_bp->b_un.b_addr;
	/*
	 * find out the number of luns returned by the SCSI ReportLun call
	 * and allocate buffer space
	 */
	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
	if (saved_repluns == NULL) {
		scsi_free_consistent_buf(repluns_bp);
		return (DDI_FAILURE);
	}
	/* Probe every reported lun; remember the lun numbers seen. */
	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
			continue;
		}
		saved_repluns[lun_cnt] = lun_num;
		if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
			ret = DDI_SUCCESS;
		else
			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
			    ptgt);
		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
			/* Lun is present again: clear any gone marker. */
			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
			    MPTSAS_DEV_GONE);
		}
	}
	/* Offline children whose luns were not in this report. */
	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
	scsi_free_consistent_buf(repluns_bp);
	return (DDI_SUCCESS);
}
12732 
12733 static int
12734 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
12735 {
12736 	int			rval = DDI_FAILURE;
12737 	struct scsi_inquiry	*sd_inq = NULL;
12738 	mptsas_t		*mpt = DIP2MPT(pdip);
12739 	mptsas_target_t		*ptgt = NULL;
12740 
12741 	mutex_enter(&mpt->m_mutex);
12742 	ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
12743 	mutex_exit(&mpt->m_mutex);
12744 	if (ptgt == NULL) {
12745 		mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
12746 		    "not found.", target);
12747 		return (rval);
12748 	}
12749 
12750 	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
12751 	rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
12752 	    SUN_INQSIZE, 0, (uchar_t)0);
12753 
12754 	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
12755 		rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
12756 		    0);
12757 	} else {
12758 		rval = DDI_FAILURE;
12759 	}
12760 
12761 	kmem_free(sd_inq, SUN_INQSIZE);
12762 	return (rval);
12763 }
12764 
12765 /*
12766  * configure all RAID volumes for virtual iport
12767  */
12768 static void
12769 mptsas_config_all_viport(dev_info_t *pdip)
12770 {
12771 	mptsas_t	*mpt = DIP2MPT(pdip);
12772 	int		config, vol;
12773 	int		target;
12774 	dev_info_t	*lundip = NULL;
12775 	mptsas_slots_t	*slots = mpt->m_active;
12776 
12777 	/*
12778 	 * Get latest RAID info and search for any Volume DevHandles.  If any
12779 	 * are found, configure the volume.
12780 	 */
12781 	mutex_enter(&mpt->m_mutex);
12782 	for (config = 0; config < slots->m_num_raid_configs; config++) {
12783 		for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
12784 			if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
12785 			    == 1) {
12786 				target = slots->m_raidconfig[config].
12787 				    m_raidvol[vol].m_raidhandle;
12788 				mutex_exit(&mpt->m_mutex);
12789 				(void) mptsas_config_raid(pdip, target,
12790 				    &lundip);
12791 				mutex_enter(&mpt->m_mutex);
12792 			}
12793 		}
12794 	}
12795 	mutex_exit(&mpt->m_mutex);
12796 }
12797 
/*
 * mptsas_offline_missed_luns:
 *
 * Walk the iport's child devinfo nodes and mdi_pathinfo paths that
 * belong to ptgt's WWN and offline (NDI_DEVI_REMOVE) every lun that
 * does not appear in the repluns[] array of lun_cnt luns just
 * reported by the device.  "smp" children and nodes whose unit
 * address cannot be parsed are skipped.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/* Snapshot the target's WWN under the instance mutex. */
	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_sas_wwn;
	mutex_exit(&mpt->m_mutex);

	/*
	 * Pass 1: plain devinfo children.  The sibling pointer is
	 * fetched before savechild may be offlined/removed.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		child = ddi_get_next_sibling(child);

		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (wwid == sas_wwn) {
			/* Is this child's lun in the fresh report? */
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/*
	 * Pass 2: multipath (mdi) client paths, same advance-before-use
	 * idiom as above.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
12894 
/*
 * Rebuild the SMP and target hash tables by walking the IOC's SAS
 * expander and SAS device configuration pages with the GET_NEXT_HANDLE
 * page-address form, resuming from the last handles recorded in
 * m_smp_devhdl/m_dev_handle.
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t	page_address;
	int		rval = 0;
	uint16_t	dev_handle;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/*
	 * Walk expander page 0 entries until the fetch fails or
	 * m_done_traverse_smp becomes nonzero (presumably set by the
	 * page-0 fetch path at end of enumeration -- TODO confirm).
	 */
	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		/* Record the handle so a later call resumes from here. */
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		/*
		 * Stop only on page-read or allocation failure; other
		 * DEV_INFO_* results continue with the next handle.
		 */
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
12948 
12949 void
12950 mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
12951 {
12952 	mptsas_hash_data_t *data;
12953 	data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
12954 	while (data != NULL) {
12955 		data->devhdl = MPTSAS_INVALID_DEVHDL;
12956 		data->device_info = 0;
12957 		/*
12958 		 * For tgttbl, clear dr_flag.
12959 		 */
12960 		data->dr_flag = MPTSAS_DR_INACTIVE;
12961 		data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
12962 	}
12963 }
12964 
/*
 * Resynchronize driver data structures with the hardware after a hard
 * reset: refresh the phymask mapping, invalidate the cached target/SMP
 * tables, and rescan the IOC configuration pages from the start.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalid all the entries in hash table
	 *    m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3. call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);
	/*
	 * Invalid the existing entries
	 */
	mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
	mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
	/* Force mptsas_update_hashtab() to rescan from the first handle. */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
12986 
/*
 * Enumerate every SMP expander and every target whose phymask matches
 * the iport pdip, onlining a node for each.  A phymask of 0 denotes the
 * virtual port used for RAID volumes, which is enumerated separately by
 * mptsas_config_all_viport().
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t	*smpdip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Refresh the SMP/target tables if a full scan hasn't completed. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/*
	 * Online each expander on this iport.  m_mutex is dropped around
	 * mptsas_online_smp(); NOTE(review): this relies on the hash
	 * traversal cursor remaining valid across the unlocked window --
	 * confirm against concurrent table updates.
	 */
	psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
	    MPTSAS_HASH_FIRST);
	while (psmp != NULL) {
		phy_mask = psmp->m_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
		psmp = (mptsas_smp_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
	}

	/* Likewise configure each matching target, dropping m_mutex. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		phy_mask = ptgt->m_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mutex_exit(&mpt->m_mutex);
}
13046 
13047 static int
13048 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
13049 {
13050 	int		rval = DDI_FAILURE;
13051 	dev_info_t	*tdip;
13052 
13053 	rval = mptsas_config_luns(pdip, ptgt);
13054 	if (rval != DDI_SUCCESS) {
13055 		/*
13056 		 * The return value means the SCMD_REPORT_LUNS
13057 		 * did not execute successfully. The target maybe
13058 		 * doesn't support such command.
13059 		 */
13060 		rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
13061 	}
13062 	return (rval);
13063 }
13064 
13065 /*
13066  * Return fail if not all the childs/paths are freed.
13067  * if there is any path under the HBA, the return value will be always fail
13068  * because we didn't call mdi_pi_free for path
13069  */
13070 static int
13071 mptsas_offline_target(dev_info_t *pdip, char *name)
13072 {
13073 	dev_info_t		*child = NULL, *prechild = NULL;
13074 	mdi_pathinfo_t		*pip = NULL, *savepip = NULL;
13075 	int			tmp_rval, rval = DDI_SUCCESS;
13076 	char			*addr, *cp;
13077 	size_t			s;
13078 	mptsas_t		*mpt = DIP2MPT(pdip);
13079 
13080 	child = ddi_get_child(pdip);
13081 	while (child) {
13082 		addr = ddi_get_name_addr(child);
13083 		prechild = child;
13084 		child = ddi_get_next_sibling(child);
13085 
13086 		if (addr == NULL) {
13087 			continue;
13088 		}
13089 		if ((cp = strchr(addr, ',')) == NULL) {
13090 			continue;
13091 		}
13092 
13093 		s = (uintptr_t)cp - (uintptr_t)addr;
13094 
13095 		if (strncmp(addr, name, s) != 0) {
13096 			continue;
13097 		}
13098 
13099 		tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
13100 		    NDI_DEVI_REMOVE);
13101 		if (tmp_rval != DDI_SUCCESS) {
13102 			rval = DDI_FAILURE;
13103 			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
13104 			    prechild, MPTSAS_DEV_GONE) !=
13105 			    DDI_PROP_SUCCESS) {
13106 				mptsas_log(mpt, CE_WARN, "mptsas driver "
13107 				    "unable to create property for "
13108 				    "SAS %s (MPTSAS_DEV_GONE)", addr);
13109 			}
13110 		}
13111 	}
13112 
13113 	pip = mdi_get_next_client_path(pdip, NULL);
13114 	while (pip) {
13115 		addr = MDI_PI(pip)->pi_addr;
13116 		savepip = pip;
13117 		pip = mdi_get_next_client_path(pdip, pip);
13118 		if (addr == NULL) {
13119 			continue;
13120 		}
13121 
13122 		if ((cp = strchr(addr, ',')) == NULL) {
13123 			continue;
13124 		}
13125 
13126 		s = (uintptr_t)cp - (uintptr_t)addr;
13127 
13128 		if (strncmp(addr, name, s) != 0) {
13129 			continue;
13130 		}
13131 
13132 		(void) mptsas_offline_lun(pdip, NULL, savepip,
13133 		    NDI_DEVI_REMOVE);
13134 		/*
13135 		 * driver will not invoke mdi_pi_free, so path will not
13136 		 * be freed forever, return DDI_FAILURE.
13137 		 */
13138 		rval = DDI_FAILURE;
13139 	}
13140 	return (rval);
13141 }
13142 
13143 static int
13144 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
13145     mdi_pathinfo_t *rpip, uint_t flags)
13146 {
13147 	int		rval = DDI_FAILURE;
13148 	char		*devname;
13149 	dev_info_t	*cdip, *parent;
13150 
13151 	if (rpip != NULL) {
13152 		parent = scsi_vhci_dip;
13153 		cdip = mdi_pi_get_client(rpip);
13154 	} else if (rdip != NULL) {
13155 		parent = pdip;
13156 		cdip = rdip;
13157 	} else {
13158 		return (DDI_FAILURE);
13159 	}
13160 
13161 	/*
13162 	 * Make sure node is attached otherwise
13163 	 * it won't have related cache nodes to
13164 	 * clean up.  i_ddi_devi_attached is
13165 	 * similiar to i_ddi_node_state(cdip) >=
13166 	 * DS_ATTACHED.
13167 	 */
13168 	if (i_ddi_devi_attached(cdip)) {
13169 
13170 		/* Get full devname */
13171 		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13172 		(void) ddi_deviname(cdip, devname);
13173 		/* Clean cache */
13174 		(void) devfs_clean(parent, devname + 1,
13175 		    DV_CLEAN_FORCE);
13176 		kmem_free(devname, MAXNAMELEN + 1);
13177 	}
13178 	if (rpip != NULL) {
13179 		if (MDI_PI_IS_OFFLINE(rpip)) {
13180 			rval = DDI_SUCCESS;
13181 		} else {
13182 			rval = mdi_pi_offline(rpip, 0);
13183 		}
13184 	} else {
13185 		rval = ndi_devi_offline(cdip, flags);
13186 	}
13187 
13188 	return (rval);
13189 }
13190 
13191 static dev_info_t *
13192 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
13193 {
13194 	dev_info_t	*child = NULL;
13195 	char		*smp_wwn = NULL;
13196 
13197 	child = ddi_get_child(parent);
13198 	while (child) {
13199 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
13200 		    DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
13201 		    != DDI_SUCCESS) {
13202 			child = ddi_get_next_sibling(child);
13203 			continue;
13204 		}
13205 
13206 		if (strcmp(smp_wwn, str_wwn) == 0) {
13207 			ddi_prop_free(smp_wwn);
13208 			break;
13209 		}
13210 		child = ddi_get_next_sibling(child);
13211 		ddi_prop_free(smp_wwn);
13212 	}
13213 	return (child);
13214 }
13215 
/*
 * Offline the child devinfo node (if any) representing the SMP expander
 * smp_node under pdip, flushing devfs cache entries first.  Returns
 * DDI_SUCCESS when no matching child exists.
 */
static int
mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
{
	int		rval = DDI_FAILURE;
	char		*devname;
	char		wwn_str[MPTSAS_WWN_STRLEN];
	dev_info_t	*cdip;

	/*
	 * NOTE(review): "%"PRIx64 prints without leading zeros; this must
	 * match the format used when the SMP_WWN property was created --
	 * confirm against the SMP online path.
	 */
	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);

	cdip = mptsas_find_smp_child(pdip, wwn_str);

	if (cdip == NULL)
		return (DDI_SUCCESS);

	/*
	 * Make sure node is attached otherwise
	 * it won't have related cache nodes to
	 * clean up.  i_ddi_devi_attached is
	 * similar to i_ddi_node_state(cdip) >=
	 * DS_ATTACHED.
	 */
	if (i_ddi_devi_attached(cdip)) {

		/* Get full devname */
		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
		(void) ddi_deviname(cdip, devname);
		/* Clean cache */
		(void) devfs_clean(pdip, devname + 1,
		    DV_CLEAN_FORCE);
		kmem_free(devname, MAXNAMELEN + 1);
	}

	rval = ndi_devi_offline(cdip, flags);

	return (rval);
}
13253 
13254 static dev_info_t *
13255 mptsas_find_child(dev_info_t *pdip, char *name)
13256 {
13257 	dev_info_t	*child = NULL;
13258 	char		*rname = NULL;
13259 	int		rval = DDI_FAILURE;
13260 
13261 	rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13262 
13263 	child = ddi_get_child(pdip);
13264 	while (child) {
13265 		rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
13266 		if (rval != DDI_SUCCESS) {
13267 			child = ddi_get_next_sibling(child);
13268 			bzero(rname, SCSI_MAXNAMELEN);
13269 			continue;
13270 		}
13271 
13272 		if (strcmp(rname, name) == 0) {
13273 			break;
13274 		}
13275 		child = ddi_get_next_sibling(child);
13276 		bzero(rname, SCSI_MAXNAMELEN);
13277 	}
13278 
13279 	kmem_free(rname, SCSI_MAXNAMELEN);
13280 
13281 	return (child);
13282 }
13283 
13284 
13285 static dev_info_t *
13286 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
13287 {
13288 	dev_info_t	*child = NULL;
13289 	char		*name = NULL;
13290 	char		*addr = NULL;
13291 
13292 	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13293 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13294 	(void) sprintf(name, "%016"PRIx64, sasaddr);
13295 	(void) sprintf(addr, "w%s,%x", name, lun);
13296 	child = mptsas_find_child(pdip, addr);
13297 	kmem_free(name, SCSI_MAXNAMELEN);
13298 	kmem_free(addr, SCSI_MAXNAMELEN);
13299 	return (child);
13300 }
13301 
13302 static dev_info_t *
13303 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
13304 {
13305 	dev_info_t	*child;
13306 	char		*addr;
13307 
13308 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13309 	(void) sprintf(addr, "p%x,0", phy);
13310 	child = mptsas_find_child(pdip, addr);
13311 	kmem_free(addr, SCSI_MAXNAMELEN);
13312 	return (child);
13313 }
13314 
13315 static mdi_pathinfo_t *
13316 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
13317 {
13318 	mdi_pathinfo_t	*path;
13319 	char		*addr = NULL;
13320 
13321 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13322 	(void) sprintf(addr, "p%x,0", phy);
13323 	path = mdi_pi_find(pdip, NULL, addr);
13324 	kmem_free(addr, SCSI_MAXNAMELEN);
13325 	return (path);
13326 }
13327 
13328 static mdi_pathinfo_t *
13329 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
13330 {
13331 	mdi_pathinfo_t	*path;
13332 	char		*name = NULL;
13333 	char		*addr = NULL;
13334 
13335 	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13336 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13337 	(void) sprintf(name, "%016"PRIx64, sasaddr);
13338 	(void) sprintf(addr, "w%s,%x", name, lun);
13339 	path = mdi_pi_find(parent, NULL, addr);
13340 	kmem_free(name, SCSI_MAXNAMELEN);
13341 	kmem_free(addr, SCSI_MAXNAMELEN);
13342 
13343 	return (path);
13344 }
13345 
/*
 * Enumerate one LUN of ptgt: obtain a devid GUID from INQUIRY VPD page
 * 0x83 (skipped for CD/DVD, optical and enclosure devices, which are
 * never placed under scsi_vhci), then create either an MPxIO virtual
 * path or a physical child node for the LUN.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int			i = 0;
	uchar_t			*inq83 = NULL;
	int			inq83_len1 = 0xFF;
	int			inq83_len = 0;
	int			rval = DDI_FAILURE;
	ddi_devid_t		devid;
	char			*guid = NULL;
	int			target = ptgt->m_devhdl;
	mdi_pathinfo_t		*pip = NULL;
	mptsas_t		*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83	= kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			/* wait one second between retries */
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	rval = DDI_FAILURE;

create_lun:
	/* Prefer an MPxIO virtual LUN when we have a GUID and MPxIO is on. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to a physical child node if the virtual path failed. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);
	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
13466 
13467 static int
13468 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
13469     dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
13470 {
13471 	int			target;
13472 	char			*nodename = NULL;
13473 	char			**compatible = NULL;
13474 	int			ncompatible	= 0;
13475 	int			mdi_rtn = MDI_FAILURE;
13476 	int			rval = DDI_FAILURE;
13477 	char			*old_guid = NULL;
13478 	mptsas_t		*mpt = DIP2MPT(pdip);
13479 	char			*lun_addr = NULL;
13480 	char			*wwn_str = NULL;
13481 	char			*component = NULL;
13482 	uint8_t			phy = 0xFF;
13483 	uint64_t		sas_wwn;
13484 	uint32_t		devinfo;
13485 
13486 	mutex_enter(&mpt->m_mutex);
13487 	target = ptgt->m_devhdl;
13488 	sas_wwn = ptgt->m_sas_wwn;
13489 	devinfo = ptgt->m_deviceinfo;
13490 	phy = ptgt->m_phynum;
13491 	mutex_exit(&mpt->m_mutex);
13492 
13493 	if (sas_wwn) {
13494 		*pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
13495 	} else {
13496 		*pip = mptsas_find_path_phy(pdip, phy);
13497 	}
13498 
13499 	if (*pip != NULL) {
13500 		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
13501 		ASSERT(*lun_dip != NULL);
13502 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
13503 		    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
13504 		    MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
13505 			if (strncmp(guid, old_guid, strlen(guid)) == 0) {
13506 				/*
13507 				 * Same path back online again.
13508 				 */
13509 				(void) ddi_prop_free(old_guid);
13510 				if ((!MDI_PI_IS_ONLINE(*pip)) &&
13511 				    (!MDI_PI_IS_STANDBY(*pip)) &&
13512 				    (ptgt->m_tgt_unconfigured == 0)) {
13513 					rval = mdi_pi_online(*pip, 0);
13514 					mutex_enter(&mpt->m_mutex);
13515 					(void) mptsas_set_led_status(mpt, ptgt,
13516 					    0);
13517 					mutex_exit(&mpt->m_mutex);
13518 				} else {
13519 					rval = DDI_SUCCESS;
13520 				}
13521 				if (rval != DDI_SUCCESS) {
13522 					mptsas_log(mpt, CE_WARN, "path:target: "
13523 					    "%x, lun:%x online failed!", target,
13524 					    lun);
13525 					*pip = NULL;
13526 					*lun_dip = NULL;
13527 				}
13528 				return (rval);
13529 			} else {
13530 				/*
13531 				 * The GUID of the LUN has changed which maybe
13532 				 * because customer mapped another volume to the
13533 				 * same LUN.
13534 				 */
13535 				mptsas_log(mpt, CE_WARN, "The GUID of the "
13536 				    "target:%x, lun:%x was changed, maybe "
13537 				    "because someone mapped another volume "
13538 				    "to the same LUN", target, lun);
13539 				(void) ddi_prop_free(old_guid);
13540 				if (!MDI_PI_IS_OFFLINE(*pip)) {
13541 					rval = mdi_pi_offline(*pip, 0);
13542 					if (rval != MDI_SUCCESS) {
13543 						mptsas_log(mpt, CE_WARN, "path:"
13544 						    "target:%x, lun:%x offline "
13545 						    "failed!", target, lun);
13546 						*pip = NULL;
13547 						*lun_dip = NULL;
13548 						return (DDI_FAILURE);
13549 					}
13550 				}
13551 				if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
13552 					mptsas_log(mpt, CE_WARN, "path:target:"
13553 					    "%x, lun:%x free failed!", target,
13554 					    lun);
13555 					*pip = NULL;
13556 					*lun_dip = NULL;
13557 					return (DDI_FAILURE);
13558 				}
13559 			}
13560 		} else {
13561 			mptsas_log(mpt, CE_WARN, "Can't get client-guid "
13562 			    "property for path:target:%x, lun:%x", target, lun);
13563 			*pip = NULL;
13564 			*lun_dip = NULL;
13565 			return (DDI_FAILURE);
13566 		}
13567 	}
13568 	scsi_hba_nodename_compatible_get(inq, NULL,
13569 	    inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
13570 
13571 	/*
13572 	 * if nodename can't be determined then print a message and skip it
13573 	 */
13574 	if (nodename == NULL) {
13575 		mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
13576 		    "driver for target%d lun %d dtype:0x%02x", target, lun,
13577 		    inq->inq_dtype);
13578 		return (DDI_FAILURE);
13579 	}
13580 
13581 	wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
13582 	/* The property is needed by MPAPI */
13583 	(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
13584 
13585 	lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13586 	if (sas_wwn)
13587 		(void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
13588 	else
13589 		(void) sprintf(lun_addr, "p%x,%x", phy, lun);
13590 
13591 	mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
13592 	    guid, lun_addr, compatible, ncompatible,
13593 	    0, pip);
13594 	if (mdi_rtn == MDI_SUCCESS) {
13595 
13596 		if (mdi_prop_update_string(*pip, MDI_GUID,
13597 		    guid) != DDI_SUCCESS) {
13598 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13599 			    "create property for target %d lun %d (MDI_GUID)",
13600 			    target, lun);
13601 			mdi_rtn = MDI_FAILURE;
13602 			goto virt_create_done;
13603 		}
13604 
13605 		if (mdi_prop_update_int(*pip, LUN_PROP,
13606 		    lun) != DDI_SUCCESS) {
13607 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13608 			    "create property for target %d lun %d (LUN_PROP)",
13609 			    target, lun);
13610 			mdi_rtn = MDI_FAILURE;
13611 			goto virt_create_done;
13612 		}
13613 		if (mdi_prop_update_string_array(*pip, "compatible",
13614 		    compatible, ncompatible) !=
13615 		    DDI_PROP_SUCCESS) {
13616 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13617 			    "create property for target %d lun %d (COMPATIBLE)",
13618 			    target, lun);
13619 			mdi_rtn = MDI_FAILURE;
13620 			goto virt_create_done;
13621 		}
13622 		if (sas_wwn && (mdi_prop_update_string(*pip,
13623 		    SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
13624 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13625 			    "create property for target %d lun %d "
13626 			    "(target-port)", target, lun);
13627 			mdi_rtn = MDI_FAILURE;
13628 			goto virt_create_done;
13629 		} else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
13630 		    "sata-phy", phy) != DDI_PROP_SUCCESS)) {
13631 			/*
13632 			 * Direct attached SATA device without DeviceName
13633 			 */
13634 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13635 			    "create property for SAS target %d lun %d "
13636 			    "(sata-phy)", target, lun);
13637 			mdi_rtn = NDI_FAILURE;
13638 			goto virt_create_done;
13639 		}
13640 
13641 		if (inq->inq_dtype == 0) {
13642 			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
13643 			/*
13644 			 * set obp path for pathinfo
13645 			 */
13646 			(void) snprintf(component, MAXPATHLEN,
13647 			    "disk@%s", lun_addr);
13648 
13649 			if (mdi_pi_pathname_obp_set(*pip, component) !=
13650 			    DDI_SUCCESS) {
13651 				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
13652 				    "unable to set obp-path for object %s",
13653 				    component);
13654 				mdi_rtn = MDI_FAILURE;
13655 				goto virt_create_done;
13656 			}
13657 		}
13658 
13659 		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
13660 		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13661 		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13662 			if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
13663 			    "pm-capable", 1)) !=
13664 			    DDI_PROP_SUCCESS) {
13665 				mptsas_log(mpt, CE_WARN, "mptsas driver"
13666 				    "failed to create pm-capable "
13667 				    "property, target %d", target);
13668 				mdi_rtn = MDI_FAILURE;
13669 				goto virt_create_done;
13670 			}
13671 		}
13672 		NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
13673 		mdi_rtn = mdi_pi_online(*pip, 0);
13674 		if (mdi_rtn == MDI_SUCCESS) {
13675 			mutex_enter(&mpt->m_mutex);
13676 			if (mptsas_set_led_status(mpt, ptgt, 0) !=
13677 			    DDI_SUCCESS) {
13678 				NDBG14(("mptsas: clear LED for slot %x "
13679 				    "failed", ptgt->m_slot_num));
13680 			}
13681 			mutex_exit(&mpt->m_mutex);
13682 		}
13683 		if (mdi_rtn == MDI_NOT_SUPPORTED) {
13684 			mdi_rtn = MDI_FAILURE;
13685 		}
13686 virt_create_done:
13687 		if (*pip && mdi_rtn != MDI_SUCCESS) {
13688 			(void) mdi_pi_free(*pip, 0);
13689 			*pip = NULL;
13690 			*lun_dip = NULL;
13691 		}
13692 	}
13693 
13694 	scsi_hba_nodename_compatible_free(nodename, compatible);
13695 	if (lun_addr != NULL) {
13696 		kmem_free(lun_addr, SCSI_MAXNAMELEN);
13697 	}
13698 	if (wwn_str != NULL) {
13699 		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
13700 	}
13701 	if (component != NULL) {
13702 		kmem_free(component, MAXPATHLEN);
13703 	}
13704 
13705 	return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
13706 }
13707 
/*
 * Create a non-MPxIO (physical) child devinfo node for one LUN of ptgt
 * directly under the iport pdip, set its properties, and online it.
 * Returns DDI_SUCCESS when the node was created and attached; on any
 * failure the allocated node is freed and *lun_dip is cleared.
 */
static int
mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
    char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int			target;
	int			ndi_rtn = NDI_FAILURE;
	uint64_t		be_sas_wwn;
	char			*nodename = NULL;
	char			**compatible = NULL;
	int			ncompatible = 0;
	int			instance = 0;
	mptsas_t		*mpt = DIP2MPT(pdip);
	char			*wwn_str = NULL;
	char			*component = NULL;
	uint8_t			phy = 0xFF;
	uint64_t		sas_wwn;
	uint32_t		devinfo;

	/* Snapshot the target state under m_mutex. */
	mutex_enter(&mpt->m_mutex);
	target = ptgt->m_devhdl;
	sas_wwn = ptgt->m_sas_wwn;
	devinfo = ptgt->m_deviceinfo;
	phy = ptgt->m_phynum;
	mutex_exit(&mpt->m_mutex);

	/*
	 * generate compatible property with binding-set "mpt"
	 */
	scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
	    &nodename, &compatible, &ncompatible);

	/*
	 * if nodename can't be determined then print a message and skip it
	 */
	if (nodename == NULL) {
		mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
		    "for target %d lun %d", target, lun);
		return (DDI_FAILURE);
	}

	ndi_rtn = ndi_devi_alloc(pdip, nodename,
	    DEVI_SID_NODEID, lun_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {

		if (ndi_prop_update_int(DDI_DEV_T_NONE,
		    *lun_dip, LUN_PROP, lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for target %d lun %d (LUN_PROP)",
			    target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
		    *lun_dip, "compatible", compatible, ncompatible)
		    != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for target %d lun %d (COMPATIBLE)",
			    target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/*
		 * We need the SAS WWN for non-multipath devices, so
		 * we'll use the same property as that multipathing
		 * devices need to present for MPAPI. If we don't have
		 * a WWN (e.g. parallel SCSI), don't create the prop.
		 */
		wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
		(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
		if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
		    *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
		    != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(target-port)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}
		/* port-wwn is the 8-byte big-endian form of the address. */
		be_sas_wwn = BE_64(sas_wwn);
		if (sas_wwn && ndi_prop_update_byte_array(
		    DDI_DEV_T_NONE, *lun_dip, "port-wwn",
		    (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(port-wwn)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		} else if ((sas_wwn == 0) && (ndi_prop_update_int(
		    DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
		    DDI_PROP_SUCCESS)) {
			/*
			 * Direct attached SATA device without DeviceName
			 */
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(sata-phy)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}
		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to"
			    "create property for SAS target %d lun %d"
			    " (SAS_PROP)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}
		/*
		 * NOTE(review): ndi_prop_update_string returns DDI_PROP_*
		 * codes; comparing against DDI_SUCCESS presumably works
		 * because both success values are 0 -- confirm.
		 */
		if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
			mptsas_log(mpt, CE_WARN, "mptsas unable "
			    "to create guid property for target %d "
			    "lun %d", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/*
		 * if this is a SAS controller, and the target is a SATA
		 * drive, set the 'pm-capable' property for sd and if on
		 * an OPL platform, also check if this is an ATAPI
		 * device.
		 */
		instance = ddi_get_instance(mpt->m_dip);
		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
			NDBG2(("mptsas%d: creating pm-capable property, "
			    "target %d", instance, target));

			if ((ndi_prop_update_int(DDI_DEV_T_NONE,
			    *lun_dip, "pm-capable", 1)) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas "
				    "failed to create pm-capable "
				    "property, target %d", target);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}

		}

		if (inq->inq_dtype == 0) {
			/*
			 * add 'obp-path' properties for devinfo
			 */
			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			if (sas_wwn) {
				(void) snprintf(component, MAXPATHLEN,
				    "disk@w%s,%x", wwn_str, lun);
			} else {
				(void) snprintf(component, MAXPATHLEN,
				    "disk@p%x,%x", phy, lun);
			}
			if (ddi_pathname_obp_set(*lun_dip, component)
			    != DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
				    "unable to set obp-path for SAS "
				    "object %s", component);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}
		}

phys_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
		}
		if (ndi_rtn == NDI_SUCCESS) {
			mutex_enter(&mpt->m_mutex);
			if (mptsas_set_led_status(mpt, ptgt, 0) !=
			    DDI_SUCCESS) {
				NDBG14(("mptsas: clear LED for tgt %x "
				    "failed", ptgt->m_slot_num));
			}
			mutex_exit(&mpt->m_mutex);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas driver unable to online "
			    "target %d lun %d", target, lun));
			ndi_prop_remove_all(*lun_dip);
			(void) ndi_devi_free(*lun_dip);
			*lun_dip = NULL;
		}
	}

	scsi_hba_nodename_compatible_free(nodename, compatible);

	if (wwn_str != NULL) {
		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
	}
	if (component != NULL) {
		kmem_free(component, MAXPATHLEN);
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
13920 
13921 static int
13922 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
13923 {
13924 	mptsas_t	*mpt = DIP2MPT(pdip);
13925 	struct smp_device smp_sd;
13926 
13927 	/* XXX An HBA driver should not be allocating an smp_device. */
13928 	bzero(&smp_sd, sizeof (struct smp_device));
13929 	smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
13930 	bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
13931 
13932 	if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
13933 		return (NDI_FAILURE);
13934 	return (NDI_SUCCESS);
13935 }
13936 
13937 static int
13938 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
13939 {
13940 	mptsas_t	*mpt = DIP2MPT(pdip);
13941 	mptsas_smp_t	*psmp = NULL;
13942 	int		rval;
13943 	int		phymask;
13944 
13945 	/*
13946 	 * Get the physical port associated to the iport
13947 	 * PHYMASK TODO
13948 	 */
13949 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13950 	    "phymask", 0);
13951 	/*
13952 	 * Find the smp node in hash table with specified sas address and
13953 	 * physical port
13954 	 */
13955 	psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
13956 	if (psmp == NULL) {
13957 		return (DDI_FAILURE);
13958 	}
13959 
13960 	rval = mptsas_online_smp(pdip, psmp, smp_dip);
13961 
13962 	return (rval);
13963 }
13964 
13965 static int
13966 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
13967     dev_info_t **smp_dip)
13968 {
13969 	char		wwn_str[MPTSAS_WWN_STRLEN];
13970 	int		ndi_rtn = NDI_FAILURE;
13971 	mptsas_t	*mpt = DIP2MPT(pdip);
13972 
13973 	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
13974 
13975 	/*
13976 	 * Probe smp device, prevent the node of removed device from being
13977 	 * configured succesfully
13978 	 */
13979 	if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
13980 		return (DDI_FAILURE);
13981 	}
13982 
13983 	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
13984 		return (DDI_SUCCESS);
13985 	}
13986 
13987 	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
13988 
13989 	/*
13990 	 * if lun alloc success, set props
13991 	 */
13992 	if (ndi_rtn == NDI_SUCCESS) {
13993 		/*
13994 		 * Set the flavor of the child to be SMP flavored
13995 		 */
13996 		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
13997 
13998 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
13999 		    *smp_dip, SMP_WWN, wwn_str) !=
14000 		    DDI_PROP_SUCCESS) {
14001 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14002 			    "property for smp device %s (sas_wwn)",
14003 			    wwn_str);
14004 			ndi_rtn = NDI_FAILURE;
14005 			goto smp_create_done;
14006 		}
14007 
14008 		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14009 		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
14010 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
14011 			    "create property for SMP %s (SMP_PROP) ",
14012 			    wwn_str);
14013 			ndi_rtn = NDI_FAILURE;
14014 			goto smp_create_done;
14015 		}
14016 
14017 smp_create_done:
14018 		/*
14019 		 * If props were setup ok, online the lun
14020 		 */
14021 		if (ndi_rtn == NDI_SUCCESS) {
14022 			/*
14023 			 * Try to online the new node
14024 			 */
14025 			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
14026 		}
14027 
14028 		/*
14029 		 * If success set rtn flag, else unwire alloc'd lun
14030 		 */
14031 		if (ndi_rtn != NDI_SUCCESS) {
14032 			NDBG12(("mptsas unable to online "
14033 			    "SMP target %s", wwn_str));
14034 			ndi_prop_remove_all(*smp_dip);
14035 			(void) ndi_devi_free(*smp_dip);
14036 		}
14037 	}
14038 
14039 	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14040 }
14041 
/*
 * smp transport routine
 *
 * Translate an smp_pkt from the SMP framework into an MPI2 SMP passthrough
 * request, issue it synchronously via mptsas_do_passthru(), and map the
 * IOC/SAS completion status onto smp_pkt_reason errno values.
 * Returns DDI_SUCCESS, or DDI_FAILURE with smp_pkt_reason set.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t			wwn;
	Mpi2SmpPassthroughRequest_t	req;
	Mpi2SmpPassthroughReply_t	rep;
	uint32_t			direction = 0;
	mptsas_t			*mpt;
	int				ret;
	uint64_t			tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	/* 0xff presumably directs the IOC to route by SASAddress rather */
	/* than a specific physical port -- confirm against the MPI2 spec. */
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a 16-bit field; reject oversized requests. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * Lengths handed to the IOC exclude 4 bytes -- presumably the SMP
	 * CRC, handled by the controller itself; confirm against the MPI2
	 * specification.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	/* Direction flags tell the passthru layer which buffers to map. */
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map well-known IOC status codes to errno values. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	/* IOC was happy; also check the SAS-level status of the frame. */
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
14126 
14127 static void
14128 mptsas_idle_pm(void *arg)
14129 {
14130 	mptsas_t	*mpt = arg;
14131 
14132 	(void) pm_idle_component(mpt->m_dip, 0);
14133 	mutex_enter(&mpt->m_mutex);
14134 	mpt->m_pm_timeid = 0;
14135 	mutex_exit(&mpt->m_mutex);
14136 }
14137 
14138 /*
14139  * If we didn't get a match, we need to get sas page0 for each device, and
14140  * untill we get a match. If failed, return NULL
14141  */
14142 static mptsas_target_t *
14143 mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
14144 {
14145 	int		i, j = 0;
14146 	int		rval = 0;
14147 	uint16_t	cur_handle;
14148 	uint32_t	page_address;
14149 	mptsas_target_t	*ptgt = NULL;
14150 
14151 	/*
14152 	 * PHY named device must be direct attached and attaches to
14153 	 * narrow port, if the iport is not parent of the device which
14154 	 * we are looking for.
14155 	 */
14156 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14157 		if ((1 << i) & phymask)
14158 			j++;
14159 	}
14160 
14161 	if (j > 1)
14162 		return (NULL);
14163 
14164 	/*
14165 	 * Must be a narrow port and single device attached to the narrow port
14166 	 * So the physical port num of device  which is equal to the iport's
14167 	 * port num is the device what we are looking for.
14168 	 */
14169 
14170 	if (mpt->m_phy_info[phy].phy_mask != phymask)
14171 		return (NULL);
14172 
14173 	mutex_enter(&mpt->m_mutex);
14174 
14175 	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
14176 	    MPTSAS_HASH_FIRST);
14177 	while (ptgt != NULL) {
14178 			if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
14179 			mutex_exit(&mpt->m_mutex);
14180 			return (ptgt);
14181 		}
14182 
14183 		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
14184 		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
14185 	}
14186 
14187 	if (mpt->m_done_traverse_dev) {
14188 		mutex_exit(&mpt->m_mutex);
14189 		return (NULL);
14190 	}
14191 
14192 	/* If didn't get a match, come here */
14193 	cur_handle = mpt->m_dev_handle;
14194 	for (; ; ) {
14195 		ptgt = NULL;
14196 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
14197 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
14198 		rval = mptsas_get_target_device_info(mpt, page_address,
14199 		    &cur_handle, &ptgt);
14200 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
14201 		    (rval == DEV_INFO_FAIL_ALLOC)) {
14202 			break;
14203 		}
14204 		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
14205 		    (rval == DEV_INFO_PHYS_DISK)) {
14206 			continue;
14207 		}
14208 		mpt->m_dev_handle = cur_handle;
14209 
14210 		if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
14211 			break;
14212 		}
14213 	}
14214 
14215 	mutex_exit(&mpt->m_mutex);
14216 	return (ptgt);
14217 }
14218 
14219 /*
14220  * The ptgt->m_sas_wwn contains the wwid for each disk.
14221  * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
14222  * If we didn't get a match, we need to get sas page0 for each device, and
14223  * untill we get a match
14224  * If failed, return NULL
14225  */
14226 static mptsas_target_t *
14227 mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
14228 {
14229 	int		rval = 0;
14230 	uint16_t	cur_handle;
14231 	uint32_t	page_address;
14232 	mptsas_target_t	*tmp_tgt = NULL;
14233 
14234 	mutex_enter(&mpt->m_mutex);
14235 	tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
14236 	    &mpt->m_active->m_tgttbl, wwid, phymask);
14237 	if (tmp_tgt != NULL) {
14238 		mutex_exit(&mpt->m_mutex);
14239 		return (tmp_tgt);
14240 	}
14241 
14242 	if (phymask == 0) {
14243 		/*
14244 		 * It's IR volume
14245 		 */
14246 		rval = mptsas_get_raid_info(mpt);
14247 		if (rval) {
14248 			tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
14249 			    &mpt->m_active->m_tgttbl, wwid, phymask);
14250 		}
14251 		mutex_exit(&mpt->m_mutex);
14252 		return (tmp_tgt);
14253 	}
14254 
14255 	if (mpt->m_done_traverse_dev) {
14256 		mutex_exit(&mpt->m_mutex);
14257 		return (NULL);
14258 	}
14259 
14260 	/* If didn't get a match, come here */
14261 	cur_handle = mpt->m_dev_handle;
14262 	for (; ; ) {
14263 		tmp_tgt = NULL;
14264 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
14265 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
14266 		rval = mptsas_get_target_device_info(mpt, page_address,
14267 		    &cur_handle, &tmp_tgt);
14268 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
14269 		    (rval == DEV_INFO_FAIL_ALLOC)) {
14270 			tmp_tgt = NULL;
14271 			break;
14272 		}
14273 		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
14274 		    (rval == DEV_INFO_PHYS_DISK)) {
14275 			continue;
14276 		}
14277 		mpt->m_dev_handle = cur_handle;
14278 		if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
14279 		    (tmp_tgt->m_phymask == phymask)) {
14280 			break;
14281 		}
14282 	}
14283 
14284 	mutex_exit(&mpt->m_mutex);
14285 	return (tmp_tgt);
14286 }
14287 
/*
 * Resolve a WWID (on the iport identified by phymask) to its SMP
 * (expander) node.  The cached SMP hash is tried first; if the expander
 * table traversal is incomplete, firmware expander pages are walked and
 * cached until a match is found.  Returns the node, or NULL on failure.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int		rval = 0;
	uint16_t	cur_handle;
	uint32_t	page_address;
	mptsas_smp_t	smp_node, *psmp = NULL;

	/* Fast path: the expander is already in the hash. */
	mutex_enter(&mpt->m_mutex);
	psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
	    wwid, phymask);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* If the whole expander table is already cached, there is no match. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	/* Walk firmware expander pages from the last cached handle onward. */
	cur_handle = mpt->m_smp_devhdl;
	for (; ; ) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			break;
		}
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		/*
		 * Cache the entry.  NOTE(review): mptsas_smp_alloc() uses a
		 * KM_SLEEP allocation while m_mutex is held -- presumably
		 * this path never runs in interrupt context; verify.
		 */
		psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
		    (psmp->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
14332 
14333 /* helper functions using hash */
14334 
14335 /*
14336  * Can't have duplicate entries for same devhdl,
14337  * if there are invalid entries, the devhdl should be set to 0xffff
14338  */
14339 static void *
14340 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
14341 {
14342 	mptsas_hash_data_t *data;
14343 
14344 	data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
14345 	while (data != NULL) {
14346 		if (data->devhdl == devhdl) {
14347 			break;
14348 		}
14349 		data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
14350 	}
14351 	return (data);
14352 }
14353 
14354 mptsas_target_t *
14355 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
14356     uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
14357 {
14358 	mptsas_target_t *tmp_tgt = NULL;
14359 
14360 	tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
14361 	if (tmp_tgt != NULL) {
14362 		NDBG20(("Hash item already exist"));
14363 		tmp_tgt->m_deviceinfo = devinfo;
14364 		tmp_tgt->m_devhdl = devhdl;
14365 		return (tmp_tgt);
14366 	}
14367 	tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
14368 	if (tmp_tgt == NULL) {
14369 		cmn_err(CE_WARN, "Fatal, allocated tgt failed");
14370 		return (NULL);
14371 	}
14372 	tmp_tgt->m_devhdl = devhdl;
14373 	tmp_tgt->m_sas_wwn = wwid;
14374 	tmp_tgt->m_deviceinfo = devinfo;
14375 	tmp_tgt->m_phymask = phymask;
14376 	tmp_tgt->m_phynum = phynum;
14377 	/* Initialized the tgt structure */
14378 	tmp_tgt->m_qfull_retries = QFULL_RETRIES;
14379 	tmp_tgt->m_qfull_retry_interval =
14380 	    drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
14381 	tmp_tgt->m_t_throttle = MAX_THROTTLE;
14382 
14383 	mptsas_hash_add(hashtab, tmp_tgt);
14384 
14385 	return (tmp_tgt);
14386 }
14387 
14388 static void
14389 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
14390     mptsas_phymask_t phymask)
14391 {
14392 	mptsas_target_t *tmp_tgt;
14393 	tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
14394 	if (tmp_tgt == NULL) {
14395 		cmn_err(CE_WARN, "Tgt not found, nothing to free");
14396 	} else {
14397 		kmem_free(tmp_tgt, sizeof (struct mptsas_target));
14398 	}
14399 }
14400 
14401 /*
14402  * Return the entry in the hash table
14403  */
14404 static mptsas_smp_t *
14405 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
14406 {
14407 	uint64_t key1 = data->m_sasaddr;
14408 	mptsas_phymask_t key2 = data->m_phymask;
14409 	mptsas_smp_t *ret_data;
14410 
14411 	ret_data = mptsas_hash_search(hashtab, key1, key2);
14412 	if (ret_data != NULL) {
14413 		bcopy(data, ret_data, sizeof (mptsas_smp_t));
14414 		return (ret_data);
14415 	}
14416 
14417 	ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
14418 	bcopy(data, ret_data, sizeof (mptsas_smp_t));
14419 	mptsas_hash_add(hashtab, ret_data);
14420 	return (ret_data);
14421 }
14422 
14423 static void
14424 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
14425     mptsas_phymask_t phymask)
14426 {
14427 	mptsas_smp_t *tmp_smp;
14428 	tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
14429 	if (tmp_smp == NULL) {
14430 		cmn_err(CE_WARN, "Smp element not found, nothing to free");
14431 	} else {
14432 		kmem_free(tmp_smp, sizeof (struct mptsas_smp));
14433 	}
14434 }
14435 
14436 /*
14437  * Hash operation functions
14438  * key1 is the sas_wwn, key2 is the phymask
14439  */
/*
 * Reset a hash table to the empty state: clear every bucket head and the
 * traversal cursor.  A NULL table is ignored.
 */
static void
mptsas_hash_init(mptsas_hash_table_t *hashtab)
{
	if (hashtab == NULL) {
		return;
	}
	/*
	 * NOTE(review): head looks like an array of mptsas_hash_node_t
	 * *pointers*; if so this bzero length should be
	 * sizeof (mptsas_hash_node_t *) * MPTSAS_HASH_ARRAY_SIZE and the
	 * current length overruns the array -- verify against the struct
	 * definition in the header before changing.
	 */
	bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
	    MPTSAS_HASH_ARRAY_SIZE);
	hashtab->cur = NULL;
	hashtab->line = 0;
}
14451 
14452 static void
14453 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
14454 {
14455 	uint16_t line = 0;
14456 	mptsas_hash_node_t *cur = NULL, *last = NULL;
14457 
14458 	if (hashtab == NULL) {
14459 		return;
14460 	}
14461 	for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
14462 		cur = hashtab->head[line];
14463 		while (cur != NULL) {
14464 			last = cur;
14465 			cur = cur->next;
14466 			kmem_free(last->data, datalen);
14467 			kmem_free(last, sizeof (mptsas_hash_node_t));
14468 		}
14469 	}
14470 }
14471 
14472 /*
14473  * You must guarantee the element doesn't exist in the hash table
14474  * before you call mptsas_hash_add()
14475  */
14476 static void
14477 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
14478 {
14479 	uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
14480 	mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
14481 	mptsas_hash_node_t **head = NULL;
14482 	mptsas_hash_node_t *node = NULL;
14483 
14484 	if (hashtab == NULL) {
14485 		return;
14486 	}
14487 	ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
14488 	node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
14489 	node->data = data;
14490 
14491 	head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
14492 	if (*head == NULL) {
14493 		*head = node;
14494 	} else {
14495 		node->next = *head;
14496 		*head = node;
14497 	}
14498 }
14499 
14500 static void *
14501 mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
14502     mptsas_phymask_t key2)
14503 {
14504 	mptsas_hash_node_t **head = NULL;
14505 	mptsas_hash_node_t *last = NULL, *cur = NULL;
14506 	mptsas_hash_data_t *data;
14507 	if (hashtab == NULL) {
14508 		return (NULL);
14509 	}
14510 	head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
14511 	cur = *head;
14512 	while (cur != NULL) {
14513 		data = cur->data;
14514 		if ((data->key1 == key1) && (data->key2 == key2)) {
14515 			if (last == NULL) {
14516 				(*head) = cur->next;
14517 			} else {
14518 				last->next = cur->next;
14519 			}
14520 			kmem_free(cur, sizeof (mptsas_hash_node_t));
14521 			return (data);
14522 		} else {
14523 			last = cur;
14524 			cur = cur->next;
14525 		}
14526 	}
14527 	return (NULL);
14528 }
14529 
14530 static void *
14531 mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
14532     mptsas_phymask_t key2)
14533 {
14534 	mptsas_hash_node_t *cur = NULL;
14535 	mptsas_hash_data_t *data;
14536 	if (hashtab == NULL) {
14537 		return (NULL);
14538 	}
14539 	cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
14540 	while (cur != NULL) {
14541 		data = cur->data;
14542 		if ((data->key1 == key1) && (data->key2 == key2)) {
14543 			return (data);
14544 		} else {
14545 			cur = cur->next;
14546 		}
14547 	}
14548 	return (NULL);
14549 }
14550 
/*
 * Stateful whole-table iterator.  Call with MPTSAS_HASH_FIRST to reset
 * the cursor and get the first entry, then MPTSAS_HASH_NEXT for each
 * subsequent one.  The cursor lives in the table itself (cur/line), so
 * only one traversal per table may be in flight at a time, and callers
 * must hold whatever lock protects the table.  Returns an entry's
 * payload, or NULL when the traversal is exhausted (or the table is
 * NULL, or NEXT is used with no cursor).
 */
static void *
mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
{
	mptsas_hash_node_t *this = NULL;

	if (hashtab == NULL) {
		return (NULL);
	}

	if (pos == MPTSAS_HASH_FIRST) {
		/* Reset the cursor to the head of bucket 0. */
		hashtab->line = 0;
		hashtab->cur = NULL;
		this = hashtab->head[0];
	} else {
		if (hashtab->cur == NULL) {
			return (NULL);
		} else {
			/* Continue from the node after the cursor. */
			this = hashtab->cur->next;
		}
	}

	/* Skip over empty buckets until a node or the end is reached. */
	while (this == NULL) {
		hashtab->line++;
		if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
			/* the traverse reaches the end */
			hashtab->cur = NULL;
			return (NULL);
		} else {
			this = hashtab->head[hashtab->line];
		}
	}
	hashtab->cur = this;
	return (this->data);
}
14585 
14586 /*
14587  * Functions for SGPIO LED support
14588  */
/*
 * Map a dev_t to its devinfo node and read that node's "phymask"
 * property into *phymask (0 if the property is absent).  Returns the
 * dip, or NULL if the dev_t cannot be resolved.
 *
 * NOTE(review): the hold taken by e_ddi_hold_devi_by_dev() is released
 * before returning, so the returned dip is unheld -- presumably the
 * callers guarantee the node stays attached; verify.
 */
static dev_info_t *
mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
{
	dev_info_t	*dip;
	int		prop;
	dip = e_ddi_hold_devi_by_dev(dev, 0);
	if (dip == NULL)
		return (dip);
	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "phymask", 0);
	*phymask = (mptsas_phymask_t)prop;
	ddi_release_devi(dip);
	return (dip);
}
14603 static mptsas_target_t *
14604 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
14605 {
14606 	uint8_t			phynum;
14607 	uint64_t		wwn;
14608 	int			lun;
14609 	mptsas_target_t		*ptgt = NULL;
14610 
14611 	if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
14612 		return (NULL);
14613 	}
14614 	if (addr[0] == 'w') {
14615 		ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
14616 	} else {
14617 		ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
14618 	}
14619 	return (ptgt);
14620 }
14621 
#ifdef MPTSAS_GET_LED
/*
 * Read the enclosure slot status (LED state) for the target into
 * *slotstatus via a SEP READ_STATUS request.  Compiled only when
 * MPTSAS_GET_LED is defined.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_get_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *slotstatus)
{
	return (mptsas_send_sep(mpt, ptgt, slotstatus,
	    MPI2_SEP_REQ_ACTION_READ_STATUS));
}
#endif
/*
 * Write the enclosure slot status (LED state) for the target via a SEP
 * WRITE_STATUS request.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt, uint32_t slotstatus)
{
	NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
	    slotstatus, ptgt->m_slot_num));
	return (mptsas_send_sep(mpt, ptgt, &slotstatus,
	    MPI2_SEP_REQ_ACTION_WRITE_STATUS));
}
14639 /*
14640  *  send sep request, use enclosure/slot addressing
14641  */
14642 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
14643     uint32_t *status, uint8_t act)
14644 {
14645 	Mpi2SepRequest_t	req;
14646 	Mpi2SepReply_t		rep;
14647 	int			ret;
14648 
14649 	ASSERT(mutex_owned(&mpt->m_mutex));
14650 
14651 	bzero(&req, sizeof (req));
14652 	bzero(&rep, sizeof (rep));
14653 
14654 	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
14655 	req.Action = act;
14656 	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
14657 	req.EnclosureHandle = LE_16(ptgt->m_enclosure);
14658 	req.Slot = LE_16(ptgt->m_slot_num);
14659 	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
14660 		req.SlotStatus = LE_32(*status);
14661 	}
14662 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
14663 	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
14664 	if (ret != 0) {
14665 		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
14666 		    "Processor Request message error %d", ret);
14667 		return (DDI_FAILURE);
14668 	}
14669 	/* do passthrough success, check the ioc status */
14670 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
14671 		if ((LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) ==
14672 		    MPI2_IOCSTATUS_INVALID_FIELD) {
14673 			mptsas_log(mpt, CE_NOTE, "send sep act %x: Not "
14674 			    "supported action, loginfo %x", act,
14675 			    LE_32(rep.IOCLogInfo));
14676 			return (DDI_FAILURE);
14677 		}
14678 		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
14679 		    "status:%x", act, LE_16(rep.IOCStatus));
14680 		return (DDI_FAILURE);
14681 	}
14682 	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
14683 		*status = LE_32(rep.SlotStatus);
14684 	}
14685 
14686 	return (DDI_SUCCESS);
14687 }
14688