xref: /freebsd-src/sys/dev/mpi3mr/mpi3mr.c (revision b411372b7d17ae7e5d6c944732d41b979bde2ac4)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/module.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/malloc.h>
52 #include <sys/sysctl.h>
53 #include <sys/uio.h>
54 
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57 #include <sys/rman.h>
58 
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcivar.h>
61 #include <dev/pci/pci_private.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #include <cam/scsi/smp_all.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include "mpi3mr.h"
76 #include "mpi3mr_cam.h"
77 #include "mpi3mr_app.h"
78 
79 static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
80 	U64 reply_dma);
81 static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc);
82 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
83 	struct mpi3mr_drvr_cmd *drvrcmd);
84 static void mpi3mr_flush_io(struct mpi3mr_softc *sc);
85 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
86 	U32 reset_reason);
87 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
88 	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc);
89 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
90 	struct mpi3mr_drvr_cmd *drv_cmd);
91 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
92 	struct mpi3mr_drvr_cmd *drv_cmd);
93 static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
94 	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx);
95 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc);
96 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc);
97 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code);
98 
99 void
100 mpi3mr_hexdump(void *buf, int sz, int format)
101 {
102         int i;
103         U32 *buf_loc = (U32 *)buf;
104 
105         for (i = 0; i < (sz / sizeof(U32)); i++) {
106                 if ((i % format) == 0) {
107                         if (i != 0)
108                                 printf("\n");
109                         printf("%08x: ", (i * 4));
110                 }
111                 printf("%08x ", buf_loc[i]);
112         }
113         printf("\n");
114 }
115 
void
init_completion(struct completion *completion)
{
	/* Arm the completion: mark not-yet-done before any waiter polls it. */
	completion->done = 0;
}
121 
122 void
123 complete(struct completion *completion)
124 {
125 	completion->done = 1;
126 	wakeup(complete);
127 }
128 
129 void wait_for_completion_timeout(struct completion *completion,
130 	    U32 timeout)
131 {
132 	U32 count = timeout * 1000;
133 
134 	while ((completion->done == 0) && count) {
135                 DELAY(1000);
136 		count--;
137 	}
138 
139 	if (completion->done == 0) {
140 		printf("%s: Command is timedout\n", __func__);
141 		completion->done = 1;
142 	}
143 }
/*
 * Wait for a task-management completion, sleeping on sc->tm_chan.
 *
 * NOTE(review): 'count' is timeout * 1000, but each msleep() call can block
 * for up to one second (1 * hz ticks), so the worst-case wait appears to be
 * 1000x the intended timeout unless wakeup(&sc->tm_chan) fires promptly --
 * confirm the intended units against the TM issue/wakeup path.
 */
void wait_for_completion_timeout_tm(struct completion *completion,
	    U32 timeout, struct mpi3mr_softc *sc)
{
	U32 count = timeout * 1000;

	while ((completion->done == 0) && count) {
		/* Sleep until a TM wakeup arrives or one second elapses. */
		msleep(&sc->tm_chan, &sc->mpi3mr_mtx, PRIBIO,
		       "TM command", 1 * hz);
		count--;
	}

	if (completion->done == 0) {
		printf("%s: Command is timedout\n", __func__);
		/* Force-complete so the caller does not wait again. */
		completion->done = 1;
	}
}
160 
161 
162 void
163 poll_for_command_completion(struct mpi3mr_softc *sc,
164        struct mpi3mr_drvr_cmd *cmd, U16 wait)
165 {
166 	int wait_time = wait * 1000;
167        while (wait_time) {
168                mpi3mr_complete_admin_cmd(sc);
169                if (cmd->state & MPI3MR_CMD_COMPLETE)
170                        break;
171 	       DELAY(1000);
172                wait_time--;
173        }
174 }
175 
176 /**
177  * mpi3mr_trigger_snapdump - triggers firmware snapdump
178  * @sc: Adapter instance reference
179  * @reason_code: reason code for the fault.
180  *
181  * This routine will trigger the snapdump and wait for it to
182  * complete or timeout before it returns.
 * This will be called during initialization time faults/resets/timeouts
184  * before soft reset invocation.
185  *
186  * Return:  None.
187  */
188 static void
189 mpi3mr_trigger_snapdump(struct mpi3mr_softc *sc, U32 reason_code)
190 {
191 	U32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
192 
193 	mpi3mr_dprint(sc, MPI3MR_INFO, "snapdump triggered: reason code: %s\n",
194 	    mpi3mr_reset_rc_name(reason_code));
195 
196 	mpi3mr_set_diagsave(sc);
197 	mpi3mr_issue_reset(sc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
198 			   reason_code);
199 
200 	do {
201 		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
202 		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
203 			break;
204                 DELAY(100 * 1000);
205 	} while (--timeout);
206 
207 	return;
208 }
209 
210 /**
211  * mpi3mr_check_rh_fault_ioc - check reset history and fault
212  * controller
213  * @sc: Adapter instance reference
 * @reason_code: reason code for the fault.
 *
 * This routine will fault the controller with
 * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeout as in those cases
220  * immediate soft reset invocation is not required.
221  *
222  * Return:  None.
223  */
224 static void mpi3mr_check_rh_fault_ioc(struct mpi3mr_softc *sc, U32 reason_code)
225 {
226 	U32 ioc_status;
227 
228 	if (sc->unrecoverable) {
229 		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is unrecoverable\n");
230 		return;
231 	}
232 
233 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
234 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
235 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
236 		mpi3mr_print_fault_info(sc);
237 		return;
238 	}
239 
240 	mpi3mr_trigger_snapdump(sc, reason_code);
241 
242 	return;
243 }
244 
245 static void * mpi3mr_get_reply_virt_addr(struct mpi3mr_softc *sc,
246     bus_addr_t phys_addr)
247 {
248 	if (!phys_addr)
249 		return NULL;
250 	if ((phys_addr < sc->reply_buf_dma_min_address) ||
251 	    (phys_addr > sc->reply_buf_dma_max_address))
252 		return NULL;
253 
254 	return sc->reply_buf + (phys_addr - sc->reply_buf_phys);
255 }
256 
257 static void * mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_softc *sc,
258     bus_addr_t phys_addr)
259 {
260 	if (!phys_addr)
261 		return NULL;
262 	return sc->sense_buf + (phys_addr - sc->sense_buf_phys);
263 }
264 
265 static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
266     U64 reply_dma)
267 {
268 	U32 old_idx = 0;
269 
270 	mtx_lock_spin(&sc->reply_free_q_lock);
271 	old_idx  =  sc->reply_free_q_host_index;
272 	sc->reply_free_q_host_index = ((sc->reply_free_q_host_index ==
273 	    (sc->reply_free_q_sz - 1)) ? 0 :
274 	    (sc->reply_free_q_host_index + 1));
275 	sc->reply_free_q[old_idx] = reply_dma;
276 	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
277 		sc->reply_free_q_host_index);
278 	mtx_unlock_spin(&sc->reply_free_q_lock);
279 }
280 
281 static void mpi3mr_repost_sense_buf(struct mpi3mr_softc *sc,
282     U64 sense_buf_phys)
283 {
284 	U32 old_idx = 0;
285 
286 	mtx_lock_spin(&sc->sense_buf_q_lock);
287 	old_idx  =  sc->sense_buf_q_host_index;
288 	sc->sense_buf_q_host_index = ((sc->sense_buf_q_host_index ==
289 	    (sc->sense_buf_q_sz - 1)) ? 0 :
290 	    (sc->sense_buf_q_host_index + 1));
291 	sc->sense_buf_q[old_idx] = sense_buf_phys;
292 	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
293 		sc->sense_buf_q_host_index);
294 	mtx_unlock_spin(&sc->sense_buf_q_lock);
295 
296 }
297 
298 void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_softc *sc,
299 	struct mpi3mr_throttle_group_info *tg, U8 divert_value)
300 {
301 	struct mpi3mr_target *target;
302 
303 	mtx_lock_spin(&sc->target_lock);
304 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
305 		if (target->throttle_group == tg)
306 			target->io_divert = divert_value;
307 	}
308 	mtx_unlock_spin(&sc->target_lock);
309 }
310 
311 /**
312  * mpi3mr_submit_admin_cmd - Submit request to admin queue
 * @sc: Adapter instance reference
314  * @admin_req: MPI3 request
315  * @admin_req_sz: Request size
316  *
317  * Post the MPI3 request into admin request queue and
318  * inform the controller, if the queue is full return
319  * appropriate error.
320  *
321  * Return: 0 on success, non-zero on failure.
322  */
323 int mpi3mr_submit_admin_cmd(struct mpi3mr_softc *sc, void *admin_req,
324     U16 admin_req_sz)
325 {
326 	U16 areq_pi = 0, areq_ci = 0, max_entries = 0;
327 	int retval = 0;
328 	U8 *areq_entry;
329 
330 	mtx_lock_spin(&sc->admin_req_lock);
331 	areq_pi = sc->admin_req_pi;
332 	areq_ci = sc->admin_req_ci;
333 	max_entries = sc->num_admin_reqs;
334 
335 	if (sc->unrecoverable)
336 		return -EFAULT;
337 
338 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
339 					   (areq_pi == (max_entries - 1)))) {
340 		printf(IOCNAME "AdminReqQ full condition detected\n",
341 		    sc->name);
342 		retval = -EAGAIN;
343 		goto out;
344 	}
345 	areq_entry = (U8 *)sc->admin_req + (areq_pi *
346 						     MPI3MR_AREQ_FRAME_SZ);
347 	memset(areq_entry, 0, MPI3MR_AREQ_FRAME_SZ);
348 	memcpy(areq_entry, (U8 *)admin_req, admin_req_sz);
349 
350 	if (++areq_pi == max_entries)
351 		areq_pi = 0;
352 	sc->admin_req_pi = areq_pi;
353 
354 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
355 
356 out:
357 	mtx_unlock_spin(&sc->admin_req_lock);
358 	return retval;
359 }
360 
361 /**
362  * mpi3mr_check_req_qfull - Check request queue is full or not
363  * @op_req_q: Operational reply queue info
364  *
365  * Return: true when queue full, false otherwise.
366  */
367 static inline bool
368 mpi3mr_check_req_qfull(struct mpi3mr_op_req_queue *op_req_q)
369 {
370 	U16 pi, ci, max_entries;
371 	bool is_qfull = false;
372 
373 	pi = op_req_q->pi;
374 	ci = op_req_q->ci;
375 	max_entries = op_req_q->num_reqs;
376 
377 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
378 		is_qfull = true;
379 
380 	return is_qfull;
381 }
382 
383 /**
384  * mpi3mr_submit_io - Post IO command to firmware
385  * @sc:		      Adapter instance reference
386  * @op_req_q:	      Operational Request queue reference
387  * @req:	      MPT request data
388  *
389  * This function submits IO command to firmware.
390  *
 * Return: 0 on success, non-zero on failure.
392  */
393 int mpi3mr_submit_io(struct mpi3mr_softc *sc,
394     struct mpi3mr_op_req_queue *op_req_q, U8 *req)
395 {
396 	U16 pi, max_entries;
397 	int retval = 0;
398 	U8 *req_entry;
399 	U16 req_sz = sc->facts.op_req_sz;
400 	struct mpi3mr_irq_context *irq_ctx;
401 
402 	mtx_lock_spin(&op_req_q->q_lock);
403 
404 	pi = op_req_q->pi;
405 	max_entries = op_req_q->num_reqs;
406 	if (mpi3mr_check_req_qfull(op_req_q)) {
407 		irq_ctx = &sc->irq_ctx[op_req_q->reply_qid - 1];
408 		mpi3mr_complete_io_cmd(sc, irq_ctx);
409 
410 		if (mpi3mr_check_req_qfull(op_req_q)) {
411 			printf(IOCNAME "OpReqQ full condition detected\n",
412 				sc->name);
413 			retval = -EBUSY;
414 			goto out;
415 		}
416 	}
417 
418 	req_entry = (U8 *)op_req_q->q_base + (pi * req_sz);
419 	memset(req_entry, 0, req_sz);
420 	memcpy(req_entry, req, MPI3MR_AREQ_FRAME_SZ);
421 	if (++pi == max_entries)
422 		pi = 0;
423 	op_req_q->pi = pi;
424 
425 	mpi3mr_atomic_inc(&sc->op_reply_q[op_req_q->reply_qid - 1].pend_ios);
426 
427 	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(op_req_q->qid), op_req_q->pi);
428 	if (sc->mpi3mr_debug & MPI3MR_TRACE) {
429 		device_printf(sc->mpi3mr_dev, "IO submission: QID:%d PI:0x%x\n", op_req_q->qid, op_req_q->pi);
430 		mpi3mr_hexdump(req_entry, MPI3MR_AREQ_FRAME_SZ, 8);
431 	}
432 
433 out:
434 	mtx_unlock_spin(&op_req_q->q_lock);
435 	return retval;
436 }
437 
438 inline void
439 mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
440 		     bus_addr_t dma_addr)
441 {
442 	Mpi3SGESimple_t *sgel = paddr;
443 
444 	sgel->Flags = flags;
445 	sgel->Length = (length);
446 	sgel->Address = (U64)dma_addr;
447 }
448 
449 void mpi3mr_build_zero_len_sge(void *paddr)
450 {
451 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
452 		MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_LIST);
453 
454 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
455 
456 }
457 
void mpi3mr_enable_interrupts(struct mpi3mr_softc *sc)
{
	/*
	 * Software gate only -- presumably consulted by the interrupt
	 * handlers before processing; no hardware register is touched here.
	 */
	sc->intr_enabled = 1;
}
462 
void mpi3mr_disable_interrupts(struct mpi3mr_softc *sc)
{
	/* Clear the software interrupt gate; no hardware register touched. */
	sc->intr_enabled = 0;
}
467 
468 void
469 mpi3mr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
470 {
471 	bus_addr_t *addr;
472 
473 	addr = arg;
474 	*addr = segs[0].ds_addr;
475 }
476 
477 static int mpi3mr_delete_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
478 {
479 	Mpi3DeleteReplyQueueRequest_t delq_req;
480 	struct mpi3mr_op_reply_queue *op_reply_q;
481 	int retval = 0;
482 
483 
484 	op_reply_q = &sc->op_reply_q[qid - 1];
485 
486 	if (!op_reply_q->qid)
487 	{
488 		retval = -1;
489 		printf(IOCNAME "Issue DelRepQ: called with invalid Reply QID\n",
490 		    sc->name);
491 		goto out;
492 	}
493 
494 	memset(&delq_req, 0, sizeof(delq_req));
495 
496 	mtx_lock(&sc->init_cmds.completion.lock);
497 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
498 		retval = -1;
499 		printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
500 		    sc->name);
501 		mtx_unlock(&sc->init_cmds.completion.lock);
502 		goto out;
503 	}
504 
505 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
506 		retval = -1;
507 		printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
508 		    sc->name);
509 		goto out;
510 	}
511 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
512 	sc->init_cmds.is_waiting = 1;
513 	sc->init_cmds.callback = NULL;
514 	delq_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
515 	delq_req.Function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
516 	delq_req.QueueID = qid;
517 
518 	init_completion(&sc->init_cmds.completion);
519 	retval = mpi3mr_submit_admin_cmd(sc, &delq_req, sizeof(delq_req));
520 	if (retval) {
521 		printf(IOCNAME "Issue DelRepQ: Admin Post failed\n",
522 		    sc->name);
523 		goto out_unlock;
524 	}
525 	wait_for_completion_timeout(&sc->init_cmds.completion,
526 	    (MPI3MR_INTADMCMD_TIMEOUT));
527 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
528 		printf(IOCNAME "Issue DelRepQ: command timed out\n",
529 		    sc->name);
530 		mpi3mr_check_rh_fault_ioc(sc,
531 		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
532 		sc->unrecoverable = 1;
533 
534 		retval = -1;
535 		goto out_unlock;
536 	}
537 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
538 	     != MPI3_IOCSTATUS_SUCCESS ) {
539 		printf(IOCNAME "Issue DelRepQ: Failed IOCStatus(0x%04x) "
540 		    " Loginfo(0x%08x) \n" , sc->name,
541 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
542 		    sc->init_cmds.ioc_loginfo);
543 		retval = -1;
544 		goto out_unlock;
545 	}
546 	sc->irq_ctx[qid - 1].op_reply_q = NULL;
547 
548 	if (sc->op_reply_q[qid - 1].q_base_phys != 0)
549 		bus_dmamap_unload(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base_dmamap);
550 	if (sc->op_reply_q[qid - 1].q_base != NULL)
551 		bus_dmamem_free(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base, sc->op_reply_q[qid - 1].q_base_dmamap);
552 	if (sc->op_reply_q[qid - 1].q_base_tag != NULL)
553 		bus_dma_tag_destroy(sc->op_reply_q[qid - 1].q_base_tag);
554 
555 	sc->op_reply_q[qid - 1].q_base = NULL;
556 	sc->op_reply_q[qid - 1].qid = 0;
557 out_unlock:
558 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
559 	mtx_unlock(&sc->init_cmds.completion.lock);
560 out:
561 	return retval;
562 }
563 
564 /**
565  * mpi3mr_create_op_reply_queue - create operational reply queue
566  * @sc: Adapter instance reference
567  * @qid: operational reply queue id
568  *
 * Create operational reply queue by issuing MPI request
570  * through admin queue.
571  *
572  * Return:  0 on success, non-zero on failure.
573  */
static int mpi3mr_create_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
{
	Mpi3CreateReplyQueueRequest_t create_req;
	struct mpi3mr_op_reply_queue *op_reply_q;
	int retval = 0;
	char q_lock_name[32];

	op_reply_q = &sc->op_reply_q[qid - 1];

	if (op_reply_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateRepQ: called for duplicate qid %d\n",
		    sc->name, op_reply_q->qid);
		return retval;
	}

	/* A0 revision silicon supports only a reduced reply queue depth. */
	op_reply_q->ci = 0;
	if (pci_get_revid(sc->mpi3mr_dev) == SAS4116_CHIP_REV_A0)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD_A0;
	else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;

	op_reply_q->qsz = op_reply_q->num_replies * sc->op_reply_sz;
	/* Expected phase bit starts at 1 for a freshly created queue. */
	op_reply_q->ephase = 1;

	/* Queue DMA memory is allocated once and reused across resets. */
	if (!op_reply_q->q_base) {
		snprintf(q_lock_name, 32, "Reply Queue Lock[%d]", qid);
		mtx_init(&op_reply_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_reply_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_reply_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_reply_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Operational reply DMA tag\n");
			return (ENOMEM);
		}

		/*
		 * NOTE(review): returning ENOMEM here leaks q_base_tag and
		 * leaves q_lock initialized; a retry with q_base still NULL
		 * would mtx_init() the same mutex again -- verify the
		 * caller's teardown path handles this.
		 */
		if (bus_dmamem_alloc(op_reply_q->q_base_tag, (void **)&op_reply_q->q_base,
		    BUS_DMA_NOWAIT, &op_reply_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
			return (ENOMEM);
		}
		bzero(op_reply_q->q_base, op_reply_q->qsz);
		bus_dmamap_load(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap, op_reply_q->q_base, op_reply_q->qsz,
		    mpi3mr_memaddr_cb, &op_reply_q->q_base_phys, 0);
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Reply queue ID: %d phys addr= %#016jx virt_addr: %pa size= %d\n",
		    qid, (uintmax_t)op_reply_q->q_base_phys, op_reply_q->q_base, op_reply_q->qsz);

		if (!op_reply_q->q_base)
		{
			retval = -1;
			printf(IOCNAME "CreateRepQ: memory alloc failed for qid %d\n",
			    sc->name, qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* Serialize use of the shared init_cmds command tracker. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateRepQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.QueueID = qid;
	create_req.Flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
	create_req.MSIxIndex = sc->irq_ctx[qid - 1].msix_index;
	create_req.BaseAddress = (U64)op_reply_q->q_base_phys;
	create_req.Size = op_reply_q->num_replies;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateRepQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	  	MPI3MR_INTADMCMD_TIMEOUT);
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateRepQ: command timed out\n",
		    sc->name);
		/* Capture a snapdump and give up on the controller. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateRepQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Success: publish the queue to the matching interrupt context. */
	op_reply_q->qid = qid;
	sc->irq_ctx[qid - 1].op_reply_q = op_reply_q;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure, release the queue DMA resources. */
	if (retval) {
		if (op_reply_q->q_base_phys != 0)
			bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base != NULL)
			bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_reply_q->q_base_tag);
		op_reply_q->q_base = NULL;
		op_reply_q->qid = 0;
	}

	return retval;
}
711 
712 /**
713  * mpi3mr_create_op_req_queue - create operational request queue
714  * @sc: Adapter instance reference
715  * @req_qid: operational request queue id
716  * @reply_qid: Reply queue ID
717  *
 * Create operational request queue by issuing MPI request
719  * through admin queue.
720  *
721  * Return:  0 on success, non-zero on failure.
722  */
static int mpi3mr_create_op_req_queue(struct mpi3mr_softc *sc, U16 req_qid, U8 reply_qid)
{
	Mpi3CreateRequestQueueRequest_t create_req;
	struct mpi3mr_op_req_queue *op_req_q;
	int retval = 0;
	char q_lock_name[32];

	op_req_q = &sc->op_req_q[req_qid - 1];

	if (op_req_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateReqQ: called for duplicate qid %d\n",
		    sc->name, op_req_q->qid);
		return retval;
	}

	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->num_reqs = MPI3MR_OP_REQ_Q_QD;
	op_req_q->qsz = op_req_q->num_reqs * sc->facts.op_req_sz;
	/* Completions for this request queue arrive on 'reply_qid'. */
	op_req_q->reply_qid = reply_qid;

	/* Queue DMA memory is allocated once and reused across resets. */
	if (!op_req_q->q_base) {
		snprintf(q_lock_name, 32, "Request Queue Lock[%d]", req_qid);
		mtx_init(&op_req_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_req_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_req_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_req_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
			return (ENOMEM);
		}

		/*
		 * NOTE(review): returning ENOMEM here leaks q_base_tag and
		 * leaves q_lock initialized; a retry with q_base still NULL
		 * would mtx_init() the same mutex again -- verify the
		 * caller's teardown path handles this.
		 */
		if (bus_dmamem_alloc(op_req_q->q_base_tag, (void **)&op_req_q->q_base,
		    BUS_DMA_NOWAIT, &op_req_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
			return (ENOMEM);
		}

		bzero(op_req_q->q_base, op_req_q->qsz);

		bus_dmamap_load(op_req_q->q_base_tag, op_req_q->q_base_dmamap, op_req_q->q_base, op_req_q->qsz,
		    mpi3mr_memaddr_cb, &op_req_q->q_base_phys, 0);

		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Request QID: %d phys addr= %#016jx virt addr= %pa size= %d associated Reply QID: %d\n",
		    req_qid, (uintmax_t)op_req_q->q_base_phys, op_req_q->q_base, op_req_q->qsz, reply_qid);

		if (!op_req_q->q_base) {
			retval = -1;
			printf(IOCNAME "CreateReqQ: memory alloc failed for qid %d\n",
			    sc->name, req_qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* Serialize use of the shared init_cmds command tracker. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateReqQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.QueueID = req_qid;
	create_req.Flags = 0;
	create_req.ReplyQueueID = reply_qid;
	create_req.BaseAddress = (U64)op_req_q->q_base_phys;
	create_req.Size = op_req_q->num_reqs;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateReqQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));

	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateReqQ: command timed out\n",
		    sc->name);
		/* Capture a snapdump and give up on the controller. */
		mpi3mr_check_rh_fault_ioc(sc,
			MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateReqQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Success: the queue is now live. */
	op_req_q->qid = req_qid;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure, release the queue DMA resources. */
	if (retval) {
		if (op_req_q->q_base_phys != 0)
			bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
		if (op_req_q->q_base != NULL)
			bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
		if (op_req_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_req_q->q_base_tag);
		op_req_q->q_base = NULL;
		op_req_q->qid = 0;
	}
	return retval;
}
858 
859 /**
860  * mpi3mr_create_op_queues - create operational queues
861  * @sc: Adapter instance reference
862  *
 * Create operational queues (request queues and reply queues).
864  * Return:  0 on success, non-zero on failure.
865  */
866 static int mpi3mr_create_op_queues(struct mpi3mr_softc *sc)
867 {
868 	int retval = 0;
869 	U16 num_queues = 0, i = 0, qid;
870 
871 	num_queues = min(sc->facts.max_op_reply_q,
872 	    sc->facts.max_op_req_q);
873 	num_queues = min(num_queues, sc->msix_count);
874 
875 	/*
876 	 * During reset set the num_queues to the number of queues
877 	 * that was set before the reset.
878 	 */
879 	if (sc->num_queues)
880 		num_queues = sc->num_queues;
881 
882 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Trying to create %d Operational Q pairs\n",
883 	    num_queues);
884 
885 	if (!sc->op_req_q) {
886 		sc->op_req_q = malloc(sizeof(struct mpi3mr_op_req_queue) *
887 		    num_queues, M_MPI3MR, M_NOWAIT | M_ZERO);
888 
889 		if (!sc->op_req_q) {
890 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Request queue info\n");
891 			retval = -1;
892 			goto out_failed;
893 		}
894 	}
895 
896 	if (!sc->op_reply_q) {
897 		sc->op_reply_q = malloc(sizeof(struct mpi3mr_op_reply_queue) * num_queues,
898 			M_MPI3MR, M_NOWAIT | M_ZERO);
899 
900 		if (!sc->op_reply_q) {
901 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Reply queue info\n");
902 			retval = -1;
903 			goto out_failed;
904 		}
905 	}
906 
907 	sc->num_hosttag_op_req_q = (sc->max_host_ios + 1) / num_queues;
908 
909 	/*Operational Request and reply queue ID starts with 1*/
910 	for (i = 0; i < num_queues; i++) {
911 		qid = i + 1;
912 		if (mpi3mr_create_op_reply_queue(sc, qid)) {
913 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Reply queue %d\n",
914 			    qid);
915 			break;
916 		}
917 		if (mpi3mr_create_op_req_queue(sc, qid,
918 		    sc->op_reply_q[qid - 1].qid)) {
919 			mpi3mr_delete_op_reply_queue(sc, qid);
920 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Request queue %d\n",
921 			    qid);
922 			break;
923 		}
924 
925 	}
926 
927 	/* Not even one queue is created successfully*/
928         if (i == 0) {
929                 retval = -1;
930                 goto out_failed;
931         }
932 
933 	if (!sc->num_queues) {
934 		sc->num_queues = i;
935 	} else {
936 		if (num_queues != i) {
937 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Number of queues (%d) post reset are not same as"
938 					"queues allocated (%d) during driver init\n", i, num_queues);
939 			goto out_failed;
940 		}
941 	}
942 
943 	mpi3mr_dprint(sc, MPI3MR_INFO, "Successfully created %d Operational Queue pairs\n",
944 	    sc->num_queues);
945 	mpi3mr_dprint(sc, MPI3MR_INFO, "Request Queue QD: %d Reply queue QD: %d\n",
946 	    sc->op_req_q[0].num_reqs, sc->op_reply_q[0].num_replies);
947 
948 	return retval;
949 out_failed:
950 	if (sc->op_req_q) {
951 		free(sc->op_req_q, M_MPI3MR);
952 		sc->op_req_q = NULL;
953 	}
954 	if (sc->op_reply_q) {
955 		free(sc->op_reply_q, M_MPI3MR);
956 		sc->op_reply_q = NULL;
957 	}
958 	return retval;
959 }
960 
961 /**
962  * mpi3mr_setup_admin_qpair - Setup admin queue pairs
963  * @sc: Adapter instance reference
964  *
965  * Allocation and setup admin queues(request queues and reply queues).
966  * Return:  0 on success, non-zero on failure.
967  */
968 static int mpi3mr_setup_admin_qpair(struct mpi3mr_softc *sc)
969 {
970 	int retval = 0;
971 	U32 num_adm_entries = 0;
972 
973 	sc->admin_req_q_sz = MPI3MR_AREQQ_SIZE;
974 	sc->num_admin_reqs = sc->admin_req_q_sz / MPI3MR_AREQ_FRAME_SZ;
975 	sc->admin_req_ci = sc->admin_req_pi = 0;
976 
977 	sc->admin_reply_q_sz = MPI3MR_AREPQ_SIZE;
978 	sc->num_admin_replies = sc->admin_reply_q_sz/ MPI3MR_AREP_FRAME_SZ;
979 	sc->admin_reply_ci = 0;
980 	sc->admin_reply_ephase = 1;
981 
982 	if (!sc->admin_req) {
983 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
984 					4, 0,			/* algnmnt, boundary */
985 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
986 					BUS_SPACE_MAXADDR,	/* highaddr */
987 					NULL, NULL,		/* filter, filterarg */
988 					sc->admin_req_q_sz,	/* maxsize */
989 					1,			/* nsegments */
990 					sc->admin_req_q_sz,	/* maxsegsize */
991 					0,			/* flags */
992 					NULL, NULL,		/* lockfunc, lockarg */
993 					&sc->admin_req_tag)) {
994 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
995 			return (ENOMEM);
996 		}
997 
998 		if (bus_dmamem_alloc(sc->admin_req_tag, (void **)&sc->admin_req,
999 		    BUS_DMA_NOWAIT, &sc->admin_req_dmamap)) {
1000 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
1001 			return (ENOMEM);
1002 		}
1003 		bzero(sc->admin_req, sc->admin_req_q_sz);
1004 		bus_dmamap_load(sc->admin_req_tag, sc->admin_req_dmamap, sc->admin_req, sc->admin_req_q_sz,
1005 		    mpi3mr_memaddr_cb, &sc->admin_req_phys, 0);
1006 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Req queue phys addr= %#016jx size= %d\n",
1007 		    (uintmax_t)sc->admin_req_phys, sc->admin_req_q_sz);
1008 
1009 		if (!sc->admin_req)
1010 		{
1011 			retval = -1;
1012 			printf(IOCNAME "Memory alloc for AdminReqQ: failed\n",
1013 			    sc->name);
1014 			goto out_failed;
1015 		}
1016 	}
1017 
1018 	if (!sc->admin_reply) {
1019 		mtx_init(&sc->admin_reply_lock, "Admin Reply Queue Lock", NULL, MTX_SPIN);
1020 
1021 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1022 					4, 0,			/* algnmnt, boundary */
1023 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1024 					BUS_SPACE_MAXADDR,	/* highaddr */
1025 					NULL, NULL,		/* filter, filterarg */
1026 					sc->admin_reply_q_sz,	/* maxsize */
1027 					1,			/* nsegments */
1028 					sc->admin_reply_q_sz,	/* maxsegsize */
1029 					0,			/* flags */
1030 					NULL, NULL,		/* lockfunc, lockarg */
1031 					&sc->admin_reply_tag)) {
1032 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply DMA tag\n");
1033 			return (ENOMEM);
1034 		}
1035 
1036 		if (bus_dmamem_alloc(sc->admin_reply_tag, (void **)&sc->admin_reply,
1037 		    BUS_DMA_NOWAIT, &sc->admin_reply_dmamap)) {
1038 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
1039 			return (ENOMEM);
1040 		}
1041 		bzero(sc->admin_reply, sc->admin_reply_q_sz);
1042 		bus_dmamap_load(sc->admin_reply_tag, sc->admin_reply_dmamap, sc->admin_reply, sc->admin_reply_q_sz,
1043 		    mpi3mr_memaddr_cb, &sc->admin_reply_phys, 0);
1044 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Reply queue phys addr= %#016jx size= %d\n",
1045 		    (uintmax_t)sc->admin_reply_phys, sc->admin_req_q_sz);
1046 
1047 
1048 		if (!sc->admin_reply)
1049 		{
1050 			retval = -1;
1051 			printf(IOCNAME "Memory alloc for AdminRepQ: failed\n",
1052 			    sc->name);
1053 			goto out_failed;
1054 		}
1055 	}
1056 
1057 	num_adm_entries = (sc->num_admin_replies << 16) |
1058 				(sc->num_admin_reqs);
1059 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET, num_adm_entries);
1060 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET, sc->admin_req_phys);
1061 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET, sc->admin_reply_phys);
1062 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
1063 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, sc->admin_reply_ci);
1064 
1065 	return retval;
1066 
1067 out_failed:
1068 	/* Free Admin reply*/
1069 	if (sc->admin_reply_phys)
1070 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
1071 
1072 	if (sc->admin_reply != NULL)
1073 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
1074 		    sc->admin_reply_dmamap);
1075 
1076 	if (sc->admin_reply_tag != NULL)
1077 		bus_dma_tag_destroy(sc->admin_reply_tag);
1078 
1079 	/* Free Admin request*/
1080 	if (sc->admin_req_phys)
1081 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
1082 
1083 	if (sc->admin_req != NULL)
1084 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
1085 		    sc->admin_req_dmamap);
1086 
1087 	if (sc->admin_req_tag != NULL)
1088 		bus_dma_tag_destroy(sc->admin_req_tag);
1089 
1090 	return retval;
1091 }
1092 
1093 /**
1094  * mpi3mr_print_fault_info - Display fault information
1095  * @sc: Adapter instance reference
1096  *
1097  * Display the controller fault information if there is a
1098  * controller fault.
1099  *
1100  * Return: Nothing.
1101  */
1102 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc)
1103 {
1104 	U32 ioc_status, code, code1, code2, code3;
1105 
1106 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1107 
1108 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1109 		code = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
1110 			MPI3_SYSIF_FAULT_CODE_MASK;
1111 		code1 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO0_OFFSET);
1112 		code2 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO1_OFFSET);
1113 		code3 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO2_OFFSET);
1114 		printf(IOCNAME "fault codes 0x%04x:0x%04x:0x%04x:0x%04x\n",
1115 		    sc->name, code, code1, code2, code3);
1116 	}
1117 }
1118 
1119 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_softc *sc)
1120 {
1121 	U32 ioc_status, ioc_control;
1122 	U8 ready, enabled;
1123 
1124 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1125 	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1126 
1127 	if(sc->unrecoverable)
1128 		return MRIOC_STATE_UNRECOVERABLE;
1129 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1130 		return MRIOC_STATE_FAULT;
1131 
1132 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1133 	enabled = (ioc_control & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1134 
1135 	if (ready && enabled)
1136 		return MRIOC_STATE_READY;
1137 	if ((!ready) && (!enabled))
1138 		return MRIOC_STATE_RESET;
1139 	if ((!ready) && (enabled))
1140 		return MRIOC_STATE_BECOMING_READY;
1141 
1142 	return MRIOC_STATE_RESET_REQUESTED;
1143 }
1144 
1145 static inline void mpi3mr_clear_resethistory(struct mpi3mr_softc *sc)
1146 {
1147         U32 ioc_status;
1148 
1149 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1150         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1151 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
1152 
1153 }
1154 
1155 /**
1156  * mpi3mr_mur_ioc - Message unit Reset handler
1157  * @sc: Adapter instance reference
1158  * @reset_reason: Reset reason code
1159  *
1160  * Issue Message unit Reset to the controller and wait for it to
1161  * be complete.
1162  *
1163  * Return: 0 on success, -1 on failure.
1164  */
1165 static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U32 reset_reason)
1166 {
1167         U32 ioc_config, timeout, ioc_status;
1168         int retval = -1;
1169 
1170         mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Message Unit Reset(MUR)\n");
1171         if (sc->unrecoverable) {
1172                 mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC is unrecoverable MUR not issued\n");
1173                 return retval;
1174         }
1175         mpi3mr_clear_resethistory(sc);
1176 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
1177 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1178         ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1179 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1180 
1181         timeout = MPI3MR_MUR_TIMEOUT * 10;
1182         do {
1183 		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1184                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1185                         mpi3mr_clear_resethistory(sc);
1186 			ioc_config =
1187 				mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1188                         if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1189                             (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1190                             (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
1191                                 retval = 0;
1192                                 break;
1193                         }
1194                 }
1195                 DELAY(100 * 1000);
1196         } while (--timeout);
1197 
1198 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1199 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1200 
1201         mpi3mr_dprint(sc, MPI3MR_INFO, "IOC Status/Config after %s MUR is (0x%x)/(0x%x)\n",
1202                 !retval ? "successful":"failed", ioc_status, ioc_config);
1203         return retval;
1204 }
1205 
1206 /**
1207  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1208  * @sc: Adapter instance reference
1209  *
1210  * Set Enable IOC bit in IOC configuration register and wait for
1211  * the controller to become ready.
1212  *
1213  * Return: 0 on success, appropriate error on failure.
1214  */
1215 static int mpi3mr_bring_ioc_ready(struct mpi3mr_softc *sc)
1216 {
1217         U32 ioc_config, timeout;
1218         enum mpi3mr_iocstate current_state;
1219 
1220 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1221         ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1222 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1223 
1224         timeout = sc->ready_timeout * 10;
1225         do {
1226                 current_state = mpi3mr_get_iocstate(sc);
1227                 if (current_state == MRIOC_STATE_READY)
1228                         return 0;
1229                 DELAY(100 * 1000);
1230         } while (--timeout);
1231 
1232         return -1;
1233 }
1234 
/* IOC state to human-readable name mapper table (terminated by the
 * MRIOC_STATE_COUNT sentinel); consumed by mpi3mr_iocstate_name(). */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_COUNT, "Count" },
};
1246 
1247 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
1248 {
1249 	int i;
1250 	char *name = NULL;
1251 
1252 	for (i = 0; i < MRIOC_STATE_COUNT; i++) {
1253 		if (mrioc_states[i].value == mrioc_state){
1254 			name = mrioc_states[i].name;
1255 			break;
1256 		}
1257 	}
1258 	return name;
1259 }
1260 
1261 /* Reset reason to name mapper structure*/
1262 static const struct {
1263 	enum mpi3mr_reset_reason value;
1264 	char *name;
1265 } mpi3mr_reset_reason_codes[] = {
1266 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
1267 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
1268 	{ MPI3MR_RESET_FROM_IOCTL, "application" },
1269 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
1270 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
1271 	{ MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
1272 	{ MPI3MR_RESET_FROM_SCSIIO_TIMEOUT, "SCSIIO timeout" },
1273 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
1274 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
1275 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
1276 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
1277 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
1278 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
1279 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
1280 	{
1281 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
1282 		"create request queue timeout"
1283 	},
1284 	{
1285 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
1286 		"create reply queue timeout"
1287 	},
1288 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
1289 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
1290 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
1291 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
1292 	{
1293 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
1294 		"component image activation timeout"
1295 	},
1296 	{
1297 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
1298 		"get package version timeout"
1299 	},
1300 	{
1301 		MPI3MR_RESET_FROM_PELABORT_TIMEOUT,
1302 		"persistent event log abort timeout"
1303 	},
1304 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
1305 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
1306 	{
1307 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
1308 		"diagnostic buffer post timeout"
1309 	},
1310 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronus reset" },
1311 	{ MPI3MR_RESET_REASON_COUNT, "Reset reason count" },
1312 };
1313 
1314 /**
1315  * mpi3mr_reset_rc_name - get reset reason code name
1316  * @reason_code: reset reason code value
1317  *
1318  * Map reset reason to an NULL terminated ASCII string
1319  *
1320  * Return: Name corresponding to reset reason value or NULL.
1321  */
1322 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1323 {
1324 	int i;
1325 	char *name = NULL;
1326 
1327 	for (i = 0; i < MPI3MR_RESET_REASON_COUNT; i++) {
1328 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1329 			name = mpi3mr_reset_reason_codes[i].name;
1330 			break;
1331 		}
1332 	}
1333 	return name;
1334 }
1335 
1336 #define MAX_RESET_TYPE 3
1337 /* Reset type to name mapper structure*/
static const struct {
	U16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
	{ MAX_RESET_TYPE, "count"}	/* sentinel; bounds the lookup loop */
};
1346 
1347 /**
1348  * mpi3mr_reset_type_name - get reset type name
1349  * @reset_type: reset type value
1350  *
1351  * Map reset type to an NULL terminated ASCII string
1352  *
1353  * Return: Name corresponding to reset type value or NULL.
1354  */
1355 static const char *mpi3mr_reset_type_name(U16 reset_type)
1356 {
1357 	int i;
1358 	char *name = NULL;
1359 
1360 	for (i = 0; i < MAX_RESET_TYPE; i++) {
1361 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1362 			name = mpi3mr_reset_types[i].name;
1363 			break;
1364 		}
1365 	}
1366 	return name;
1367 }
1368 
1369 /**
1370  * mpi3mr_soft_reset_success - Check softreset is success or not
1371  * @ioc_status: IOC status register value
1372  * @ioc_config: IOC config register value
1373  *
1374  * Check whether the soft reset is successful or not based on
1375  * IOC status and IOC config register values.
1376  *
1377  * Return: True when the soft reset is success, false otherwise.
1378  */
1379 static inline bool
1380 mpi3mr_soft_reset_success(U32 ioc_status, U32 ioc_config)
1381 {
1382 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1383 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1384 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1385 		return true;
1386 	return false;
1387 }
1388 
1389 /**
1390  * mpi3mr_diagfault_success - Check diag fault is success or not
1391  * @sc: Adapter reference
1392  * @ioc_status: IOC status register value
1393  *
1394  * Check whether the controller hit diag reset fault code.
1395  *
1396  * Return: True when there is diag fault, false otherwise.
1397  */
1398 static inline bool mpi3mr_diagfault_success(struct mpi3mr_softc *sc,
1399 	U32 ioc_status)
1400 {
1401 	U32 fault;
1402 
1403 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1404 		return false;
1405 	fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) & MPI3_SYSIF_FAULT_CODE_MASK;
1406 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
1407 		return true;
1408 	return false;
1409 }
1410 
1411 /**
1412  * mpi3mr_issue_iocfacts - Send IOC Facts
1413  * @sc: Adapter instance reference
1414  * @facts_data: Cached IOC facts data
1415  *
1416  * Issue IOC Facts MPI request through admin queue and wait for
1417  * the completion of it or time out.
1418  *
1419  * Return: 0 on success, non-zero on failures.
1420  */
1421 static int mpi3mr_issue_iocfacts(struct mpi3mr_softc *sc,
1422     Mpi3IOCFactsData_t *facts_data)
1423 {
1424 	Mpi3IOCFactsRequest_t iocfacts_req;
1425 	bus_dma_tag_t data_tag = NULL;
1426 	bus_dmamap_t data_map = NULL;
1427 	bus_addr_t data_phys = 0;
1428 	void *data = NULL;
1429 	U32 data_len = sizeof(*facts_data);
1430 	int retval = 0;
1431 
1432 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
1433                 	MPI3_SGE_FLAGS_DLAS_SYSTEM |
1434 			MPI3_SGE_FLAGS_END_OF_LIST);
1435 
1436 
1437         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1438 				4, 0,			/* algnmnt, boundary */
1439 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1440 				BUS_SPACE_MAXADDR,	/* highaddr */
1441 				NULL, NULL,		/* filter, filterarg */
1442                                 data_len,		/* maxsize */
1443                                 1,			/* nsegments */
1444                                 data_len,		/* maxsegsize */
1445                                 0,			/* flags */
1446                                 NULL, NULL,		/* lockfunc, lockarg */
1447                                 &data_tag)) {
1448 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1449 		return (ENOMEM);
1450         }
1451 
1452         if (bus_dmamem_alloc(data_tag, (void **)&data,
1453 	    BUS_DMA_NOWAIT, &data_map)) {
1454 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
1455 			__func__, __LINE__);
1456 		return (ENOMEM);
1457         }
1458 
1459         bzero(data, data_len);
1460         bus_dmamap_load(data_tag, data_map, data, data_len,
1461 	    mpi3mr_memaddr_cb, &data_phys, 0);
1462 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts data phys addr= %#016jx size= %d\n",
1463 	    __func__, __LINE__, (uintmax_t)data_phys, data_len);
1464 
1465 	if (!data)
1466 	{
1467 		retval = -1;
1468 		printf(IOCNAME "Memory alloc for IOCFactsData: failed\n",
1469 		    sc->name);
1470 		goto out;
1471 	}
1472 
1473 	mtx_lock(&sc->init_cmds.completion.lock);
1474 	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
1475 
1476 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
1477 		retval = -1;
1478 		printf(IOCNAME "Issue IOCFacts: Init command is in use\n",
1479 		    sc->name);
1480 		mtx_unlock(&sc->init_cmds.completion.lock);
1481 		goto out;
1482 	}
1483 
1484 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
1485 	sc->init_cmds.is_waiting = 1;
1486 	sc->init_cmds.callback = NULL;
1487 	iocfacts_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
1488 	iocfacts_req.Function = MPI3_FUNCTION_IOC_FACTS;
1489 
1490 	mpi3mr_add_sg_single(&iocfacts_req.SGL, sgl_flags, data_len,
1491 	    data_phys);
1492 
1493 	init_completion(&sc->init_cmds.completion);
1494 
1495 	retval = mpi3mr_submit_admin_cmd(sc, &iocfacts_req,
1496 	    sizeof(iocfacts_req));
1497 
1498 	if (retval) {
1499 		printf(IOCNAME "Issue IOCFacts: Admin Post failed\n",
1500 		    sc->name);
1501 		goto out_unlock;
1502 	}
1503 
1504 	wait_for_completion_timeout(&sc->init_cmds.completion,
1505 	    (MPI3MR_INTADMCMD_TIMEOUT));
1506 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1507 		printf(IOCNAME "Issue IOCFacts: command timed out\n",
1508 		    sc->name);
1509 		mpi3mr_check_rh_fault_ioc(sc,
1510 		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
1511 		sc->unrecoverable = 1;
1512 		retval = -1;
1513 		goto out_unlock;
1514 	}
1515 
1516 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1517 	     != MPI3_IOCSTATUS_SUCCESS ) {
1518 		printf(IOCNAME "Issue IOCFacts: Failed IOCStatus(0x%04x) "
1519 		    " Loginfo(0x%08x) \n" , sc->name,
1520 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1521 		    sc->init_cmds.ioc_loginfo);
1522 		retval = -1;
1523 		goto out_unlock;
1524 	}
1525 
1526 	memcpy(facts_data, (U8 *)data, data_len);
1527 out_unlock:
1528 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1529 	mtx_unlock(&sc->init_cmds.completion.lock);
1530 
1531 out:
1532 	if (data_phys != 0)
1533 		bus_dmamap_unload(data_tag, data_map);
1534 	if (data != NULL)
1535 		bus_dmamem_free(data_tag, data, data_map);
1536 	if (data_tag != NULL)
1537 		bus_dma_tag_destroy(data_tag);
1538 	return retval;
1539 }
1540 
1541 /**
1542  * mpi3mr_process_factsdata - Process IOC facts data
1543  * @sc: Adapter instance reference
1544  * @facts_data: Cached IOC facts data
1545  *
1546  * Convert IOC facts data into cpu endianness and cache it in
1547  * the driver .
1548  *
1549  * Return: Nothing.
1550  */
static int mpi3mr_process_factsdata(struct mpi3mr_softc *sc,
    Mpi3IOCFactsData_t *facts_data)
{
	int retval = 0;
	U32 ioc_config, req_sz, facts_flags;

	/* Warn if the firmware-reported facts length (in dwords) does not
	 * match the driver's structure size; processing continues anyway. */
	if (le16toh(facts_data->IOCFactsDataLength) !=
	    (sizeof(*facts_data) / 4)) {
		mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data length mismatch "
		    " driver_sz(%ld) firmware_sz(%d) \n",
		    sizeof(*facts_data),
		    facts_data->IOCFactsDataLength);
	}

	/* Operational request entry size is encoded as a power-of-two
	 * exponent in the IOC config register. */
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
        req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
                  MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);

	/* Cross-check register-derived frame size against the facts value */
	if (facts_data->IOCRequestFrameSize != (req_sz/4)) {
		 mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data reqFrameSize mismatch "
		    " hw_size(%d) firmware_sz(%d) \n" , req_sz/4,
		    facts_data->IOCRequestFrameSize);
	}

	memset(&sc->facts, 0, sizeof(sc->facts));

	/* Cache every facts field the driver consumes in sc->facts */
	facts_flags = le32toh(facts_data->Flags);
	sc->facts.op_req_sz = req_sz;
	sc->op_reply_sz = 1 << ((ioc_config &
                                  MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
                                  MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	sc->facts.ioc_num = facts_data->IOCNumber;
        sc->facts.who_init = facts_data->WhoInit;
        sc->facts.max_msix_vectors = facts_data->MaxMSIxVectors;
	sc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	sc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
        sc->facts.protocol_flags = facts_data->ProtocolFlags;
        sc->facts.mpi_version = (facts_data->MPIVersion.Word);
        sc->facts.max_reqs = (facts_data->MaxOutstandingRequests);
        sc->facts.product_id = (facts_data->ProductID);
	sc->facts.reply_sz = (facts_data->ReplyFrameSize) * 4;
        sc->facts.exceptions = (facts_data->IOCExceptions);
        sc->facts.max_perids = (facts_data->MaxPersistentID);
        sc->facts.max_vds = (facts_data->MaxVDs);
        sc->facts.max_hpds = (facts_data->MaxHostPDs);
        sc->facts.max_advhpds = (facts_data->MaxAdvHostPDs);
        sc->facts.max_raidpds = (facts_data->MaxRAIDPDs);
        sc->facts.max_nvme = (facts_data->MaxNVMe);
        sc->facts.max_pcieswitches =
                (facts_data->MaxPCIeSwitches);
        sc->facts.max_sasexpanders =
                (facts_data->MaxSASExpanders);
        sc->facts.max_sasinitiators =
                (facts_data->MaxSASInitiators);
        sc->facts.max_enclosures = (facts_data->MaxEnclosures);
        sc->facts.min_devhandle = (facts_data->MinDevHandle);
        sc->facts.max_devhandle = (facts_data->MaxDevHandle);
	sc->facts.max_op_req_q =
                (facts_data->MaxOperationalRequestQueues);
	sc->facts.max_op_reply_q =
                (facts_data->MaxOperationalReplyQueues);
        sc->facts.ioc_capabilities =
                (facts_data->IOCCapabilities);
        sc->facts.fw_ver.build_num =
                (facts_data->FWVersion.BuildNum);
        sc->facts.fw_ver.cust_id =
                (facts_data->FWVersion.CustomerID);
        sc->facts.fw_ver.ph_minor = facts_data->FWVersion.PhaseMinor;
        sc->facts.fw_ver.ph_major = facts_data->FWVersion.PhaseMajor;
        sc->facts.fw_ver.gen_minor = facts_data->FWVersion.GenMinor;
        sc->facts.fw_ver.gen_major = facts_data->FWVersion.GenMajor;
	/* Clamp the driver's MSI-x usage to what the controller supports */
        sc->max_msix_vectors = min(sc->max_msix_vectors,
            sc->facts.max_msix_vectors);
        sc->facts.sge_mod_mask = facts_data->SGEModifierMask;
        sc->facts.sge_mod_value = facts_data->SGEModifierValue;
        sc->facts.sge_mod_shift = facts_data->SGEModifierShift;
        sc->facts.shutdown_timeout =
                (facts_data->ShutdownTimeout);
	sc->facts.max_dev_per_tg = facts_data->MaxDevicesPerThrottleGroup;
	sc->facts.io_throttle_data_length =
	    facts_data->IOThrottleDataLength;
	sc->facts.max_io_throttle_group =
	    facts_data->MaxIOThrottleGroup;
	sc->facts.io_throttle_low = facts_data->IOThrottleLow;
	sc->facts.io_throttle_high = facts_data->IOThrottleHigh;

	/*Store in 512b block count*/
	if (sc->facts.io_throttle_data_length)
		sc->io_throttle_data_length =
		    (sc->facts.io_throttle_data_length * 2 * 4);
	else
		/* set the length to 1MB + 1K to disable throttle*/
		sc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;

	sc->io_throttle_high = (sc->facts.io_throttle_high * 2 * 1024);
	sc->io_throttle_low = (sc->facts.io_throttle_low * 2 * 1024);

	/* NOTE(review): this print reads sc->facts.max_pds, which is never
	 * assigned in this function (zero from the memset above) — confirm
	 * whether MaxHostPDs/another field was intended. */
	mpi3mr_dprint(sc, MPI3MR_INFO, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),"
            "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
	    sc->facts.ioc_num, sc->facts.max_op_req_q,
	    sc->facts.max_op_reply_q, sc->facts.max_devhandle,
            sc->facts.max_reqs, sc->facts.min_devhandle,
            sc->facts.max_pds, sc->facts.max_msix_vectors,
            sc->facts.max_perids);
        mpi3mr_dprint(sc, MPI3MR_INFO, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x\n",
            sc->facts.sge_mod_mask, sc->facts.sge_mod_value,
            sc->facts.sge_mod_shift);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d), io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
	    sc->facts.max_dev_per_tg, sc->facts.max_io_throttle_group,
	    sc->facts.io_throttle_data_length * 4,
	    sc->facts.io_throttle_high, sc->facts.io_throttle_low);

	/* Reserve internal-command slots out of the firmware request budget */
	sc->max_host_ios = sc->facts.max_reqs -
	    (MPI3MR_INTERNALCMDS_RESVD + 1);

	return retval;
}
1673 
1674 static inline void mpi3mr_setup_reply_free_queues(struct mpi3mr_softc *sc)
1675 {
1676 	int i;
1677 	bus_addr_t phys_addr;
1678 
1679 	/* initialize Reply buffer Queue */
1680 	for (i = 0, phys_addr = sc->reply_buf_phys;
1681 	    i < sc->num_reply_bufs; i++, phys_addr += sc->reply_sz)
1682 		sc->reply_free_q[i] = phys_addr;
1683 	sc->reply_free_q[i] = (0);
1684 
1685 	/* initialize Sense Buffer Queue */
1686 	for (i = 0, phys_addr = sc->sense_buf_phys;
1687 	    i < sc->num_sense_bufs; i++, phys_addr += MPI3MR_SENSEBUF_SZ)
1688 		sc->sense_buf_q[i] = phys_addr;
1689 	sc->sense_buf_q[i] = (0);
1690 
1691 }
1692 
1693 static int mpi3mr_reply_dma_alloc(struct mpi3mr_softc *sc)
1694 {
1695 	U32 sz;
1696 
1697 	sc->num_reply_bufs = sc->facts.max_reqs + MPI3MR_NUM_EVTREPLIES;
1698 	sc->reply_free_q_sz = sc->num_reply_bufs + 1;
1699 	sc->num_sense_bufs = sc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
1700 	sc->sense_buf_q_sz = sc->num_sense_bufs + 1;
1701 
1702 	sz = sc->num_reply_bufs * sc->reply_sz;
1703 
1704 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
1705 				16, 0,			/* algnmnt, boundary */
1706 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1707 				BUS_SPACE_MAXADDR,	/* highaddr */
1708 				NULL, NULL,		/* filter, filterarg */
1709                                 sz,			/* maxsize */
1710                                 1,			/* nsegments */
1711                                 sz,			/* maxsegsize */
1712                                 0,			/* flags */
1713                                 NULL, NULL,		/* lockfunc, lockarg */
1714                                 &sc->reply_buf_tag)) {
1715 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1716 		return (ENOMEM);
1717         }
1718 
1719 	if (bus_dmamem_alloc(sc->reply_buf_tag, (void **)&sc->reply_buf,
1720 	    BUS_DMA_NOWAIT, &sc->reply_buf_dmamap)) {
1721 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1722 			__func__, __LINE__);
1723 		return (ENOMEM);
1724         }
1725 
1726 	bzero(sc->reply_buf, sz);
1727         bus_dmamap_load(sc->reply_buf_tag, sc->reply_buf_dmamap, sc->reply_buf, sz,
1728 	    mpi3mr_memaddr_cb, &sc->reply_buf_phys, 0);
1729 
1730 	sc->reply_buf_dma_min_address = sc->reply_buf_phys;
1731 	sc->reply_buf_dma_max_address = sc->reply_buf_phys + sz;
1732 	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply buf (0x%p): depth(%d), frame_size(%d), "
1733 	    "pool_size(%d kB), reply_buf_dma(0x%llx)\n",
1734 	    sc->reply_buf, sc->num_reply_bufs, sc->reply_sz,
1735 	    (sz / 1024), (unsigned long long)sc->reply_buf_phys);
1736 
1737 	/* reply free queue, 8 byte align */
1738 	sz = sc->reply_free_q_sz * 8;
1739 
1740         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1741 				8, 0,			/* algnmnt, boundary */
1742 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1743 				BUS_SPACE_MAXADDR,	/* highaddr */
1744 				NULL, NULL,		/* filter, filterarg */
1745                                 sz,			/* maxsize */
1746                                 1,			/* nsegments */
1747                                 sz,			/* maxsegsize */
1748                                 0,			/* flags */
1749                                 NULL, NULL,		/* lockfunc, lockarg */
1750                                 &sc->reply_free_q_tag)) {
1751 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply free queue DMA tag\n");
1752 		return (ENOMEM);
1753         }
1754 
1755         if (bus_dmamem_alloc(sc->reply_free_q_tag, (void **)&sc->reply_free_q,
1756 	    BUS_DMA_NOWAIT, &sc->reply_free_q_dmamap)) {
1757 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1758 			__func__, __LINE__);
1759 		return (ENOMEM);
1760         }
1761 
1762 	bzero(sc->reply_free_q, sz);
1763         bus_dmamap_load(sc->reply_free_q_tag, sc->reply_free_q_dmamap, sc->reply_free_q, sz,
1764 	    mpi3mr_memaddr_cb, &sc->reply_free_q_phys, 0);
1765 
1766 	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply_free_q (0x%p): depth(%d), frame_size(%d), "
1767 	    "pool_size(%d kB), reply_free_q_dma(0x%llx)\n",
1768 	    sc->reply_free_q, sc->reply_free_q_sz, 8, (sz / 1024),
1769 	    (unsigned long long)sc->reply_free_q_phys);
1770 
1771 	/* sense buffer pool,  4 byte align */
1772 	sz = sc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
1773 
1774         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1775 				4, 0,			/* algnmnt, boundary */
1776 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1777 				BUS_SPACE_MAXADDR,	/* highaddr */
1778 				NULL, NULL,		/* filter, filterarg */
1779                                 sz,			/* maxsize */
1780                                 1,			/* nsegments */
1781                                 sz,			/* maxsegsize */
1782                                 0,			/* flags */
1783                                 NULL, NULL,		/* lockfunc, lockarg */
1784                                 &sc->sense_buf_tag)) {
1785 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer DMA tag\n");
1786 		return (ENOMEM);
1787         }
1788 
1789 	if (bus_dmamem_alloc(sc->sense_buf_tag, (void **)&sc->sense_buf,
1790 	    BUS_DMA_NOWAIT, &sc->sense_buf_dmamap)) {
1791 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1792 			__func__, __LINE__);
1793 		return (ENOMEM);
1794         }
1795 
1796 	bzero(sc->sense_buf, sz);
1797         bus_dmamap_load(sc->sense_buf_tag, sc->sense_buf_dmamap, sc->sense_buf, sz,
1798 	    mpi3mr_memaddr_cb, &sc->sense_buf_phys, 0);
1799 
1800 	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf (0x%p): depth(%d), frame_size(%d), "
1801 	    "pool_size(%d kB), sense_dma(0x%llx)\n",
1802 	    sc->sense_buf, sc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
1803 	    (sz / 1024), (unsigned long long)sc->sense_buf_phys);
1804 
1805 	/* sense buffer queue, 8 byte align */
1806 	sz = sc->sense_buf_q_sz * 8;
1807 
1808         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1809 				8, 0,			/* algnmnt, boundary */
1810 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1811 				BUS_SPACE_MAXADDR,	/* highaddr */
1812 				NULL, NULL,		/* filter, filterarg */
1813                                 sz,			/* maxsize */
1814                                 1,			/* nsegments */
1815                                 sz,			/* maxsegsize */
1816                                 0,			/* flags */
1817                                 NULL, NULL,		/* lockfunc, lockarg */
1818                                 &sc->sense_buf_q_tag)) {
1819 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer Queue DMA tag\n");
1820 		return (ENOMEM);
1821         }
1822 
1823 	if (bus_dmamem_alloc(sc->sense_buf_q_tag, (void **)&sc->sense_buf_q,
1824 	    BUS_DMA_NOWAIT, &sc->sense_buf_q_dmamap)) {
1825 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1826 			__func__, __LINE__);
1827 		return (ENOMEM);
1828         }
1829 
1830 	bzero(sc->sense_buf_q, sz);
1831         bus_dmamap_load(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap, sc->sense_buf_q, sz,
1832 	    mpi3mr_memaddr_cb, &sc->sense_buf_q_phys, 0);
1833 
1834 	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf_q (0x%p): depth(%d), frame_size(%d), "
1835 	    "pool_size(%d kB), sense_dma(0x%llx)\n",
1836 	    sc->sense_buf_q, sc->sense_buf_q_sz, 8, (sz / 1024),
1837 	    (unsigned long long)sc->sense_buf_q_phys);
1838 
1839 	return 0;
1840 }
1841 
/*
 * mpi3mr_reply_alloc - Allocate reply buffers and tracking bitmaps
 * @sc: Adapter instance reference
 *
 * Allocates per-command reply buffers (init, ioctl, task management,
 * device removal and event ack commands), the device-handle tracking
 * bitmaps, and the reply DMA pools, then posts the reply/sense free
 * queues.  If the init command reply already exists (re-initialization
 * after reset), allocation is skipped and only the free queues are
 * re-posted.
 *
 * Return: 0 on success, -1 on any allocation failure (interrupts and
 * previously allocated memory are torn down on failure).
 */
static int mpi3mr_reply_alloc(struct mpi3mr_softc *sc)
{
	int retval = 0;
	U32 i;

	/* Already allocated on a previous init: just re-post the queues. */
	if (sc->init_cmds.reply)
		goto post_reply_sbuf;

	sc->init_cmds.reply = malloc(sc->reply_sz,
		M_MPI3MR, M_NOWAIT | M_ZERO);

	if (!sc->init_cmds.reply) {
		printf(IOCNAME "Cannot allocate memory for init_cmds.reply\n",
		    sc->name);
		goto out_failed;
	}

	sc->ioctl_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->ioctl_cmds.reply) {
		printf(IOCNAME "Cannot allocate memory for ioctl_cmds.reply\n",
		    sc->name);
		goto out_failed;
	}

	sc->host_tm_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->host_tm_cmds.reply) {
		printf(IOCNAME "Cannot allocate memory for host_tm.reply\n",
		    sc->name);
		goto out_failed;
	}
	/* One reply buffer per concurrent device-removal handshake command. */
	for (i=0; i<MPI3MR_NUM_DEVRMCMD; i++) {
		sc->dev_rmhs_cmds[i].reply = malloc(sc->reply_sz,
		    M_MPI3MR, M_NOWAIT | M_ZERO);
		if (!sc->dev_rmhs_cmds[i].reply) {
			printf(IOCNAME "Cannot allocate memory for"
			    " dev_rmhs_cmd[%d].reply\n",
			    sc->name, i);
			goto out_failed;
		}
	}

	/* One reply buffer per concurrent event-acknowledgment command. */
	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		sc->evtack_cmds[i].reply = malloc(sc->reply_sz,
			M_MPI3MR, M_NOWAIT | M_ZERO);
		if (!sc->evtack_cmds[i].reply)
			goto out_failed;
	}

	/* One bit per firmware device handle, rounded up to whole bytes. */
	sc->dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);

	sc->removepend_bitmap = malloc(sc->dev_handle_bitmap_sz,
	    M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->removepend_bitmap) {
		printf(IOCNAME "Cannot alloc memory for remove pend bitmap\n",
		    sc->name);
		goto out_failed;
	}

	sc->devrem_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_DEVRMCMD, 8);
	sc->devrem_bitmap = malloc(sc->devrem_bitmap_sz,
	    M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->devrem_bitmap) {
		printf(IOCNAME "Cannot alloc memory for dev remove bitmap\n",
		    sc->name);
		goto out_failed;
	}

	sc->evtack_cmds_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_EVTACKCMD, 8);

	sc->evtack_cmds_bitmap = malloc(sc->evtack_cmds_bitmap_sz,
		M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->evtack_cmds_bitmap)
		goto out_failed;

	if (mpi3mr_reply_dma_alloc(sc)) {
		printf(IOCNAME "func:%s line:%d DMA memory allocation failed\n",
		    sc->name, __func__, __LINE__);
		goto out_failed;
	}

post_reply_sbuf:
	mpi3mr_setup_reply_free_queues(sc);
	return retval;
out_failed:
	/* Full teardown: frees everything allocated above, including DMA. */
	mpi3mr_cleanup_interrupts(sc);
	mpi3mr_free_mem(sc);
	retval = -1;
	return retval;
}
1931 
1932 static void
1933 mpi3mr_print_fw_pkg_ver(struct mpi3mr_softc *sc)
1934 {
1935 	int retval = 0;
1936 	void *fw_pkg_ver = NULL;
1937 	bus_dma_tag_t fw_pkg_ver_tag;
1938 	bus_dmamap_t fw_pkg_ver_map;
1939 	bus_addr_t fw_pkg_ver_dma;
1940 	Mpi3CIUploadRequest_t ci_upload;
1941 	Mpi3ComponentImageHeader_t *ci_header;
1942 	U32 fw_pkg_ver_len = sizeof(*ci_header);
1943 	U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
1944 
1945 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
1946 				4, 0,			/* algnmnt, boundary */
1947 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1948 				BUS_SPACE_MAXADDR,	/* highaddr */
1949 				NULL, NULL,		/* filter, filterarg */
1950 				fw_pkg_ver_len,		/* maxsize */
1951 				1,			/* nsegments */
1952 				fw_pkg_ver_len,		/* maxsegsize */
1953 				0,			/* flags */
1954 				NULL, NULL,		/* lockfunc, lockarg */
1955 				&fw_pkg_ver_tag)) {
1956 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate fw package version request DMA tag\n");
1957 		return;
1958 	}
1959 
1960 	if (bus_dmamem_alloc(fw_pkg_ver_tag, (void **)&fw_pkg_ver, BUS_DMA_NOWAIT, &fw_pkg_ver_map)) {
1961 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d fw package version DMA mem alloc failed\n",
1962 			      __func__, __LINE__);
1963 		return;
1964 	}
1965 
1966 	bzero(fw_pkg_ver, fw_pkg_ver_len);
1967 
1968 	bus_dmamap_load(fw_pkg_ver_tag, fw_pkg_ver_map, fw_pkg_ver, fw_pkg_ver_len, mpi3mr_memaddr_cb, &fw_pkg_ver_dma, 0);
1969 
1970 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d fw package version phys addr= %#016jx size= %d\n",
1971 		      __func__, __LINE__, (uintmax_t)fw_pkg_ver_dma, fw_pkg_ver_len);
1972 
1973 	if (!fw_pkg_ver) {
1974 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Memory alloc for fw package version failed\n");
1975 		goto out;
1976 	}
1977 
1978 	memset(&ci_upload, 0, sizeof(ci_upload));
1979 	mtx_lock(&sc->init_cmds.completion.lock);
1980 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
1981 		mpi3mr_dprint(sc, MPI3MR_INFO,"Issue CI Header Upload: command is in use\n");
1982 		mtx_unlock(&sc->init_cmds.completion.lock);
1983 		goto out;
1984 	}
1985 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
1986 	sc->init_cmds.is_waiting = 1;
1987 	sc->init_cmds.callback = NULL;
1988 	ci_upload.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
1989 	ci_upload.Function = MPI3_FUNCTION_CI_UPLOAD;
1990 	ci_upload.MsgFlags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
1991 	ci_upload.ImageOffset = MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET;
1992 	ci_upload.SegmentSize = MPI3_IMAGE_HEADER_SIZE;
1993 
1994 	mpi3mr_add_sg_single(&ci_upload.SGL, sgl_flags, fw_pkg_ver_len,
1995 	    fw_pkg_ver_dma);
1996 
1997 	init_completion(&sc->init_cmds.completion);
1998 	if ((retval = mpi3mr_submit_admin_cmd(sc, &ci_upload, sizeof(ci_upload)))) {
1999 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: Admin Post failed\n");
2000 		goto out_unlock;
2001 	}
2002 	wait_for_completion_timeout(&sc->init_cmds.completion,
2003 		(MPI3MR_INTADMCMD_TIMEOUT));
2004 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2005 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: command timed out\n");
2006 		sc->init_cmds.is_waiting = 0;
2007 		if (!(sc->init_cmds.state & MPI3MR_CMD_RESET))
2008 			mpi3mr_check_rh_fault_ioc(sc,
2009 				MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2010 		goto out_unlock;
2011 	}
2012 	if ((GET_IOC_STATUS(sc->init_cmds.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) {
2013 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2014 			      "Issue CI Header Upload: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n",
2015 			      GET_IOC_STATUS(sc->init_cmds.ioc_status), sc->init_cmds.ioc_loginfo);
2016 		goto out_unlock;
2017 	}
2018 
2019 	ci_header = (Mpi3ComponentImageHeader_t *) fw_pkg_ver;
2020 	mpi3mr_dprint(sc, MPI3MR_XINFO,
2021 		      "Issue CI Header Upload:EnvVariableOffset(0x%x) \
2022 		      HeaderSize(0x%x) Signature1(0x%x)\n",
2023 		      ci_header->EnvironmentVariableOffset,
2024 		      ci_header->HeaderSize,
2025 		      ci_header->Signature1);
2026 	mpi3mr_dprint(sc, MPI3MR_INFO, "FW Package Version: %02d.%02d.%02d.%02d\n",
2027 		      ci_header->ComponentImageVersion.GenMajor,
2028 		      ci_header->ComponentImageVersion.GenMinor,
2029 		      ci_header->ComponentImageVersion.PhaseMajor,
2030 		      ci_header->ComponentImageVersion.PhaseMinor);
2031 out_unlock:
2032 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2033 	mtx_unlock(&sc->init_cmds.completion.lock);
2034 
2035 out:
2036 	if (fw_pkg_ver_dma != 0)
2037 		bus_dmamap_unload(fw_pkg_ver_tag, fw_pkg_ver_map);
2038 	if (fw_pkg_ver)
2039 		bus_dmamem_free(fw_pkg_ver_tag, fw_pkg_ver, fw_pkg_ver_map);
2040 	if (fw_pkg_ver_tag)
2041 		bus_dma_tag_destroy(fw_pkg_ver_tag);
2042 
2043 }
2044 
2045 /**
2046  * mpi3mr_issue_iocinit - Send IOC Init
2047  * @sc: Adapter instance reference
2048  *
2049  * Issue IOC Init MPI request through admin queue and wait for
2050  * the completion of it or time out.
2051  *
2052  * Return: 0 on success, non-zero on failures.
2053  */
2054 static int mpi3mr_issue_iocinit(struct mpi3mr_softc *sc)
2055 {
2056 	Mpi3IOCInitRequest_t iocinit_req;
2057 	Mpi3DriverInfoLayout_t *drvr_info = NULL;
2058 	bus_dma_tag_t drvr_info_tag;
2059 	bus_dmamap_t drvr_info_map;
2060 	bus_addr_t drvr_info_phys;
2061 	U32 drvr_info_len = sizeof(*drvr_info);
2062 	int retval = 0;
2063 	struct timeval now;
2064 	uint64_t time_in_msec;
2065 
2066 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2067 				4, 0,			/* algnmnt, boundary */
2068 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2069 				BUS_SPACE_MAXADDR,	/* highaddr */
2070 				NULL, NULL,		/* filter, filterarg */
2071                                 drvr_info_len,		/* maxsize */
2072                                 1,			/* nsegments */
2073                                 drvr_info_len,		/* maxsegsize */
2074                                 0,			/* flags */
2075                                 NULL, NULL,		/* lockfunc, lockarg */
2076                                 &drvr_info_tag)) {
2077 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
2078 		return (ENOMEM);
2079         }
2080 
2081 	if (bus_dmamem_alloc(drvr_info_tag, (void **)&drvr_info,
2082 	    BUS_DMA_NOWAIT, &drvr_info_map)) {
2083 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
2084 			__func__, __LINE__);
2085 		return (ENOMEM);
2086         }
2087 
2088 	bzero(drvr_info, drvr_info_len);
2089         bus_dmamap_load(drvr_info_tag, drvr_info_map, drvr_info, drvr_info_len,
2090 	    mpi3mr_memaddr_cb, &drvr_info_phys, 0);
2091 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts drvr_info phys addr= %#016jx size= %d\n",
2092 	    __func__, __LINE__, (uintmax_t)drvr_info_phys, drvr_info_len);
2093 
2094 	if (!drvr_info)
2095 	{
2096 		retval = -1;
2097 		printf(IOCNAME "Memory alloc for Driver Info failed\n",
2098 		    sc->name);
2099 		goto out;
2100 	}
2101 	drvr_info->InformationLength = (drvr_info_len);
2102 	strcpy(drvr_info->DriverSignature, "Broadcom");
2103 	strcpy(drvr_info->OsName, "FreeBSD");
2104 	strcpy(drvr_info->OsVersion, fmt_os_ver);
2105 	strcpy(drvr_info->DriverName, MPI3MR_DRIVER_NAME);
2106 	strcpy(drvr_info->DriverVersion, MPI3MR_DRIVER_VERSION);
2107 	strcpy(drvr_info->DriverReleaseDate, MPI3MR_DRIVER_RELDATE);
2108 	drvr_info->DriverCapabilities = 0;
2109 	memcpy((U8 *)&sc->driver_info, (U8 *)drvr_info, sizeof(sc->driver_info));
2110 
2111 	memset(&iocinit_req, 0, sizeof(iocinit_req));
2112 	mtx_lock(&sc->init_cmds.completion.lock);
2113 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2114 		retval = -1;
2115 		printf(IOCNAME "Issue IOCInit: Init command is in use\n",
2116 		    sc->name);
2117 		mtx_unlock(&sc->init_cmds.completion.lock);
2118 		goto out;
2119 	}
2120 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2121 	sc->init_cmds.is_waiting = 1;
2122 	sc->init_cmds.callback = NULL;
2123         iocinit_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
2124         iocinit_req.Function = MPI3_FUNCTION_IOC_INIT;
2125         iocinit_req.MPIVersion.Struct.Dev = MPI3_VERSION_DEV;
2126         iocinit_req.MPIVersion.Struct.Unit = MPI3_VERSION_UNIT;
2127         iocinit_req.MPIVersion.Struct.Major = MPI3_VERSION_MAJOR;
2128         iocinit_req.MPIVersion.Struct.Minor = MPI3_VERSION_MINOR;
2129         iocinit_req.WhoInit = MPI3_WHOINIT_HOST_DRIVER;
2130         iocinit_req.ReplyFreeQueueDepth = sc->reply_free_q_sz;
2131         iocinit_req.ReplyFreeQueueAddress =
2132                 sc->reply_free_q_phys;
2133         iocinit_req.SenseBufferLength = MPI3MR_SENSEBUF_SZ;
2134         iocinit_req.SenseBufferFreeQueueDepth =
2135                 sc->sense_buf_q_sz;
2136         iocinit_req.SenseBufferFreeQueueAddress =
2137                 sc->sense_buf_q_phys;
2138         iocinit_req.DriverInformationAddress = drvr_info_phys;
2139 
2140 	getmicrotime(&now);
2141 	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
2142 	iocinit_req.TimeStamp = htole64(time_in_msec);
2143 
2144 	init_completion(&sc->init_cmds.completion);
2145 	retval = mpi3mr_submit_admin_cmd(sc, &iocinit_req,
2146 	    sizeof(iocinit_req));
2147 
2148 	if (retval) {
2149 		printf(IOCNAME "Issue IOCInit: Admin Post failed\n",
2150 		    sc->name);
2151 		goto out_unlock;
2152 	}
2153 
2154 	wait_for_completion_timeout(&sc->init_cmds.completion,
2155 	    (MPI3MR_INTADMCMD_TIMEOUT));
2156 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2157 		printf(IOCNAME "Issue IOCInit: command timed out\n",
2158 		    sc->name);
2159 		mpi3mr_check_rh_fault_ioc(sc,
2160 		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
2161 		sc->unrecoverable = 1;
2162 		retval = -1;
2163 		goto out_unlock;
2164 	}
2165 
2166 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2167 	     != MPI3_IOCSTATUS_SUCCESS ) {
2168 		printf(IOCNAME "Issue IOCInit: Failed IOCStatus(0x%04x) "
2169 		    " Loginfo(0x%08x) \n" , sc->name,
2170 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2171 		    sc->init_cmds.ioc_loginfo);
2172 		retval = -1;
2173 		goto out_unlock;
2174 	}
2175 
2176 out_unlock:
2177 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2178 	mtx_unlock(&sc->init_cmds.completion.lock);
2179 
2180 out:
2181 	if (drvr_info_phys != 0)
2182 		bus_dmamap_unload(drvr_info_tag, drvr_info_map);
2183 	if (drvr_info != NULL)
2184 		bus_dmamem_free(drvr_info_tag, drvr_info, drvr_info_map);
2185 	if (drvr_info_tag != NULL)
2186 		bus_dma_tag_destroy(drvr_info_tag);
2187 	return retval;
2188 }
2189 
2190 static void
2191 mpi3mr_display_ioc_info(struct mpi3mr_softc *sc)
2192 {
2193         int i = 0;
2194         char personality[16];
2195         struct mpi3mr_compimg_ver *fwver = &sc->facts.fw_ver;
2196 
2197         switch (sc->facts.personality) {
2198         case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
2199                 strcpy(personality, "Enhanced HBA");
2200                 break;
2201         case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
2202                 strcpy(personality, "RAID");
2203                 break;
2204         default:
2205                 strcpy(personality, "Unknown");
2206                 break;
2207         }
2208 
2209 	mpi3mr_dprint(sc, MPI3MR_INFO, "Current Personality: %s\n", personality);
2210 
2211 	mpi3mr_dprint(sc, MPI3MR_INFO, "FW Version: %d.%d.%d.%d.%05d-%05d\n",
2212 		      fwver->gen_major, fwver->gen_minor, fwver->ph_major,
2213 		      fwver->ph_minor, fwver->cust_id, fwver->build_num);
2214 
2215         mpi3mr_dprint(sc, MPI3MR_INFO, "Protocol=(");
2216 
2217         if (sc->facts.protocol_flags &
2218             MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2219                 printf("Initiator");
2220                 i++;
2221         }
2222 
2223         if (sc->facts.protocol_flags &
2224             MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2225                 printf("%sTarget", i ? "," : "");
2226                 i++;
2227         }
2228 
2229         if (sc->facts.protocol_flags &
2230             MPI3_IOCFACTS_PROTOCOL_NVME) {
2231                 printf("%sNVMe attachment", i ? "," : "");
2232                 i++;
2233         }
2234         i = 0;
2235         printf("), ");
2236         printf("Capabilities=(");
2237 
2238         if (sc->facts.ioc_capabilities &
2239             MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE) {
2240                 printf("RAID");
2241                 i++;
2242         }
2243 
2244         printf(")\n");
2245 }
2246 
2247 /**
2248  * mpi3mr_unmask_events - Unmask events in event mask bitmap
2249  * @sc: Adapter instance reference
2250  * @event: MPI event ID
2251  *
2252  * Un mask the specific event by resetting the event_mask
2253  * bitmap.
2254  *
2255  * Return: None.
2256  */
2257 static void mpi3mr_unmask_events(struct mpi3mr_softc *sc, U16 event)
2258 {
2259 	U32 desired_event;
2260 
2261 	if (event >= 128)
2262 		return;
2263 
2264 	desired_event = (1 << (event % 32));
2265 
2266 	if (event < 32)
2267 		sc->event_masks[0] &= ~desired_event;
2268 	else if (event < 64)
2269 		sc->event_masks[1] &= ~desired_event;
2270 	else if (event < 96)
2271 		sc->event_masks[2] &= ~desired_event;
2272 	else if (event < 128)
2273 		sc->event_masks[3] &= ~desired_event;
2274 }
2275 
2276 static void mpi3mr_set_events_mask(struct mpi3mr_softc *sc)
2277 {
2278 	int i;
2279 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2280 		sc->event_masks[i] = -1;
2281 
2282         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_ADDED);
2283         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_INFO_CHANGED);
2284         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
2285 
2286         mpi3mr_unmask_events(sc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
2287 
2288         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
2289         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DISCOVERY);
2290         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
2291         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
2292 
2293         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
2294         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_ENUMERATION);
2295 
2296         mpi3mr_unmask_events(sc, MPI3_EVENT_PREPARE_FOR_RESET);
2297         mpi3mr_unmask_events(sc, MPI3_EVENT_CABLE_MGMT);
2298         mpi3mr_unmask_events(sc, MPI3_EVENT_ENERGY_PACK_CHANGE);
2299 }
2300 
2301 /**
2302  * mpi3mr_issue_event_notification - Send event notification
2303  * @sc: Adapter instance reference
2304  *
2305  * Issue event notification MPI request through admin queue and
2306  * wait for the completion of it or time out.
2307  *
2308  * Return: 0 on success, non-zero on failures.
2309  */
int mpi3mr_issue_event_notification(struct mpi3mr_softc *sc)
{
	Mpi3EventNotificationRequest_t evtnotify_req;
	int retval = 0;
	U8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is a single-slot admin command; serialize its use. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtNotify: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	evtnotify_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.Function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Copy the driver's event mask words prepared by set_events_mask. */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.EventMasks[i] =
		    (sc->event_masks[i]);
	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtnotify_req,
	    sizeof(evtnotify_req));
	if (retval) {
		printf(IOCNAME "Issue EvtNotify: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	/* Polled (not interrupt-driven) wait: this may run before
	 * interrupts are fully operational during initialization. */
	poll_for_command_completion(sc,
				    &sc->init_cmds,
				    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtNotify: command timed out\n",
		    sc->name);
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtNotify: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the command slot for the next admin command user. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2371 
2372 int
2373 mpi3mr_register_events(struct mpi3mr_softc *sc)
2374 {
2375 	int error;
2376 
2377 	mpi3mr_set_events_mask(sc);
2378 
2379 	error = mpi3mr_issue_event_notification(sc);
2380 
2381 	if (error) {
2382 		printf(IOCNAME "Failed to issue event notification %d\n",
2383 		    sc->name, error);
2384 	}
2385 
2386 	return error;
2387 }
2388 
2389 /**
2390  * mpi3mr_process_event_ack - Process event acknowledgment
2391  * @sc: Adapter instance reference
2392  * @event: MPI3 event ID
2393  * @event_ctx: Event context
2394  *
2395  * Send event acknowledgement through admin queue and wait for
2396  * it to complete.
2397  *
2398  * Return: 0 on success, non-zero on failures.
2399  */
int mpi3mr_process_event_ack(struct mpi3mr_softc *sc, U8 event,
	U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* init_cmds is a single-slot admin command; serialize its use. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtAck: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	/* Echo back the event ID/context the firmware reported. */
	evtack_req.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));
	if (retval) {
		printf(IOCNAME "Issue EvtAck: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtAck: command timed out\n",
		    sc->name);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtAck: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the command slot for the next admin command user. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2458 
2459 
2460 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_softc *sc)
2461 {
2462 	int retval = 0;
2463 	U32 sz, i;
2464 	U16 num_chains;
2465 
2466 	num_chains = sc->max_host_ios;
2467 
2468 	sc->chain_buf_count = num_chains;
2469 	sz = sizeof(struct mpi3mr_chain) * num_chains;
2470 
2471 	sc->chain_sgl_list = malloc(sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2472 
2473 	if (!sc->chain_sgl_list) {
2474 		printf(IOCNAME "Cannot allocate memory for chain SGL list\n",
2475 		    sc->name);
2476 		retval = -1;
2477 		goto out_failed;
2478 	}
2479 
2480 	sz = MPI3MR_CHAINSGE_SIZE;
2481 
2482         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2483 				4096, 0,		/* algnmnt, boundary */
2484 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2485 				BUS_SPACE_MAXADDR,	/* highaddr */
2486 				NULL, NULL,		/* filter, filterarg */
2487                                 sz,			/* maxsize */
2488                                 1,			/* nsegments */
2489                                 sz,			/* maxsegsize */
2490                                 0,			/* flags */
2491                                 NULL, NULL,		/* lockfunc, lockarg */
2492                                 &sc->chain_sgl_list_tag)) {
2493 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Chain buffer DMA tag\n");
2494 		return (ENOMEM);
2495         }
2496 
2497 	for (i = 0; i < num_chains; i++) {
2498 		if (bus_dmamem_alloc(sc->chain_sgl_list_tag, (void **)&sc->chain_sgl_list[i].buf,
2499 		    BUS_DMA_NOWAIT, &sc->chain_sgl_list[i].buf_dmamap)) {
2500 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
2501 				__func__, __LINE__);
2502 			return (ENOMEM);
2503 		}
2504 
2505 		bzero(sc->chain_sgl_list[i].buf, sz);
2506 		bus_dmamap_load(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap, sc->chain_sgl_list[i].buf, sz,
2507 		    mpi3mr_memaddr_cb, &sc->chain_sgl_list[i].buf_phys, 0);
2508 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d phys addr= %#016jx size= %d\n",
2509 		    __func__, __LINE__, (uintmax_t)sc->chain_sgl_list[i].buf_phys, sz);
2510 	}
2511 
2512 	sc->chain_bitmap_sz = MPI3MR_DIV_ROUND_UP(num_chains, 8);
2513 
2514 	sc->chain_bitmap = malloc(sc->chain_bitmap_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2515 	if (!sc->chain_bitmap) {
2516 		mpi3mr_dprint(sc, MPI3MR_INFO, "Cannot alloc memory for chain bitmap\n");
2517 		retval = -1;
2518 		goto out_failed;
2519 	}
2520 	return retval;
2521 
2522 out_failed:
2523 	for (i = 0; i < num_chains; i++) {
2524 		if (sc->chain_sgl_list[i].buf_phys != 0)
2525 			bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
2526 		if (sc->chain_sgl_list[i].buf != NULL)
2527 			bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf, sc->chain_sgl_list[i].buf_dmamap);
2528 	}
2529 	if (sc->chain_sgl_list_tag != NULL)
2530 		bus_dma_tag_destroy(sc->chain_sgl_list_tag);
2531 	return retval;
2532 }
2533 
2534 static int mpi3mr_pel_alloc(struct mpi3mr_softc *sc)
2535 {
2536 	int retval = 0;
2537 
2538 	if (!sc->pel_cmds.reply) {
2539 		sc->pel_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2540 		if (!sc->pel_cmds.reply) {
2541 			printf(IOCNAME "Cannot allocate memory for pel_cmds.reply\n",
2542 			    sc->name);
2543 			goto out_failed;
2544 		}
2545 	}
2546 
2547 	if (!sc->pel_abort_cmd.reply) {
2548 		sc->pel_abort_cmd.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2549 		if (!sc->pel_abort_cmd.reply) {
2550 			printf(IOCNAME "Cannot allocate memory for pel_abort_cmd.reply\n",
2551 			    sc->name);
2552 			goto out_failed;
2553 		}
2554 	}
2555 
2556 	if (!sc->pel_seq_number) {
2557 		sc->pel_seq_number_sz = sizeof(Mpi3PELSeq_t);
2558 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,   /* parent */
2559 				 4, 0,                           /* alignment, boundary */
2560 				 BUS_SPACE_MAXADDR_32BIT,        /* lowaddr */
2561 				 BUS_SPACE_MAXADDR,              /* highaddr */
2562 				 NULL, NULL,                     /* filter, filterarg */
2563 				 sc->pel_seq_number_sz,		 /* maxsize */
2564 				 1,                              /* nsegments */
2565 				 sc->pel_seq_number_sz,          /* maxsegsize */
2566 				 0,                              /* flags */
2567 				 NULL, NULL,                     /* lockfunc, lockarg */
2568 				 &sc->pel_seq_num_dmatag)) {
2569 			 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create PEL seq number dma memory tag\n");
2570 			 retval = -ENOMEM;
2571 			 goto out_failed;
2572 		}
2573 
2574 		if (bus_dmamem_alloc(sc->pel_seq_num_dmatag, (void **)&sc->pel_seq_number,
2575 		    BUS_DMA_NOWAIT, &sc->pel_seq_num_dmamap)) {
2576 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate PEL seq number kernel buffer dma memory\n");
2577 			retval = -ENOMEM;
2578 			goto out_failed;
2579 		}
2580 
2581 		bzero(sc->pel_seq_number, sc->pel_seq_number_sz);
2582 
2583 		bus_dmamap_load(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap, sc->pel_seq_number,
2584 		    sc->pel_seq_number_sz, mpi3mr_memaddr_cb, &sc->pel_seq_number_dma, 0);
2585 
2586 		if (!sc->pel_seq_number) {
2587 			printf(IOCNAME "%s:%d Cannot load PEL seq number dma memory for size: %d\n", sc->name,
2588 				__func__, __LINE__, sc->pel_seq_number_sz);
2589 			retval = -ENOMEM;
2590 			goto out_failed;
2591 		}
2592 	}
2593 
2594 out_failed:
2595 	return retval;
2596 }
2597 
2598 /**
2599  * mpi3mr_validate_fw_update - validate IOCFacts post adapter reset
2600  * @sc: Adapter instance reference
2601  *
2602  * Return zero if the new IOCFacts is compatible with previous values
2603  * else return appropriate error
2604  */
2605 static int
2606 mpi3mr_validate_fw_update(struct mpi3mr_softc *sc)
2607 {
2608 	U16 dev_handle_bitmap_sz;
2609 	U8 *removepend_bitmap;
2610 
2611 	if (sc->facts.reply_sz > sc->reply_sz) {
2612 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2613 		    "Cannot increase reply size from %d to %d\n",
2614 		    sc->reply_sz, sc->reply_sz);
2615 		return -EPERM;
2616 	}
2617 
2618 	if (sc->num_io_throttle_group != sc->facts.max_io_throttle_group) {
2619 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2620 		    "max io throttle group doesn't match old(%d), new(%d)\n",
2621 		    sc->num_io_throttle_group,
2622 		    sc->facts.max_io_throttle_group);
2623 		return -EPERM;
2624 	}
2625 
2626 	if (sc->facts.max_op_reply_q < sc->num_queues) {
2627 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2628 		    "Cannot reduce number of operational reply queues from %d to %d\n",
2629 		    sc->num_queues,
2630 		    sc->facts.max_op_reply_q);
2631 		return -EPERM;
2632 	}
2633 
2634 	if (sc->facts.max_op_req_q < sc->num_queues) {
2635 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2636 		    "Cannot reduce number of operational request queues from %d to %d\n",
2637 		    sc->num_queues, sc->facts.max_op_req_q);
2638 		return -EPERM;
2639 	}
2640 
2641 	dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
2642 
2643 	if (dev_handle_bitmap_sz > sc->dev_handle_bitmap_sz) {
2644 		removepend_bitmap = realloc(sc->removepend_bitmap,
2645 		    dev_handle_bitmap_sz, M_MPI3MR, M_NOWAIT);
2646 
2647 		if (!removepend_bitmap) {
2648 			mpi3mr_dprint(sc, MPI3MR_ERROR,
2649 			    "failed to increase removepend_bitmap sz from: %d to %d\n",
2650 			    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2651 			return -ENOMEM;
2652 		}
2653 
2654 		memset(removepend_bitmap + sc->dev_handle_bitmap_sz, 0,
2655 		    dev_handle_bitmap_sz - sc->dev_handle_bitmap_sz);
2656 		sc->removepend_bitmap = removepend_bitmap;
2657 		mpi3mr_dprint(sc, MPI3MR_INFO,
2658 		    "increased dev_handle_bitmap_sz from %d to %d\n",
2659 		    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2660 		sc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
2661 	}
2662 
2663 	return 0;
2664 }
2665 
2666 /*
2667  * mpi3mr_initialize_ioc - Controller initialization
2668  * @dev: pointer to device struct
2669  *
2670  * This function allocates the controller wide resources and brings
2671  * the controller to operational state
2672  *
2673  * Return: 0 on success and proper error codes on failure
2674  */
2675 int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
2676 {
2677 	int retval = 0;
2678 	enum mpi3mr_iocstate ioc_state;
2679 	U64 ioc_info;
2680 	U32 ioc_status, ioc_control, i, timeout;
2681 	Mpi3IOCFactsData_t facts_data;
2682 	char str[32];
2683 	U32 size;
2684 
2685 	sc->cpu_count = mp_ncpus;
2686 
2687 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
2688 	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
2689 	ioc_info = mpi3mr_regread64(sc, MPI3_SYSIF_IOC_INFO_LOW_OFFSET);
2690 
2691 	mpi3mr_dprint(sc, MPI3MR_INFO, "SOD ioc_status: 0x%x ioc_control: 0x%x "
2692 	    "ioc_info: 0x%lx\n", ioc_status, ioc_control, ioc_info);
2693 
2694         /*The timeout value is in 2sec unit, changing it to seconds*/
2695 	sc->ready_timeout =
2696                 ((ioc_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
2697                     MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
2698 
2699 	ioc_state = mpi3mr_get_iocstate(sc);
2700 
2701 	mpi3mr_dprint(sc, MPI3MR_INFO, "IOC state: %s   IOC ready timeout: %d\n",
2702 	    mpi3mr_iocstate_name(ioc_state), sc->ready_timeout);
2703 
2704 	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
2705 	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
2706 		timeout = sc->ready_timeout * 10;
2707 		do {
2708 			DELAY(1000 * 100);
2709 		} while (--timeout);
2710 
2711 		ioc_state = mpi3mr_get_iocstate(sc);
2712 		mpi3mr_dprint(sc, MPI3MR_INFO,
2713 			"IOC in %s state after waiting for reset time\n",
2714 			mpi3mr_iocstate_name(ioc_state));
2715 	}
2716 
2717 	if (ioc_state == MRIOC_STATE_READY) {
2718                 retval = mpi3mr_mur_ioc(sc, MPI3MR_RESET_FROM_BRINGUP);
2719                 if (retval) {
2720                         mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to MU reset IOC, error 0x%x\n",
2721                                 retval);
2722                 }
2723                 ioc_state = mpi3mr_get_iocstate(sc);
2724         }
2725 
2726         if (ioc_state != MRIOC_STATE_RESET) {
2727                 mpi3mr_print_fault_info(sc);
2728 		 mpi3mr_dprint(sc, MPI3MR_ERROR, "issuing soft reset to bring to reset state\n");
2729                  retval = mpi3mr_issue_reset(sc,
2730                      MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
2731                      MPI3MR_RESET_FROM_BRINGUP);
2732                 if (retval) {
2733                         mpi3mr_dprint(sc, MPI3MR_ERROR,
2734                             "%s :Failed to soft reset IOC, error 0x%d\n",
2735                             __func__, retval);
2736                         goto out_failed;
2737                 }
2738         }
2739 
2740 	ioc_state = mpi3mr_get_iocstate(sc);
2741 
2742         if (ioc_state != MRIOC_STATE_RESET) {
2743 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot bring IOC to reset state\n");
2744 		goto out_failed;
2745         }
2746 
2747 	retval = mpi3mr_setup_admin_qpair(sc);
2748 	if (retval) {
2749 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup Admin queues, error 0x%x\n",
2750 		    retval);
2751 		goto out_failed;
2752 	}
2753 
2754 	retval = mpi3mr_bring_ioc_ready(sc);
2755 	if (retval) {
2756 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to bring IOC ready, error 0x%x\n",
2757 		    retval);
2758 		goto out_failed;
2759 	}
2760 
2761 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2762 		retval = mpi3mr_alloc_interrupts(sc, 1);
2763 		if (retval) {
2764 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, error 0x%x\n",
2765 			    retval);
2766 			goto out_failed;
2767 		}
2768 
2769 		retval = mpi3mr_setup_irqs(sc);
2770 		if (retval) {
2771 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup ISR, error 0x%x\n",
2772 			    retval);
2773 			goto out_failed;
2774 		}
2775 	}
2776 
2777 	mpi3mr_enable_interrupts(sc);
2778 
2779 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2780 		mtx_init(&sc->mpi3mr_mtx, "SIM lock", NULL, MTX_DEF);
2781 		mtx_init(&sc->io_lock, "IO lock", NULL, MTX_DEF);
2782 		mtx_init(&sc->admin_req_lock, "Admin Request Queue lock", NULL, MTX_SPIN);
2783 		mtx_init(&sc->reply_free_q_lock, "Reply free Queue lock", NULL, MTX_SPIN);
2784 		mtx_init(&sc->sense_buf_q_lock, "Sense buffer Queue lock", NULL, MTX_SPIN);
2785 		mtx_init(&sc->chain_buf_lock, "Chain buffer lock", NULL, MTX_SPIN);
2786 		mtx_init(&sc->cmd_pool_lock, "Command pool lock", NULL, MTX_DEF);
2787 		mtx_init(&sc->fwevt_lock, "Firmware Event lock", NULL, MTX_DEF);
2788 		mtx_init(&sc->target_lock, "Target lock", NULL, MTX_SPIN);
2789 		mtx_init(&sc->reset_mutex, "Reset lock", NULL, MTX_DEF);
2790 
2791 		mtx_init(&sc->init_cmds.completion.lock, "Init commands lock", NULL, MTX_DEF);
2792 		sc->init_cmds.reply = NULL;
2793 		sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2794 		sc->init_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2795 		sc->init_cmds.host_tag = MPI3MR_HOSTTAG_INITCMDS;
2796 
2797 		mtx_init(&sc->ioctl_cmds.completion.lock, "IOCTL commands lock", NULL, MTX_DEF);
2798 		sc->ioctl_cmds.reply = NULL;
2799 		sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED;
2800 		sc->ioctl_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2801 		sc->ioctl_cmds.host_tag = MPI3MR_HOSTTAG_IOCTLCMDS;
2802 
2803 		mtx_init(&sc->pel_abort_cmd.completion.lock, "PEL Abort command lock", NULL, MTX_DEF);
2804 		sc->pel_abort_cmd.reply = NULL;
2805 		sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
2806 		sc->pel_abort_cmd.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2807 		sc->pel_abort_cmd.host_tag = MPI3MR_HOSTTAG_PELABORT;
2808 
2809 		mtx_init(&sc->host_tm_cmds.completion.lock, "TM commands lock", NULL, MTX_DEF);
2810 		sc->host_tm_cmds.reply = NULL;
2811 		sc->host_tm_cmds.state = MPI3MR_CMD_NOTUSED;
2812 		sc->host_tm_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2813 		sc->host_tm_cmds.host_tag = MPI3MR_HOSTTAG_TMS;
2814 
2815 		TAILQ_INIT(&sc->cmd_list_head);
2816 		TAILQ_INIT(&sc->event_list);
2817 		TAILQ_INIT(&sc->delayed_rmhs_list);
2818 		TAILQ_INIT(&sc->delayed_evtack_cmds_list);
2819 
2820 		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
2821 			snprintf(str, 32, "Dev REMHS commands lock[%d]", i);
2822 			mtx_init(&sc->dev_rmhs_cmds[i].completion.lock, str, NULL, MTX_DEF);
2823 			sc->dev_rmhs_cmds[i].reply = NULL;
2824 			sc->dev_rmhs_cmds[i].state = MPI3MR_CMD_NOTUSED;
2825 			sc->dev_rmhs_cmds[i].dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2826 			sc->dev_rmhs_cmds[i].host_tag = MPI3MR_HOSTTAG_DEVRMCMD_MIN
2827 							    + i;
2828 		}
2829 	}
2830 
2831 	retval = mpi3mr_issue_iocfacts(sc, &facts_data);
2832 	if (retval) {
2833 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Facts, retval: 0x%x\n",
2834 		    retval);
2835 		goto out_failed;
2836 	}
2837 
2838 	retval = mpi3mr_process_factsdata(sc, &facts_data);
2839 	if (retval) {
2840 		mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC Facts data processing failedi, retval: 0x%x\n",
2841 		    retval);
2842 		goto out_failed;
2843 	}
2844 
2845 	sc->num_io_throttle_group = sc->facts.max_io_throttle_group;
2846 	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
2847 
2848 	if (init_type == MPI3MR_INIT_TYPE_RESET) {
2849 		retval = mpi3mr_validate_fw_update(sc);
2850 		if (retval)
2851 			goto out_failed;
2852 	} else {
2853 		sc->reply_sz = sc->facts.reply_sz;
2854 	}
2855 
2856 
2857 	mpi3mr_display_ioc_info(sc);
2858 
2859 	retval = mpi3mr_reply_alloc(sc);
2860 	if (retval) {
2861 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated reply and sense buffers, retval: 0x%x\n",
2862 		    retval);
2863 		goto out_failed;
2864 	}
2865 
2866 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2867 		retval = mpi3mr_alloc_chain_bufs(sc);
2868 		if (retval) {
2869 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated chain buffers, retval: 0x%x\n",
2870 			    retval);
2871 			goto out_failed;
2872 		}
2873 	}
2874 
2875 	retval = mpi3mr_issue_iocinit(sc);
2876 	if (retval) {
2877 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Init, retval: 0x%x\n",
2878 		    retval);
2879 		goto out_failed;
2880 	}
2881 
2882 	mpi3mr_print_fw_pkg_ver(sc);
2883 
2884 	sc->reply_free_q_host_index = sc->num_reply_bufs;
2885 	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
2886 		sc->reply_free_q_host_index);
2887 
2888 	sc->sense_buf_q_host_index = sc->num_sense_bufs;
2889 
2890 	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
2891 		sc->sense_buf_q_host_index);
2892 
2893 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2894 		retval = mpi3mr_alloc_interrupts(sc, 0);
2895 		if (retval) {
2896 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, retval: 0x%x\n",
2897 			    retval);
2898 			goto out_failed;
2899 		}
2900 
2901 		retval = mpi3mr_setup_irqs(sc);
2902 		if (retval) {
2903 			printf(IOCNAME "Failed to setup ISR, error: 0x%x\n",
2904 			    sc->name, retval);
2905 			goto out_failed;
2906 		}
2907 
2908 		mpi3mr_enable_interrupts(sc);
2909 
2910 	} else
2911 		mpi3mr_enable_interrupts(sc);
2912 
2913 	retval = mpi3mr_create_op_queues(sc);
2914 
2915 	if (retval) {
2916 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create operational queues, error: %d\n",
2917 		    retval);
2918 		goto out_failed;
2919 	}
2920 
2921 	if (!sc->throttle_groups && sc->num_io_throttle_group) {
2922 		mpi3mr_dprint(sc, MPI3MR_ERROR, "allocating memory for throttle groups\n");
2923 		size = sizeof(struct mpi3mr_throttle_group_info);
2924 		sc->throttle_groups = (struct mpi3mr_throttle_group_info *)
2925 					  malloc(sc->num_io_throttle_group *
2926 					      size, M_MPI3MR, M_NOWAIT | M_ZERO);
2927 		if (!sc->throttle_groups)
2928 			goto out_failed;
2929 	}
2930 
2931 	if (init_type == MPI3MR_INIT_TYPE_RESET) {
2932 		mpi3mr_dprint(sc, MPI3MR_INFO, "Re-register events\n");
2933 		retval = mpi3mr_register_events(sc);
2934 		if (retval) {
2935 			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to re-register events, retval: 0x%x\n",
2936 			    retval);
2937 			goto out_failed;
2938 		}
2939 
2940 		mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Port Enable\n");
2941 		retval = mpi3mr_issue_port_enable(sc, 0);
2942 		if (retval) {
2943 			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to issue port enable, retval: 0x%x\n",
2944 			    retval);
2945 			goto out_failed;
2946 		}
2947 	}
2948 	retval = mpi3mr_pel_alloc(sc);
2949 	if (retval) {
2950 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate memory for PEL, retval: 0x%x\n",
2951 		    retval);
2952 		goto out_failed;
2953 	}
2954 
2955 	return retval;
2956 
2957 out_failed:
2958 	retval = -1;
2959 	return retval;
2960 }
2961 
2962 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
2963     struct mpi3mr_drvr_cmd *drvrcmd)
2964 {
2965 	drvrcmd->state = MPI3MR_CMD_NOTUSED;
2966 	drvrcmd->callback = NULL;
2967 	printf(IOCNAME "Completing Port Enable Request\n", sc->name);
2968 	sc->mpi3mr_flags |= MPI3MR_FLAGS_PORT_ENABLE_DONE;
2969 	mpi3mr_startup_decrement(sc->cam_sc);
2970 }
2971 
/*
 * mpi3mr_issue_port_enable - Issue a Port Enable request to the IOC
 * @sc: Adapter instance reference
 * @async: non-zero to return right after submission and finish via the
 *         init-command callback; zero to block until completion or
 *         MPI3MR_PORTENABLE_TIMEOUT.
 *
 * Uses the shared init_cmds tracker (serialized by its completion lock).
 *
 * Return: 0 on success (successful submission in async mode), -1 if the
 * tracker is busy, submission fails, or the synchronous wait times out.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_softc *sc, U8 async)
{
	Mpi3PortEnableRequest_t pe_req;
	int retval = 0;

	memset(&pe_req, 0, sizeof(pe_req));
	mtx_lock(&sc->init_cmds.completion.lock);
	/* Only one user of the init_cmds tracker at a time. */
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue PortEnable: Init command is in use\n", sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;

	if (async) {
		/* Completion will be signalled through the callback path. */
		sc->init_cmds.is_waiting = 0;
		sc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		/* Synchronous: arm the completion object and wait below. */
		sc->init_cmds.is_waiting = 1;
		sc->init_cmds.callback = NULL;
		init_completion(&sc->init_cmds.completion);
	}
	pe_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	pe_req.Function = MPI3_FUNCTION_PORT_ENABLE;

	printf(IOCNAME "Sending Port Enable Request\n", sc->name);
	retval = mpi3mr_submit_admin_cmd(sc, &pe_req, sizeof(pe_req));
	if (retval) {
		printf(IOCNAME "Issue PortEnable: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	if (!async) {
		wait_for_completion_timeout(&sc->init_cmds.completion,
		    MPI3MR_PORTENABLE_TIMEOUT);
		if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
			printf(IOCNAME "Issue PortEnable: command timed out\n",
			    sc->name);
			retval = -1;
			/* Timed out: check for an IOC fault and escalate. */
			mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_PE_TIMEOUT);
			goto out_unlock;
		}
		/* Completed in time: run the common completion handling. */
		mpi3mr_port_enable_complete(sc, &sc->init_cmds);
	}
out_unlock:
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
3025 
/*
 * mpi3mr_watchdog_thread - Periodic adapter health monitor
 * @arg: Adapter softc (struct mpi3mr_softc *)
 *
 * Kernel thread that wakes every second (or when prodded via
 * watchdog_chan) while holding reset_mutex, and reacts to firmware
 * faults, reset-history flags and pending reset requests by invoking
 * the soft-reset handler or marking the controller unrecoverable.
 * Exits on driver shutdown or an unrecoverable controller.
 */
void
mpi3mr_watchdog_thread(void *arg)
{
	struct mpi3mr_softc *sc;
	enum mpi3mr_iocstate ioc_state;
	U32 fault, host_diagnostic, ioc_status;

	sc = (struct mpi3mr_softc *)arg;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s\n", __func__);

	sc->watchdog_thread_active = 1;
	mtx_lock(&sc->reset_mutex);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->watchdog_chan, &sc->reset_mutex, PRIBIO,
		    "mpi3mr_watchdog", 1 * hz);
		if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ||
		    (sc->unrecoverable == 1)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "Exit due to %s from %s\n",
			   sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}

		/*
		 * Firmware asked us to prepare for reset but never followed
		 * through within the timeout: force a soft reset ourselves.
		 */
		if ((sc->prepare_for_reset) &&
		    ((sc->prepare_for_reset_timeout_counter++) >=
		     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
			mpi3mr_soft_reset_handler(sc,
			    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
			continue;
		}

		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);

		/* Firmware reset happened behind our back: resynchronize. */
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
			mpi3mr_soft_reset_handler(sc, MPI3MR_RESET_FROM_FIRMWARE, 0);
			continue;
		}

		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT) {
			fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
			    MPI3_SYSIF_FAULT_CODE_MASK;

			/*
			 * Let a firmware diagnostic save finish (up to
			 * MPI3_SYSIF_DIAG_SAVE_TIMEOUT polls) before acting
			 * on the fault.
			 */
			host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
			if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
				if (!sc->diagsave_timeout) {
					mpi3mr_print_fault_info(sc);
					mpi3mr_dprint(sc, MPI3MR_INFO,
						"diag save in progress\n");
				}
				if ((sc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
					continue;
			}
			mpi3mr_print_fault_info(sc);
			sc->diagsave_timeout = 0;

			/* Faults a soft reset cannot cure: give up on the IOC. */
			if ((fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) ||
			    (fault == MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED)) {
				mpi3mr_dprint(sc, MPI3MR_INFO,
				    "Controller requires system power cycle or complete reset is needed,"
				    "fault code: 0x%x. marking controller as unrecoverable\n", fault);
				sc->unrecoverable = 1;
				goto out;
			}
			/* A reset is already underway (by fw or driver): stand down. */
			if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
			    || (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS)
			    || (sc->reset_in_progress))
				goto out;
			if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
			else
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_FAULT_WATCH, 0);

		}

		/* A deferred soft reset was requested elsewhere: run it now. */
		if (sc->reset.type == MPI3MR_TRIGGER_SOFT_RESET) {
			mpi3mr_print_fault_info(sc);
			mpi3mr_soft_reset_handler(sc, sc->reset.reason, 1);
		}
	}
out:
	mtx_unlock(&sc->reset_mutex);
	sc->watchdog_thread_active = 0;
	mpi3mr_kproc_exit(0);
}
3116 
3117 static void mpi3mr_display_event_data(struct mpi3mr_softc *sc,
3118 	Mpi3EventNotificationReply_t *event_rep)
3119 {
3120 	char *desc = NULL;
3121 	U16 event;
3122 
3123 	event = event_rep->Event;
3124 
3125 	switch (event) {
3126 	case MPI3_EVENT_LOG_DATA:
3127 		desc = "Log Data";
3128 		break;
3129 	case MPI3_EVENT_CHANGE:
3130 		desc = "Event Change";
3131 		break;
3132 	case MPI3_EVENT_GPIO_INTERRUPT:
3133 		desc = "GPIO Interrupt";
3134 		break;
3135 	case MPI3_EVENT_CABLE_MGMT:
3136 		desc = "Cable Management";
3137 		break;
3138 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
3139 		desc = "Energy Pack Change";
3140 		break;
3141 	case MPI3_EVENT_DEVICE_ADDED:
3142 	{
3143 		Mpi3DevicePage0_t *event_data =
3144 		    (Mpi3DevicePage0_t *)event_rep->EventData;
3145 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Added: Dev=0x%04x Form=0x%x Perst id: 0x%x\n",
3146 			event_data->DevHandle, event_data->DeviceForm, event_data->PersistentID);
3147 		return;
3148 	}
3149 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
3150 	{
3151 		Mpi3DevicePage0_t *event_data =
3152 		    (Mpi3DevicePage0_t *)event_rep->EventData;
3153 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Info Changed: Dev=0x%04x Form=0x%x\n",
3154 			event_data->DevHandle, event_data->DeviceForm);
3155 		return;
3156 	}
3157 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
3158 	{
3159 		Mpi3EventDataDeviceStatusChange_t *event_data =
3160 		    (Mpi3EventDataDeviceStatusChange_t *)event_rep->EventData;
3161 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Status Change: Dev=0x%04x RC=0x%x\n",
3162 			event_data->DevHandle, event_data->ReasonCode);
3163 		return;
3164 	}
3165 	case MPI3_EVENT_SAS_DISCOVERY:
3166 	{
3167 		Mpi3EventDataSasDiscovery_t *event_data =
3168 		    (Mpi3EventDataSasDiscovery_t *)event_rep->EventData;
3169 		mpi3mr_dprint(sc, MPI3MR_EVENT, "SAS Discovery: (%s)",
3170 			(event_data->ReasonCode == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
3171 		    "start" : "stop");
3172 		if (event_data->DiscoveryStatus &&
3173 		    (sc->mpi3mr_debug & MPI3MR_EVENT)) {
3174 			printf("discovery_status(0x%08x)",
3175 			    event_data->DiscoveryStatus);
3176 
3177 		}
3178 
3179 		if (sc->mpi3mr_debug & MPI3MR_EVENT)
3180 			printf("\n");
3181 		return;
3182 	}
3183 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
3184 		desc = "SAS Broadcast Primitive";
3185 		break;
3186 	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
3187 		desc = "SAS Notify Primitive";
3188 		break;
3189 	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
3190 		desc = "SAS Init Device Status Change";
3191 		break;
3192 	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
3193 		desc = "SAS Init Table Overflow";
3194 		break;
3195 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
3196 		desc = "SAS Topology Change List";
3197 		break;
3198 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
3199 		desc = "Enclosure Device Status Change";
3200 		break;
3201 	case MPI3_EVENT_HARD_RESET_RECEIVED:
3202 		desc = "Hard Reset Received";
3203 		break;
3204 	case MPI3_EVENT_SAS_PHY_COUNTER:
3205 		desc = "SAS PHY Counter";
3206 		break;
3207 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
3208 		desc = "SAS Device Discovery Error";
3209 		break;
3210 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
3211 		desc = "PCIE Topology Change List";
3212 		break;
3213 	case MPI3_EVENT_PCIE_ENUMERATION:
3214 	{
3215 		Mpi3EventDataPcieEnumeration_t *event_data =
3216 			(Mpi3EventDataPcieEnumeration_t *)event_rep->EventData;
3217 		mpi3mr_dprint(sc, MPI3MR_EVENT, "PCIE Enumeration: (%s)",
3218 			(event_data->ReasonCode ==
3219 			    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" :
3220 			    "stop");
3221 		if (event_data->EnumerationStatus)
3222 			mpi3mr_dprint(sc, MPI3MR_EVENT, "enumeration_status(0x%08x)",
3223 			   event_data->EnumerationStatus);
3224 		if (sc->mpi3mr_debug & MPI3MR_EVENT)
3225 			printf("\n");
3226 		return;
3227 	}
3228 	case MPI3_EVENT_PREPARE_FOR_RESET:
3229 		desc = "Prepare For Reset";
3230 		break;
3231 	}
3232 
3233 	if (!desc)
3234 		return;
3235 
3236 	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s\n", desc);
3237 }
3238 
3239 struct mpi3mr_target *
3240 mpi3mr_find_target_by_per_id(struct mpi3mr_cam_softc *cam_sc,
3241     uint16_t per_id)
3242 {
3243 	struct mpi3mr_target *target = NULL;
3244 
3245 	mtx_lock_spin(&cam_sc->sc->target_lock);
3246 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3247 		if (target->per_id == per_id)
3248 			break;
3249 	}
3250 
3251 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3252 	return target;
3253 }
3254 
3255 struct mpi3mr_target *
3256 mpi3mr_find_target_by_dev_handle(struct mpi3mr_cam_softc *cam_sc,
3257     uint16_t handle)
3258 {
3259 	struct mpi3mr_target *target = NULL;
3260 
3261 	mtx_lock_spin(&cam_sc->sc->target_lock);
3262 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3263 		if (target->dev_handle == handle)
3264 			break;
3265 
3266 	}
3267 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3268 	return target;
3269 }
3270 
/*
 * mpi3mr_update_device - Refresh driver target state from Device Page 0
 * @sc: Adapter instance reference
 * @tgtdev: Driver-internal target to update
 * @dev_pg0: MPI3 Device Page 0 as reported by the firmware
 * @is_added: true when the device was just discovered
 *
 * Copies the generic fields (handles, slot, queue depth, WWID) and the
 * device-form specific data (SAS/SATA, PCIe or virtual disk) into the
 * target, and derives is_hidden from flags, access status and device
 * info so non-exposable devices are kept from the OS.
 */
void mpi3mr_update_device(struct mpi3mr_softc *sc,
    struct mpi3mr_target *tgtdev, Mpi3DevicePage0_t *dev_pg0,
    bool is_added)
{
	U16 flags = 0;

	tgtdev->per_id = (dev_pg0->PersistentID);
	tgtdev->dev_handle = (dev_pg0->DevHandle);
	tgtdev->dev_type = dev_pg0->DeviceForm;
	tgtdev->encl_handle = (dev_pg0->EnclosureHandle);
	tgtdev->parent_handle = (dev_pg0->ParentDevHandle);
	tgtdev->slot = (dev_pg0->Slot);
	/* NOTE(review): qdepth is set here but the PCIe case below sets
	 * q_depth — two similarly named fields; verify consumers use the
	 * intended one. */
	tgtdev->qdepth = (dev_pg0->QueueDepth);
	tgtdev->wwid = (dev_pg0->WWID);

	flags = (dev_pg0->Flags);
	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
	/* Throttling eligibility is only (re)evaluated on device add. */
	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	/* Any access status other than the benign ones hides the device. */
	switch (dev_pg0->AccessStatus) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	/* Device-form specific decode of the union payload. */
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		Mpi3Device0SasSataFormat_t *sasinf =
		    &dev_pg0->DeviceSpecific.SasSataFormat;
		U16 dev_info = (sasinf->DeviceInfo);
		tgtdev->dev_spec.sassata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sassata_inf.sas_address =
		    (sasinf->SASAddress);
		/* Expose only SSP/STP-SATA end devices; hide everything else. */
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
			    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		Mpi3Device0PcieFormat_t *pcieinf =
		    &dev_pg0->DeviceSpecific.PcieFormat;
		U16 dev_info = (pcieinf->DeviceInfo);

		tgtdev->q_depth = dev_pg0->QueueDepth;
		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    (pcieinf->Capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* Page values are only trusted when access status is clean. */
		if (dev_pg0->AccessStatus == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    (pcieinf->MaximumDataTransferSize);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->PageSize;
			tgtdev->dev_spec.pcie_inf.reset_to =
				pcieinf->ControllerResetTO;
			tgtdev->dev_spec.pcie_inf.abort_to =
				pcieinf->NVMeAbortTO;
		}
		/* Cap the maximum data transfer size at 1 MiB. */
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);

		/* Only NVMe and PCIe-SCSI device types are exposed. */
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;

		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		Mpi3Device0VdFormat_t *vdinf =
		    &dev_pg0->DeviceSpecific.VdFormat;
		struct mpi3mr_throttle_group_info *tg = NULL;

		tgtdev->dev_spec.vol_inf.state = vdinf->VdState;
		if (vdinf->VdState == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->dev_spec.vol_inf.tg_id = vdinf->IOThrottleGroup;
		/* NOTE(review): the *2048 scaling presumably converts the
		 * reported watermarks to a finer unit — confirm against the
		 * MPI 3.0 specification. */
		tgtdev->dev_spec.vol_inf.tg_high =
			vdinf->IOThrottleGroupHigh * 2048;
		tgtdev->dev_spec.vol_inf.tg_low =
			vdinf->IOThrottleGroupLow * 2048;
		/* Mirror the watermarks into the per-adapter group table. */
		if (vdinf->IOThrottleGroup < sc->num_io_throttle_group) {
			tg = sc->throttle_groups + vdinf->IOThrottleGroup;
			tg->id = vdinf->IOThrottleGroup;
			tg->high = tgtdev->dev_spec.vol_inf.tg_high;
			tg->low = tgtdev->dev_spec.vol_inf.tg_low;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		/* tg stays NULL when the volume has no valid throttle group. */
		tgtdev->dev_spec.vol_inf.tg = tg;
		tgtdev->throttle_group = tg;
		break;
	}
	default:
		goto out;
	}

out:
	return;
}
3385 
3386 int mpi3mr_create_device(struct mpi3mr_softc *sc,
3387     Mpi3DevicePage0_t *dev_pg0)
3388 {
3389 	int retval = 0;
3390 	struct mpi3mr_target *target = NULL;
3391 	U16 per_id = 0;
3392 
3393 	per_id = dev_pg0->PersistentID;
3394 
3395 	mtx_lock_spin(&sc->target_lock);
3396 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
3397 		if (target->per_id == per_id) {
3398 			target->state = MPI3MR_DEV_CREATED;
3399 			break;
3400 		}
3401 	}
3402 	mtx_unlock_spin(&sc->target_lock);
3403 
3404 	if (target) {
3405 			mpi3mr_update_device(sc, target, dev_pg0, true);
3406 	} else {
3407 			target = malloc(sizeof(*target), M_MPI3MR,
3408 				 M_NOWAIT | M_ZERO);
3409 
3410 			if (target == NULL) {
3411 				retval = -1;
3412 				goto out;
3413 			}
3414 
3415 			target->exposed_to_os = 0;
3416 			mpi3mr_update_device(sc, target, dev_pg0, true);
3417 			mtx_lock_spin(&sc->target_lock);
3418 			TAILQ_INSERT_TAIL(&sc->cam_sc->tgt_list, target, tgt_next);
3419 			target->state = MPI3MR_DEV_CREATED;
3420 			mtx_unlock_spin(&sc->target_lock);
3421 	}
3422 out:
3423 	return retval;
3424 }
3425 
3426 /**
3427  * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
3428  * @sc: Adapter instance reference
3429  * @drv_cmd: Internal command tracker
3430  *
3431  * Issues a target reset TM to the firmware from the device
3432  * removal TM pend list or retry the removal handshake sequence
3433  * based on the IOU control request IOC status.
3434  *
3435  * Return: Nothing
3436  */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	/* Tracker slot index within dev_rmhs_cmds[], derived from the host tag. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/* Retry the whole TM + IOU-control handshake, bounded. */
		if (drv_cmd->retry_count < MPI3MR_DEVRMHS_RETRYCOUNT) {
			drv_cmd->retry_count++;
			mpi3mr_dprint(sc, MPI3MR_EVENT,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		/* Removal no longer pending for this handle. */
		mpi3mr_clear_bit(drv_cmd->dev_handle, sc->removepend_bitmap);
	}

	/*
	 * Reuse this tracker immediately for the oldest postponed removal,
	 * if any, instead of releasing it.
	 */
	if (!TAILQ_EMPTY(&sc->delayed_rmhs_list)) {
		delayed_dev_rmhs = TAILQ_FIRST(&sc->delayed_rmhs_list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		mpi3mr_dprint(sc, MPI3MR_EVENT,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		TAILQ_REMOVE(&sc->delayed_rmhs_list, delayed_dev_rmhs, list);
		free(delayed_dev_rmhs, M_MPI3MR);
		return;
	}
	/* Nothing queued: release the tracker and its devrem bitmap slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3488 
3489 /**
3490  * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
3491  * @sc: Adapter instance reference
3492  * @drv_cmd: Internal command tracker
3493  *
3494  * Issues a target reset TM to the firmware from the device
3495  * removal TM pend list or issue IO Unit control request as
3496  * part of device removal or hidden acknowledgment handshake.
3497  *
3498  * Return: Nothing
3499  */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	Mpi3IoUnitControlRequest_t iou_ctrl;
	/* Tracker slot index within dev_rmhs_cmds[], derived from the host tag. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	Mpi3SCSITaskMgmtReply_t *tm_reply = NULL;
	int retval;

	/* A TM reply payload is only present when the firmware returned one. */
	if (drv_cmd->state & MPI3MR_CMD_REPLYVALID)
		tm_reply = (Mpi3SCSITaskMgmtReply_t *)drv_cmd->reply;

	if (tm_reply)
		printf(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    sc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32toh(tm_reply->TerminationCount));

	printf(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    sc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	/*
	 * Chain into the IO Unit control step of the removal handshake,
	 * reusing this tracker; completion lands in
	 * mpi3mr_dev_rmhs_complete_iou().
	 */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.Operation = drv_cmd->iou_rc;
	iou_ctrl.Param16[0] = htole16(drv_cmd->dev_handle);
	iou_ctrl.HostTag = htole16(drv_cmd->host_tag);
	iou_ctrl.Function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_submit_admin_cmd(sc, &iou_ctrl, sizeof(iou_ctrl));
	if (retval) {
		printf(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    sc->name);
		goto out_failed;
	}

	return;
out_failed:
	/* Abandon the handshake: free the tracker and its devrem bitmap slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3546 
3547 /**
3548  * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
3549  * @sc: Adapter instance reference
3550  * @handle: Device handle
3551  * @cmdparam: Internal command tracker
3552  * @iou_rc: IO Unit reason code
3553  *
3554  * Issues a target reset TM to the firmware or add it to a pend
3555  * list as part of device removal or hidden acknowledgment
3556  * handshake.
3557  *
3558  * Return: Nothing
3559  */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc)
{
	Mpi3SCSITaskMgmtRequest_t tm_req;
	int retval = 0;
	U16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	U8 retrycount = 5;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
	struct mpi3mr_target *tgtdev = NULL;

	/*
	 * For a device removal (as opposed to a hidden ack), mark the
	 * matching target as having started the removal handshake so
	 * other paths can see the device is on its way out.
	 */
	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(tgtdev, &sc->cam_sc->tgt_list, tgt_next) {
		if ((tgtdev->dev_handle == handle) &&
		    (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) {
			tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
			break;
		}
	}
	mtx_unlock_spin(&sc->target_lock);

	/* A caller-supplied tracker (continuation/retry) skips allocation. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Claim a free device-removal command tracker by atomically setting
	 * the first clear bitmap bit; retry a few times to ride out races
	 * with concurrent claimers.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx, sc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	/*
	 * All trackers busy: park the request on the delayed list so it is
	 * re-issued later (presumably when an in-flight removal completes —
	 * the drain path is outside this chunk).  M_NOWAIT because this may
	 * run in interrupt context; an allocation failure silently drops
	 * the request.
	 */
	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = malloc(sizeof(*delayed_dev_rmhs),M_MPI3MR,
		     M_ZERO|M_NOWAIT);

		if (!delayed_dev_rmhs)
			return;
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		TAILQ_INSERT_TAIL(&(sc->delayed_rmhs_list), delayed_dev_rmhs, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);


		return;
	}
	drv_cmd = &sc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	/* Derive the tracker index from its host tag (caller-supplied
	 * trackers arrive without cmd_idx set). */
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	/* Arm the tracker; completion is asynchronous via the callback —
	 * nobody sleeps on this command. */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.DevHandle = htole16(handle);
	tm_req.TaskType = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.HostTag = htole16(drv_cmd->host_tag);
	tm_req.TaskHostTag = htole16(MPI3MR_HOSTTAG_INVALID);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	/* Record the pending removal for this handle before posting. */
	mpi3mr_set_bit(handle, sc->removepend_bitmap);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Posting failed: reset and release the tracker for reuse. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3648 
3649 /**
3650  * mpi3mr_complete_evt_ack - Event ack request completion
3651  * @sc: Adapter instance reference
3652  * @drv_cmd: Internal command tracker
3653  *
3654  * This is the completion handler for non blocking event
3655  * acknowledgment sent to the firmware and this will issue any
3656  * pending event acknowledgment request.
3657  *
3658  * Return: Nothing
3659  */
3660 static void mpi3mr_complete_evt_ack(struct mpi3mr_softc *sc,
3661 	struct mpi3mr_drvr_cmd *drv_cmd)
3662 {
3663 	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
3664 	struct delayed_evtack_node *delayed_evtack = NULL;
3665 
3666 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
3667 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3668 		    "%s: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", __func__,
3669 		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3670 		    drv_cmd->ioc_loginfo);
3671 	}
3672 
3673 	if (!TAILQ_EMPTY(&sc->delayed_evtack_cmds_list)) {
3674 		delayed_evtack = TAILQ_FIRST(&sc->delayed_evtack_cmds_list);
3675 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3676 		    "%s: processing delayed event ack for event %d\n",
3677 		    __func__, delayed_evtack->event);
3678 		mpi3mr_send_evt_ack(sc, delayed_evtack->event, drv_cmd,
3679 		    delayed_evtack->event_ctx);
3680 		TAILQ_REMOVE(&sc->delayed_evtack_cmds_list, delayed_evtack, list);
3681 		free(delayed_evtack, M_MPI3MR);
3682 		return;
3683 	}
3684 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3685 	drv_cmd->callback = NULL;
3686 	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
3687 }
3688 
3689 /**
 * mpi3mr_send_evt_ack - Issue event acknowledgment request
3691  * @sc: Adapter instance reference
3692  * @event: MPI3 event id
3693  * @cmdparam: Internal command tracker
3694  * @event_ctx: Event context
3695  *
 * Issues an event acknowledgment request to the firmware if there
 * is a free command to send the event ack, else adds it to a pending
 * list so that it will be processed on completion of a prior
 * event acknowledgment.
3700  *
3701  * Return: Nothing
3702  */
static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;
	U8 retrycount = 5;
	U16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_evtack_node *delayed_evtack = NULL;

	/* A caller-supplied tracker (reused from a completed ack) skips
	 * allocation. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Claim a free event-ack tracker by atomically setting the first
	 * clear bitmap bit; retry a few times to tolerate races with
	 * concurrent claimers.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx,
			    sc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	/*
	 * No tracker free: park the ack on the pending list; it is issued
	 * by mpi3mr_complete_evt_ack() when a tracker completes.  M_NOWAIT
	 * because this may run in interrupt context; allocation failure
	 * silently drops the ack.
	 */
	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		delayed_evtack = malloc(sizeof(*delayed_evtack),M_MPI3MR,
		     M_ZERO | M_NOWAIT);
		if (!delayed_evtack)
			return;
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		TAILQ_INSERT_TAIL(&(sc->delayed_evtack_cmds_list), delayed_evtack, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s : Event ack for event:%d is postponed\n",
		    __func__, event);
		return;
	}
	drv_cmd = &sc->evtack_cmds[cmd_idx];

issue_cmd:
	/* Derive the tracker index from its host tag (caller-supplied
	 * trackers arrive without cmd_idx set). */
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s: Command is in use\n", __func__);
		goto out;
	}
	/* Arm the tracker; completion is asynchronous via the callback. */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_complete_evt_ack;
	evtack_req.HostTag = htole16(drv_cmd->host_tag);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));

	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Admin Post failed\n", __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Posting failed: release the tracker for reuse. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
}
3769 
3770 /*
3771  * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
3772  * @sc: Adapter instance reference
3773  * @event_reply: Event data
3774  *
3775  * Checks for the reason code and based on that either block I/O
3776  * to device, or unblock I/O to the device, or start the device
3777  * removal handshake with reason as remove with the firmware for
3778  * PCIe devices.
3779  *
3780  * Return: Nothing
3781  */
3782 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_softc *sc,
3783 	Mpi3EventNotificationReply_t *event_reply)
3784 {
3785 	Mpi3EventDataPcieTopologyChangeList_t *topo_evt =
3786 	    (Mpi3EventDataPcieTopologyChangeList_t *) event_reply->EventData;
3787 	int i;
3788 	U16 handle;
3789 	U8 reason_code;
3790 	struct mpi3mr_target *tgtdev = NULL;
3791 
3792 	for (i = 0; i < topo_evt->NumEntries; i++) {
3793 		handle = le16toh(topo_evt->PortEntry[i].AttachedDevHandle);
3794 		if (!handle)
3795 			continue;
3796 		reason_code = topo_evt->PortEntry[i].PortStatus;
3797 		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
3798 		switch (reason_code) {
3799 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
3800 			if (tgtdev) {
3801 				tgtdev->dev_removed = 1;
3802 				tgtdev->dev_removedelay = 0;
3803 				mpi3mr_atomic_set(&tgtdev->block_io, 0);
3804 			}
3805 			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
3806 			    MPI3_CTRL_OP_REMOVE_DEVICE);
3807 			break;
3808 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
3809 			if (tgtdev) {
3810 				tgtdev->dev_removedelay = 1;
3811 				mpi3mr_atomic_inc(&tgtdev->block_io);
3812 			}
3813 			break;
3814 		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
3815 			if (tgtdev &&
3816 			    tgtdev->dev_removedelay) {
3817 				tgtdev->dev_removedelay = 0;
3818 				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3819 					mpi3mr_atomic_dec(&tgtdev->block_io);
3820 			}
3821 			break;
3822 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
3823 		default:
3824 			break;
3825 		}
3826 	}
3827 }
3828 
3829 /**
3830  * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
3831  * @sc: Adapter instance reference
3832  * @event_reply: Event data
3833  *
3834  * Checks for the reason code and based on that either block I/O
3835  * to device, or unblock I/O to the device, or start the device
3836  * removal handshake with reason as remove with the firmware for
3837  * SAS/SATA devices.
3838  *
3839  * Return: Nothing
3840  */
3841 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_softc *sc,
3842 	Mpi3EventNotificationReply_t *event_reply)
3843 {
3844 	Mpi3EventDataSasTopologyChangeList_t *topo_evt =
3845 	    (Mpi3EventDataSasTopologyChangeList_t *)event_reply->EventData;
3846 	int i;
3847 	U16 handle;
3848 	U8 reason_code;
3849 	struct mpi3mr_target *tgtdev = NULL;
3850 
3851 	for (i = 0; i < topo_evt->NumEntries; i++) {
3852 		handle = le16toh(topo_evt->PhyEntry[i].AttachedDevHandle);
3853 		if (!handle)
3854 			continue;
3855 		reason_code = topo_evt->PhyEntry[i].Status &
3856 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
3857 		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
3858 		switch (reason_code) {
3859 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
3860 			if (tgtdev) {
3861 				tgtdev->dev_removed = 1;
3862 				tgtdev->dev_removedelay = 0;
3863 				mpi3mr_atomic_set(&tgtdev->block_io, 0);
3864 			}
3865 			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
3866 			    MPI3_CTRL_OP_REMOVE_DEVICE);
3867 			break;
3868 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
3869 			if (tgtdev) {
3870 				tgtdev->dev_removedelay = 1;
3871 				mpi3mr_atomic_inc(&tgtdev->block_io);
3872 			}
3873 			break;
3874 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
3875 			if (tgtdev &&
3876 			    tgtdev->dev_removedelay) {
3877 				tgtdev->dev_removedelay = 0;
3878 				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3879 					mpi3mr_atomic_dec(&tgtdev->block_io);
3880 			}
3881 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
3882 		default:
3883 			break;
3884 		}
3885 	}
3886 
3887 }
3888 /**
3889  * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
3890  * @sc: Adapter instance reference
3891  * @event_reply: Event data
3892  *
3893  * Checks for the reason code and based on that either block I/O
3894  * to device, or unblock I/O to the device, or start the device
3895  * removal handshake with reason as remove/hide acknowledgment
3896  * with the firmware.
3897  *
3898  * Return: Nothing
3899  */
3900 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_softc *sc,
3901 	Mpi3EventNotificationReply_t *event_reply)
3902 {
3903 	U16 dev_handle = 0;
3904 	U8 ublock = 0, block = 0, hide = 0, uhide = 0, delete = 0, remove = 0;
3905 	struct mpi3mr_target *tgtdev = NULL;
3906 	Mpi3EventDataDeviceStatusChange_t *evtdata =
3907 	    (Mpi3EventDataDeviceStatusChange_t *) event_reply->EventData;
3908 
3909 	dev_handle = le16toh(evtdata->DevHandle);
3910 
3911 	switch (evtdata->ReasonCode) {
3912 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
3913 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
3914 		block = 1;
3915 		break;
3916 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
3917 		delete = 1;
3918 		hide = 1;
3919 		break;
3920 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
3921 		uhide = 1;
3922 		break;
3923 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
3924 		delete = 1;
3925 		remove = 1;
3926 		break;
3927 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
3928 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
3929 		ublock = 1;
3930 		break;
3931 	default:
3932 		break;
3933 	}
3934 
3935 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
3936 
3937 	if (!tgtdev) {
3938 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :target with dev_handle:0x%x not found\n",
3939 		    __func__, dev_handle);
3940 		return;
3941 	}
3942 
3943 	if (block)
3944 		mpi3mr_atomic_inc(&tgtdev->block_io);
3945 
3946 	if (hide)
3947 		tgtdev->is_hidden = hide;
3948 
3949 	if (uhide) {
3950 		tgtdev->is_hidden = 0;
3951 		tgtdev->dev_removed = 0;
3952 	}
3953 
3954 	if (delete)
3955 		tgtdev->dev_removed = 1;
3956 
3957 	if (ublock) {
3958 		if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3959 			mpi3mr_atomic_dec(&tgtdev->block_io);
3960 	}
3961 
3962 	if (remove) {
3963 		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
3964 					MPI3_CTRL_OP_REMOVE_DEVICE);
3965 	}
3966 	if (hide)
3967 		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
3968 					MPI3_CTRL_OP_HIDDEN_ACK);
3969 }
3970 
3971 /**
3972  * mpi3mr_preparereset_evt_th - Prepareforreset evt tophalf
3973  * @sc: Adapter instance reference
3974  * @event_reply: Event data
3975  *
3976  * Blocks and unblocks host level I/O based on the reason code
3977  *
3978  * Return: Nothing
3979  */
3980 static void mpi3mr_preparereset_evt_th(struct mpi3mr_softc *sc,
3981 	Mpi3EventNotificationReply_t *event_reply)
3982 {
3983 	Mpi3EventDataPrepareForReset_t *evtdata =
3984 	    (Mpi3EventDataPrepareForReset_t *)event_reply->EventData;
3985 
3986 	if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_START) {
3987 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Recieved PrepForReset Event with RC=START\n",
3988 		    __func__);
3989 		if (sc->prepare_for_reset)
3990 			return;
3991 		sc->prepare_for_reset = 1;
3992 		sc->prepare_for_reset_timeout_counter = 0;
3993 	} else if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
3994 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Recieved PrepForReset Event with RC=ABORT\n",
3995 		    __func__);
3996 		sc->prepare_for_reset = 0;
3997 		sc->prepare_for_reset_timeout_counter = 0;
3998 	}
3999 	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
4000 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
4001 		mpi3mr_send_evt_ack(sc, event_reply->Event, NULL,
4002 		    le32toh(event_reply->EventContext));
4003 }
4004 
4005 /**
4006  * mpi3mr_energypackchg_evt_th - Energypackchange evt tophalf
4007  * @sc: Adapter instance reference
4008  * @event_reply: Event data
4009  *
 * Identifies the new shutdown timeout value and updates the cached value.
4011  *
4012  * Return: Nothing
4013  */
4014 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_softc *sc,
4015 	Mpi3EventNotificationReply_t *event_reply)
4016 {
4017 	Mpi3EventDataEnergyPackChange_t *evtdata =
4018 	    (Mpi3EventDataEnergyPackChange_t *)event_reply->EventData;
4019 	U16 shutdown_timeout = le16toh(evtdata->ShutdownTimeout);
4020 
4021 	if (shutdown_timeout <= 0) {
4022 		mpi3mr_dprint(sc, MPI3MR_ERROR,
4023 		    "%s :Invalid Shutdown Timeout received = %d\n",
4024 		    __func__, shutdown_timeout);
4025 		return;
4026 	}
4027 
4028 	mpi3mr_dprint(sc, MPI3MR_EVENT,
4029 	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
4030 	    __func__, sc->facts.shutdown_timeout, shutdown_timeout);
4031 	sc->facts.shutdown_timeout = shutdown_timeout;
4032 }
4033 
4034 /**
4035  * mpi3mr_cablemgmt_evt_th - Cable mgmt evt tophalf
4036  * @sc: Adapter instance reference
4037  * @event_reply: Event data
4038  *
 * Displays cable management event details.
4040  *
4041  * Return: Nothing
4042  */
4043 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_softc *sc,
4044 	Mpi3EventNotificationReply_t *event_reply)
4045 {
4046 	Mpi3EventDataCableManagement_t *evtdata =
4047 	    (Mpi3EventDataCableManagement_t *)event_reply->EventData;
4048 
4049 	switch (evtdata->Status) {
4050 	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
4051 	{
4052 		mpi3mr_dprint(sc, MPI3MR_INFO, "An active cable with ReceptacleID %d cannot be powered.\n"
4053 		    "Devices connected to this cable are not detected.\n"
4054 		    "This cable requires %d mW of power.\n",
4055 		    evtdata->ReceptacleID,
4056 		    le32toh(evtdata->ActiveCablePowerRequirement));
4057 		break;
4058 	}
4059 	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
4060 	{
4061 		mpi3mr_dprint(sc, MPI3MR_INFO, "A cable with ReceptacleID %d is not running at optimal speed\n",
4062 		    evtdata->ReceptacleID);
4063 		break;
4064 	}
4065 	default:
4066 		break;
4067 	}
4068 }
4069 
4070 /**
 * mpi3mr_process_events - Events' top-half handler
4072  * @sc: Adapter instance reference
4073  * @event_reply: Event data
4074  *
4075  * Top half of event processing.
4076  *
4077  * Return: Nothing
4078  */
static void mpi3mr_process_events(struct mpi3mr_softc *sc,
    uintptr_t data, Mpi3EventNotificationReply_t *event_reply)
{
	U16 evt_type;
	bool ack_req = 0, process_evt_bh = 0;
	struct mpi3mr_fw_event_work *fw_event;
	U16 sz;

	/* Ignore firmware events once the controller is shutting down. */
	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
		goto out;

	/* The firmware may require an explicit acknowledgment. */
	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		ack_req = 1;

	evt_type = event_reply->Event;

	/*
	 * Top half: do the time-critical per-event work inline and mark
	 * events that need further (bottom-half) processing on the
	 * driver's event taskqueue.
	 */
	switch (evt_type) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		Mpi3DevicePage0_t *dev_pg0 =
			(Mpi3DevicePage0_t *) event_reply->EventData;
		if (mpi3mr_create_device(sc, dev_pg0))
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			"%s :Failed to add device in the device add event\n",
			__func__);
		else
			process_evt_bh = 1;
		break;
	}

	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		process_evt_bh = 1;
		mpi3mr_devstatuschg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_sastopochg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_pcietopochg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
	{
		/* The handler sends its own ack when required, so suppress
		 * the generic ack below. */
		mpi3mr_preparereset_evt_th(sc, event_reply);
		ack_req = 0;
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	case MPI3_EVENT_LOG_DATA:
	{
		process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
	{
		mpi3mr_energypackchg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_CABLE_MGMT:
	{
		mpi3mr_cablemgmt_evt_th(sc, event_reply);
		break;
	}

	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	case MPI3_EVENT_SAS_DISCOVERY:
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
	case MPI3_EVENT_PCIE_ENUMERATION:
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Event 0x%02x is not handled by driver\n",
		    __func__, evt_type);
		break;
	}

	/*
	 * Queue a work item when bottom-half processing and/or an ack is
	 * needed.  A copy of the event data is made because the reply
	 * frame is recycled after this function returns.
	 */
	if (process_evt_bh || ack_req) {
		fw_event = malloc(sizeof(struct mpi3mr_fw_event_work), M_MPI3MR,
		     M_ZERO|M_NOWAIT);

		if (!fw_event) {
			printf("%s: allocate failed for fw_event\n", __func__);
			return;
		}

		/* EventDataLength is in 4-byte units. */
		sz = le16toh(event_reply->EventDataLength) * 4;
		fw_event->event_data = malloc(sz, M_MPI3MR, M_ZERO|M_NOWAIT);

		if (!fw_event->event_data) {
			printf("%s: allocate failed for event_data\n", __func__);
			free(fw_event, M_MPI3MR);
			return;
		}

		bcopy(event_reply->EventData, fw_event->event_data, sz);
		fw_event->event = event_reply->Event;
		/* Track topology/enclosure events for mapping bookkeeping. */
		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE ) &&
		    sc->track_mapping_events)
			sc->pending_map_events++;

		/*
		 * Events should be processed after Port enable is completed.
		 */
		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ) &&
		    !(sc->mpi3mr_flags & MPI3MR_FLAGS_PORT_ENABLE_DONE))
			mpi3mr_startup_increment(sc->cam_sc);

		fw_event->send_ack = ack_req;
		fw_event->event_context = le32toh(event_reply->EventContext);
		fw_event->event_data_size = sz;
		fw_event->process_event = process_evt_bh;

		/* Hand the work item to the event taskqueue. */
		mtx_lock(&sc->fwevt_lock);
		TAILQ_INSERT_TAIL(&sc->cam_sc->ev_queue, fw_event, ev_link);
		taskqueue_enqueue(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
		mtx_unlock(&sc->fwevt_lock);

	}
out:
	return;
}
4211 
4212 static void mpi3mr_handle_events(struct mpi3mr_softc *sc, uintptr_t data,
4213     Mpi3DefaultReply_t *def_reply)
4214 {
4215 	Mpi3EventNotificationReply_t *event_reply =
4216 		(Mpi3EventNotificationReply_t *)def_reply;
4217 
4218 	sc->change_count = event_reply->IOCChangeCount;
4219 	mpi3mr_display_event_data(sc, event_reply);
4220 
4221 	mpi3mr_process_events(sc, data, event_reply);
4222 }
4223 
/*
 * mpi3mr_process_admin_reply_desc - process one admin reply descriptor:
 * decode it, dispatch firmware events, and complete the matching internal
 * command tracker (by host tag).
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
    Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
{
	U16 reply_desc_type, host_tag = 0, idx;
	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	U32 ioc_loginfo = 0;
	Mpi3StatusReplyDescriptor_t *status_desc;
	Mpi3AddressReplyDescriptor_t *addr_desc;
	Mpi3SuccessReplyDescriptor_t *success_desc;
	Mpi3DefaultReply_t *def_reply = NULL;
	struct mpi3mr_drvr_cmd *cmdptr = NULL;
	Mpi3SCSIIOReply_t *scsi_reply;
	U8 *sense_buf = NULL;

	*reply_dma = 0;
	/*
	 * Decode the descriptor by type: extract host tag, IOC status and
	 * log info; address replies also carry a full reply frame (whose
	 * DMA address is returned to the caller via *reply_dma).
	 */
	reply_desc_type = reply_desc->ReplyFlags &
			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
		host_tag = status_desc->HostTag;
		ioc_status = status_desc->IOCStatus;
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = status_desc->IOCLogInfo;
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
		*reply_dma = addr_desc->ReplyFrameAddress;
		def_reply = mpi3mr_get_reply_virt_addr(sc, *reply_dma);
		if (def_reply == NULL)
			goto out;
		host_tag = def_reply->HostTag;
		ioc_status = def_reply->IOCStatus;
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = def_reply->IOCLogInfo;
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		/* SCSI IO replies may reference a sense buffer, which is
		 * reposted at the end of this function. */
		if (def_reply->Function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (Mpi3SCSIIOReply_t *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
			    scsi_reply->SenseDataBufferAddress);
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
		host_tag = success_desc->HostTag;
		break;
	default:
		break;
	}
	/* Map well-known host tags to their internal command trackers. */
	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		cmdptr = &sc->init_cmds;
		break;
	case MPI3MR_HOSTTAG_IOCTLCMDS:
		cmdptr = &sc->ioctl_cmds;
		break;
	case MPI3MR_HOSTTAG_TMS:
		cmdptr = &sc->host_tm_cmds;
		wakeup((void *)&sc->tm_chan);
		break;
	case MPI3MR_HOSTTAG_PELABORT:
		cmdptr = &sc->pel_abort_cmd;
		break;
	case MPI3MR_HOSTTAG_PELWAIT:
		cmdptr = &sc->pel_cmds;
		break;
	case MPI3MR_HOSTTAG_INVALID:
		/* Unsolicited replies: only event notifications are acted
		 * upon; no tracker matches this tag. */
		if (def_reply && def_reply->Function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(sc, *reply_dma ,def_reply);
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Device-removal and event-ack trackers occupy dedicated host-tag
	 * ranges; resolve them by offset into the respective arrays. */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX ) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		cmdptr = &sc->dev_rmhs_cmds[idx];
	}

	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		cmdptr = &sc->evtack_cmds[idx];
	}

	/*
	 * Complete the tracker: stash status/loginfo, copy the reply frame
	 * and sense data if present, then wake a sleeping waiter or invoke
	 * the async completion callback.
	 */
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLYVALID;
				memcpy((U8 *)cmdptr->reply, (U8 *)def_reply,
				    sc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_senseprst = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				    MPI3MR_SENSEBUF_SZ);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->completion);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(sc, cmdptr);
		}
	}
out:
	/* Return the consumed sense buffer to the firmware's free queue.
	 * (scsi_reply is always valid when sense_buf is non-NULL.) */
	if (sense_buf != NULL)
		mpi3mr_repost_sense_buf(sc,
		    scsi_reply->SenseDataBufferAddress);
	return;
}
4342 
4343 /*
4344  * mpi3mr_complete_admin_cmd:	ISR routine for admin commands
4345  * @sc:				Adapter's soft instance
4346  *
4347  * This function processes admin command completions.
4348  */
static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc)
{
	U32 exp_phase = sc->admin_reply_ephase;
	U32 adm_reply_ci = sc->admin_reply_ci;
	U32 num_adm_reply = 0;
	U64 reply_dma = 0;
	Mpi3DefaultReplyDescriptor_t *reply_desc;

	/*
	 * Single-consumer gate: only one context may drain the admin reply
	 * queue at a time; concurrent callers return immediately.
	 */
	mtx_lock_spin(&sc->admin_reply_lock);
	if (sc->admin_in_use == false) {
		sc->admin_in_use = true;
		mtx_unlock_spin(&sc->admin_reply_lock);
	} else {
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	reply_desc = (Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
		adm_reply_ci;

	/*
	 * The phase bit distinguishes new descriptors from stale ones; a
	 * mismatch at the current consumer index means nothing to do.
	 */
	if ((reply_desc->ReplyFlags &
	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		mtx_lock_spin(&sc->admin_reply_lock);
		sc->admin_in_use = false;
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	/*
	 * Drain descriptors until the phase bit flips back, processing each
	 * reply and recycling its reply frame; the expected phase toggles
	 * every time the circular queue wraps.
	 */
	do {
		sc->admin_req_ci = reply_desc->RequestQueueCI;
		mpi3mr_process_admin_reply_desc(sc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(sc, reply_dma);
		num_adm_reply++;
		if (++adm_reply_ci == sc->num_admin_replies) {
			adm_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
			(Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
			    adm_reply_ci;
		if ((reply_desc->ReplyFlags &
		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	/* Publish the new consumer index to the controller and release
	 * the single-consumer gate. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
	sc->admin_reply_ci = adm_reply_ci;
	sc->admin_reply_ephase = exp_phase;
	mtx_lock_spin(&sc->admin_reply_lock);
	sc->admin_in_use = false;
	mtx_unlock_spin(&sc->admin_reply_lock);
	return num_adm_reply;
}
4403 
/*
 * mpi3mr_cmd_done - finish a SCSI command: unmap its DMA resources, cancel
 * the timeout, hand the CCB back to CAM, and release the command tracker.
 */
static void
mpi3mr_cmd_done(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
{
	/* Tear down DMA mappings set up for this request. */
	mpi3mr_unmap_request(sc, cmd);

	mtx_lock(&sc->mpi3mr_mtx);
	/* Cancel the command timeout if this context still owns it. */
	if (cmd->callout_owner) {
		callout_stop(&cmd->callout);
		cmd->callout_owner = false;
	}

	/* On an unrecoverable controller, fail the CCB back to CAM. */
	if (sc->unrecoverable)
		mpi3mr_set_ccbstatus(cmd->ccb, CAM_DEV_NOT_THERE);

	xpt_done(cmd->ccb);
	cmd->ccb = NULL;
	mtx_unlock(&sc->mpi3mr_mtx);
	mpi3mr_release_command(cmd);
}
4423 
4424 void mpi3mr_process_op_reply_desc(struct mpi3mr_softc *sc,
4425     Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
4426 {
4427 	U16 reply_desc_type, host_tag = 0;
4428 	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
4429 	U32 ioc_loginfo = 0;
4430 	Mpi3StatusReplyDescriptor_t *status_desc = NULL;
4431 	Mpi3AddressReplyDescriptor_t *addr_desc = NULL;
4432 	Mpi3SuccessReplyDescriptor_t *success_desc = NULL;
4433 	Mpi3SCSIIOReply_t *scsi_reply = NULL;
4434 	U8 *sense_buf = NULL;
4435 	U8 scsi_state = 0, scsi_status = 0, sense_state = 0;
4436 	U32 xfer_count = 0, sense_count =0, resp_data = 0;
4437 	struct mpi3mr_cmd *cm = NULL;
4438 	union ccb *ccb;
4439 	struct ccb_scsiio *csio;
4440 	struct mpi3mr_cam_softc *cam_sc;
4441 	U32 target_id;
4442 	U8 *scsi_cdb;
4443 	struct mpi3mr_target *target = NULL;
4444 	U32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
4445 	struct mpi3mr_throttle_group_info *tg = NULL;
4446 	U8 throttle_enabled_dev = 0;
4447 	static int ratelimit;
4448 
4449 	*reply_dma = 0;
4450 	reply_desc_type = reply_desc->ReplyFlags &
4451 			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
4452 	switch (reply_desc_type) {
4453 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
4454 		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
4455 		host_tag = status_desc->HostTag;
4456 		ioc_status = status_desc->IOCStatus;
4457 		if (ioc_status &
4458 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
4459 			ioc_loginfo = status_desc->IOCLogInfo;
4460 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
4461 		break;
4462 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
4463 		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
4464 		*reply_dma = addr_desc->ReplyFrameAddress;
4465 		scsi_reply = mpi3mr_get_reply_virt_addr(sc,
4466 		    *reply_dma);
4467 		if (scsi_reply == NULL) {
4468 			mpi3mr_dprint(sc, MPI3MR_ERROR, "scsi_reply is NULL, "
4469 			    "this shouldn't happen, reply_desc: %p\n",
4470 			    reply_desc);
4471 			goto out;
4472 		}
4473 
4474 		host_tag = scsi_reply->HostTag;
4475 		ioc_status = scsi_reply->IOCStatus;
4476 		scsi_status = scsi_reply->SCSIStatus;
4477 		scsi_state = scsi_reply->SCSIState;
4478 		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
4479 		xfer_count = scsi_reply->TransferCount;
4480 		sense_count = scsi_reply->SenseCount;
4481 		resp_data = scsi_reply->ResponseData;
4482 		sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
4483 		    scsi_reply->SenseDataBufferAddress);
4484 		if (ioc_status &
4485 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
4486 			ioc_loginfo = scsi_reply->IOCLogInfo;
4487 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
4488 		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
4489 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Ran out of sense buffers\n");
4490 
4491 		break;
4492 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
4493 		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
4494 		host_tag = success_desc->HostTag;
4495 
4496 	default:
4497 		break;
4498 	}
4499 
4500 	cm = sc->cmd_list[host_tag];
4501 
4502 	if (cm->state == MPI3MR_CMD_STATE_FREE)
4503 		goto out;
4504 
4505 	cam_sc = sc->cam_sc;
4506 	ccb = cm->ccb;
4507 	csio = &ccb->csio;
4508 	target_id = csio->ccb_h.target_id;
4509 
4510 	scsi_cdb = scsiio_cdb_ptr(csio);
4511 
4512 	target = mpi3mr_find_target_by_per_id(cam_sc, target_id);
4513 	if (sc->iot_enable) {
4514 		data_len_blks = csio->dxfer_len >> 9;
4515 
4516 		if (target) {
4517 			tg = target->throttle_group;
4518 			throttle_enabled_dev =
4519 				target->io_throttle_enabled;
4520 		}
4521 
4522 		if ((data_len_blks >= sc->io_throttle_data_length) &&
4523 		     throttle_enabled_dev) {
4524 			mpi3mr_atomic_sub(&sc->pend_large_data_sz, data_len_blks);
4525 			ioc_pend_data_len = mpi3mr_atomic_read(
4526 			    &sc->pend_large_data_sz);
4527 			if (tg) {
4528 				mpi3mr_atomic_sub(&tg->pend_large_data_sz,
4529 					data_len_blks);
4530 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
4531 				if (ratelimit % 1000) {
4532 					mpi3mr_dprint(sc, MPI3MR_IOT,
4533 						"large vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
4534 						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
4535 						    target->per_id,
4536 						    target->dev_handle,
4537 						    data_len_blks, ioc_pend_data_len,
4538 						    tg_pend_data_len,
4539 						    sc->io_throttle_low,
4540 						    tg->low);
4541 					ratelimit++;
4542 				}
4543 				if (tg->io_divert  && ((ioc_pend_data_len <=
4544 				    sc->io_throttle_low) &&
4545 				    (tg_pend_data_len <= tg->low))) {
4546 					tg->io_divert = 0;
4547 					mpi3mr_dprint(sc, MPI3MR_IOT,
4548 						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
4549 						target->per_id, tg->id);
4550 					mpi3mr_set_io_divert_for_all_vd_in_tg(
4551 					    sc, tg, 0);
4552 				}
4553 			} else {
4554 				if (ratelimit % 1000) {
4555 					mpi3mr_dprint(sc, MPI3MR_IOT,
4556 					    "large pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
4557 					    target->per_id,
4558 					    target->dev_handle,
4559 					    data_len_blks, ioc_pend_data_len,
4560 					    sc->io_throttle_low);
4561 					ratelimit++;
4562 				}
4563 
4564 				if (ioc_pend_data_len <= sc->io_throttle_low) {
4565 					target->io_divert = 0;
4566 					mpi3mr_dprint(sc, MPI3MR_IOT,
4567 						"PD: Coming out of divert perst_id(%d)\n",
4568 						target->per_id);
4569 				}
4570 			}
4571 
4572 			} else if (target->io_divert) {
4573 			ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
4574 			if (!tg) {
4575 				if (ratelimit % 1000) {
4576 					mpi3mr_dprint(sc, MPI3MR_IOT,
4577 					    "pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
4578 					    target->per_id,
4579 					    target->dev_handle,
4580 					    data_len_blks, ioc_pend_data_len,
4581 					    sc->io_throttle_low);
4582 					ratelimit++;
4583 				}
4584 
4585 				if ( ioc_pend_data_len <= sc->io_throttle_low) {
4586 					mpi3mr_dprint(sc, MPI3MR_IOT,
4587 						"PD: Coming out of divert perst_id(%d)\n",
4588 						target->per_id);
4589 					target->io_divert = 0;
4590 				}
4591 
4592 			} else if (ioc_pend_data_len <= sc->io_throttle_low) {
4593 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
4594 				if (ratelimit % 1000) {
4595 					mpi3mr_dprint(sc, MPI3MR_IOT,
4596 						"vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
4597 						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
4598 						    target->per_id,
4599 						    target->dev_handle,
4600 						    data_len_blks, ioc_pend_data_len,
4601 						    tg_pend_data_len,
4602 						    sc->io_throttle_low,
4603 						    tg->low);
4604 					ratelimit++;
4605 				}
4606 				if (tg->io_divert  && (tg_pend_data_len <= tg->low)) {
4607 					tg->io_divert = 0;
4608 					mpi3mr_dprint(sc, MPI3MR_IOT,
4609 						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
4610 						target->per_id, tg->id);
4611 					mpi3mr_set_io_divert_for_all_vd_in_tg(
4612 					    sc, tg, 0);
4613 				}
4614 
4615 			}
4616 		}
4617 	}
4618 
4619 	if (success_desc) {
4620 		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4621 		goto out_success;
4622 	}
4623 
4624 	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN
4625 	    && xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
4626 	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
4627 	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
4628 		ioc_status = MPI3_IOCSTATUS_SUCCESS;
4629 
4630 	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count
4631 	    && sense_buf) {
4632 		int sense_len, returned_sense_len;
4633 
4634 		returned_sense_len = min(le32toh(sense_count),
4635 		    sizeof(struct scsi_sense_data));
4636 		if (returned_sense_len < csio->sense_len)
4637 			csio->sense_resid = csio->sense_len -
4638 			    returned_sense_len;
4639 		else
4640 			csio->sense_resid = 0;
4641 
4642 		sense_len = min(returned_sense_len,
4643 		    csio->sense_len - csio->sense_resid);
4644 		bzero(&csio->sense_data, sizeof(csio->sense_data));
4645 		bcopy(sense_buf, &csio->sense_data, sense_len);
4646 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
4647 	}
4648 
4649 	switch (ioc_status) {
4650 	case MPI3_IOCSTATUS_BUSY:
4651 	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
4652 		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
4653 		break;
4654 	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4655 		/*
4656 		 * If devinfo is 0 this will be a volume.  In that case don't
4657 		 * tell CAM that the volume is not there.  We want volumes to
4658 		 * be enumerated until they are deleted/removed, not just
4659 		 * failed.
4660 		 */
4661 		if (cm->targ->devinfo == 0)
4662 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4663 		else
4664 			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
4665 		break;
4666 	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
4667 	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
4668 	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
4669 		mpi3mr_set_ccbstatus(ccb, CAM_SCSI_BUSY);
4670 		mpi3mr_dprint(sc, MPI3MR_TRACE,
4671 		    "func: %s line:%d tgt %u Hosttag %u loginfo %x\n",
4672 		    __func__, __LINE__,
4673 		    target_id, cm->hosttag,
4674 		    le32toh(scsi_reply->IOCLogInfo));
4675 		mpi3mr_dprint(sc, MPI3MR_TRACE,
4676 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
4677 		    scsi_reply->SCSIStatus, scsi_reply->SCSIState,
4678 		    le32toh(xfer_count));
4679 		break;
4680 	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
4681 		/* resid is ignored for this condition */
4682 		csio->resid = 0;
4683 		mpi3mr_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
4684 		break;
4685 	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
4686 		csio->resid = cm->length - le32toh(xfer_count);
4687 	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
4688 	case MPI3_IOCSTATUS_SUCCESS:
4689 		if ((scsi_reply->IOCStatus & MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK) ==
4690 		    MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR)
4691 			mpi3mr_dprint(sc, MPI3MR_XINFO, "func: %s line: %d recovered error\n",  __func__, __LINE__);
4692 
4693 		/* Completion failed at the transport level. */
4694 		if (scsi_reply->SCSIState & (MPI3_SCSI_STATE_NO_SCSI_STATUS |
4695 		    MPI3_SCSI_STATE_TERMINATED)) {
4696 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
4697 			break;
4698 		}
4699 
4700 		/* In a modern packetized environment, an autosense failure
4701 		 * implies that there's not much else that can be done to
4702 		 * recover the command.
4703 		 */
4704 		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
4705 			mpi3mr_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
4706 			break;
4707 		}
4708 
4709 		/*
4710 		 * Intentionally override the normal SCSI status reporting
4711 		 * for these two cases.  These are likely to happen in a
4712 		 * multi-initiator environment, and we want to make sure that
4713 		 * CAM retries these commands rather than fail them.
4714 		 */
4715 		if ((scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_COMMAND_TERMINATED) ||
4716 		    (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_TASK_ABORTED)) {
4717 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_ABORTED);
4718 			break;
4719 		}
4720 
4721 		/* Handle normal status and sense */
4722 		csio->scsi_status = scsi_reply->SCSIStatus;
4723 		if (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_GOOD)
4724 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4725 		else
4726 			mpi3mr_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
4727 
4728 		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
4729 			int sense_len, returned_sense_len;
4730 
4731 			returned_sense_len = min(le32toh(scsi_reply->SenseCount),
4732 			    sizeof(struct scsi_sense_data));
4733 			if (returned_sense_len < csio->sense_len)
4734 				csio->sense_resid = csio->sense_len -
4735 				    returned_sense_len;
4736 			else
4737 				csio->sense_resid = 0;
4738 
4739 			sense_len = min(returned_sense_len,
4740 			    csio->sense_len - csio->sense_resid);
4741 			bzero(&csio->sense_data, sizeof(csio->sense_data));
4742 			bcopy(cm->sense, &csio->sense_data, sense_len);
4743 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
4744 		}
4745 
4746 		break;
4747 	case MPI3_IOCSTATUS_INVALID_SGL:
4748 		mpi3mr_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
4749 		break;
4750 	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
4751 	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
4752 	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
4753 	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4754 	case MPI3_IOCSTATUS_INVALID_FUNCTION:
4755 	case MPI3_IOCSTATUS_INTERNAL_ERROR:
4756 	case MPI3_IOCSTATUS_INVALID_FIELD:
4757 	case MPI3_IOCSTATUS_INVALID_STATE:
4758 	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
4759 	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4760 	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
4761 	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4762 	default:
4763 		csio->resid = cm->length;
4764 		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
4765 		break;
4766 	}
4767 
4768 out_success:
4769 	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_CMP) {
4770 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
4771 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
4772 	}
4773 
4774 	mpi3mr_atomic_dec(&cm->targ->outstanding);
4775 	mpi3mr_cmd_done(sc, cm);
4776 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Completion IO path :"
4777 		" cdb[0]: %x targetid: 0x%x SMID: %x ioc_status: 0x%x ioc_loginfo: 0x%x scsi_status: 0x%x "
4778 		"scsi_state: 0x%x response_data: 0x%x\n", scsi_cdb[0], target_id, host_tag,
4779 		ioc_status, ioc_loginfo, scsi_status, scsi_state, resp_data);
4780 	mpi3mr_atomic_dec(&sc->fw_outstanding);
4781 out:
4782 
4783 	if (sense_buf)
4784 		mpi3mr_repost_sense_buf(sc,
4785 		    scsi_reply->SenseDataBufferAddress);
4786 	return;
4787 }
4788 
4789 /*
4790  * mpi3mr_complete_io_cmd:	ISR routine for IO commands
4791  * @sc:				Adapter's soft instance
4792  * @irq_ctx:			Driver's internal per IRQ structure
4793  *
4794  * This function processes IO command completions.
4795  */
int mpi3mr_complete_io_cmd(struct mpi3mr_softc *sc,
    struct mpi3mr_irq_context *irq_ctx)
{
	struct mpi3mr_op_reply_queue *op_reply_q = irq_ctx->op_reply_q;
	U32 exp_phase = op_reply_q->ephase;
	U32 reply_ci = op_reply_q->ci;
	U32 num_op_replies = 0;
	U64 reply_dma = 0;
	Mpi3DefaultReplyDescriptor_t *reply_desc;
	U16 req_qid = 0;

	/*
	 * Single-consumer guard: only one context may drain this operational
	 * reply queue at a time.  If another context already owns it
	 * (in_use set), return without processing anything.
	 */
	mtx_lock_spin(&op_reply_q->q_lock);
	if (op_reply_q->in_use == false) {
		op_reply_q->in_use = true;
		mtx_unlock_spin(&op_reply_q->q_lock);
	} else {
		mtx_unlock_spin(&op_reply_q->q_lock);
		return 0;
	}

	reply_desc = (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]:reply_desc: (%pa) reply_ci: %x"
		" reply_desc->ReplyFlags: 0x%x\n"
		"reply_q_base_phys: %#016jx reply_q_base: (%pa) exp_phase: %x\n",
		op_reply_q->qid, reply_desc, reply_ci, reply_desc->ReplyFlags, op_reply_q->q_base_phys,
		op_reply_q->q_base, exp_phase);

	/*
	 * A phase-bit mismatch on the first descriptor means the firmware has
	 * not posted anything new at the current consumer index; also bail
	 * out if the queue was never assigned a valid qid.  Release the
	 * in_use ownership taken above before returning.
	 */
	if (((reply_desc->ReplyFlags &
	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) || !op_reply_q->qid) {
		mtx_lock_spin(&op_reply_q->q_lock);
		op_reply_q->in_use = false;
		mtx_unlock_spin(&op_reply_q->q_lock);
		return 0;
	}

	/*
	 * Walk the reply ring until the phase bit flips, processing one
	 * descriptor per iteration.  The phase bit toggles on every wrap of
	 * the circular queue, which is how new entries are distinguished
	 * from stale ones without a separate producer index read.
	 */
	do {
		/* Mirror the firmware's view of the request queue CI so the
		 * submission path knows how much space has been freed. */
		req_qid = reply_desc->RequestQueueID;
		sc->op_req_q[req_qid - 1].ci =
		    reply_desc->RequestQueueCI;

		mpi3mr_process_op_reply_desc(sc, reply_desc, &reply_dma);
		mpi3mr_atomic_dec(&op_reply_q->pend_ios);
		/* Return the reply frame (if any) to the free pool. */
		if (reply_dma)
			mpi3mr_repost_reply_buf(sc, reply_dma);
		num_op_replies++;
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
		/* Stop at the first descriptor the firmware has not yet
		 * posted in the current phase. */
		if ((reply_desc->ReplyFlags &
		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);


	/* Publish the new consumer index to the controller, then record the
	 * ring state and drop queue ownership. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;
	mtx_lock_spin(&op_reply_q->q_lock);
	op_reply_q->in_use = false;
	mtx_unlock_spin(&op_reply_q->q_lock);
	return num_op_replies;
}
4861 
4862 /*
4863  * mpi3mr_isr:			Primary ISR function
4864  * privdata:			Driver's internal per IRQ structure
4865  *
4866  * This is driver's primary ISR function which is being called whenever any admin/IO
4867  * command completion.
4868  */
4869 void mpi3mr_isr(void *privdata)
4870 {
4871 	struct mpi3mr_irq_context *irq_ctx = (struct mpi3mr_irq_context *)privdata;
4872 	struct mpi3mr_softc *sc = irq_ctx->sc;
4873 	U16 msi_idx;
4874 
4875 	if (!irq_ctx)
4876 		return;
4877 
4878 	msi_idx = irq_ctx->msix_index;
4879 
4880 	if (!sc->intr_enabled)
4881 		return;
4882 
4883 	if (!msi_idx)
4884 		mpi3mr_complete_admin_cmd(sc);
4885 
4886 	if (irq_ctx->op_reply_q && irq_ctx->op_reply_q->qid) {
4887 		mpi3mr_complete_io_cmd(sc, irq_ctx);
4888 	}
4889 }
4890 
4891 /*
4892  * mpi3mr_alloc_requests - Allocates host commands
4893  * @sc: Adapter reference
4894  *
4895  * This function allocates controller supported host commands
4896  *
4897  * Return: 0 on success and proper error codes on failure
4898  */
int
mpi3mr_alloc_requests(struct mpi3mr_softc *sc)
{
	struct mpi3mr_cmd *cmd;
	int i, j, nsegs, ret;

	/* One shared DMA tag describing how IO data buffers may be mapped. */
	nsegs = MPI3MR_SG_DEPTH;
	ret = bus_dma_tag_create( sc->mpi3mr_parent_dmat,    /* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MAXPHYS,/* maxsize */
                                nsegs,			/* nsegments */
				MAXPHYS,/* maxsegsize */
                                BUS_DMA_ALLOCNOW,	/* flags */
                                busdma_lock_mutex,	/* lockfunc */
				&sc->io_lock,	/* lockarg */
				&sc->buffer_dmat);
	if (ret) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate buffer DMA tag ret: %d\n", ret);
		return (ENOMEM);
        }

	/*
	 * sc->cmd_list is an array of struct mpi3mr_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->cmd_list = malloc(sizeof(struct mpi3mr_cmd *) * sc->max_host_ios,
	    M_MPI3MR, M_NOWAIT | M_ZERO);

	if (!sc->cmd_list) {
		device_printf(sc->mpi3mr_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}

	/* Allocate each command tracker; on failure unwind the ones
	 * allocated so far so nothing leaks. */
	for (i = 0; i < sc->max_host_ios; i++) {
		sc->cmd_list[i] = malloc(sizeof(struct mpi3mr_cmd),
		    M_MPI3MR, M_NOWAIT | M_ZERO);
		if (!sc->cmd_list[i]) {
			for (j = 0; j < i; j++)
				free(sc->cmd_list[j], M_MPI3MR);
			free(sc->cmd_list, M_MPI3MR);
			sc->cmd_list = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * Initialize trackers and queue them on the free pool.  The loop
	 * starts at 1: cmd_list[0] is allocated above but never initialized
	 * or queued — hosttag 0 appears to be reserved (TODO confirm).
	 *
	 * NOTE(review): if bus_dmamap_create() fails partway through, the
	 * command trackers allocated above are not freed here; presumably
	 * the caller unwinds via mpi3mr_free_mem() — confirm against the
	 * attach path.
	 */
	for (i = 1; i < sc->max_host_ios; i++) {
		cmd = sc->cmd_list[i];
		cmd->hosttag = i;
		cmd->sc = sc;
		cmd->state = MPI3MR_CMD_STATE_BUSY;
		callout_init_mtx(&cmd->callout, &sc->mpi3mr_mtx, 0);
		cmd->ccb = NULL;
		TAILQ_INSERT_TAIL(&(sc->cmd_list_head), cmd, next);
		if (bus_dmamap_create(sc->buffer_dmat, 0, &cmd->dmamap))
			return ENOMEM;
	}
	return (0);
}
4961 
4962 /*
4963  * mpi3mr_get_command:		Get a coomand structure from free command pool
4964  * @sc:				Adapter soft instance
4965  * Return:			MPT command reference
4966  *
4967  * This function returns an MPT command to the caller.
4968  */
4969 struct mpi3mr_cmd *
4970 mpi3mr_get_command(struct mpi3mr_softc *sc)
4971 {
4972 	struct mpi3mr_cmd *cmd = NULL;
4973 
4974 	mtx_lock(&sc->cmd_pool_lock);
4975 	if (!TAILQ_EMPTY(&sc->cmd_list_head)) {
4976 		cmd = TAILQ_FIRST(&sc->cmd_list_head);
4977 		TAILQ_REMOVE(&sc->cmd_list_head, cmd, next);
4978 	} else {
4979 		goto out;
4980 	}
4981 
4982 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Get command SMID: 0x%x\n", cmd->hosttag);
4983 
4984 	memset((uint8_t *)&cmd->io_request, 0, MPI3MR_AREQ_FRAME_SZ);
4985 	cmd->data_dir = 0;
4986 	cmd->ccb = NULL;
4987 	cmd->targ = NULL;
4988 	cmd->max_segs = 0;
4989 	cmd->lun = 0;
4990 	cmd->state = MPI3MR_CMD_STATE_BUSY;
4991 	cmd->data = NULL;
4992 	cmd->length = 0;
4993 	cmd->out_len = 0;
4994 out:
4995 	mtx_unlock(&sc->cmd_pool_lock);
4996 	return cmd;
4997 }
4998 
4999 /*
5000  * mpi3mr_release_command:	Return a cmd to free command pool
5001  * input:			Command packet for return to free command pool
5002  *
5003  * This function returns an MPT command to the free command list.
5004  */
5005 void
5006 mpi3mr_release_command(struct mpi3mr_cmd *cmd)
5007 {
5008 	struct mpi3mr_softc *sc = cmd->sc;
5009 
5010 	mtx_lock(&sc->cmd_pool_lock);
5011 	TAILQ_INSERT_HEAD(&(sc->cmd_list_head), cmd, next);
5012 	cmd->state = MPI3MR_CMD_STATE_FREE;
5013 	cmd->req_qidx = 0;
5014 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Release command SMID: 0x%x\n", cmd->hosttag);
5015 	mtx_unlock(&sc->cmd_pool_lock);
5016 
5017 	return;
5018 }
5019 
5020  /**
5021  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
5022  * @sc: Adapter instance reference
5023  *
5024  * Free the DMA memory allocated for IOCTL handling purpose.
5025  *
5026  * Return: None
5027  */
5028 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_softc *sc)
5029 {
5030 	U16 i;
5031 	struct dma_memory_desc *mem_desc;
5032 
5033 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5034 		mem_desc = &sc->ioctl_sge[i];
5035 		if (mem_desc->addr && mem_desc->dma_addr) {
5036 			bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5037 			bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5038 			mem_desc->addr = NULL;
5039 			if (mem_desc->tag != NULL)
5040 				bus_dma_tag_destroy(mem_desc->tag);
5041 		}
5042 	}
5043 
5044 	mem_desc = &sc->ioctl_chain_sge;
5045 	if (mem_desc->addr && mem_desc->dma_addr) {
5046 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5047 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5048 		mem_desc->addr = NULL;
5049 		if (mem_desc->tag != NULL)
5050 			bus_dma_tag_destroy(mem_desc->tag);
5051 	}
5052 
5053 	mem_desc = &sc->ioctl_resp_sge;
5054 	if (mem_desc->addr && mem_desc->dma_addr) {
5055 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5056 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5057 		mem_desc->addr = NULL;
5058 		if (mem_desc->tag != NULL)
5059 			bus_dma_tag_destroy(mem_desc->tag);
5060 	}
5061 
5062 	sc->ioctl_sges_allocated = false;
5063 }
5064 
5065 /**
5066  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
5067  * @sc: Adapter instance reference
5068  *
5069  * This function allocates dmaable memory required to handle the
5070  * application issued MPI3 IOCTL requests.
5071  *
5072  * Return: None
5073  */
5074 void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc *sc)
5075 {
5076 	struct dma_memory_desc *mem_desc;
5077 	U16 i;
5078 
5079 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5080 		mem_desc = &sc->ioctl_sge[i];
5081 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
5082 
5083 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5084 					4, 0,			/* algnmnt, boundary */
5085 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
5086 					BUS_SPACE_MAXADDR,	/* highaddr */
5087 					NULL, NULL,		/* filter, filterarg */
5088 					mem_desc->size,		/* maxsize */
5089 					1,			/* nsegments */
5090 					mem_desc->size,		/* maxsegsize */
5091 					0,			/* flags */
5092 					NULL, NULL,		/* lockfunc, lockarg */
5093 					&mem_desc->tag)) {
5094 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5095 			goto out_failed;
5096 		}
5097 
5098 		if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5099 		    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5100 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5101 			goto out_failed;
5102 		}
5103 		bzero(mem_desc->addr, mem_desc->size);
5104 		bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5105 		    mpi3mr_memaddr_cb, &mem_desc->dma_addr, 0);
5106 
5107 		if (!mem_desc->addr)
5108 			goto out_failed;
5109 	}
5110 
5111 	mem_desc = &sc->ioctl_chain_sge;
5112 	mem_desc->size = MPI3MR_4K_PGSZ;
5113 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5114 				4, 0,			/* algnmnt, boundary */
5115 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
5116 				BUS_SPACE_MAXADDR,	/* highaddr */
5117 				NULL, NULL,		/* filter, filterarg */
5118 				mem_desc->size,		/* maxsize */
5119 				1,			/* nsegments */
5120 				mem_desc->size,		/* maxsegsize */
5121 				0,			/* flags */
5122 				NULL, NULL,		/* lockfunc, lockarg */
5123 				&mem_desc->tag)) {
5124 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5125 		goto out_failed;
5126 	}
5127 
5128 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5129 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5130 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5131 		goto out_failed;
5132 	}
5133 	bzero(mem_desc->addr, mem_desc->size);
5134 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5135 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, 0);
5136 
5137 	if (!mem_desc->addr)
5138 		goto out_failed;
5139 
5140 	mem_desc = &sc->ioctl_resp_sge;
5141 	mem_desc->size = MPI3MR_4K_PGSZ;
5142 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5143 				4, 0,			/* algnmnt, boundary */
5144 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
5145 				BUS_SPACE_MAXADDR,	/* highaddr */
5146 				NULL, NULL,		/* filter, filterarg */
5147 				mem_desc->size,		/* maxsize */
5148 				1,			/* nsegments */
5149 				mem_desc->size,		/* maxsegsize */
5150 				0,			/* flags */
5151 				NULL, NULL,		/* lockfunc, lockarg */
5152 				&mem_desc->tag)) {
5153 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5154 		goto out_failed;
5155 	}
5156 
5157 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5158 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5159 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5160 		goto out_failed;
5161 	}
5162 	bzero(mem_desc->addr, mem_desc->size);
5163 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5164 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, 0);
5165 
5166 	if (!mem_desc->addr)
5167 		goto out_failed;
5168 
5169 	sc->ioctl_sges_allocated = true;
5170 
5171 	return;
5172 out_failed:
5173 	printf("cannot allocate DMA memory for the mpt commands"
5174 	    "  from the applications, application interface for MPT command is disabled\n");
5175 	mpi3mr_free_ioctl_dma_memory(sc);
5176 }
5177 
5178 void
5179 mpi3mr_destory_mtx(struct mpi3mr_softc *sc)
5180 {
5181 	int i;
5182 	struct mpi3mr_op_req_queue *op_req_q;
5183 	struct mpi3mr_op_reply_queue *op_reply_q;
5184 
5185 	if (sc->admin_reply) {
5186 		if (mtx_initialized(&sc->admin_reply_lock))
5187 			mtx_destroy(&sc->admin_reply_lock);
5188 	}
5189 
5190 	if (sc->op_reply_q) {
5191 		for(i = 0; i < sc->num_queues; i++) {
5192 			op_reply_q = sc->op_reply_q + i;
5193 			if (mtx_initialized(&op_reply_q->q_lock))
5194 				mtx_destroy(&op_reply_q->q_lock);
5195 		}
5196 	}
5197 
5198 	if (sc->op_req_q) {
5199 		for(i = 0; i < sc->num_queues; i++) {
5200 			op_req_q = sc->op_req_q + i;
5201 			if (mtx_initialized(&op_req_q->q_lock))
5202 				mtx_destroy(&op_req_q->q_lock);
5203 		}
5204 	}
5205 
5206 	if (mtx_initialized(&sc->init_cmds.completion.lock))
5207 		mtx_destroy(&sc->init_cmds.completion.lock);
5208 
5209 	if (mtx_initialized(&sc->ioctl_cmds.completion.lock))
5210 		mtx_destroy(&sc->ioctl_cmds.completion.lock);
5211 
5212 	if (mtx_initialized(&sc->host_tm_cmds.completion.lock))
5213 		mtx_destroy(&sc->host_tm_cmds.completion.lock);
5214 
5215 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5216 		if (mtx_initialized(&sc->dev_rmhs_cmds[i].completion.lock))
5217 			mtx_destroy(&sc->dev_rmhs_cmds[i].completion.lock);
5218 	}
5219 
5220 	if (mtx_initialized(&sc->reset_mutex))
5221 		mtx_destroy(&sc->reset_mutex);
5222 
5223 	if (mtx_initialized(&sc->target_lock))
5224 		mtx_destroy(&sc->target_lock);
5225 
5226 	if (mtx_initialized(&sc->fwevt_lock))
5227 		mtx_destroy(&sc->fwevt_lock);
5228 
5229 	if (mtx_initialized(&sc->cmd_pool_lock))
5230 		mtx_destroy(&sc->cmd_pool_lock);
5231 
5232 	if (mtx_initialized(&sc->reply_free_q_lock))
5233 		mtx_destroy(&sc->reply_free_q_lock);
5234 
5235 	if (mtx_initialized(&sc->sense_buf_q_lock))
5236 		mtx_destroy(&sc->sense_buf_q_lock);
5237 
5238 	if (mtx_initialized(&sc->chain_buf_lock))
5239 		mtx_destroy(&sc->chain_buf_lock);
5240 
5241 	if (mtx_initialized(&sc->admin_req_lock))
5242 		mtx_destroy(&sc->admin_req_lock);
5243 
5244 	if (mtx_initialized(&sc->mpi3mr_mtx))
5245 		mtx_destroy(&sc->mpi3mr_mtx);
5246 }
5247 
5248 /**
5249  * mpi3mr_free_mem - Freeup adapter level data structures
5250  * @sc: Adapter reference
5251  *
5252  * Return: Nothing.
5253  */
5254 void
5255 mpi3mr_free_mem(struct mpi3mr_softc *sc)
5256 {
5257 	int i;
5258 	struct mpi3mr_op_req_queue *op_req_q;
5259 	struct mpi3mr_op_reply_queue *op_reply_q;
5260 	struct mpi3mr_irq_context *irq_ctx;
5261 
5262 	if (sc->cmd_list) {
5263 		for (i = 0; i < sc->max_host_ios; i++) {
5264 			free(sc->cmd_list[i], M_MPI3MR);
5265 		}
5266 		free(sc->cmd_list, M_MPI3MR);
5267 		sc->cmd_list = NULL;
5268 	}
5269 
5270 	if (sc->pel_seq_number && sc->pel_seq_number_dma) {
5271 		bus_dmamap_unload(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap);
5272 		bus_dmamem_free(sc->pel_seq_num_dmatag, sc->pel_seq_number, sc->pel_seq_num_dmamap);
5273 		sc->pel_seq_number = NULL;
5274 		if (sc->pel_seq_num_dmatag != NULL)
5275 			bus_dma_tag_destroy(sc->pel_seq_num_dmatag);
5276 	}
5277 
5278 	if (sc->throttle_groups) {
5279 		free(sc->throttle_groups, M_MPI3MR);
5280 		sc->throttle_groups = NULL;
5281 	}
5282 
5283 	/* Free up operational queues*/
5284 	if (sc->op_req_q) {
5285 		for (i = 0; i < sc->num_queues; i++) {
5286 			op_req_q = sc->op_req_q + i;
5287 			if (op_req_q->q_base && op_req_q->q_base_phys) {
5288 				bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
5289 				bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
5290 				op_req_q->q_base = NULL;
5291 				if (op_req_q->q_base_tag != NULL)
5292 					bus_dma_tag_destroy(op_req_q->q_base_tag);
5293 			}
5294 		}
5295 		free(sc->op_req_q, M_MPI3MR);
5296 		sc->op_req_q = NULL;
5297 	}
5298 
5299 	if (sc->op_reply_q) {
5300 		for (i = 0; i < sc->num_queues; i++) {
5301 			op_reply_q = sc->op_reply_q + i;
5302 			if (op_reply_q->q_base && op_reply_q->q_base_phys) {
5303 				bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
5304 				bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
5305 				op_reply_q->q_base = NULL;
5306 				if (op_reply_q->q_base_tag != NULL)
5307 					bus_dma_tag_destroy(op_reply_q->q_base_tag);
5308 			}
5309 		}
5310 		free(sc->op_reply_q, M_MPI3MR);
5311 		sc->op_reply_q = NULL;
5312 	}
5313 
5314 	/* Free up chain buffers*/
5315 	if (sc->chain_sgl_list) {
5316 		for (i = 0; i < sc->chain_buf_count; i++) {
5317 			if (sc->chain_sgl_list[i].buf && sc->chain_sgl_list[i].buf_phys) {
5318 				bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
5319 				bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf,
5320 						sc->chain_sgl_list[i].buf_dmamap);
5321 				sc->chain_sgl_list[i].buf = NULL;
5322 			}
5323 		}
5324 		if (sc->chain_sgl_list_tag != NULL)
5325 			bus_dma_tag_destroy(sc->chain_sgl_list_tag);
5326 		free(sc->chain_sgl_list, M_MPI3MR);
5327 		sc->chain_sgl_list = NULL;
5328 	}
5329 
5330 	if (sc->chain_bitmap) {
5331 		free(sc->chain_bitmap, M_MPI3MR);
5332 		sc->chain_bitmap = NULL;
5333 	}
5334 
5335 	for (i = 0; i < sc->msix_count; i++) {
5336 		irq_ctx = sc->irq_ctx + i;
5337 		if (irq_ctx)
5338 			irq_ctx->op_reply_q = NULL;
5339 	}
5340 
5341 	/* Free reply_buf_tag */
5342 	if (sc->reply_buf && sc->reply_buf_phys) {
5343 		bus_dmamap_unload(sc->reply_buf_tag, sc->reply_buf_dmamap);
5344 		bus_dmamem_free(sc->reply_buf_tag, sc->reply_buf,
5345 				sc->reply_buf_dmamap);
5346 		sc->reply_buf = NULL;
5347 		if (sc->reply_buf_tag != NULL)
5348 			bus_dma_tag_destroy(sc->reply_buf_tag);
5349 	}
5350 
5351 	/* Free reply_free_q_tag */
5352 	if (sc->reply_free_q && sc->reply_free_q_phys) {
5353 		bus_dmamap_unload(sc->reply_free_q_tag, sc->reply_free_q_dmamap);
5354 		bus_dmamem_free(sc->reply_free_q_tag, sc->reply_free_q,
5355 				sc->reply_free_q_dmamap);
5356 		sc->reply_free_q = NULL;
5357 		if (sc->reply_free_q_tag != NULL)
5358 			bus_dma_tag_destroy(sc->reply_free_q_tag);
5359 	}
5360 
5361 	/* Free sense_buf_tag */
5362 	if (sc->sense_buf && sc->sense_buf_phys) {
5363 		bus_dmamap_unload(sc->sense_buf_tag, sc->sense_buf_dmamap);
5364 		bus_dmamem_free(sc->sense_buf_tag, sc->sense_buf,
5365 				sc->sense_buf_dmamap);
5366 		sc->sense_buf = NULL;
5367 		if (sc->sense_buf_tag != NULL)
5368 			bus_dma_tag_destroy(sc->sense_buf_tag);
5369 	}
5370 
5371 	/* Free sense_buf_q_tag */
5372 	if (sc->sense_buf_q && sc->sense_buf_q_phys) {
5373 		bus_dmamap_unload(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap);
5374 		bus_dmamem_free(sc->sense_buf_q_tag, sc->sense_buf_q,
5375 				sc->sense_buf_q_dmamap);
5376 		sc->sense_buf_q = NULL;
5377 		if (sc->sense_buf_q_tag != NULL)
5378 			bus_dma_tag_destroy(sc->sense_buf_q_tag);
5379 	}
5380 
5381 	/* Free up internal(non-IO) commands*/
5382 	if (sc->init_cmds.reply) {
5383 		free(sc->init_cmds.reply, M_MPI3MR);
5384 		sc->init_cmds.reply = NULL;
5385 	}
5386 
5387 	if (sc->ioctl_cmds.reply) {
5388 		free(sc->ioctl_cmds.reply, M_MPI3MR);
5389 		sc->ioctl_cmds.reply = NULL;
5390 	}
5391 
5392 	if (sc->pel_cmds.reply) {
5393 		free(sc->pel_cmds.reply, M_MPI3MR);
5394 		sc->pel_cmds.reply = NULL;
5395 	}
5396 
5397 	if (sc->pel_abort_cmd.reply) {
5398 		free(sc->pel_abort_cmd.reply, M_MPI3MR);
5399 		sc->pel_abort_cmd.reply = NULL;
5400 	}
5401 
5402 	if (sc->host_tm_cmds.reply) {
5403 		free(sc->host_tm_cmds.reply, M_MPI3MR);
5404 		sc->host_tm_cmds.reply = NULL;
5405 	}
5406 
5407 	if (sc->log_data_buffer) {
5408 		free(sc->log_data_buffer, M_MPI3MR);
5409 		sc->log_data_buffer = NULL;
5410 	}
5411 
5412 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5413 		if (sc->dev_rmhs_cmds[i].reply) {
5414 			free(sc->dev_rmhs_cmds[i].reply, M_MPI3MR);
5415 			sc->dev_rmhs_cmds[i].reply = NULL;
5416 		}
5417 	}
5418 
5419 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5420 		if (sc->evtack_cmds[i].reply) {
5421 			free(sc->evtack_cmds[i].reply, M_MPI3MR);
5422 			sc->evtack_cmds[i].reply = NULL;
5423 		}
5424 	}
5425 
5426 	if (sc->removepend_bitmap) {
5427 		free(sc->removepend_bitmap, M_MPI3MR);
5428 		sc->removepend_bitmap = NULL;
5429 	}
5430 
5431 	if (sc->devrem_bitmap) {
5432 		free(sc->devrem_bitmap, M_MPI3MR);
5433 		sc->devrem_bitmap = NULL;
5434 	}
5435 
5436 	if (sc->evtack_cmds_bitmap) {
5437 		free(sc->evtack_cmds_bitmap, M_MPI3MR);
5438 		sc->evtack_cmds_bitmap = NULL;
5439 	}
5440 
5441 	/* Free Admin reply*/
5442 	if (sc->admin_reply && sc->admin_reply_phys) {
5443 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
5444 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
5445 				sc->admin_reply_dmamap);
5446 		sc->admin_reply = NULL;
5447 		if (sc->admin_reply_tag != NULL)
5448 			bus_dma_tag_destroy(sc->admin_reply_tag);
5449 	}
5450 
5451 	/* Free Admin request*/
5452 	if (sc->admin_req && sc->admin_req_phys) {
5453 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
5454 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
5455 				sc->admin_req_dmamap);
5456 		sc->admin_req = NULL;
5457 		if (sc->admin_req_tag != NULL)
5458 			bus_dma_tag_destroy(sc->admin_req_tag);
5459 	}
5460 	mpi3mr_free_ioctl_dma_memory(sc);
5461 
5462 }
5463 
5464 /**
5465  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5466  * @sc: Adapter instance reference
5467  * @cmdptr: Internal command tracker
5468  *
5469  * Complete an internal driver commands with state indicating it
5470  * is completed due to reset.
5471  *
5472  * Return: Nothing.
5473  */
5474 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_softc *sc,
5475 	struct mpi3mr_drvr_cmd *cmdptr)
5476 {
5477 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5478 		cmdptr->state |= MPI3MR_CMD_RESET;
5479 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5480 		if (cmdptr->is_waiting) {
5481 			complete(&cmdptr->completion);
5482 			cmdptr->is_waiting = 0;
5483 		} else if (cmdptr->callback)
5484 			cmdptr->callback(sc, cmdptr);
5485 	}
5486 }
5487 
5488 /**
5489  * mpi3mr_flush_drv_cmds - Flush internal driver commands
5490  * @sc: Adapter instance reference
5491  *
5492  * Flush all internal driver commands post reset
5493  *
5494  * Return: Nothing.
5495  */
5496 static void mpi3mr_flush_drv_cmds(struct mpi3mr_softc *sc)
5497 {
5498 	int i = 0;
5499 	struct mpi3mr_drvr_cmd *cmdptr;
5500 
5501 	cmdptr = &sc->init_cmds;
5502 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5503 
5504 	cmdptr = &sc->ioctl_cmds;
5505 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5506 
5507 	cmdptr = &sc->host_tm_cmds;
5508 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5509 
5510 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5511 		cmdptr = &sc->dev_rmhs_cmds[i];
5512 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5513 	}
5514 
5515 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5516 		cmdptr = &sc->evtack_cmds[i];
5517 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5518 	}
5519 
5520 	cmdptr = &sc->pel_cmds;
5521 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5522 
5523 	cmdptr = &sc->pel_abort_cmd;
5524 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5525 }
5526 
5527 
5528 /**
5529  * mpi3mr_memset_buffers - memset memory for a controller
5530  * @sc: Adapter instance reference
5531  *
5532  * clear all the memory allocated for a controller, typically
5533  * called post reset to reuse the memory allocated during the
5534  * controller init.
5535  *
5536  * Return: Nothing.
5537  */
5538 static void mpi3mr_memset_buffers(struct mpi3mr_softc *sc)
5539 {
5540 	U16 i;
5541 	struct mpi3mr_throttle_group_info *tg;
5542 
5543 	memset(sc->admin_req, 0, sc->admin_req_q_sz);
5544 	memset(sc->admin_reply, 0, sc->admin_reply_q_sz);
5545 
5546 	memset(sc->init_cmds.reply, 0, sc->reply_sz);
5547 	memset(sc->ioctl_cmds.reply, 0, sc->reply_sz);
5548 	memset(sc->host_tm_cmds.reply, 0, sc->reply_sz);
5549 	memset(sc->pel_cmds.reply, 0, sc->reply_sz);
5550 	memset(sc->pel_abort_cmd.reply, 0, sc->reply_sz);
5551 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5552 		memset(sc->dev_rmhs_cmds[i].reply, 0, sc->reply_sz);
5553 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5554 		memset(sc->evtack_cmds[i].reply, 0, sc->reply_sz);
5555 	memset(sc->removepend_bitmap, 0, sc->dev_handle_bitmap_sz);
5556 	memset(sc->devrem_bitmap, 0, sc->devrem_bitmap_sz);
5557 	memset(sc->evtack_cmds_bitmap, 0, sc->evtack_cmds_bitmap_sz);
5558 
5559 	for (i = 0; i < sc->num_queues; i++) {
5560 		sc->op_reply_q[i].qid = 0;
5561 		sc->op_reply_q[i].ci = 0;
5562 		sc->op_reply_q[i].num_replies = 0;
5563 		sc->op_reply_q[i].ephase = 0;
5564 		mpi3mr_atomic_set(&sc->op_reply_q[i].pend_ios, 0);
5565 		memset(sc->op_reply_q[i].q_base, 0, sc->op_reply_q[i].qsz);
5566 
5567 		sc->op_req_q[i].ci = 0;
5568 		sc->op_req_q[i].pi = 0;
5569 		sc->op_req_q[i].num_reqs = 0;
5570 		sc->op_req_q[i].qid = 0;
5571 		sc->op_req_q[i].reply_qid = 0;
5572 		memset(sc->op_req_q[i].q_base, 0, sc->op_req_q[i].qsz);
5573 	}
5574 
5575 	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
5576 	if (sc->throttle_groups) {
5577 		tg = sc->throttle_groups;
5578 		for (i = 0; i < sc->num_io_throttle_group; i++, tg++) {
5579 			tg->id = 0;
5580 			tg->fw_qd = 0;
5581 			tg->modified_qd = 0;
5582 			tg->io_divert= 0;
5583 			tg->high = 0;
5584 			tg->low = 0;
5585 			mpi3mr_atomic_set(&tg->pend_large_data_sz, 0);
5586 		}
5587  	}
5588 }
5589 
5590 /**
5591  * mpi3mr_invalidate_devhandles -Invalidate device handles
5592  * @sc: Adapter instance reference
5593  *
5594  * Invalidate the device handles in the target device structures
5595  * . Called post reset prior to reinitializing the controller.
5596  *
5597  * Return: Nothing.
5598  */
5599 static void mpi3mr_invalidate_devhandles(struct mpi3mr_softc *sc)
5600 {
5601 	struct mpi3mr_target *target = NULL;
5602 
5603 	mtx_lock_spin(&sc->target_lock);
5604 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5605 		if (target) {
5606 			target->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
5607 			target->io_throttle_enabled = 0;
5608 			target->io_divert = 0;
5609 			target->throttle_group = NULL;
5610 		}
5611 	}
5612 	mtx_unlock_spin(&sc->target_lock);
5613 }
5614 
5615 /**
5616  * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
5617  * @sc: Adapter instance reference
5618  *
5619  * This is executed post controller reset to identify any
5620  * missing devices during reset and remove from the upper layers
5621  * or expose any newly detected device to the upper layers.
5622  *
5623  * Return: Nothing.
5624  */
5625 
5626 static void mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc *sc)
5627 {
5628 	struct mpi3mr_target *target = NULL;
5629 	struct mpi3mr_target *target_temp = NULL;
5630 
5631 	TAILQ_FOREACH_SAFE(target, &sc->cam_sc->tgt_list, tgt_next, target_temp) {
5632 		if (target->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
5633 			if (target->exposed_to_os)
5634 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
5635 			mpi3mr_remove_device_from_list(sc, target, true);
5636 		}
5637 	}
5638 
5639 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5640 		if ((target->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
5641 		    !target->is_hidden && !target->exposed_to_os) {
5642 			mpi3mr_add_device(sc, target->per_id);
5643 		}
5644 	}
5645 
5646 }
5647 
5648 static void mpi3mr_flush_io(struct mpi3mr_softc *sc)
5649 {
5650 	int i;
5651 	struct mpi3mr_cmd *cmd = NULL;
5652 	union ccb *ccb = NULL;
5653 
5654 	for (i = 0; i < sc->max_host_ios; i++) {
5655 		cmd = sc->cmd_list[i];
5656 
5657 		if (cmd && cmd->ccb) {
5658 			if (cmd->callout_owner) {
5659 				ccb = (union ccb *)(cmd->ccb);
5660 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
5661 				mpi3mr_cmd_done(sc, cmd);
5662 			} else {
5663 				cmd->ccb = NULL;
5664 				mpi3mr_release_command(cmd);
5665 			}
5666 		}
5667 	}
5668 }
5669 /**
5670  * mpi3mr_clear_reset_history - Clear reset history
5671  * @sc: Adapter instance reference
5672  *
5673  * Write the reset history bit in IOC Status to clear the bit,
5674  * if it is already set.
5675  *
5676  * Return: Nothing.
5677  */
5678 static inline void mpi3mr_clear_reset_history(struct mpi3mr_softc *sc)
5679 {
5680 	U32 ioc_status;
5681 
5682 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5683 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
5684 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
5685 }
5686 
5687 /**
5688  * mpi3mr_set_diagsave - Set diag save bit for snapdump
5689  * @sc: Adapter reference
5690  *
5691  * Set diag save bit in IOC configuration register to enable
5692  * snapdump.
5693  *
5694  * Return: Nothing.
5695  */
5696 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc)
5697 {
5698 	U32 ioc_config;
5699 
5700 	ioc_config =
5701 	    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5702 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
5703 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
5704 }
5705 
5706 /**
5707  * mpi3mr_issue_reset - Issue reset to the controller
5708  * @sc: Adapter reference
5709  * @reset_type: Reset type
5710  * @reset_reason: Reset reason code
5711  *
5712  * Unlock the host diagnostic registers and write the specific
5713  * reset type to that, wait for reset acknowledgement from the
5714  * controller, if the reset is not successful retry for the
5715  * predefined number of times.
5716  *
5717  * Return: 0 on success, non-zero on failure.
5718  */
5719 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
5720 	U32 reset_reason)
5721 {
5722 	int retval = -1;
5723 	U8 unlock_retry_count = 0;
5724 	U32 host_diagnostic, ioc_status, ioc_config;
5725 	U32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
5726 
5727 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
5728 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
5729 		return retval;
5730 	if (sc->unrecoverable)
5731 		return retval;
5732 
5733 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
5734 		retval = 0;
5735 		return retval;
5736 	}
5737 
5738 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s reset due to %s(0x%x)\n",
5739 	    mpi3mr_reset_type_name(reset_type),
5740 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
5741 
5742 	mpi3mr_clear_reset_history(sc);
5743 	do {
5744 		mpi3mr_dprint(sc, MPI3MR_INFO,
5745 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
5746 		    ++unlock_retry_count);
5747 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
5748 			mpi3mr_dprint(sc, MPI3MR_ERROR,
5749 			    "%s reset failed! due to host diag register unlock failure"
5750 			    "host_diagnostic(0x%08x)\n", mpi3mr_reset_type_name(reset_type),
5751 			    host_diagnostic);
5752 			sc->unrecoverable = 1;
5753 			return retval;
5754 		}
5755 
5756 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5757 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH);
5758 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5759 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST);
5760 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5761 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5762 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5763 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD);
5764 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5765 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH);
5766 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5767 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH);
5768 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5769 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH);
5770 
5771 		DELAY(1000); /* delay in usec */
5772 		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
5773 		mpi3mr_dprint(sc, MPI3MR_INFO,
5774 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
5775 		    unlock_retry_count, host_diagnostic);
5776 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
5777 
5778 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
5779 	mpi3mr_regwrite(sc, MPI3_SYSIF_HOST_DIAG_OFFSET, host_diagnostic | reset_type);
5780 
5781 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
5782 		do {
5783 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5784 			if (ioc_status &
5785 			    MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
5786 				ioc_config =
5787 				    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5788 				if (mpi3mr_soft_reset_success(ioc_status,
5789 				    ioc_config)) {
5790 					mpi3mr_clear_reset_history(sc);
5791 					retval = 0;
5792 					break;
5793 				}
5794 			}
5795 			DELAY(100 * 1000);
5796 		} while (--timeout);
5797 	} else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
5798 		do {
5799 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5800 			if (mpi3mr_diagfault_success(sc, ioc_status)) {
5801 				retval = 0;
5802 				break;
5803 			}
5804 			DELAY(100 * 1000);
5805 		} while (--timeout);
5806 	}
5807 
5808 	mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5809 		MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5810 
5811 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5812 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5813 
5814 	mpi3mr_dprint(sc, MPI3MR_INFO,
5815 	    "IOC Status/Config after %s reset is (0x%x)/(0x%x)\n",
5816 	    !retval ? "successful":"failed", ioc_status,
5817 	    ioc_config);
5818 
5819 	if (retval)
5820 		sc->unrecoverable = 1;
5821 
5822 	return retval;
5823 }
5824 
/**
 * mpi3mr_cleanup_event_taskq - Block and drain the event taskqueue
 * @sc: Adapter instance reference
 *
 * Quiesce the CAM event taskqueue so no event handler runs while
 * a reset is in progress. The queue is unblocked again by the
 * caller once the reset completes.
 *
 * Return: Nothing.
 */
inline void mpi3mr_cleanup_event_taskq(struct mpi3mr_softc *sc)
{
	/*
	 * Block the taskqueue before draining. This means any new tasks won't
	 * be queued to a worker thread. But it doesn't stop the current workers
	 * that are running. taskqueue_drain waits for those correctly in the
	 * case of thread backed taskqueues.
	 */
	taskqueue_block(sc->cam_sc->ev_tq);
	taskqueue_drain(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
}
5836 
5837 /**
5838  * mpi3mr_soft_reset_handler - Reset the controller
5839  * @sc: Adapter instance reference
5840  * @reset_reason: Reset reason code
5841  * @snapdump: snapdump enable/disbale bit
5842  *
5843  * This is an handler for recovering controller by issuing soft
5844  * reset or diag fault reset. This is a blocking function and
5845  * when one reset is executed if any other resets they will be
5846  * blocked. All IOCTLs/IO will be blocked during the reset. If
5847  * controller reset is successful then the controller will be
5848  * reinitalized, otherwise the controller will be marked as not
5849  * recoverable
5850  *
5851  * Return: 0 on success, non-zero on failure.
5852  */
5853 int mpi3mr_soft_reset_handler(struct mpi3mr_softc *sc,
5854 	U32 reset_reason, bool snapdump)
5855 {
5856 	int retval = 0, i = 0;
5857 	enum mpi3mr_iocstate ioc_state;
5858 
5859 	mpi3mr_dprint(sc, MPI3MR_INFO, "soft reset invoked: reason code: %s\n",
5860 	    mpi3mr_reset_rc_name(reset_reason));
5861 
5862 	if ((reset_reason == MPI3MR_RESET_FROM_IOCTL) &&
5863 	     (sc->reset.ioctl_reset_snapdump != true))
5864 		snapdump = false;
5865 
5866 	mpi3mr_dprint(sc, MPI3MR_INFO,
5867 	    "soft_reset_handler: wait if diag save is in progress\n");
5868 	while (sc->diagsave_timeout)
5869 		DELAY(1000 * 1000);
5870 
5871 	ioc_state = mpi3mr_get_iocstate(sc);
5872 	if (ioc_state == MRIOC_STATE_UNRECOVERABLE) {
5873 		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is in unrecoverable state, exit\n");
5874 		sc->reset.type = MPI3MR_NO_RESET;
5875 		sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
5876 		sc->reset.status = -1;
5877 		sc->reset.ioctl_reset_snapdump = false;
5878 		return -1;
5879 	}
5880 
5881 	if (sc->reset_in_progress) {
5882 		mpi3mr_dprint(sc, MPI3MR_INFO, "reset is already in progress, exit\n");
5883 		return -1;
5884 	}
5885 
5886 	/* Pause IOs, drain and block the event taskqueue */
5887 	xpt_freeze_simq(sc->cam_sc->sim, 1);
5888 
5889 	mpi3mr_cleanup_event_taskq(sc);
5890 
5891 	sc->reset_in_progress = 1;
5892 	sc->block_ioctls = 1;
5893 
5894 	while (mpi3mr_atomic_read(&sc->pend_ioctls) && (i < PEND_IOCTLS_COMP_WAIT_TIME)) {
5895 		ioc_state = mpi3mr_get_iocstate(sc);
5896 		if (ioc_state == MRIOC_STATE_FAULT)
5897 			break;
5898 		i++;
5899 		if (!(i % 5)) {
5900 			mpi3mr_dprint(sc, MPI3MR_INFO,
5901 			    "[%2ds]waiting for IOCTL to be finished from %s\n", i, __func__);
5902 		}
5903 		DELAY(1000 * 1000);
5904 	}
5905 
5906 	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
5907 	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
5908 	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
5909 
5910 		mpi3mr_dprint(sc, MPI3MR_INFO, "Turn off events prior to reset\n");
5911 
5912 		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5913 			sc->event_masks[i] = -1;
5914 		mpi3mr_issue_event_notification(sc);
5915 	}
5916 
5917 	mpi3mr_disable_interrupts(sc);
5918 
5919 	if (snapdump)
5920 		mpi3mr_trigger_snapdump(sc, reset_reason);
5921 
5922 	retval = mpi3mr_issue_reset(sc,
5923 	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
5924 	if (retval) {
5925 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to issue soft reset to the ioc\n");
5926 		goto out;
5927 	}
5928 
5929 	mpi3mr_flush_drv_cmds(sc);
5930 	mpi3mr_flush_io(sc);
5931 	mpi3mr_invalidate_devhandles(sc);
5932 	mpi3mr_memset_buffers(sc);
5933 
5934 	if (sc->prepare_for_reset) {
5935 		sc->prepare_for_reset = 0;
5936 		sc->prepare_for_reset_timeout_counter = 0;
5937 	}
5938 
5939 	retval = mpi3mr_initialize_ioc(sc, MPI3MR_INIT_TYPE_RESET);
5940 	if (retval) {
5941 		mpi3mr_dprint(sc, MPI3MR_ERROR, "reinit after soft reset failed: reason %d\n",
5942 		    reset_reason);
5943 		goto out;
5944 	}
5945 
5946 	DELAY((1000 * 1000) * 10);
5947 out:
5948 	if (!retval) {
5949 		sc->diagsave_timeout = 0;
5950 		sc->reset_in_progress = 0;
5951 		mpi3mr_rfresh_tgtdevs(sc);
5952 		sc->ts_update_counter = 0;
5953 		sc->block_ioctls = 0;
5954 		sc->pel_abort_requested = 0;
5955 		if (sc->pel_wait_pend) {
5956 			sc->pel_cmds.retry_count = 0;
5957 			mpi3mr_issue_pel_wait(sc, &sc->pel_cmds);
5958 			mpi3mr_app_send_aen(sc);
5959 		}
5960 	} else {
5961 		mpi3mr_issue_reset(sc,
5962 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
5963 		sc->unrecoverable = 1;
5964 		sc->reset_in_progress = 0;
5965 	}
5966 
5967 	mpi3mr_dprint(sc, MPI3MR_INFO, "Soft Reset: %s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
5968 
5969 	taskqueue_unblock(sc->cam_sc->ev_tq);
5970 	xpt_release_simq(sc->cam_sc->sim, 1);
5971 
5972 	sc->reset.type = MPI3MR_NO_RESET;
5973 	sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
5974 	sc->reset.status = retval;
5975 	sc->reset.ioctl_reset_snapdump = false;
5976 
5977 	return retval;
5978 }
5979 
5980 /**
5981  * mpi3mr_issue_ioc_shutdown - shutdown controller
5982  * @sc: Adapter instance reference
5983  *
5984  * Send shutodwn notification to the controller and wait for the
5985  * shutdown_timeout for it to be completed.
5986  *
5987  * Return: Nothing.
5988  */
5989 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_softc *sc)
5990 {
5991 	U32 ioc_config, ioc_status;
5992 	U8 retval = 1, retry = 0;
5993 	U32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
5994 
5995 	mpi3mr_dprint(sc, MPI3MR_INFO, "sending shutdown notification\n");
5996 	if (sc->unrecoverable) {
5997 		mpi3mr_dprint(sc, MPI3MR_ERROR,
5998 		    "controller is unrecoverable, shutdown not issued\n");
5999 		return;
6000 	}
6001 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6002 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
6003 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
6004 		mpi3mr_dprint(sc, MPI3MR_ERROR, "shutdown already in progress\n");
6005 		return;
6006 	}
6007 
6008 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6009 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
6010 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
6011 
6012 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
6013 
6014 	if (sc->facts.shutdown_timeout)
6015 		timeout = sc->facts.shutdown_timeout * 10;
6016 
6017 	do {
6018 		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6019 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
6020 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
6021 			retval = 0;
6022 			break;
6023 		}
6024 
6025 		if (sc->unrecoverable)
6026 			break;
6027 
6028 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
6029 			mpi3mr_print_fault_info(sc);
6030 
6031 			if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT)
6032 				break;
6033 
6034 			if (mpi3mr_issue_reset(sc,
6035 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
6036 			    MPI3MR_RESET_FROM_CTLR_CLEANUP))
6037 				break;
6038 
6039 			ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6040 			ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
6041 			ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
6042 
6043 			mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
6044 
6045 			if (sc->facts.shutdown_timeout)
6046 				timeout = sc->facts.shutdown_timeout * 10;
6047 
6048 			retry++;
6049 		}
6050 
6051                 DELAY(100 * 1000);
6052 
6053 	} while (--timeout);
6054 
6055 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6056 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6057 
6058 	if (retval) {
6059 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
6060 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
6061 			mpi3mr_dprint(sc, MPI3MR_ERROR,
6062 			    "shutdown still in progress after timeout\n");
6063 	}
6064 
6065 	mpi3mr_dprint(sc, MPI3MR_INFO,
6066 	    "ioc_status/ioc_config after %s shutdown is (0x%x)/(0x%x)\n",
6067 	    (!retval)?"successful":"failed", ioc_status,
6068 	    ioc_config);
6069 }
6070 
6071 /**
6072  * mpi3mr_cleanup_ioc - Cleanup controller
6073  * @sc: Adapter instance reference
6074 
6075  * controller cleanup handler, Message unit reset or soft reset
6076  * and shutdown notification is issued to the controller.
6077  *
6078  * Return: Nothing.
6079  */
6080 void mpi3mr_cleanup_ioc(struct mpi3mr_softc *sc)
6081 {
6082 	enum mpi3mr_iocstate ioc_state;
6083 
6084 	mpi3mr_dprint(sc, MPI3MR_INFO, "cleaning up the controller\n");
6085 	mpi3mr_disable_interrupts(sc);
6086 
6087 	ioc_state = mpi3mr_get_iocstate(sc);
6088 
6089 	if ((!sc->unrecoverable) && (!sc->reset_in_progress) &&
6090 	    (ioc_state == MRIOC_STATE_READY)) {
6091 		if (mpi3mr_mur_ioc(sc,
6092 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
6093 			mpi3mr_issue_reset(sc,
6094 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
6095 			    MPI3MR_RESET_FROM_MUR_FAILURE);
6096 		mpi3mr_issue_ioc_shutdown(sc);
6097 	}
6098 
6099 	mpi3mr_dprint(sc, MPI3MR_INFO, "controller cleanup completed\n");
6100 }
6101