Lines Matching +full:firmware +full:- +full:initialised

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
8 * Copyright 1994-2009 The FreeBSD Project.
88 0, "Force a firmware reset condition");
145 * mfi_tbolt_adp_reset - For controller reset
168 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, " in mfi_tbolt_adp_reset()
175 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%#x\n", HostDiag); in mfi_tbolt_adp_reset()
187 device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, " in mfi_tbolt_adp_reset()
203 sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE; in mfi_tbolt_init_globals()
204 sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE; in mfi_tbolt_init_globals()
208 * (size of the Message - Raid SCSI IO message size (except SGE)) in mfi_tbolt_init_globals()
210 * (0x100 - (0x90 - 0x10)) / 0x10 = 8 in mfi_tbolt_init_globals()
212 sc->max_SGEs_in_main_message = in mfi_tbolt_init_globals()
213 (uint8_t)((sc->raid_io_msg_size in mfi_tbolt_init_globals()
214 - (sizeof(struct mfi_mpi2_request_raid_scsi_io) in mfi_tbolt_init_globals()
215 - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION)); in mfi_tbolt_init_globals()
217 * (Command frame size allocated in SRB ext - Raid SCSI IO message size) in mfi_tbolt_init_globals()
219 * (1280 - 256) / 16 = 64 in mfi_tbolt_init_globals()
221 sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE in mfi_tbolt_init_globals()
222 - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION); in mfi_tbolt_init_globals()
224 * (0x08 - 1) + 0x40 - 0x01 = 0x46; one is left for command in mfi_tbolt_init_globals()
227 sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1) in mfi_tbolt_init_globals()
228 + sc->max_SGEs_in_chain_message - 1; in mfi_tbolt_init_globals()
231 * (0x100 - 0x10) / 0x10 = 0xF (15) in mfi_tbolt_init_globals()
233 sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size in mfi_tbolt_init_globals()
234 - sizeof(MPI2_SGE_IO_UNION))/16; in mfi_tbolt_init_globals()
235 sc->chain_offset_value_for_mpt_ptmsg in mfi_tbolt_init_globals()
237 sc->mfi_cmd_pool_tbolt = NULL; in mfi_tbolt_init_globals()
238 sc->request_desc_pool = NULL; in mfi_tbolt_init_globals()
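
The SGE bookkeeping above follows directly from the frame sizes quoted in the comments. Below is a minimal user-space recap of that arithmetic, assuming the sizes named in those comments (0x100-byte message, 0x90-byte RAID SCSI IO request, 0x10-byte SGE union, 1280-byte command slot); the macro names are illustrative stand-ins for the driver's real constants:

    #include <stdio.h>

    /* Sizes taken from the comments above; illustrative only. */
    #define MSG_SIZE      0x100   /* MEGASAS_THUNDERBOLT_NEW_MSG_SIZE            */
    #define RAID_IO_SIZE  0x90    /* sizeof(struct mfi_mpi2_request_raid_scsi_io) */
    #define SGE_SIZE      0x10    /* sizeof(MPI2_SGE_IO_UNION)                    */
    #define CMD_SLOT_SIZE 1280    /* MR_COMMAND_SIZE                              */

    int
    main(void)
    {
        int main_sges  = (MSG_SIZE - (RAID_IO_SIZE - SGE_SIZE)) / SGE_SIZE; /* 8    */
        int chain_sges = (CMD_SLOT_SIZE - MSG_SIZE) / SGE_SIZE;             /* 64   */
        int max_sge    = (main_sges - 1) + chain_sges - 1;                  /* 0x46 */
        int chain_off  = (MSG_SIZE - SGE_SIZE) / SGE_SIZE;                  /* 0xF  */

        printf("main=%d chain=%d max_sge=%d (0x%x) chain_offset=%d\n",
            main_sges, chain_sges, max_sge, (unsigned)max_sge, chain_off);
        return (0);
    }
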
251 size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1); in mfi_tbolt_get_memory_requirement()
252 size += sc->reply_size * sc->mfi_max_fw_cmds; in mfi_tbolt_get_memory_requirement()
254 size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds; in mfi_tbolt_get_memory_requirement()
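
As a rough check, the per-adapter DMA block scales linearly with the firmware command count. A hedged sketch summing only the three terms visible in this excerpt (the omitted lines may add alignment padding or further terms); all inputs are parameters, no driver values are assumed:

    #include <stddef.h>

    /* Sum of the pool sizes visible above: one extra request frame, the
     * reply frames, and one chain frame per firmware command. */
    static size_t
    tbolt_pool_bytes(size_t msg_size, size_t reply_size, size_t chain_size,
        unsigned max_fw_cmds)
    {
        size_t size = 0;

        size += msg_size * (max_fw_cmds + 1);   /* request message pool */
        size += reply_size * max_fw_cmds;       /* reply frame pool     */
        size += chain_size * max_fw_cmds;       /* SG chain frames      */
        return (size);
    }
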
262 * DevExt - HBA miniport driver's adapter data storage structure
263 * pMemLocation - start of the memory allocated for Thunderbolt.
280 addr = &addr[sc->raid_io_msg_size]; in mfi_tbolt_init_desc_pool()
282 sc->request_message_pool_align = addr; in mfi_tbolt_init_desc_pool()
284 sc->request_message_pool_align = addr; in mfi_tbolt_init_desc_pool()
286 offset = sc->request_message_pool_align - sc->request_message_pool; in mfi_tbolt_init_desc_pool()
287 sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset; in mfi_tbolt_init_desc_pool()
291 addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)]; in mfi_tbolt_init_desc_pool()
293 sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr; in mfi_tbolt_init_desc_pool()
295 addr = &addr[sc->reply_size]; in mfi_tbolt_init_desc_pool()
298 sc->reply_frame_pool_align in mfi_tbolt_init_desc_pool()
301 offset = (uintptr_t)sc->reply_frame_pool_align in mfi_tbolt_init_desc_pool()
302 - (uintptr_t)sc->request_message_pool; in mfi_tbolt_init_desc_pool()
303 sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset; in mfi_tbolt_init_desc_pool()
306 addr += sc->reply_size * sc->mfi_max_fw_cmds; in mfi_tbolt_init_desc_pool()
307 sc->reply_pool_limit = addr; in mfi_tbolt_init_desc_pool()
310 memset((uint8_t *)sc->reply_frame_pool, 0xFF, in mfi_tbolt_init_desc_pool()
311 (sc->reply_size * sc->mfi_max_fw_cmds)); in mfi_tbolt_init_desc_pool()
313 offset = sc->reply_size * sc->mfi_max_fw_cmds; in mfi_tbolt_init_desc_pool()
314 sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset; in mfi_tbolt_init_desc_pool()
316 sc->last_reply_idx = 0; in mfi_tbolt_init_desc_pool()
317 MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1); in mfi_tbolt_init_desc_pool()
318 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx); in mfi_tbolt_init_desc_pool()
319 offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME * in mfi_tbolt_init_desc_pool()
320 sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr; in mfi_tbolt_init_desc_pool()
322 device_printf(sc->mfi_dev, "Error: Initialized more than " in mfi_tbolt_init_desc_pool()
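
The pattern throughout this routine is to derive every bus address as the mapping's base bus address plus the byte offset of the aligned virtual pointer from the start of the mapping, so the CPU and controller views of each sub-pool stay consistent. A minimal sketch of that pattern, with hypothetical names (not the driver's):

    #include <stdint.h>
    #include <stddef.h>

    /* Translate a CPU pointer inside a DMA mapping to its bus address by
     * reusing the byte offset from the start of the mapping. */
    static uint64_t
    virt_to_bus(void *map_base, uint64_t map_busaddr, void *p)
    {
        ptrdiff_t offset = (uint8_t *)p - (uint8_t *)map_base;

        return (map_busaddr + (uint64_t)offset);
    }
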
328 * This routine prepares and issues the INIT2 frame to the firmware
342 mtx_assert(&sc->mfi_io_lock, MA_OWNED); in mfi_tbolt_init_MFI_queue()
345 if (sc->MFA_enabled) { in mfi_tbolt_init_MFI_queue()
346 device_printf(sc->mfi_dev, "tbolt_init already initialised!\n"); in mfi_tbolt_init_MFI_queue()
351 device_printf(sc->mfi_dev, "tbolt_init failed to get command " in mfi_tbolt_init_MFI_queue()
356 cmd_tmp.cm_frame = cm->cm_frame; in mfi_tbolt_init_MFI_queue()
357 cmd_tmp.cm_frame_busaddr = cm->cm_frame_busaddr; in mfi_tbolt_init_MFI_queue()
358 cmd_tmp.cm_dmamap = cm->cm_dmamap; in mfi_tbolt_init_MFI_queue()
360 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init); in mfi_tbolt_init_MFI_queue()
361 cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr; in mfi_tbolt_init_MFI_queue()
362 cm->cm_dmamap = sc->mfi_tb_init_dmamap; in mfi_tbolt_init_MFI_queue()
363 cm->cm_frame->header.context = 0; in mfi_tbolt_init_MFI_queue()
369 mfi_init = &cm->cm_frame->init; in mfi_tbolt_init_MFI_queue()
371 mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc; in mfi_tbolt_init_MFI_queue()
373 mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT; in mfi_tbolt_init_MFI_queue()
374 mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER; in mfi_tbolt_init_MFI_queue()
377 mpi2IocInit->MsgVersion = MPI2_VERSION; in mfi_tbolt_init_MFI_queue()
378 mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION; in mfi_tbolt_init_MFI_queue()
379 mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size/4; in mfi_tbolt_init_MFI_queue()
380 mpi2IocInit->ReplyDescriptorPostQueueDepth in mfi_tbolt_init_MFI_queue()
381 = (uint16_t)sc->mfi_max_fw_cmds; in mfi_tbolt_init_MFI_queue()
382 mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */ in mfi_tbolt_init_MFI_queue()
385 offset = (uintptr_t) sc->reply_frame_pool_align in mfi_tbolt_init_MFI_queue()
386 - (uintptr_t)sc->request_message_pool; in mfi_tbolt_init_MFI_queue()
387 phyAddress = sc->mfi_tb_busaddr + offset; in mfi_tbolt_init_MFI_queue()
389 (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress; in mfi_tbolt_init_MFI_queue()
390 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress; in mfi_tbolt_init_MFI_queue()
391 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32); in mfi_tbolt_init_MFI_queue()
394 offset = sc->request_message_pool_align - sc->request_message_pool; in mfi_tbolt_init_MFI_queue()
395 phyAddress = sc->mfi_tb_busaddr + offset; in mfi_tbolt_init_MFI_queue()
396 mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress; in mfi_tbolt_init_MFI_queue()
397 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress; in mfi_tbolt_init_MFI_queue()
398 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32); in mfi_tbolt_init_MFI_queue()
399 mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */ in mfi_tbolt_init_MFI_queue()
400 mpi2IocInit->TimeStamp = time_uptime; in mfi_tbolt_init_MFI_queue()
402 if (sc->verbuf) { in mfi_tbolt_init_MFI_queue()
403 snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n", in mfi_tbolt_init_MFI_queue()
405 mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr; in mfi_tbolt_init_MFI_queue()
406 mfi_init->driver_ver_hi = in mfi_tbolt_init_MFI_queue()
407 (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32); in mfi_tbolt_init_MFI_queue()
410 phyAddress = sc->mfi_tb_ioc_init_busaddr; in mfi_tbolt_init_MFI_queue()
411 mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress; in mfi_tbolt_init_MFI_queue()
412 mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32); in mfi_tbolt_init_MFI_queue()
413 mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; in mfi_tbolt_init_MFI_queue()
415 mfi_init->header.cmd = MFI_CMD_INIT; in mfi_tbolt_init_MFI_queue()
416 mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST); in mfi_tbolt_init_MFI_queue()
417 mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS; in mfi_tbolt_init_MFI_queue()
419 cm->cm_data = NULL; in mfi_tbolt_init_MFI_queue()
420 cm->cm_flags |= MFI_CMD_POLLED; in mfi_tbolt_init_MFI_queue()
421 cm->cm_timestamp = time_uptime; in mfi_tbolt_init_MFI_queue()
423 device_printf(sc->mfi_dev, "failed to send IOC init2 " in mfi_tbolt_init_MFI_queue()
424 "command %d at %lx\n", error, (long)cm->cm_frame_busaddr); in mfi_tbolt_init_MFI_queue()
428 if (mfi_init->header.cmd_status == MFI_STAT_OK) { in mfi_tbolt_init_MFI_queue()
429 sc->MFA_enabled = 1; in mfi_tbolt_init_MFI_queue()
431 device_printf(sc->mfi_dev, "Init command Failed %#x\n", in mfi_tbolt_init_MFI_queue()
432 mfi_init->header.cmd_status); in mfi_tbolt_init_MFI_queue()
433 error = mfi_init->header.cmd_status; in mfi_tbolt_init_MFI_queue()
438 cm->cm_frame = cmd_tmp.cm_frame; in mfi_tbolt_init_MFI_queue()
439 cm->cm_frame_busaddr = cmd_tmp.cm_frame_busaddr; in mfi_tbolt_init_MFI_queue()
440 cm->cm_dmamap = cmd_tmp.cm_dmamap; in mfi_tbolt_init_MFI_queue()
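
Both queue base addresses above are handed to the firmware as explicit low/high 32-bit halves. A small standalone illustration of that split; the struct here is a hypothetical stand-in for MFI_ADDRESS:

    #include <stdint.h>

    /* Stand-in for MFI_ADDRESS: 64-bit bus addresses are passed as halves. */
    struct addr_halves {
        uint32_t low;
        uint32_t high;
    };

    static struct addr_halves
    split_busaddr(uint64_t phys)
    {
        struct addr_halves a;

        a.low  = (uint32_t)phys;          /* bits 31:0  */
        a.high = (uint32_t)(phys >> 32);  /* bits 63:32 */
        return (a);
    }
    /* split_busaddr(0x123456789abcdef0ULL) -> low 0x9abcdef0, high 0x12345678 */
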
456 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers. in mfi_tbolt_alloc_cmd()
460 sc->request_desc_pool = malloc(sizeof( in mfi_tbolt_alloc_cmd()
461 union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds, in mfi_tbolt_alloc_cmd()
464 if (sc->request_desc_pool == NULL) { in mfi_tbolt_alloc_cmd()
465 device_printf(sc->mfi_dev, "Could not alloc " in mfi_tbolt_alloc_cmd()
470 sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*) in mfi_tbolt_alloc_cmd()
471 * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO); in mfi_tbolt_alloc_cmd()
473 if (sc->mfi_cmd_pool_tbolt == NULL) { in mfi_tbolt_alloc_cmd()
474 free(sc->request_desc_pool, M_MFIBUF); in mfi_tbolt_alloc_cmd()
475 device_printf(sc->mfi_dev, "Could not alloc " in mfi_tbolt_alloc_cmd()
480 for (i = 0; i < sc->mfi_max_fw_cmds; i++) { in mfi_tbolt_alloc_cmd()
481 sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof( in mfi_tbolt_alloc_cmd()
484 if (!sc->mfi_cmd_pool_tbolt[i]) { in mfi_tbolt_alloc_cmd()
485 device_printf(sc->mfi_dev, "Could not alloc " in mfi_tbolt_alloc_cmd()
489 free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF); in mfi_tbolt_alloc_cmd()
491 free(sc->request_desc_pool, M_MFIBUF); in mfi_tbolt_alloc_cmd()
492 sc->request_desc_pool = NULL; in mfi_tbolt_alloc_cmd()
493 free(sc->mfi_cmd_pool_tbolt, M_MFIBUF); in mfi_tbolt_alloc_cmd()
494 sc->mfi_cmd_pool_tbolt = NULL; in mfi_tbolt_alloc_cmd()
504 io_req_base = sc->request_message_pool_align in mfi_tbolt_alloc_cmd()
506 io_req_base_phys = sc->request_msg_busaddr in mfi_tbolt_alloc_cmd()
510 * Add all the commands to command pool (instance->cmd_pool) in mfi_tbolt_alloc_cmd()
514 for (i = 0; i < sc->mfi_max_fw_cmds; i++) { in mfi_tbolt_alloc_cmd()
515 cmd = sc->mfi_cmd_pool_tbolt[i]; in mfi_tbolt_alloc_cmd()
517 cmd->index = i + 1; in mfi_tbolt_alloc_cmd()
518 cmd->request_desc = (union mfi_mpi2_request_descriptor *) in mfi_tbolt_alloc_cmd()
519 (sc->request_desc_pool + i); in mfi_tbolt_alloc_cmd()
520 cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *) in mfi_tbolt_alloc_cmd()
522 cmd->io_request_phys_addr = io_req_base_phys + offset; in mfi_tbolt_alloc_cmd()
523 cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit in mfi_tbolt_alloc_cmd()
525 cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i in mfi_tbolt_alloc_cmd()
527 cmd->sync_cmd_idx = sc->mfi_max_fw_cmds; in mfi_tbolt_alloc_cmd()
529 TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next); in mfi_tbolt_alloc_cmd()
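
Note that cmd->index is set to i + 1: the firmware-visible SMID is one-based (SMID 0 is rejected in the completion path below), while the host pool is zero-based and is indexed there as mfi_cmd_pool_tbolt[smid - 1]. A toy restatement of that convention, with hypothetical helper names:

    /* Host slot i carries SMID i + 1; SMID 0 is never used as a command id. */
    static inline unsigned slot_to_smid(unsigned slot) { return (slot + 1); }
    static inline unsigned smid_to_slot(unsigned smid) { return (smid - 1); }
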
539 mtx_lock(&sc->mfi_io_lock); in mfi_tbolt_reset()
540 if (sc->hw_crit_error) { in mfi_tbolt_reset()
541 device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n"); in mfi_tbolt_reset()
542 mtx_unlock(&sc->mfi_io_lock); in mfi_tbolt_reset()
546 if (sc->mfi_flags & MFI_FLAGS_TBOLT) { in mfi_tbolt_reset()
547 fw_state = sc->mfi_read_fw_status(sc); in mfi_tbolt_reset()
550 if ((sc->disableOnlineCtrlReset == 0) in mfi_tbolt_reset()
551 && (sc->adpreset == 0)) { in mfi_tbolt_reset()
552 device_printf(sc->mfi_dev, "Adapter RESET " in mfi_tbolt_reset()
554 sc->adpreset = 1; in mfi_tbolt_reset()
555 sc->issuepend_done = 0; in mfi_tbolt_reset()
556 sc->MFA_enabled = 0; in mfi_tbolt_reset()
557 sc->last_reply_idx = 0; in mfi_tbolt_reset()
560 mtx_unlock(&sc->mfi_io_lock); in mfi_tbolt_reset()
564 mtx_unlock(&sc->mfi_io_lock); in mfi_tbolt_reset()
569 * mfi_intr_tbolt - isr entry point
576 if (sc->mfi_check_clear_intr(sc) == 1) { in mfi_intr_tbolt()
579 if (sc->mfi_detaching) in mfi_intr_tbolt()
581 mtx_lock(&sc->mfi_io_lock); in mfi_intr_tbolt()
583 sc->mfi_flags &= ~MFI_FLAGS_QFRZN; in mfi_intr_tbolt()
585 mtx_unlock(&sc->mfi_io_lock); in mfi_intr_tbolt()
590 * map_cmd_status - Maps FW cmd status to OS cmd status
602 mfi_cmd->cm_frame->header.cmd_status = MFI_STAT_OK; in map_tbolt_cmd_status()
603 mfi_cmd->cm_frame->dcmd.header.cmd_status = MFI_STAT_OK; in map_tbolt_cmd_status()
604 mfi_cmd->cm_error = MFI_STAT_OK; in map_tbolt_cmd_status()
609 mfi_cmd->cm_frame->header.cmd_status = status; in map_tbolt_cmd_status()
610 mfi_cmd->cm_frame->header.scsi_status = ext_status; in map_tbolt_cmd_status()
611 mfi_cmd->cm_frame->dcmd.header.cmd_status = status; in map_tbolt_cmd_status()
612 mfi_cmd->cm_frame->dcmd.header.scsi_status in map_tbolt_cmd_status()
617 mfi_cmd->cm_frame->header.cmd_status = ext_status; in map_tbolt_cmd_status()
618 mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status; in map_tbolt_cmd_status()
623 mfi_cmd->cm_frame->header.cmd_status = status; in map_tbolt_cmd_status()
624 mfi_cmd->cm_frame->dcmd.header.cmd_status = status; in map_tbolt_cmd_status()
628 mfi_cmd->cm_frame->header.cmd_status = status; in map_tbolt_cmd_status()
629 mfi_cmd->cm_frame->dcmd.header.cmd_status = status; in map_tbolt_cmd_status()
635 * mfi_tbolt_return_cmd - Return a cmd to free command pool
644 mtx_assert(&sc->mfi_io_lock, MA_OWNED); in mfi_tbolt_return_cmd()
646 mfi_cmd->cm_flags &= ~MFI_CMD_TBOLT; in mfi_tbolt_return_cmd()
647 mfi_cmd->cm_extra_frames = 0; in mfi_tbolt_return_cmd()
648 tbolt_cmd->sync_cmd_idx = sc->mfi_max_fw_cmds; in mfi_tbolt_return_cmd()
650 TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, tbolt_cmd, next); in mfi_tbolt_return_cmd()
664 mtx_assert(&sc->mfi_io_lock, MA_OWNED); in mfi_tbolt_complete_cmd()
667 ((uintptr_t)sc->reply_frame_pool_align in mfi_tbolt_complete_cmd()
668 + sc->last_reply_idx * sc->reply_size); in mfi_tbolt_complete_cmd()
672 device_printf(sc->mfi_dev, "reply desc is NULL!!\n"); in mfi_tbolt_complete_cmd()
676 reply_descript_type = reply_desc->ReplyFlags in mfi_tbolt_complete_cmd()
682 val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words; in mfi_tbolt_complete_cmd()
686 smid = reply_desc->SMID; in mfi_tbolt_complete_cmd()
687 if (smid == 0 || smid > sc->mfi_max_fw_cmds) { in mfi_tbolt_complete_cmd()
688 device_printf(sc->mfi_dev, "smid is %d cannot " in mfi_tbolt_complete_cmd()
689 "proceed - skipping\n", smid); in mfi_tbolt_complete_cmd()
692 cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1]; in mfi_tbolt_complete_cmd()
693 if (cmd_tbolt->sync_cmd_idx == sc->mfi_max_fw_cmds) { in mfi_tbolt_complete_cmd()
694 device_printf(sc->mfi_dev, "cmd_tbolt %p " in mfi_tbolt_complete_cmd()
695 "has invalid sync_cmd_idx=%d - skipping\n", in mfi_tbolt_complete_cmd()
696 cmd_tbolt, cmd_tbolt->sync_cmd_idx); in mfi_tbolt_complete_cmd()
699 cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx]; in mfi_tbolt_complete_cmd()
701 status = cmd_mfi->cm_frame->dcmd.header.cmd_status; in mfi_tbolt_complete_cmd()
702 extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status; in mfi_tbolt_complete_cmd()
706 if ((cmd_mfi->cm_flags & MFI_CMD_SCSI) != 0 && in mfi_tbolt_complete_cmd()
707 (cmd_mfi->cm_flags & MFI_CMD_POLLED) != 0) { in mfi_tbolt_complete_cmd()
710 cmd_mfi->cm_frame->header.cmd_status = MFI_STAT_OK; in mfi_tbolt_complete_cmd()
714 if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY) != 0) in mfi_tbolt_complete_cmd()
722 sc->last_reply_idx++; in mfi_tbolt_complete_cmd()
723 if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) { in mfi_tbolt_complete_cmd()
724 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx); in mfi_tbolt_complete_cmd()
725 sc->last_reply_idx = 0; in mfi_tbolt_complete_cmd()
729 ((union mfi_mpi2_reply_descriptor*)desc)->words = in mfi_tbolt_complete_cmd()
736 ((uintptr_t)sc->reply_frame_pool_align in mfi_tbolt_complete_cmd()
737 + sc->last_reply_idx * sc->reply_size); in mfi_tbolt_complete_cmd()
739 val.word = ((union mfi_mpi2_reply_descriptor*)desc)->words; in mfi_tbolt_complete_cmd()
740 reply_descript_type = reply_desc->ReplyFlags in mfi_tbolt_complete_cmd()
750 if (sc->last_reply_idx) in mfi_tbolt_complete_cmd()
751 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx); in mfi_tbolt_complete_cmd()
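
The completion path above is a single-consumer ring walk: the reply frames are pre-filled with 0xFF bytes (see the memset in mfi_tbolt_init_desc_pool), the firmware overwrites one descriptor per completion, and the host consumes entries until it hits an all-ones descriptor again, handing each slot back and finally publishing its consumer index. A compact user-space model of that walk, assuming an all-ones descriptor means "unused" exactly as in the fill and hand-back above; the names here are hypothetical:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define RING_DEPTH  8
    #define UNUSED_DESC UINT64_MAX          /* matches the 0xFF fill above */

    static uint64_t ring[RING_DEPTH];
    static unsigned last_idx;               /* host consumer index */

    static void
    ring_init(void)
    {
        memset(ring, 0xFF, sizeof(ring));   /* every entry starts "unused" */
        last_idx = 0;
    }

    /* Consume completions until the next unused slot; the real driver also
     * looks up the command by SMID and writes last_idx to the MFI_RPI
     * doorbell register. */
    static unsigned
    ring_drain(void)
    {
        unsigned handled = 0;

        while (ring[last_idx] != UNUSED_DESC) {
            ring[last_idx] = UNUSED_DESC;   /* hand the slot back */
            if (++last_idx == RING_DEPTH)
                last_idx = 0;
            handled++;
        }
        return (handled);
    }

    int
    main(void)
    {
        ring_init();
        ring[0] = 42;                       /* pretend the firmware posted one */
        printf("drained %u completions\n", ring_drain());
        return (0);
    }
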
757 * mfi_get_cmd - Get a command from the free pool
768 mtx_assert(&sc->mfi_io_lock, MA_OWNED); in mfi_tbolt_get_cmd()
770 if ((cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh)) == NULL) in mfi_tbolt_get_cmd()
772 TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next); in mfi_tbolt_get_cmd()
773 memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME); in mfi_tbolt_get_cmd()
774 memset((uint8_t *)cmd->io_request, 0, in mfi_tbolt_get_cmd()
777 cmd->sync_cmd_idx = mfi_cmd->cm_index; in mfi_tbolt_get_cmd()
778 mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */ in mfi_tbolt_get_cmd()
779 mfi_cmd->cm_flags |= MFI_CMD_TBOLT; in mfi_tbolt_get_cmd()
789 if (index >= sc->mfi_max_fw_cmds) { in mfi_tbolt_get_request_descriptor()
790 device_printf(sc->mfi_dev, "Invalid SMID (0x%x) request " in mfi_tbolt_get_request_descriptor()
794 p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor) in mfi_tbolt_get_request_descriptor()
811 io_req = cmd->io_request; in mfi_build_mpt_pass_thru()
812 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; in mfi_build_mpt_pass_thru()
814 io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST; in mfi_build_mpt_pass_thru()
815 io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io, in mfi_build_mpt_pass_thru()
817 io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg; in mfi_build_mpt_pass_thru()
819 mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr; in mfi_build_mpt_pass_thru()
825 mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT in mfi_build_mpt_pass_thru()
829 mpi25_ieee_chain->Length = 1024; in mfi_build_mpt_pass_thru()
842 device_id = mfi_cmd->cm_frame->io.header.target_id; in mfi_tbolt_build_ldio()
843 io_request = cmd->io_request; in mfi_tbolt_build_ldio()
844 io_request->RaidContext.TargetID = device_id; in mfi_tbolt_build_ldio()
845 io_request->RaidContext.Status = 0; in mfi_tbolt_build_ldio()
846 io_request->RaidContext.exStatus = 0; in mfi_tbolt_build_ldio()
847 io_request->RaidContext.regLockFlags = 0; in mfi_tbolt_build_ldio()
849 start_lba_lo = mfi_cmd->cm_frame->io.lba_lo; in mfi_tbolt_build_ldio()
850 start_lba_hi = mfi_cmd->cm_frame->io.lba_hi; in mfi_tbolt_build_ldio()
854 io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len; in mfi_tbolt_build_ldio()
856 if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) == in mfi_tbolt_build_ldio()
860 io_request->RaidContext.timeoutValue in mfi_tbolt_build_ldio()
862 io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST; in mfi_tbolt_build_ldio()
863 io_request->DevHandle = device_id; in mfi_tbolt_build_ldio()
864 cmd->request_desc->header.RequestFlags in mfi_tbolt_build_ldio()
867 if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0)) in mfi_tbolt_build_ldio()
868 io_request->RaidContext.RegLockLength = 0x100; in mfi_tbolt_build_ldio()
869 io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len in mfi_tbolt_build_ldio()
883 io_request = cmd->io_request; in mfi_tbolt_build_io()
884 if (!(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ in mfi_tbolt_build_io()
885 || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) in mfi_tbolt_build_io()
891 bzero(io_request->CDB.CDB32, sizeof(io_request->CDB.CDB32)); in mfi_tbolt_build_io()
892 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE) in mfi_tbolt_build_io()
897 lba = mfi_cmd->cm_frame->io.lba_hi; in mfi_tbolt_build_io()
898 lba = (lba << 32) + mfi_cmd->cm_frame->io.lba_lo; in mfi_tbolt_build_io()
900 mfi_cmd->cm_frame->io.header.data_len, io_request->CDB.CDB32); in mfi_tbolt_build_io()
903 io_request->IoFlags = cdb_len; in mfi_tbolt_build_io()
909 (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd); in mfi_tbolt_build_io()
910 if (sge_count > sc->mfi_max_sge) { in mfi_tbolt_build_io()
911 device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds " in mfi_tbolt_build_io()
912 "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge); in mfi_tbolt_build_io()
915 io_request->RaidContext.numSGE = sge_count; in mfi_tbolt_build_io()
916 io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; in mfi_tbolt_build_io()
918 if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE) in mfi_tbolt_build_io()
919 io_request->Control = MPI2_SCSIIO_CONTROL_WRITE; in mfi_tbolt_build_io()
921 io_request->Control = MPI2_SCSIIO_CONTROL_READ; in mfi_tbolt_build_io()
923 io_request->SGLOffset0 = offsetof( in mfi_tbolt_build_io()
926 io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr; in mfi_tbolt_build_io()
927 io_request->SenseBufferLength = MFI_SENSE_LEN; in mfi_tbolt_build_io()
928 io_request->RaidContext.Status = MFI_STAT_INVALID_STATUS; in mfi_tbolt_build_io()
929 io_request->RaidContext.exStatus = MFI_STAT_INVALID_STATUS; in mfi_tbolt_build_io()
946 if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) { in mfi_tbolt_make_sgl()
947 device_printf(sc->mfi_dev, "Buffer empty \n"); in mfi_tbolt_make_sgl()
950 os_sgl = mfi_cmd->cm_sg; in mfi_tbolt_make_sgl()
951 sge_count = mfi_cmd->cm_frame->header.sg_count; in mfi_tbolt_make_sgl()
953 if (sge_count > sc->mfi_max_sge) { in mfi_tbolt_make_sgl()
954 device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d \n", in mfi_tbolt_make_sgl()
959 if (sge_count > sc->max_SGEs_in_main_message) in mfi_tbolt_make_sgl()
961 sge_idx = sc->max_SGEs_in_main_message - 1; in mfi_tbolt_make_sgl()
965 if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)) { in mfi_tbolt_make_sgl()
966 sgl_end = sgl_ptr + (sc->max_SGEs_in_main_message - 1); in mfi_tbolt_make_sgl()
967 sgl_end->Flags = 0; in mfi_tbolt_make_sgl()
976 if (sc->mfi_flags & MFI_FLAGS_SKINNY) { in mfi_tbolt_make_sgl()
977 sgl_ptr->Length = os_sgl->sg_skinny[i].len; in mfi_tbolt_make_sgl()
978 sgl_ptr->Address = os_sgl->sg_skinny[i].addr; in mfi_tbolt_make_sgl()
980 sgl_ptr->Length = os_sgl->sg32[i].len; in mfi_tbolt_make_sgl()
981 sgl_ptr->Address = os_sgl->sg32[i].addr; in mfi_tbolt_make_sgl()
983 if (i == sge_count - 1 && in mfi_tbolt_make_sgl()
984 (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY))) in mfi_tbolt_make_sgl()
985 sgl_ptr->Flags = MPI25_IEEE_SGE_FLAGS_END_OF_LIST; in mfi_tbolt_make_sgl()
987 sgl_ptr->Flags = 0; in mfi_tbolt_make_sgl()
989 cmd->io_request->ChainOffset = 0; in mfi_tbolt_make_sgl()
997 cmd->io_request->ChainOffset = in mfi_tbolt_make_sgl()
998 sc->chain_offset_value_for_main_message; in mfi_tbolt_make_sgl()
1001 sg_chain->NextChainOffset = 0; in mfi_tbolt_make_sgl()
1002 if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)) in mfi_tbolt_make_sgl()
1003 sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT; in mfi_tbolt_make_sgl()
1005 sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | in mfi_tbolt_make_sgl()
1007 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * in mfi_tbolt_make_sgl()
1008 (sge_count - sg_processed)); in mfi_tbolt_make_sgl()
1009 sg_chain->Address = cmd->sg_frame_phys_addr; in mfi_tbolt_make_sgl()
1010 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame; in mfi_tbolt_make_sgl()
1012 if (sc->mfi_flags & MFI_FLAGS_SKINNY) { in mfi_tbolt_make_sgl()
1013 sgl_ptr->Length = os_sgl->sg_skinny[i].len; in mfi_tbolt_make_sgl()
1014 sgl_ptr->Address = os_sgl->sg_skinny[i].addr; in mfi_tbolt_make_sgl()
1016 sgl_ptr->Length = os_sgl->sg32[i].len; in mfi_tbolt_make_sgl()
1017 sgl_ptr->Address = os_sgl->sg32[i].addr; in mfi_tbolt_make_sgl()
1019 if (i == sge_count - 1 && in mfi_tbolt_make_sgl()
1020 (sc->mfi_flags & in mfi_tbolt_make_sgl()
1022 sgl_ptr->Flags = in mfi_tbolt_make_sgl()
1025 sgl_ptr->Flags = 0; in mfi_tbolt_make_sgl()
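
The split rule above is: if a transfer needs more SGEs than fit in the main message, only (max_SGEs_in_main_message - 1) go inline, the last inline slot is consumed by the chain element, and the remainder land in the per-command sg_frame that the chain points to. A small model of just that split, with hypothetical names and an assumed inline capacity of 8:

    #include <stdio.h>

    #define MAIN_SLOTS 8    /* assumed max_SGEs_in_main_message */

    struct sgl_split {
        int inline_sges;    /* SGEs placed in the main message     */
        int chain_sges;     /* SGEs placed in the chain (sg_frame) */
        int uses_chain;     /* 1 if a chain element is needed      */
    };

    static struct sgl_split
    split_sgl(int sge_count)
    {
        struct sgl_split s;

        if (sge_count > MAIN_SLOTS) {
            s.inline_sges = MAIN_SLOTS - 1;     /* last slot -> chain element */
            s.chain_sges  = sge_count - s.inline_sges;
            s.uses_chain  = 1;
        } else {
            s.inline_sges = sge_count;
            s.chain_sges  = 0;
            s.uses_chain  = 0;
        }
        return (s);
    }

    int
    main(void)
    {
        struct sgl_split s = split_sgl(20);

        printf("inline=%d chain=%d uses_chain=%d\n",
            s.inline_sges, s.chain_sges, s.uses_chain);  /* inline=7 chain=13 */
        return (0);
    }
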
1042 index = cmd->index; in mfi_build_and_issue_cmd()
1043 req_desc = mfi_tbolt_get_request_descriptor(sc, index-1); in mfi_build_and_issue_cmd()
1053 req_desc->header.SMID = index; in mfi_build_and_issue_cmd()
1063 device_printf(sc->mfi_dev, "Couldn't build MFI pass thru " in mfi_tbolt_build_mpt_cmd()
1068 index = cmd->cm_extra_frames; in mfi_tbolt_build_mpt_cmd()
1070 req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1); in mfi_tbolt_build_mpt_cmd()
1075 req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << in mfi_tbolt_build_mpt_cmd()
1077 req_desc->header.SMID = index; in mfi_tbolt_build_mpt_cmd()
1088 hdr = &cm->cm_frame->header; in mfi_tbolt_send_frame()
1089 if (sc->adpreset) in mfi_tbolt_send_frame()
1091 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) { in mfi_tbolt_send_frame()
1092 cm->cm_timestamp = time_uptime; in mfi_tbolt_send_frame()
1095 hdr->cmd_status = MFI_STAT_INVALID_STATUS; in mfi_tbolt_send_frame()
1096 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; in mfi_tbolt_send_frame()
1099 if (hdr->cmd == MFI_CMD_PD_SCSI_IO) { in mfi_tbolt_send_frame()
1103 device_printf(sc->mfi_dev, "Mapping from MFI " in mfi_tbolt_send_frame()
1107 } else if (hdr->cmd == MFI_CMD_LD_SCSI_IO || in mfi_tbolt_send_frame()
1108 hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) { in mfi_tbolt_send_frame()
1109 cm->cm_flags |= MFI_CMD_SCSI; in mfi_tbolt_send_frame()
1111 device_printf(sc->mfi_dev, "LDIO Failed \n"); in mfi_tbolt_send_frame()
1115 device_printf(sc->mfi_dev, "Mapping from MFI to MPT Failed\n"); in mfi_tbolt_send_frame()
1119 if (cm->cm_flags & MFI_CMD_SCSI) { in mfi_tbolt_send_frame()
1125 hdr->flags &= ~MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; in mfi_tbolt_send_frame()
1128 MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF)); in mfi_tbolt_send_frame()
1129 MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >> 0x20)); in mfi_tbolt_send_frame()
1131 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) in mfi_tbolt_send_frame()
1135 * This is a polled command, so busy-wait for it to complete. in mfi_tbolt_send_frame()
1137 * The value of hdr->cmd_status is updated directly by the hardware in mfi_tbolt_send_frame()
1141 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { in mfi_tbolt_send_frame()
1143 tm -= 1; in mfi_tbolt_send_frame()
1146 if (cm->cm_flags & MFI_CMD_SCSI) { in mfi_tbolt_send_frame()
1158 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { in mfi_tbolt_send_frame()
1159 device_printf(sc->mfi_dev, "Frame %p timed out " in mfi_tbolt_send_frame()
1160 "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode); in mfi_tbolt_send_frame()
1172 mtx_assert(&sc->mfi_io_lock, MA_OWNED); in mfi_issue_pending_cmds_again()
1173 TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) { in mfi_issue_pending_cmds_again()
1174 cm->retry_for_fw_reset++; in mfi_issue_pending_cmds_again()
1181 if (cm->retry_for_fw_reset == 3) { in mfi_issue_pending_cmds_again()
1182 device_printf(sc->mfi_dev, "megaraid_sas: command %p " in mfi_issue_pending_cmds_again()
1184 "reset - Shutting down the HBA\n", cm, cm->cm_index); in mfi_issue_pending_cmds_again()
1186 sc->hw_crit_error = 1; in mfi_issue_pending_cmds_again()
1191 if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) { in mfi_issue_pending_cmds_again()
1192 if (cm->cm_extra_frames != 0 && cm->cm_extra_frames <= in mfi_issue_pending_cmds_again()
1193 sc->mfi_max_fw_cmds) { in mfi_issue_pending_cmds_again()
1194 cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1]; in mfi_issue_pending_cmds_again()
1197 device_printf(sc->mfi_dev, in mfi_issue_pending_cmds_again()
1199 cm->cm_extra_frames); in mfi_issue_pending_cmds_again()
1203 if (cm->cm_frame->dcmd.opcode != MFI_DCMD_CTRL_EVENT_WAIT) { in mfi_issue_pending_cmds_again()
1204 device_printf(sc->mfi_dev, in mfi_issue_pending_cmds_again()
1206 cm, cm->cm_index); in mfi_issue_pending_cmds_again()
1217 if (sc->mfi_flags & MFI_FLAGS_TBOLT) in mfi_kill_hba()
1229 if (sc->adpreset == 1) { in mfi_process_fw_state_chg_isr()
1230 device_printf(sc->mfi_dev, "First stage of FW reset " in mfi_process_fw_state_chg_isr()
1233 sc->mfi_adp_reset(sc); in mfi_process_fw_state_chg_isr()
1234 sc->mfi_enable_intr(sc); in mfi_process_fw_state_chg_isr()
1236 device_printf(sc->mfi_dev, "First stage of reset complete, " in mfi_process_fw_state_chg_isr()
1239 sc->adpreset = 2; in mfi_process_fw_state_chg_isr()
1244 device_printf(sc->mfi_dev, "Second stage of FW reset " in mfi_process_fw_state_chg_isr()
1248 sc->mfi_disable_intr(sc); in mfi_process_fw_state_chg_isr()
1252 device_printf(sc->mfi_dev, "controller is not in " in mfi_process_fw_state_chg_isr()
1255 sc->hw_crit_error = 1; in mfi_process_fw_state_chg_isr()
1259 device_printf(sc->mfi_dev, "Failed to initialise MFI " in mfi_process_fw_state_chg_isr()
1262 sc->hw_crit_error = 1; in mfi_process_fw_state_chg_isr()
1267 MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1); in mfi_process_fw_state_chg_isr()
1268 MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx); in mfi_process_fw_state_chg_isr()
1270 sc->mfi_enable_intr(sc); in mfi_process_fw_state_chg_isr()
1271 sc->adpreset = 0; in mfi_process_fw_state_chg_isr()
1272 if (sc->mfi_aen_cm != NULL) { in mfi_process_fw_state_chg_isr()
1273 free(sc->mfi_aen_cm->cm_data, M_MFIBUF); in mfi_process_fw_state_chg_isr()
1274 mfi_remove_busy(sc->mfi_aen_cm); in mfi_process_fw_state_chg_isr()
1275 mfi_release_command(sc->mfi_aen_cm); in mfi_process_fw_state_chg_isr()
1276 sc->mfi_aen_cm = NULL; in mfi_process_fw_state_chg_isr()
1279 if (sc->mfi_map_sync_cm != NULL) { in mfi_process_fw_state_chg_isr()
1280 mfi_remove_busy(sc->mfi_map_sync_cm); in mfi_process_fw_state_chg_isr()
1281 mfi_release_command(sc->mfi_map_sync_cm); in mfi_process_fw_state_chg_isr()
1282 sc->mfi_map_sync_cm = NULL; in mfi_process_fw_state_chg_isr()
1288 * dead because of too many retries. Check for that in mfi_process_fw_state_chg_isr()
1291 if (!sc->hw_crit_error) { in mfi_process_fw_state_chg_isr()
1296 mfi_aen_setup(sc, sc->last_seq_num); in mfi_process_fw_state_chg_isr()
1299 sc->issuepend_done = 1; in mfi_process_fw_state_chg_isr()
1300 device_printf(sc->mfi_dev, "second stage of reset " in mfi_process_fw_state_chg_isr()
1303 device_printf(sc->mfi_dev, "second stage of reset " in mfi_process_fw_state_chg_isr()
1307 device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr " in mfi_process_fw_state_chg_isr()
1308 "called with unhandled value:%d\n", sc->adpreset); in mfi_process_fw_state_chg_isr()
1322 * An AEN-like command is used to inform the RAID firmware to "sync"
1324 * command in write mode will return when the RAID firmware has
1331 * If this is not done right the RAID firmware will not remove a
1357 mtx_assert(&sc->mfi_io_lock, MA_OWNED); in mfi_tbolt_sync_map_info()
1359 if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort) in mfi_tbolt_sync_map_info()
1367 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN; in mfi_tbolt_sync_map_info()
1370 device_printf(sc->mfi_dev, "Failed to get device listing\n"); in mfi_tbolt_sync_map_info()
1374 hdr = &cm->cm_frame->header; in mfi_tbolt_sync_map_info()
1375 if (hdr->cmd_status != MFI_STAT_OK) { in mfi_tbolt_sync_map_info()
1376 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n", in mfi_tbolt_sync_map_info()
1377 hdr->cmd_status); in mfi_tbolt_sync_map_info()
1381 ld_size = sizeof(*ld_sync) * list->ld_count; in mfi_tbolt_sync_map_info()
1385 device_printf(sc->mfi_dev, "Failed to allocate sync\n"); in mfi_tbolt_sync_map_info()
1388 for (i = 0; i < list->ld_count; i++) in mfi_tbolt_sync_map_info()
1389 ld_sync[i].ref = list->ld_list[i].ld.ref; in mfi_tbolt_sync_map_info()
1392 device_printf(sc->mfi_dev, "Failed to get command\n"); in mfi_tbolt_sync_map_info()
1397 context = cmd->cm_frame->header.context; in mfi_tbolt_sync_map_info()
1398 bzero(cmd->cm_frame, sizeof(union mfi_frame)); in mfi_tbolt_sync_map_info()
1399 cmd->cm_frame->header.context = context; in mfi_tbolt_sync_map_info()
1401 dcmd = &cmd->cm_frame->dcmd; in mfi_tbolt_sync_map_info()
1402 bzero(dcmd->mbox, MFI_MBOX_SIZE); in mfi_tbolt_sync_map_info()
1403 dcmd->header.cmd = MFI_CMD_DCMD; in mfi_tbolt_sync_map_info()
1404 dcmd->header.flags = MFI_FRAME_DIR_WRITE; in mfi_tbolt_sync_map_info()
1405 dcmd->header.timeout = 0; in mfi_tbolt_sync_map_info()
1406 dcmd->header.data_len = ld_size; in mfi_tbolt_sync_map_info()
1407 dcmd->header.scsi_status = 0; in mfi_tbolt_sync_map_info()
1408 dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO; in mfi_tbolt_sync_map_info()
1409 cmd->cm_sg = &dcmd->sgl; in mfi_tbolt_sync_map_info()
1410 cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; in mfi_tbolt_sync_map_info()
1411 cmd->cm_data = ld_sync; in mfi_tbolt_sync_map_info()
1412 cmd->cm_private = ld_sync; in mfi_tbolt_sync_map_info()
1414 cmd->cm_len = ld_size; in mfi_tbolt_sync_map_info()
1415 cmd->cm_complete = mfi_sync_map_complete; in mfi_tbolt_sync_map_info()
1416 sc->mfi_map_sync_cm = cmd; in mfi_tbolt_sync_map_info()
1418 cmd->cm_flags = MFI_CMD_DATAOUT; in mfi_tbolt_sync_map_info()
1419 cmd->cm_frame->dcmd.mbox[0] = list->ld_count; in mfi_tbolt_sync_map_info()
1420 cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG; in mfi_tbolt_sync_map_info()
1423 device_printf(sc->mfi_dev, "failed to send map sync\n"); in mfi_tbolt_sync_map_info()
1425 sc->mfi_map_sync_cm = NULL; in mfi_tbolt_sync_map_info()
1444 sc = cm->cm_sc; in mfi_sync_map_complete()
1445 mtx_assert(&sc->mfi_io_lock, MA_OWNED); in mfi_sync_map_complete()
1447 hdr = &cm->cm_frame->header; in mfi_sync_map_complete()
1449 if (sc->mfi_map_sync_cm == NULL) in mfi_sync_map_complete()
1452 if (sc->cm_map_abort || in mfi_sync_map_complete()
1453 hdr->cmd_status == MFI_STAT_INVALID_STATUS) { in mfi_sync_map_complete()
1454 sc->cm_map_abort = 0; in mfi_sync_map_complete()
1458 free(cm->cm_data, M_MFIBUF); in mfi_sync_map_complete()
1459 wakeup(&sc->mfi_map_sync_cm); in mfi_sync_map_complete()
1460 sc->mfi_map_sync_cm = NULL; in mfi_sync_map_complete()
1471 mtx_assert(&sc->mfi_io_lock, MA_OWNED); in mfi_queue_map_sync()
1472 taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task); in mfi_queue_map_sync()
1481 mtx_lock(&sc->mfi_io_lock); in mfi_handle_map_sync()
1483 mtx_unlock(&sc->mfi_io_lock); in mfi_handle_map_sync()