1 /* 2 * Copyright (c) 2014, LSI Corp. 3 * All rights reserved. 4 * Author: Marian Choy 5 * Support: freebsdraid@lsi.com 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of the <ORGANIZATION> nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 * 34 * The views and conclusions contained in the software and documentation 35 * are those of the authors and should not be interpreted as representing 36 * official policies,either expressed or implied, of the FreeBSD Project. 
37 * 38 * Send feedback to: <megaraidfbsd@lsi.com> 39 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 40 * ATTN: MegaRaid FreeBSD 41 * 42 * $FreeBSD: head/sys/dev/mrsas/mrsas.c 265555 2014-05-07 16:16:49Z ambrisko $ 43 */ 44 45 #include <dev/raid/mrsas/mrsas.h> 46 #include <dev/raid/mrsas/mrsas_ioctl.h> 47 48 #include <bus/cam/cam.h> 49 #include <bus/cam/cam_ccb.h> 50 51 #include <sys/sysctl.h> 52 #include <sys/types.h> 53 #include <sys/kthread.h> 54 #include <sys/taskqueue.h> 55 #include <sys/device.h> 56 #include <sys/spinlock2.h> 57 58 59 /* 60 * Function prototypes 61 */ 62 static d_open_t mrsas_open; 63 static d_close_t mrsas_close; 64 static d_read_t mrsas_read; 65 static d_write_t mrsas_write; 66 static d_ioctl_t mrsas_ioctl; 67 68 static struct mrsas_ident *mrsas_find_ident(device_t); 69 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode); 70 static void mrsas_flush_cache(struct mrsas_softc *sc); 71 static void mrsas_reset_reply_desc(struct mrsas_softc *sc); 72 static void mrsas_ocr_thread(void *arg); 73 static int mrsas_get_map_info(struct mrsas_softc *sc); 74 static int mrsas_get_ld_map_info(struct mrsas_softc *sc); 75 static int mrsas_sync_map_info(struct mrsas_softc *sc); 76 static int mrsas_get_pd_list(struct mrsas_softc *sc); 77 static int mrsas_get_ld_list(struct mrsas_softc *sc); 78 static int mrsas_setup_irq(struct mrsas_softc *sc); 79 static int mrsas_alloc_mem(struct mrsas_softc *sc); 80 static int mrsas_init_fw(struct mrsas_softc *sc); 81 static int mrsas_setup_raidmap(struct mrsas_softc *sc); 82 static int mrsas_complete_cmd(struct mrsas_softc *sc); 83 static int mrsas_clear_intr(struct mrsas_softc *sc); 84 static int mrsas_get_ctrl_info(struct mrsas_softc *sc, 85 struct mrsas_ctrl_info *ctrl_info); 86 static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 87 struct mrsas_mfi_cmd *cmd_to_abort); 88 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset); 89 u_int8_t 
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, 90 struct mrsas_mfi_cmd *mfi_cmd); 91 int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr); 92 int mrsas_init_adapter(struct mrsas_softc *sc); 93 int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc); 94 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc); 95 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc); 96 int mrsas_ioc_init(struct mrsas_softc *sc); 97 int mrsas_bus_scan(struct mrsas_softc *sc); 98 int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 99 int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 100 int mrsas_reset_ctrl(struct mrsas_softc *sc); 101 int mrsas_wait_for_outstanding(struct mrsas_softc *sc); 102 int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, 103 struct mrsas_mfi_cmd *cmd); 104 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, 105 int size); 106 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd); 107 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 108 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 109 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 110 void mrsas_disable_intr(struct mrsas_softc *sc); 111 void mrsas_enable_intr(struct mrsas_softc *sc); 112 void mrsas_free_ioc_cmd(struct mrsas_softc *sc); 113 void mrsas_free_mem(struct mrsas_softc *sc); 114 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp); 115 void mrsas_isr(void *arg); 116 void mrsas_teardown_intr(struct mrsas_softc *sc); 117 void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error); 118 void mrsas_kill_hba (struct mrsas_softc *sc); 119 void mrsas_aen_handler(struct mrsas_softc *sc); 120 void mrsas_write_reg(struct mrsas_softc *sc, int offset, 121 u_int32_t value); 122 void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 123 u_int32_t req_desc_hi); 124 void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc); 125 void 
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, 126 struct mrsas_mfi_cmd *cmd, u_int8_t status); 127 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, 128 u_int8_t extStatus); 129 struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc); 130 MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc, 131 struct mrsas_mfi_cmd *cmd); 132 133 extern int mrsas_cam_attach(struct mrsas_softc *sc); 134 extern void mrsas_cam_detach(struct mrsas_softc *sc); 135 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 136 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 137 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc); 138 extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); 139 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); 140 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg); 141 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc); 142 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map); 143 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map); 144 extern void mrsas_xpt_freeze(struct mrsas_softc *sc); 145 extern void mrsas_xpt_release(struct mrsas_softc *sc); 146 extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc, 147 u_int16_t index); 148 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); 149 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc); 150 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc); 151 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters"); 152 153 154 /** 155 * PCI device struct and table 156 * 157 */ 158 typedef struct mrsas_ident { 159 uint16_t vendor; 160 uint16_t device; 161 uint16_t subvendor; 162 uint16_t subdevice; 163 const char *desc; 164 } MRSAS_CTLR_ID; 165 166 MRSAS_CTLR_ID device_table[] = { 167 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, 
"LSI Thunderbolt SAS Controller"}, 168 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"}, 169 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"}, 170 {0, 0, 0, 0, NULL} 171 }; 172 173 /** 174 * Character device entry points 175 * 176 */ 177 static struct dev_ops mrsas_ops = { 178 { "mrsas", 0, 0 }, 179 .d_open = mrsas_open, 180 .d_close = mrsas_close, 181 .d_read = mrsas_read, 182 .d_write = mrsas_write, 183 .d_ioctl = mrsas_ioctl, 184 }; 185 186 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver"); 187 188 static int mrsas_msi_enable = 1; 189 TUNABLE_INT("hw.mrsas.msi.enable", &mrsas_msi_enable); 190 191 /** 192 * In the cdevsw routines, we find our softc by using the si_drv1 member 193 * of struct cdev. We set this variable to point to our softc in our 194 * attach routine when we create the /dev entry. 195 */ 196 int 197 mrsas_open(struct dev_open_args *ap) 198 { 199 cdev_t dev = ap->a_head.a_dev; 200 struct mrsas_softc *sc; 201 202 sc = dev->si_drv1; 203 return (0); 204 } 205 206 int 207 mrsas_close(struct dev_close_args *ap) 208 { 209 cdev_t dev = ap->a_head.a_dev; 210 struct mrsas_softc *sc; 211 212 sc = dev->si_drv1; 213 return (0); 214 } 215 216 int 217 mrsas_read(struct dev_read_args *ap) 218 { 219 cdev_t dev = ap->a_head.a_dev; 220 struct mrsas_softc *sc; 221 222 sc = dev->si_drv1; 223 return (0); 224 } 225 int 226 mrsas_write(struct dev_write_args *ap) 227 { 228 cdev_t dev = ap->a_head.a_dev; 229 struct mrsas_softc *sc; 230 231 sc = dev->si_drv1; 232 return (0); 233 } 234 235 /** 236 * Register Read/Write Functions 237 * 238 */ 239 void 240 mrsas_write_reg(struct mrsas_softc *sc, int offset, 241 u_int32_t value) 242 { 243 bus_space_tag_t bus_tag = sc->bus_tag; 244 bus_space_handle_t bus_handle = sc->bus_handle; 245 246 bus_space_write_4(bus_tag, bus_handle, offset, value); 247 } 248 249 u_int32_t 250 mrsas_read_reg(struct mrsas_softc *sc, int offset) 251 { 252 bus_space_tag_t bus_tag = sc->bus_tag; 253 
bus_space_handle_t bus_handle = sc->bus_handle; 254 255 return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset)); 256 } 257 258 259 /** 260 * Interrupt Disable/Enable/Clear Functions 261 * 262 */ 263 void mrsas_disable_intr(struct mrsas_softc *sc) 264 { 265 u_int32_t mask = 0xFFFFFFFF; 266 u_int32_t status; 267 268 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask); 269 /* Dummy read to force pci flush */ 270 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); 271 } 272 273 void mrsas_enable_intr(struct mrsas_softc *sc) 274 { 275 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK; 276 u_int32_t status; 277 278 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0); 279 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); 280 281 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask); 282 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); 283 } 284 285 static int mrsas_clear_intr(struct mrsas_softc *sc) 286 { 287 u_int32_t status, fw_status, fw_state; 288 289 /* Read received interrupt */ 290 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); 291 292 /* If FW state change interrupt is received, write to it again to clear */ 293 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) { 294 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 295 outbound_scratch_pad)); 296 fw_state = fw_status & MFI_STATE_MASK; 297 if (fw_state == MFI_STATE_FAULT) { 298 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n"); 299 if(sc->ocr_thread_active) 300 wakeup(&sc->ocr_chan); 301 } 302 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status); 303 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); 304 return(1); 305 } 306 307 /* Not our interrupt, so just return */ 308 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) 309 return(0); 310 311 /* We got a reply interrupt */ 312 return(1); 313 } 314 315 /** 
316 * PCI Support Functions 317 * 318 */ 319 static struct mrsas_ident * mrsas_find_ident(device_t dev) 320 { 321 struct mrsas_ident *pci_device; 322 323 for (pci_device=device_table; pci_device->vendor != 0; pci_device++) 324 { 325 if ((pci_device->vendor == pci_get_vendor(dev)) && 326 (pci_device->device == pci_get_device(dev)) && 327 ((pci_device->subvendor == pci_get_subvendor(dev)) || 328 (pci_device->subvendor == 0xffff)) && 329 ((pci_device->subdevice == pci_get_subdevice(dev)) || 330 (pci_device->subdevice == 0xffff))) 331 return (pci_device); 332 } 333 return (NULL); 334 } 335 336 static int mrsas_probe(device_t dev) 337 { 338 static u_int8_t first_ctrl = 1; 339 struct mrsas_ident *id; 340 341 if ((id = mrsas_find_ident(dev)) != NULL) { 342 if (first_ctrl) { 343 kprintf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION); 344 first_ctrl = 0; 345 } 346 device_set_desc(dev, id->desc); 347 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */ 348 return (-30); 349 } 350 return (ENXIO); 351 } 352 353 /** 354 * mrsas_setup_sysctl: setup sysctl values for mrsas 355 * input: Adapter instance soft state 356 * 357 * Setup sysctl entries for mrsas driver. 358 */ 359 static void 360 mrsas_setup_sysctl(struct mrsas_softc *sc) 361 { 362 struct sysctl_ctx_list *sysctl_ctx = NULL; 363 struct sysctl_oid *sysctl_tree = NULL; 364 char tmpstr[80], tmpstr2[80]; 365 366 /* 367 * Setup the sysctl variable so the user can change the debug level 368 * on the fly. 
369 */ 370 ksnprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d", 371 device_get_unit(sc->mrsas_dev)); 372 ksnprintf(tmpstr2, sizeof(tmpstr2), "mrsas%d", device_get_unit(sc->mrsas_dev)); 373 374 #if 0 375 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev); 376 if (sysctl_ctx != NULL) 377 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev); 378 379 if (sysctl_tree == NULL) { 380 #endif 381 sysctl_ctx_init(&sc->sysctl_ctx); 382 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 383 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, tmpstr2, 384 CTLFLAG_RD, 0, tmpstr); 385 if (sc->sysctl_tree == NULL) 386 return; 387 sysctl_ctx = &sc->sysctl_ctx; 388 sysctl_tree = sc->sysctl_tree; 389 #if 0 390 } 391 #endif 392 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 393 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0, 394 "Disable the use of OCR"); 395 396 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 397 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION, 398 strlen(MRSAS_VERSION), "driver version"); 399 400 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 401 OID_AUTO, "reset_count", CTLFLAG_RD, 402 &sc->reset_count, 0, "number of ocr from start of the day"); 403 404 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 405 OID_AUTO, "fw_outstanding", CTLFLAG_RD, 406 &sc->fw_outstanding, 0, "FW outstanding commands"); 407 408 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 409 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 410 &sc->io_cmds_highwater, 0, "Max FW outstanding commands"); 411 412 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 413 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0, 414 "Driver debug level"); 415 416 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 417 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout, 418 0, "Driver IO timeout value in mili-second."); 419 420 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 421 OID_AUTO, 
"mrsas_fw_fault_check_delay", CTLFLAG_RW, 422 &sc->mrsas_fw_fault_check_delay, 423 0, "FW fault check thread delay in seconds. <default is 1 sec>"); 424 425 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 426 OID_AUTO, "reset_in_progress", CTLFLAG_RD, 427 &sc->reset_in_progress, 0, "ocr in progress status"); 428 429 } 430 431 /** 432 * mrsas_get_tunables: get tunable parameters. 433 * input: Adapter instance soft state 434 * 435 * Get tunable parameters. This will help to debug driver at boot time. 436 */ 437 static void 438 mrsas_get_tunables(struct mrsas_softc *sc) 439 { 440 char tmpstr[80]; 441 442 /* XXX default to some debugging for now */ 443 sc->mrsas_debug = MRSAS_FAULT; 444 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT; 445 sc->mrsas_fw_fault_check_delay = 1; 446 sc->reset_count = 0; 447 sc->reset_in_progress = 0; 448 449 /* 450 * Grab the global variables. 451 */ 452 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug); 453 454 /* Grab the unit-instance variables */ 455 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level", 456 device_get_unit(sc->mrsas_dev)); 457 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug); 458 } 459 460 /** 461 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information. 462 * Used to get sequence number at driver load time. 463 * input: Adapter soft state 464 * 465 * Allocates DMAable memory for the event log info internal command. 
466 */ 467 int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc) 468 { 469 int el_info_size; 470 471 /* Allocate get event log info command */ 472 el_info_size = sizeof(struct mrsas_evt_log_info); 473 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent 474 1, 0, // algnmnt, boundary 475 BUS_SPACE_MAXADDR_32BIT,// lowaddr 476 BUS_SPACE_MAXADDR, // highaddr 477 NULL, NULL, // filter, filterarg 478 el_info_size, // maxsize 479 1, // msegments 480 el_info_size, // maxsegsize 481 BUS_DMA_ALLOCNOW, // flags 482 &sc->el_info_tag)) { 483 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n"); 484 return (ENOMEM); 485 } 486 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem, 487 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) { 488 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n"); 489 return (ENOMEM); 490 } 491 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap, 492 sc->el_info_mem, el_info_size, mrsas_addr_cb, 493 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) { 494 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n"); 495 return (ENOMEM); 496 } 497 498 memset(sc->el_info_mem, 0, el_info_size); 499 return (0); 500 } 501 502 /** 503 * mrsas_free_evt_info_cmd: Free memory for Event log info command 504 * input: Adapter soft state 505 * 506 * Deallocates memory for the event log info internal command. 507 */ 508 void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc) 509 { 510 if (sc->el_info_phys_addr) 511 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap); 512 if (sc->el_info_mem != NULL) 513 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap); 514 if (sc->el_info_tag != NULL) 515 bus_dma_tag_destroy(sc->el_info_tag); 516 } 517 518 /** 519 * mrsas_get_seq_num: Get latest event sequence number 520 * @sc: Adapter soft state 521 * @eli: Firmware event log sequence number information. 522 * Firmware maintains a log of all events in a non-volatile area. 
523 * Driver get the sequence number using DCMD 524 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time. 525 */ 526 527 static int 528 mrsas_get_seq_num(struct mrsas_softc *sc, 529 struct mrsas_evt_log_info *eli) 530 { 531 struct mrsas_mfi_cmd *cmd; 532 struct mrsas_dcmd_frame *dcmd; 533 534 cmd = mrsas_get_mfi_cmd(sc); 535 536 if (!cmd) { 537 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 538 return -ENOMEM; 539 } 540 541 dcmd = &cmd->frame->dcmd; 542 543 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) { 544 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n"); 545 mrsas_release_mfi_cmd(cmd); 546 return -ENOMEM; 547 } 548 549 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 550 551 dcmd->cmd = MFI_CMD_DCMD; 552 dcmd->cmd_status = 0x0; 553 dcmd->sge_count = 1; 554 dcmd->flags = MFI_FRAME_DIR_READ; 555 dcmd->timeout = 0; 556 dcmd->pad_0 = 0; 557 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info); 558 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 559 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr; 560 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info); 561 562 mrsas_issue_blocked_cmd(sc, cmd); 563 564 /* 565 * Copy the data back into callers buffer 566 */ 567 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info)); 568 mrsas_free_evt_log_info_cmd(sc); 569 mrsas_release_mfi_cmd(cmd); 570 571 return 0; 572 } 573 574 575 /** 576 * mrsas_register_aen: Register for asynchronous event notification 577 * @sc: Adapter soft state 578 * @seq_num: Starting sequence number 579 * @class_locale: Class of the event 580 * This function subscribes for events beyond the @seq_num 581 * and type @class_locale. 
582 * 583 * */ 584 static int 585 mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num, 586 u_int32_t class_locale_word) 587 { 588 int ret_val; 589 struct mrsas_mfi_cmd *cmd; 590 struct mrsas_dcmd_frame *dcmd; 591 union mrsas_evt_class_locale curr_aen; 592 union mrsas_evt_class_locale prev_aen; 593 594 /* 595 * If there an AEN pending already (aen_cmd), check if the 596 * class_locale of that pending AEN is inclusive of the new 597 * AEN request we currently have. If it is, then we don't have 598 * to do anything. In other words, whichever events the current 599 * AEN request is subscribing to, have already been subscribed 600 * to. 601 * If the old_cmd is _not_ inclusive, then we have to abort 602 * that command, form a class_locale that is superset of both 603 * old and current and re-issue to the FW 604 * */ 605 606 curr_aen.word = class_locale_word; 607 608 if (sc->aen_cmd) { 609 610 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1]; 611 612 /* 613 * A class whose enum value is smaller is inclusive of all 614 * higher values. If a PROGRESS (= -1) was previously 615 * registered, then a new registration requests for higher 616 * classes need not be sent to FW. They are automatically 617 * included. 618 * Locale numbers don't have such hierarchy. They are bitmap values 619 */ 620 if ((prev_aen.members.class <= curr_aen.members.class) && 621 !((prev_aen.members.locale & curr_aen.members.locale) ^ 622 curr_aen.members.locale)) { 623 /* 624 * Previously issued event registration includes 625 * current request. Nothing to do. 
626 */ 627 return 0; 628 } else { 629 curr_aen.members.locale |= prev_aen.members.locale; 630 631 if (prev_aen.members.class < curr_aen.members.class) 632 curr_aen.members.class = prev_aen.members.class; 633 634 sc->aen_cmd->abort_aen = 1; 635 ret_val = mrsas_issue_blocked_abort_cmd(sc, 636 sc->aen_cmd); 637 638 if (ret_val) { 639 kprintf("mrsas: Failed to abort " 640 "previous AEN command\n"); 641 return ret_val; 642 } 643 } 644 } 645 646 cmd = mrsas_get_mfi_cmd(sc); 647 648 if (!cmd) 649 return -ENOMEM; 650 651 dcmd = &cmd->frame->dcmd; 652 653 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail)); 654 655 /* 656 * Prepare DCMD for aen registration 657 */ 658 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 659 660 dcmd->cmd = MFI_CMD_DCMD; 661 dcmd->cmd_status = 0x0; 662 dcmd->sge_count = 1; 663 dcmd->flags = MFI_FRAME_DIR_READ; 664 dcmd->timeout = 0; 665 dcmd->pad_0 = 0; 666 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail); 667 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 668 dcmd->mbox.w[0] = seq_num; 669 sc->last_seq_num = seq_num; 670 dcmd->mbox.w[1] = curr_aen.word; 671 dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr; 672 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail); 673 674 if (sc->aen_cmd != NULL) { 675 mrsas_release_mfi_cmd(cmd); 676 return 0; 677 } 678 679 /* 680 * Store reference to the cmd used to register for AEN. 
When an 681 * application wants us to register for AEN, we have to abort this 682 * cmd and re-register with a new EVENT LOCALE supplied by that app 683 */ 684 sc->aen_cmd = cmd; 685 686 /* 687 Issue the aen registration frame 688 */ 689 if (mrsas_issue_dcmd(sc, cmd)){ 690 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n"); 691 return(1); 692 } 693 694 return 0; 695 } 696 /** 697 * mrsas_start_aen - Subscribes to AEN during driver load time 698 * @instance: Adapter soft state 699 */ 700 static int mrsas_start_aen(struct mrsas_softc *sc) 701 { 702 struct mrsas_evt_log_info eli; 703 union mrsas_evt_class_locale class_locale; 704 705 706 /* Get the latest sequence number from FW*/ 707 708 memset(&eli, 0, sizeof(eli)); 709 710 if (mrsas_get_seq_num(sc, &eli)) 711 return -1; 712 713 /* Register AEN with FW for latest sequence number plus 1*/ 714 class_locale.members.reserved = 0; 715 class_locale.members.locale = MR_EVT_LOCALE_ALL; 716 class_locale.members.class = MR_EVT_CLASS_DEBUG; 717 718 return mrsas_register_aen(sc, eli.newest_seq_num + 1, 719 class_locale.word); 720 } 721 722 /** 723 * mrsas_attach: PCI entry point 724 * input: device struct pointer 725 * 726 * Performs setup of PCI and registers, initializes mutexes and 727 * linked lists, registers interrupts and CAM, and initializes 728 * the adapter/controller to its proper state. 729 */ 730 static int mrsas_attach(device_t dev) 731 { 732 struct mrsas_softc *sc = device_get_softc(dev); 733 uint32_t cmd, bar, error; 734 735 /* Look up our softc and initialize its fields. */ 736 sc->mrsas_dev = dev; 737 sc->device_id = pci_get_device(dev); 738 739 mrsas_get_tunables(sc); 740 741 /* 742 * Set up PCI and registers 743 */ 744 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 745 if ( (cmd & PCIM_CMD_PORTEN) == 0) { 746 return (ENXIO); 747 } 748 /* Force the busmaster enable bit on. 
*/ 749 cmd |= PCIM_CMD_BUSMASTEREN; 750 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 751 752 //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4); 753 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4); 754 755 sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */ 756 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, 757 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) 758 == NULL) { 759 device_printf(dev, "Cannot allocate PCI registers\n"); 760 goto attach_fail; 761 } 762 sc->bus_tag = rman_get_bustag(sc->reg_res); 763 sc->bus_handle = rman_get_bushandle(sc->reg_res); 764 765 /* Intialize mutexes */ 766 lockinit(&sc->sim_lock, "mrsas_sim_lock", 0, LK_CANRECURSE); 767 lockinit(&sc->pci_lock, "mrsas_pci_lock", 0, LK_CANRECURSE); 768 lockinit(&sc->io_lock, "mrsas_io_lock", 0, LK_CANRECURSE); 769 lockinit(&sc->aen_lock, "mrsas_aen_lock", 0, LK_CANRECURSE); 770 spin_init(&sc->ioctl_lock); 771 lockinit(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", 0, 772 LK_CANRECURSE); 773 lockinit(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", 0, 774 LK_CANRECURSE); 775 lockinit(&sc->raidmap_lock, "mrsas_raidmap_lock", 0, LK_CANRECURSE); 776 777 /* Intialize linked list */ 778 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head); 779 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head); 780 781 atomic_set(&sc->fw_outstanding,0); 782 783 sc->io_cmds_highwater = 0; 784 785 /* Create a /dev entry for this device. 
*/ 786 sc->mrsas_cdev = make_dev(&mrsas_ops, device_get_unit(dev), UID_ROOT, 787 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u", 788 device_get_unit(dev)); 789 if (sc->mrsas_cdev) 790 sc->mrsas_cdev->si_drv1 = sc; 791 792 sc->adprecovery = MRSAS_HBA_OPERATIONAL; 793 sc->UnevenSpanSupport = 0; 794 795 /* Initialize Firmware */ 796 if (mrsas_init_fw(sc) != SUCCESS) { 797 goto attach_fail_fw; 798 } 799 800 /* Register SCSI mid-layer */ 801 if ((mrsas_cam_attach(sc) != SUCCESS)) { 802 goto attach_fail_cam; 803 } 804 805 /* Register IRQs */ 806 if (mrsas_setup_irq(sc) != SUCCESS) { 807 goto attach_fail_irq; 808 } 809 810 /* Enable Interrupts */ 811 mrsas_enable_intr(sc); 812 813 error = kthread_create(mrsas_ocr_thread, sc, &sc->ocr_thread, "mrsas_ocr%d", 814 device_get_unit(sc->mrsas_dev)); 815 if (error) { 816 kprintf("Error %d starting rescan thread\n", error); 817 goto attach_fail_irq; 818 } 819 820 mrsas_setup_sysctl(sc); 821 822 /* Initiate AEN (Asynchronous Event Notification)*/ 823 824 if (mrsas_start_aen(sc)) { 825 kprintf("Error: start aen failed\n"); 826 goto fail_start_aen; 827 } 828 829 return (0); 830 831 fail_start_aen: 832 attach_fail_irq: 833 mrsas_teardown_intr(sc); 834 attach_fail_cam: 835 mrsas_cam_detach(sc); 836 attach_fail_fw: 837 //attach_fail_raidmap: 838 mrsas_free_mem(sc); 839 lockuninit(&sc->sim_lock); 840 lockuninit(&sc->aen_lock); 841 lockuninit(&sc->pci_lock); 842 lockuninit(&sc->io_lock); 843 spin_uninit(&sc->ioctl_lock); 844 lockuninit(&sc->mpt_cmd_pool_lock); 845 lockuninit(&sc->mfi_cmd_pool_lock); 846 lockuninit(&sc->raidmap_lock); 847 attach_fail: 848 destroy_dev(sc->mrsas_cdev); 849 if (sc->reg_res){ 850 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY, 851 sc->reg_res_id, sc->reg_res); 852 } 853 return (ENXIO); 854 } 855 856 /** 857 * mrsas_detach: De-allocates and teardown resources 858 * input: device struct pointer 859 * 860 * This function is the entry point for device disconnect and detach. 
It 861 * performs memory de-allocations, shutdown of the controller and various 862 * teardown and destroy resource functions. 863 */ 864 static int mrsas_detach(device_t dev) 865 { 866 struct mrsas_softc *sc; 867 int i = 0; 868 869 sc = device_get_softc(dev); 870 sc->remove_in_progress = 1; 871 if(sc->ocr_thread_active) 872 wakeup(&sc->ocr_chan); 873 while(sc->reset_in_progress){ 874 i++; 875 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 876 mrsas_dprint(sc, MRSAS_INFO, 877 "[%2d]waiting for ocr to be finished\n",i); 878 } 879 tsleep(mrsas_detach, 0, "mr_shutdown", hz); 880 } 881 i = 0; 882 while(sc->ocr_thread_active){ 883 i++; 884 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 885 mrsas_dprint(sc, MRSAS_INFO, 886 "[%2d]waiting for " 887 "mrsas_ocr thread to quit ocr %d\n",i, 888 sc->ocr_thread_active); 889 } 890 tsleep(mrsas_detach, 0, "mr_shutdown", hz); 891 } 892 mrsas_flush_cache(sc); 893 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN); 894 mrsas_disable_intr(sc); 895 mrsas_cam_detach(sc); 896 mrsas_teardown_intr(sc); 897 mrsas_free_mem(sc); 898 lockuninit(&sc->sim_lock); 899 lockuninit(&sc->aen_lock); 900 lockuninit(&sc->pci_lock); 901 lockuninit(&sc->io_lock); 902 spin_uninit(&sc->ioctl_lock); 903 lockuninit(&sc->mpt_cmd_pool_lock); 904 lockuninit(&sc->mfi_cmd_pool_lock); 905 lockuninit(&sc->raidmap_lock); 906 if (sc->reg_res){ 907 bus_release_resource(sc->mrsas_dev, 908 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); 909 } 910 destroy_dev(sc->mrsas_cdev); 911 if (sc->sysctl_tree != NULL) 912 sysctl_ctx_free(&sc->sysctl_ctx); 913 return (0); 914 } 915 916 /** 917 * mrsas_free_mem: Frees allocated memory 918 * input: Adapter instance soft state 919 * 920 * This function is called from mrsas_detach() to free previously allocated 921 * memory. 
 */
void mrsas_free_mem(struct mrsas_softc *sc)
{
    int i;
    u_int32_t max_cmd;
    struct mrsas_mfi_cmd *mfi_cmd;
    struct mrsas_mpt_cmd *mpt_cmd;

    /*
     * Free RAID map memory.  Every DMA allocation is torn down in three
     * guarded steps (unload, free, tag destroy) so this routine is also
     * safe to call on a partially-initialized softc (failed attach).
     */
    for (i = 0; i < 2; i++) {
        if (sc->raidmap_phys_addr[i])
            bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
        if (sc->raidmap_mem[i] != NULL)
            bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
        if (sc->raidmap_tag[i] != NULL)
            bus_dma_tag_destroy(sc->raidmap_tag[i]);
    }

    /*
     * Free version buffer memory
     */
    if (sc->verbuf_phys_addr)
        bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
    if (sc->verbuf_mem != NULL)
        bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
    if (sc->verbuf_tag != NULL)
        bus_dma_tag_destroy(sc->verbuf_tag);

    /*
     * Free sense buffer memory
     */
    if (sc->sense_phys_addr)
        bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
    if (sc->sense_mem != NULL)
        bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
    if (sc->sense_tag != NULL)
        bus_dma_tag_destroy(sc->sense_tag);

    /*
     * Free chain frame memory
     */
    if (sc->chain_frame_phys_addr)
        bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
    if (sc->chain_frame_mem != NULL)
        bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
    if (sc->chain_frame_tag != NULL)
        bus_dma_tag_destroy(sc->chain_frame_tag);

    /*
     * Free IO Request memory
     */
    if (sc->io_request_phys_addr)
        bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
    if (sc->io_request_mem != NULL)
        bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
    if (sc->io_request_tag != NULL)
        bus_dma_tag_destroy(sc->io_request_tag);

    /*
     * Free Reply Descriptor memory
     */
    if (sc->reply_desc_phys_addr)
        bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
    if (sc->reply_desc_mem != NULL)
        bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
    if (sc->reply_desc_tag != NULL)
        bus_dma_tag_destroy(sc->reply_desc_tag);

    /*
     * Free event detail memory
     */
    if (sc->evt_detail_phys_addr)
        bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
    if (sc->evt_detail_mem != NULL)
        bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
    if (sc->evt_detail_tag != NULL)
        bus_dma_tag_destroy(sc->evt_detail_tag);

    /*
     * Free MFI frames (the per-command DMA frames, then the shared tag).
     */
    if (sc->mfi_cmd_list) {
        for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
            mfi_cmd = sc->mfi_cmd_list[i];
            mrsas_free_frame(sc, mfi_cmd);
        }
    }
    if (sc->mficmd_frame_tag != NULL)
        bus_dma_tag_destroy(sc->mficmd_frame_tag);

    /*
     * Free MPT internal command list: per-command data dmamaps and the
     * command structures themselves, then the pointer array.
     */
    max_cmd = sc->max_fw_cmds;
    if (sc->mpt_cmd_list) {
        for (i = 0; i < max_cmd; i++) {
            mpt_cmd = sc->mpt_cmd_list[i];
            bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
            kfree(sc->mpt_cmd_list[i], M_MRSAS);
        }
        kfree(sc->mpt_cmd_list, M_MRSAS);
        sc->mpt_cmd_list = NULL;
    }

    /*
     * Free MFI internal command list (frames were already freed above).
     */
    if (sc->mfi_cmd_list) {
        for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
            kfree(sc->mfi_cmd_list[i], M_MRSAS);
        }
        kfree(sc->mfi_cmd_list, M_MRSAS);
        sc->mfi_cmd_list = NULL;
    }

    /*
     * Free request descriptor memory
     */
    kfree(sc->req_desc, M_MRSAS);
    sc->req_desc = NULL;

    /*
     * Destroy parent tag (must be last: all other tags derive from it).
     */
    if (sc->mrsas_parent_tag != NULL)
        bus_dma_tag_destroy(sc->mrsas_parent_tag);
}

/**
 * mrsas_teardown_intr: Teardown interrupt
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to teardown and
release 1060 * bus interrupt resourse. 1061 */ 1062 void mrsas_teardown_intr(struct mrsas_softc *sc) 1063 { 1064 if (sc->intr_handle) 1065 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle); 1066 if (sc->mrsas_irq != NULL) 1067 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq); 1068 if (sc->irq_type == PCI_INTR_TYPE_MSI) 1069 pci_release_msi(sc->mrsas_dev); 1070 sc->intr_handle = NULL; 1071 } 1072 1073 /** 1074 * mrsas_suspend: Suspend entry point 1075 * input: Device struct pointer 1076 * 1077 * This function is the entry point for system suspend from the OS. 1078 */ 1079 static int mrsas_suspend(device_t dev) 1080 { 1081 struct mrsas_softc *sc; 1082 1083 sc = device_get_softc(dev); 1084 return (0); 1085 } 1086 1087 /** 1088 * mrsas_resume: Resume entry point 1089 * input: Device struct pointer 1090 * 1091 * This function is the entry point for system resume from the OS. 1092 */ 1093 static int mrsas_resume(device_t dev) 1094 { 1095 struct mrsas_softc *sc; 1096 1097 sc = device_get_softc(dev); 1098 return (0); 1099 } 1100 1101 /** 1102 * mrsas_ioctl: IOCtl commands entry point. 1103 * 1104 * This function is the entry point for IOCtls from the OS. It calls the 1105 * appropriate function for processing depending on the command received. 
1106 */ 1107 static int 1108 mrsas_ioctl(struct dev_ioctl_args *ap) 1109 { 1110 cdev_t dev = ap->a_head.a_dev; 1111 u_long cmd = ap->a_cmd; 1112 caddr_t arg = ap->a_data; 1113 struct mrsas_softc *sc; 1114 int ret = 0, i = 0; 1115 1116 sc = (struct mrsas_softc *)(dev->si_drv1); 1117 1118 if (sc->remove_in_progress) { 1119 mrsas_dprint(sc, MRSAS_INFO, 1120 "Driver remove or shutdown called.\n"); 1121 return ENOENT; 1122 } 1123 1124 spin_lock(&sc->ioctl_lock); 1125 if (!sc->reset_in_progress) { 1126 spin_unlock(&sc->ioctl_lock); 1127 goto do_ioctl; 1128 } 1129 1130 /* Release ioclt_lock, and wait for OCR 1131 * to be finished */ 1132 spin_unlock(&sc->ioctl_lock); 1133 while(sc->reset_in_progress){ 1134 i++; 1135 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1136 mrsas_dprint(sc, MRSAS_INFO, 1137 "[%2d]waiting for " 1138 "OCR to be finished %d\n",i, 1139 sc->ocr_thread_active); 1140 } 1141 tsleep(mrsas_ioctl, 0, "mr_ioctl", hz); 1142 } 1143 1144 do_ioctl: 1145 switch (cmd) { 1146 case MRSAS_IOC_FIRMWARE_PASS_THROUGH: 1147 ret = mrsas_passthru(sc, (void *)arg); 1148 break; 1149 case MRSAS_IOC_SCAN_BUS: 1150 ret = mrsas_bus_scan(sc); 1151 break; 1152 } 1153 1154 return (ret); 1155 } 1156 1157 /** 1158 * mrsas_setup_irq: Set up interrupt. 1159 * input: Adapter instance soft state 1160 * 1161 * This function sets up interrupts as a bus resource, with flags indicating 1162 * resource permitting contemporaneous sharing and for resource to activate 1163 * atomically. 
1164 */ 1165 static int mrsas_setup_irq(struct mrsas_softc *sc) 1166 { 1167 u_int irq_flags; 1168 1169 sc->irq_id = 0; 1170 sc->irq_type = pci_alloc_1intr(sc->mrsas_dev, mrsas_msi_enable, 1171 &sc->irq_id, &irq_flags); 1172 1173 sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ, 1174 &sc->irq_id, irq_flags); 1175 if (sc->mrsas_irq == NULL){ 1176 device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n"); 1177 return (FAIL); 1178 } 1179 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE, 1180 mrsas_isr, sc, &sc->intr_handle, NULL)) { 1181 device_printf(sc->mrsas_dev, "Cannot set up interrupt\n"); 1182 return (FAIL); 1183 } 1184 1185 return (0); 1186 } 1187 1188 /* 1189 * mrsas_isr: ISR entry point 1190 * input: argument pointer 1191 * 1192 * This function is the interrupt service routine entry point. There 1193 * are two types of interrupts, state change interrupt and response 1194 * interrupt. If an interrupt is not ours, we just return. 1195 */ 1196 void mrsas_isr(void *arg) 1197 { 1198 struct mrsas_softc *sc = (struct mrsas_softc *)arg; 1199 int status; 1200 1201 /* Clear FW state change interrupt */ 1202 status = mrsas_clear_intr(sc); 1203 1204 /* Not our interrupt */ 1205 if (!status) 1206 return; 1207 1208 /* If we are resetting, bail */ 1209 if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) { 1210 kprintf(" Entered into ISR when OCR is going active. \n"); 1211 mrsas_clear_intr(sc); 1212 return; 1213 } 1214 /* Process for reply request and clear response interrupt */ 1215 if (mrsas_complete_cmd(sc) != SUCCESS) 1216 mrsas_clear_intr(sc); 1217 1218 return; 1219 } 1220 1221 /* 1222 * mrsas_complete_cmd: Process reply request 1223 * input: Adapter instance soft state 1224 * 1225 * This function is called from mrsas_isr() to process reply request and 1226 * clear response interrupt. Processing of the reply request entails 1227 * walking through the reply descriptor array for the command request 1228 * pended from Firmware. 
We look at the Function field to determine
 * the command type and perform the appropriate action. Before we
 * return, we clear the response interrupt.
 */
static int mrsas_complete_cmd(struct mrsas_softc *sc)
{
    Mpi2ReplyDescriptorsUnion_t *desc;
    MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
    MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
    struct mrsas_mpt_cmd *cmd_mpt;
    struct mrsas_mfi_cmd *cmd_mfi;
    u_int8_t arm, reply_descript_type;
    u_int16_t smid, num_completed;
    u_int8_t status, extStatus;
    union desc_value desc_val;
    PLD_LOAD_BALANCE_INFO lbinfo;
    u_int32_t device_id;
    int threshold_reply_count = 0;

    /* If we have a hardware error, not need to continue */
    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
        return (DONE);

    /* Resume scanning where the previous pass left off. */
    desc = sc->reply_desc_mem;
    desc += sc->last_reply_idx;

    reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

    desc_val.word = desc->Words;
    num_completed = 0;

    reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

    /*
     * Walk the reply descriptors; an all-ones descriptor marks an empty
     * (already-consumed) slot.
     */
    while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
        /* SMID is 1-based; index into the MPT command list. */
        smid = reply_desc->SMID;
        cmd_mpt = sc->mpt_cmd_list[smid - 1];
        scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;

        status = scsi_io_req->RaidContext.status;
        extStatus = scsi_io_req->RaidContext.exStatus;

        switch (scsi_io_req->Function) {
        case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO */
            device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
            lbinfo = &sc->load_balance_info[device_id];
            /* Undo the RAID1 load-balance accounting done at submit. */
            if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
                arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
                atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
                cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
            }
            /* FALLTHROUGH -- fall thru and complete IO */
        case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
            mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
            mrsas_cmd_done(sc, cmd_mpt);
            scsi_io_req->RaidContext.status = 0;
            scsi_io_req->RaidContext.exStatus = 0;
            atomic_dec(&sc->fw_outstanding);
            break;
        case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
            cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
            mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
            cmd_mpt->flags = 0;
            mrsas_release_mpt_cmd(cmd_mpt);
            break;
        }

        sc->last_reply_idx++;
        if (sc->last_reply_idx >= sc->reply_q_depth)
            sc->last_reply_idx = 0;

        desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
        num_completed++;
        threshold_reply_count++;

        /* Get the next reply descriptor (the queue is circular). */
        if (!sc->last_reply_idx)
            desc = sc->reply_desc_mem;
        else
            desc++;

        reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
        desc_val.word = desc->Words;

        reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

        if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
            break;

        /*
         * Write to reply post index after completing threshold reply count
         * and still there are more replies in reply queue pending to be
         * completed.
         */
        if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
                sc->last_reply_idx);
            threshold_reply_count = 0;
        }
    }

    /* No match, just return */
    if (num_completed == 0)
        return (DONE);

    /* Clear response interrupt */
    mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index), sc->last_reply_idx);

    return (0);
}

/*
 * mrsas_map_mpt_cmd_status: Map FW command status to a CAM CCB status.
 * input: MPT command, FW status byte, FW extended status byte
 *
 * NOTE(review): the original header comment said "Allocate DMAable
 * memory" -- a copy-paste error; this function allocates nothing.
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the CCB.
 */
void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
    struct mrsas_softc *sc = cmd->sc;
    u_int8_t *sense_data;

    switch (status) {
    case MFI_STAT_OK:
        cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
        break;
    case MFI_STAT_SCSI_IO_FAILED:
    case MFI_STAT_SCSI_DONE_WITH_ERROR:
        cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
        sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
        if (sense_data) {
            /* For now just copy 18 bytes back */
            memcpy(sense_data, cmd->sense, 18);
            cmd->ccb_ptr->csio.sense_len = 18;
            cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
        }
        break;
    case MFI_STAT_LD_OFFLINE:
    case MFI_STAT_DEVICE_NOT_FOUND:
        /* LUN 0 missing means device absent; other LUNs are invalid. */
        if (cmd->ccb_ptr->ccb_h.target_lun)
            cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
        else
            cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
        break;
    case MFI_STAT_CONFIG_SEQ_MISMATCH:
        /*
         * send status to CAM layer to retry sending command without
         * decrementing retry counter
         */
        cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
        break;
    default:
        device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
        cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
        cmd->ccb_ptr->csio.scsi_status = status;
    }
    return;
}

/*
 * mrsas_alloc_mem: Allocate DMAable memory.
 * input: Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory.
 * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
 * into Kernel virtual address. Callback argument is physical memory address.
 *
 * On any failure ENOMEM is returned; already-completed allocations are
 * expected to be released by mrsas_free_mem() on the caller's error path.
 */
static int mrsas_alloc_mem(struct mrsas_softc *sc)
{
    u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
        chain_frame_size, evt_detail_size;

    /*
     * Allocate parent DMA tag; all other tags below derive from it.
     */
    if (bus_dma_tag_create(NULL,	/* parent */
        1,				/* alignment */
        0,				/* boundary */
        BUS_SPACE_MAXADDR,		/* lowaddr */
        BUS_SPACE_MAXADDR,		/* highaddr */
        NULL, NULL,			/* filter, filterarg */
        MRSAS_MAX_IO_SIZE,		/* maxsize */
        MRSAS_MAX_SGL,			/* nsegments */
        MRSAS_MAX_IO_SIZE,		/* maxsegsize */
        0,				/* flags */
        &sc->mrsas_parent_tag		/* tag */
        )) {
        device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
        return (ENOMEM);
    }

    /*
     * Allocate for version buffer (kept below 4GB for 32-bit FW fields).
     */
    verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
    if (bus_dma_tag_create(sc->mrsas_parent_tag,	/* parent */
        1, 0,				/* algnmnt, boundary */
        BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
        BUS_SPACE_MAXADDR,		/* highaddr */
        NULL, NULL,			/* filter, filterarg */
        verbuf_size,			/* maxsize */
        1,				/* nsegments */
        verbuf_size,			/* maxsegsize */
        BUS_DMA_ALLOCNOW,		/* flags */
        &sc->verbuf_tag)) {
        device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
        return (ENOMEM);
    }
    if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
        BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
        device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
        return (ENOMEM);
    }
    bzero(sc->verbuf_mem, verbuf_size);
    if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
        verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)) {
        device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
        return (ENOMEM);
    }

    /*
     * Allocate IO Request Frames (16-byte aligned per MPI2 spec).
     */
    io_req_size = sc->io_frames_alloc_sz;
    if (bus_dma_tag_create(sc->mrsas_parent_tag,	/* parent */
        16, 0,				/* algnmnt, boundary */
        BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
        BUS_SPACE_MAXADDR,		/* highaddr */
        NULL, NULL,			/* filter, filterarg */
        io_req_size,			/* maxsize */
        1,				/* nsegments */
        io_req_size,			/* maxsegsize */
        BUS_DMA_ALLOCNOW,		/* flags */
        &sc->io_request_tag)) {
        device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
        return (ENOMEM);
    }
    if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
        BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
        device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
        return (ENOMEM);
    }
    bzero(sc->io_request_mem, io_req_size);
    if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
        sc->io_request_mem, io_req_size, mrsas_addr_cb,
        &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
        device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
        return (ENOMEM);
    }

    /*
     * Allocate Chain Frames
     */
    chain_frame_size = sc->chain_frames_alloc_sz;
    if (bus_dma_tag_create(sc->mrsas_parent_tag,	/* parent */
        4, 0,				/* algnmnt, boundary */
        BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
        BUS_SPACE_MAXADDR,		/* highaddr */
        NULL, NULL,			/* filter, filterarg */
        chain_frame_size,		/* maxsize */
        1,				/* nsegments */
        chain_frame_size,		/* maxsegsize */
        BUS_DMA_ALLOCNOW,		/* flags */
        &sc->chain_frame_tag)) {
        device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
        return (ENOMEM);
    }
    if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
        BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
        device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
        return (ENOMEM);
    }
    bzero(sc->chain_frame_mem, chain_frame_size);
    if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
        sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
        &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
        device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
        return (ENOMEM);
    }

    /*
     * Allocate Reply Descriptor Array
     */
    reply_desc_size = sc->reply_alloc_sz;
    if (bus_dma_tag_create(sc->mrsas_parent_tag,	/* parent */
        16, 0,				/* algnmnt, boundary */
        BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
        BUS_SPACE_MAXADDR,		/* highaddr */
        NULL, NULL,			/* filter, filterarg */
        reply_desc_size,		/* maxsize */
        1,				/* nsegments */
        reply_desc_size,		/* maxsegsize */
        BUS_DMA_ALLOCNOW,		/* flags */
        &sc->reply_desc_tag)) {
        device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
        return (ENOMEM);
    }
    if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
        BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
        device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
        return (ENOMEM);
    }
    /*
     * NOTE(review): unlike the other buffers, the reply descriptors are
     * not bzero'ed here; they are initialized elsewhere (see
     * mrsas_reset_reply_desc) -- confirm before relying on their content.
     */
    if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
        sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
        &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
        device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
        return (ENOMEM);
    }

    /*
     * Allocate Sense Buffer Array.  Keep in lower 4GB
     */
    sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
    if (bus_dma_tag_create(sc->mrsas_parent_tag,	/* parent */
        64, 0,				/* algnmnt, boundary */
        BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
        BUS_SPACE_MAXADDR,		/* highaddr */
        NULL, NULL,			/* filter, filterarg */
        sense_size,			/* maxsize */
        1,				/* nsegments */
        sense_size,			/* maxsegsize */
        BUS_DMA_ALLOCNOW,		/* flags */
        &sc->sense_tag)) {
        device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
        return (ENOMEM);
    }
    if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
        BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
        device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
        return (ENOMEM);
    }
    if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
        sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
        BUS_DMA_NOWAIT)) {
        device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
        return (ENOMEM);
    }

    /*
     * Allocate for Event detail structure
     */
    evt_detail_size = sizeof(struct mrsas_evt_detail);
    if (bus_dma_tag_create(sc->mrsas_parent_tag,	/* parent */
        1, 0,				/* algnmnt, boundary */
        BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
        BUS_SPACE_MAXADDR,		/* highaddr */
        NULL, NULL,			/* filter, filterarg */
        evt_detail_size,		/* maxsize */
        1,				/* nsegments */
        evt_detail_size,		/* maxsegsize */
        BUS_DMA_ALLOCNOW,		/* flags */
        &sc->evt_detail_tag)) {
        device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
        return (ENOMEM);
    }
    if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
        BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
        device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
        return (ENOMEM);
    }
    bzero(sc->evt_detail_mem, evt_detail_size);
    if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
        sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
        &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
        device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
        return (ENOMEM);
    }

    /*
     * Create a dma tag for data buffers; size will be the maximum
     * possible I/O size (280kB).  No memory is allocated here; per-command
     * maps are created against this tag later.
     */
    if (bus_dma_tag_create(sc->mrsas_parent_tag,	/* parent */
        1,				/* alignment */
        0,				/* boundary */
        BUS_SPACE_MAXADDR,		/* lowaddr */
        BUS_SPACE_MAXADDR,		/* highaddr */
        NULL, NULL,			/* filter, filterarg */
        MRSAS_MAX_IO_SIZE,		/* maxsize */
        MRSAS_MAX_SGL,			/* nsegments */
        MRSAS_MAX_IO_SIZE,		/* maxsegsize */
        BUS_DMA_ALLOCNOW,		/* flags */
        &sc->data_tag)) {
        device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
        return (ENOMEM);
    }

    return (0);
}

/*
 * mrsas_addr_cb: Callback function of bus_dmamap_load()
 * input: callback argument,
 *        machine dependent type that describes DMA segments,
 *        number of segments,
 *        error code.
 *
 * This function is for the driver to receive mapping information resultant
 * of the bus_dmamap_load(). Only the first segment's address is recorded;
 * all tags above request a single segment.
 */
void
mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
    bus_addr_t *addr;

    addr = arg;
    *addr = segs[0].ds_addr;
}

/*
 * mrsas_setup_raidmap: Set up RAID map.
 * input: Adapter instance soft state
 *
 * Allocate DMA memory for the RAID maps and perform setup.
1646 */ 1647 static int mrsas_setup_raidmap(struct mrsas_softc *sc) 1648 { 1649 sc->map_sz = sizeof(MR_FW_RAID_MAP) + 1650 (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1)); 1651 1652 for (int i=0; i < 2; i++) 1653 { 1654 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent 1655 4, 0, // algnmnt, boundary 1656 BUS_SPACE_MAXADDR_32BIT,// lowaddr 1657 BUS_SPACE_MAXADDR, // highaddr 1658 NULL, NULL, // filter, filterarg 1659 sc->map_sz, // maxsize 1660 1, // nsegments 1661 sc->map_sz, // maxsegsize 1662 BUS_DMA_ALLOCNOW, // flags 1663 &sc->raidmap_tag[i])) { 1664 device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n"); 1665 return (ENOMEM); 1666 } 1667 if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i], 1668 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { 1669 device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n"); 1670 return (ENOMEM); 1671 } 1672 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], 1673 sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i], 1674 BUS_DMA_NOWAIT)){ 1675 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); 1676 return (ENOMEM); 1677 } 1678 if (!sc->raidmap_mem[i]) { 1679 device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n"); 1680 return (ENOMEM); 1681 } 1682 } 1683 1684 if (!mrsas_get_map_info(sc)) 1685 mrsas_sync_map_info(sc); 1686 1687 return (0); 1688 } 1689 1690 /** 1691 * mrsas_init_fw: Initialize Firmware 1692 * input: Adapter soft state 1693 * 1694 * Calls transition_to_ready() to make sure Firmware is in operational 1695 * state and calls mrsas_init_adapter() to send IOC_INIT command to 1696 * Firmware. It issues internal commands to get the controller info 1697 * after the IOC_INIT command response is received by Firmware. 1698 * Note: code relating to get_pdlist, get_ld_list and max_sectors 1699 * are currently not being used, it is left here as placeholder. 
 */
static int mrsas_init_fw(struct mrsas_softc *sc)
{
    u_int32_t max_sectors_1;
    u_int32_t max_sectors_2;
    u_int32_t tmp_sectors;
    struct mrsas_ctrl_info *ctrl_info;

    int ret, ocr = 0;

    /* Make sure Firmware is ready */
    ret = mrsas_transition_to_ready(sc, ocr);
    if (ret != SUCCESS) {
        return (ret);
    }

    /* Get operational params, sge flags, send init cmd to ctlr */
    if (mrsas_init_adapter(sc) != SUCCESS) {
        device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
        return (1);
    }

    /* Allocate internal commands for pass-thru */
    if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
        device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
        return (1);
    }

    if (mrsas_setup_raidmap(sc) != SUCCESS) {
        device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
        return (1);
    }

    /* For pass-thru, get PD/LD list and controller info */
    memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
    mrsas_get_pd_list(sc);

    /* 0xff marks unused LD slots before the list is populated. */
    memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
    mrsas_get_ld_list(sc);

    /* NULL is tolerated below; the sector-limit tuning is simply skipped. */
    ctrl_info = kmalloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);

    /*
     * Compute the max allowed sectors per IO: The controller info has two
     * limits on max sectors. Driver should use the minimum of these two.
     *
     * 1 << stripe_sz_ops.min = max sectors per strip
     *
     * Note that older firmwares ( < FW ver 30) didn't report information
     * to calculate max_sectors_1. So the number ended up as zero always.
     */
    tmp_sectors = 0;
    if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
        max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
            ctrl_info->max_strips_per_io;
        max_sectors_2 = ctrl_info->max_request_size;
        tmp_sectors = min(max_sectors_1, max_sectors_2);
        sc->disableOnlineCtrlReset =
            ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
        sc->UnevenSpanSupport =
            ctrl_info->adapterOperations2.supportUnevenSpans;
        if (sc->UnevenSpanSupport) {
            device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
                sc->UnevenSpanSupport);
            /* Fast-path IO requires a valid RAID map. */
            if (MR_ValidateMapInfo(sc))
                sc->fast_path_io = 1;
            else
                sc->fast_path_io = 0;
        }
    }
    /* Default cap derived from SGE capacity (512-byte sectors). */
    sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;

    /* Honor the tighter firmware-reported limit, if any. */
    if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
        sc->max_sectors_per_req = tmp_sectors;

    if (ctrl_info)
        kfree(ctrl_info, M_MRSAS);

    return (0);
}

/**
 * mrsas_init_adapter: Initializes the adapter/controller
 * input: Adapter soft state
 *
 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
 * ROC/controller. The FW register is read to determined the number of
 * commands that is supported. All memory allocations for IO is based on
 * max_cmd. Appropriate calculations are performed in this function.
 */
int mrsas_init_adapter(struct mrsas_softc *sc)
{
    uint32_t status;
    u_int32_t max_cmd;
    int ret;

    /* Read FW status register */
    status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

    /* Get operational params from status register */
    sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

    /* Decrement the max supported by 1, to correlate with FW */
    sc->max_fw_cmds = sc->max_fw_cmds - 1;
    max_cmd = sc->max_fw_cmds;

    /*
     * Determine allocation size of command frames.
     * reply_q_depth: 2*max_cmd + 1, rounded up to a multiple of 16.
     */
    sc->reply_q_depth = ((max_cmd * 2 + 1 + 15) / 16 * 16);
    sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
    sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
    /* One extra frame beyond max_cmd, plus one leading frame. */
    sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
    sc->chain_frames_alloc_sz = 1024 * max_cmd;
    /* SGEs that fit in the main frame after the fixed request header. */
    sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
        offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

    sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
    /* -2: reserve room for the chain element itself. */
    sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

    /* Used for pass thru MFI frame (DCMD) */
    sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

    sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
        sizeof(MPI2_SGE_IO_UNION)) / 16;

    sc->last_reply_idx = 0;

    ret = mrsas_alloc_mem(sc);
    if (ret != SUCCESS)
        return (ret);

    ret = mrsas_alloc_mpt_cmds(sc);
    if (ret != SUCCESS)
        return (ret);

    ret = mrsas_ioc_init(sc);
    if (ret != SUCCESS)
        return (ret);

    return (0);
}

/**
 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
 * input: Adapter soft state
 *
 * Allocates for the IOC Init cmd to FW to initialize the
ROC/controller. 1851 */ 1852 int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc) 1853 { 1854 int ioc_init_size; 1855 1856 /* Allocate IOC INIT command */ 1857 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST); 1858 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent 1859 1, 0, // algnmnt, boundary 1860 BUS_SPACE_MAXADDR_32BIT,// lowaddr 1861 BUS_SPACE_MAXADDR, // highaddr 1862 NULL, NULL, // filter, filterarg 1863 ioc_init_size, // maxsize 1864 1, // msegments 1865 ioc_init_size, // maxsegsize 1866 BUS_DMA_ALLOCNOW, // flags 1867 &sc->ioc_init_tag)) { 1868 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n"); 1869 return (ENOMEM); 1870 } 1871 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem, 1872 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) { 1873 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n"); 1874 return (ENOMEM); 1875 } 1876 bzero(sc->ioc_init_mem, ioc_init_size); 1877 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap, 1878 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb, 1879 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) { 1880 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n"); 1881 return (ENOMEM); 1882 } 1883 1884 return (0); 1885 } 1886 1887 /** 1888 * mrsas_free_ioc_cmd: Allocates memory for IOC Init command 1889 * input: Adapter soft state 1890 * 1891 * Deallocates memory of the IOC Init cmd. 1892 */ 1893 void mrsas_free_ioc_cmd(struct mrsas_softc *sc) 1894 { 1895 if (sc->ioc_init_phys_mem) 1896 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap); 1897 if (sc->ioc_init_mem != NULL) 1898 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap); 1899 if (sc->ioc_init_tag != NULL) 1900 bus_dma_tag_destroy(sc->ioc_init_tag); 1901 } 1902 1903 /** 1904 * mrsas_ioc_init: Sends IOC Init command to FW 1905 * input: Adapter soft state 1906 * 1907 * Issues the IOC Init cmd to FW to initialize the ROC/controller. 
 */
int mrsas_ioc_init(struct mrsas_softc *sc)
{
    struct mrsas_init_frame *init_frame;
    pMpi2IOCInitRequest_t IOCInitMsg;
    MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
    u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
    bus_addr_t phys_addr;
    int i, retcode = 0;

    /* Allocate memory for the IOC INIT command */
    if (mrsas_alloc_ioc_cmd(sc)) {
        device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
        return (1);
    }

    /* The MPI2 IOC INIT request sits 1024 bytes past the MFI init frame. */
    IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
    IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
    IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
    IOCInitMsg->MsgVersion = MPI2_VERSION;
    IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
    /* Frame size in 32-bit words, per the MPI2 spec. */
    IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
    IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
    IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
    IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;

    /* MFI init frame at offset 0 carries the MPI2 request to the FW. */
    init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
    init_frame->cmd = MFI_CMD_INIT;
    /* 0xFF marks "no response yet"; polled below until FW overwrites it. */
    init_frame->cmd_status = 0xFF;
    init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

    if (sc->verbuf_mem) {
        /* strlen+2 = version string + '\n' + NUL. */
        ksnprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
            MRSAS_VERSION);
        /* verbuf is allocated below 4GB, so the low 32 bits suffice. */
        init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
        init_frame->driver_ver_hi = 0;
    }

    phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
    init_frame->queue_info_new_phys_addr_lo = phys_addr;
    init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

    req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
    req_desc.MFAIo.RequestFlags =
        (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

    mrsas_disable_intr(sc);
    mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
    mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

    /*
     * Poll response timer to wait for Firmware response. While this
     * timer with the DELAY call could block CPU, the time interval for
     * this is only 1 millisecond.
     */
    if (init_frame->cmd_status == 0xFF) {
        for (i = 0; i < (max_wait * 1000); i++) {
            if (init_frame->cmd_status == 0xFF)
                DELAY(1000);
            else
                break;
        }
    }

    if (init_frame->cmd_status == 0)
        mrsas_dprint(sc, MRSAS_OCR,
            "IOC INIT response received from FW.\n");
    else {
        if (init_frame->cmd_status == 0xFF)
            device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
        else
            device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
        retcode = 1;
    }

    /* The IOC INIT buffer is only needed for this one exchange. */
    mrsas_free_ioc_cmd(sc);
    return (retcode);
}

/**
 * mrsas_alloc_mpt_cmds: Allocates the command packets
 * input: Adapter instance soft state
 *
 * This function allocates the internal commands for IOs. Each command that is
 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
 * An array is allocated with mrsas_mpt_cmd context. The free commands are
 * maintained in a linked list (cmd pool). SMID value range is from 1 to
 * max_fw_cmds.
 */
int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_cmd = sc->max_fw_cmds;

	/* Request-descriptor array, one slot per firmware command */
	sc->req_desc = kmalloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return(ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands; on a mid-loop failure, free what was already allocated.
	 */
	sc->mpt_cmd_list = kmalloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return(ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mpt_cmd_list[i] = kmalloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			for (j = 0; j < i; j++)
				kfree(sc->mpt_cmd_list[j],M_MRSAS);
			kfree(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return(ENOMEM);
		}
	}

	/*
	 * Carve the pre-allocated DMA regions into per-command slices.
	 * IO requests skip slot 0 (frame 0 is reserved; SMIDs start at 1).
	 */
	io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t*)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mpt_cmd_list[i];
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = 1024 * i;	/* 1024-byte chain frame per cmd */
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		cmd->index = i + 1;	/* SMID; 0 is not a valid SMID */
		cmd->ccb_ptr = NULL;
		callout_init(&cmd->cm_callout);
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;	/* no MFI cmd attached */
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
			/* NOTE(review): returns without freeing req_desc,
			 * mpt_cmd_list or earlier dmamaps — presumably the
			 * caller's attach-failure path tears these down;
			 * verify against the caller. */
			return(FAIL);
		}
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/* Initialize reply descriptor array to 0xFFFFFFFF (empty marker) */
	reply_desc = sc->reply_desc_mem;
	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return(0);
}

/**
 * mrsas_fire_cmd: Sends command to FW
 * input: Adapter soft state
 *        request descriptor address low
 *        request descriptor address high
 *
 * This functions fires the command to Firmware by writing to the
 * inbound_low_queue_port and inbound_high_queue_port.
 */
void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * The pci_lock keeps the low/high queue-port writes from two
	 * submitters from interleaving; low word must be written first.
	 */
	lockmgr(&sc->pci_lock, LK_EXCLUSIVE);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	lockmgr(&sc->pci_lock, LK_RELEASE);
}

/**
 * mrsas_transition_to_ready:  Move FW to Ready state
 * input:                      Adapter instance soft state
 *                             ocr - non-zero when called from the OCR path
 *
 * During the initialization, FW can potentially be in any one of several
 * possible states.  If the FW is in operational or waiting-for-handshake
 * state, the driver must take steps to bring it to ready state.  Otherwise,
 * it has to wait for the ready state.  Returns 0 once READY is reached,
 * -ENODEV on an unrecoverable state or timeout.
 */
int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* FW state lives in the low bits of the outbound scratch pad */
	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Full scratch-pad value; used to detect any state change */
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* In the OCR path a FAULTed FW is expected; keep waiting */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			}
			else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/* Bring it to READY state; assuming max wait 10 secs */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Wait for the doorbell busy bit (bit 0) to clear */
			for (i=0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/* This state should not last for more than 2 seconds */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}
		/* NOTE(review): cur_state is assigned in every arm but never
		 * read afterwards — kept for parity with the upstream driver. */

		/*
		 * The cur_state should not last for more than max_wait secs;
		 * poll until the raw scratch-pad value changes.
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad))& MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}

/**
 * mrsas_get_mfi_cmd:  Get a cmd from free command pool
 * input:              Adapter soft state
 *
 * This function removes an MFI command from the command list, or
 * returns NULL when the pool is empty.
 */
struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd = NULL;

	lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE);
	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
	}
	lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE);

	return cmd;
}

/**
 * mrsas_ocr_thread:  Thread to handle OCR/Kill Adapter.
 * input:             Adapter Context.
 *
 * This function will check FW status register and flag do_timedout_reset.
 * It will do OCR/Kill adapter if FW is in fault state or an IO timeout
 * has triggered a reset.  Runs until remove_in_progress is set.
 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		lksleep(&sc->ocr_chan, &sc->sim_lock, 0,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to shutdown from %s\n", __func__);
			break;
		}
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
			device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
			    sc->do_timedout_reset?"IO Timeout":
			    "FW fault detected");
			/* ioctl_lock fences new ioctls while reset runs */
			spin_lock(&sc->ioctl_lock);
			sc->reset_in_progress = 1;
			sc->reset_count++;
			spin_unlock(&sc->ioctl_lock);
			mrsas_xpt_freeze(sc);
			mrsas_reset_ctrl(sc);
			mrsas_xpt_release(sc);
			sc->reset_in_progress = 0;
			sc->do_timedout_reset = 0;
		}
	}
	lockmgr(&sc->sim_lock, LK_RELEASE);
	sc->ocr_thread_active = 0;
	kthread_exit();
}

/**
 * mrsas_reset_reply_desc:  Reset Reply descriptor as part of OCR.
 * input:                   Adapter Context.
 *
 * This function will clear the reply descriptor ring so that, post OCR,
 * driver and FW discard the old history.  Also rewinds last_reply_idx.
 */
void mrsas_reset_reply_desc(struct mrsas_softc *sc)
{
	int i;
	pMpi2ReplyDescriptorsUnion_t reply_desc;

	sc->last_reply_idx = 0;
	reply_desc = sc->reply_desc_mem;
	/* 0xFFFFFFFF.. marks a descriptor slot as unused */
	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
}

/**
 * mrsas_reset_ctrl:  Core function to OCR/Kill adapter.
 * input:             Adapter Context.
 *
 * This function will run from thread context so that it can sleep.
 * 1. Do not handle OCR if FW is in HW critical error.
 * 2. Wait for outstanding commands to complete for 180 seconds.
 * 3. If #2 does not find any outstanding command, the controller is in
 *    working state, so skip OCR.  Otherwise, do OCR/kill adapter based
 *    on the flag disableOnlineCtrlReset.
 * 4. At the start of the OCR, return all SCSI commands back to the CAM
 *    layer which have a ccb_ptr.
 * 5. Post OCR, re-fire management commands and move the controller to
 *    operational state.
 */
int mrsas_reset_ctrl(struct mrsas_softc *sc)
{
	int retval = SUCCESS, i, j, retry = 0;
	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
	union ccb *ccb;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	/* 1. No OCR once the adapter is declared dead */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
		device_printf(sc->mrsas_dev,
		    "mrsas: Hardware critical error, returning FAIL.\n");
		return FAIL;
	}

	set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
	mrsas_disable_intr(sc);
	DELAY(1000 * 1000);	/* settle for 1 second with interrupts off */

	/* 2. First try waiting for commands to complete */
	if (mrsas_wait_for_outstanding(sc)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "resetting adapter from %s.\n",
		    __func__);
		/* 4. Now return commands back to the CAM layer */
		for (i = 0 ; i < sc->max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			if (mpt_cmd->ccb_ptr) {
				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
				mrsas_cmd_done(sc, mpt_cmd);
				atomic_dec(&sc->fw_outstanding);
			}
		}

		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		abs_state = status_reg & MFI_STATE_MASK;
		reset_adapter = status_reg & MFI_RESET_ADAPTER;
		if (sc->disableOnlineCtrlReset ||
		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
			/* Reset not supported, kill adapter */
			mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
			mrsas_kill_hba(sc);
			sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
			retval = FAIL;
			goto out;
		}

		/* Now try to reset the chip */
		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
			/*
			 * Write the 6-key unlock sequence to the write-
			 * sequence register to enable diagnostic writes.
			 */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_1ST_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_2ND_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_3RD_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_4TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_5TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_6TH_KEY_VALUE);

			/* Check that the diag write enable (DRWE) bit is on */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
				DELAY(100 * 1000);	/* 100ms per poll, ~10s max */
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 100) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Host diag unlock failed!\n");
					break;
				}
			}
			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
				continue;	/* retry the whole unlock/reset */

			/* Send chip reset command */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
			    host_diag | HOST_DIAG_RESET_ADAPTER);
			DELAY(3000 * 1000);	/* give the chip 3s to reset */

			/* Make sure reset adapter bit is cleared */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 1000) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Diag reset adapter never cleared!\n");
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			/* Wait for FW to get past the early boot states */
			abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK;
			retry = 0;

			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
				DELAY(100 * 1000);
				abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    outbound_scratch_pad)) & MFI_STATE_MASK;
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
				    " state = 0x%x\n", abs_state);
				continue;
			}

			/* Wait for FW to become ready */
			if (mrsas_transition_to_ready(sc, 1)) {
				mrsas_dprint(sc, MRSAS_OCR,
				    "mrsas: Failed to transition controller to ready.\n");
				continue;
			}

			/* Re-initialize the reply ring and the IOC */
			mrsas_reset_reply_desc(sc);
			if (mrsas_ioc_init(sc)) {
				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
				continue;
			}

			clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
			mrsas_enable_intr(sc);
			sc->adprecovery = MRSAS_HBA_OPERATIONAL;

			/* 5. Re-fire management commands */
			for (j = 0 ; j < sc->max_fw_cmds; j++) {
				mpt_cmd = sc->mpt_cmd_list[j];
				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
					/* A pending map-update is simply dropped;
					 * a fresh one is issued below. */
					if (mfi_cmd->frame->dcmd.opcode ==
					    MR_DCMD_LD_MAP_GET_INFO) {
						mrsas_release_mfi_cmd(mfi_cmd);
						mrsas_release_mpt_cmd(mpt_cmd);
					} else {
						req_desc = mrsas_get_request_desc(sc,
						    mfi_cmd->cmd_id.context.smid - 1);
						mrsas_dprint(sc, MRSAS_OCR,
						    "Re-fire command DCMD opcode 0x%x index %d\n ",
						    mfi_cmd->frame->dcmd.opcode, j);
						if (!req_desc)
							device_printf(sc->mrsas_dev,
							    "Cannot build MPT cmd.\n");
						else
							mrsas_fire_cmd(sc, req_desc->addr.u.low,
							    req_desc->addr.u.high);
					}
				}
			}

			/* Reset load balance info */
			memset(sc->load_balance_info, 0,
			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);

			/* Refresh the RAID map after the reset */
			if (!mrsas_get_map_info(sc))
				mrsas_sync_map_info(sc);

			/* Adapter reset completed successfully */
			device_printf(sc->mrsas_dev, "Reset successful\n");
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
		mrsas_kill_hba(sc);
		retval = FAIL;
	} else {
		/* 3. No outstanding commands: controller is fine, skip OCR */
		clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
		mrsas_enable_intr(sc);
		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	}
out:
	clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	mrsas_dprint(sc, MRSAS_OCR,
	    "Reset Exit with %d.\n", retval);
	return retval;
}

/**
 * mrsas_kill_hba:  Kill HBA when OCR is not supported.
 * input:           Adapter Context.
 *
 * This function will kill the HBA when OCR is not supported, by
 * writing MFI_STOP_ADP to the doorbell register.
 */
void mrsas_kill_hba (struct mrsas_softc *sc)
{
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush: read back so the write is posted before returning */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
}

/**
 * mrsas_wait_for_outstanding:  Wait for outstanding commands
 * input:                       Adapter Context.
 *
 * This function will wait for 180 seconds for outstanding
 * commands to be completed.
 */
int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state;

	/* One iteration per second, up to MRSAS_RESET_WAIT_TIME seconds */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state; if so, don't wait */
		fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;	/* all commands drained: success */

		/* Periodically log progress and drain the reply queue */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n",i,outstanding);
			mrsas_complete_cmd(sc);
		}
		DELAY(1000 * 1000);
	}

	/* Timed out with commands still in flight: caller must reset */
	if (atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}

/**
 * mrsas_release_mfi_cmd:  Return a cmd to free command pool
 * input:                  Command packet for return to free cmd pool
 *
 * This function returns the MFI command to the command list.
2576 */ 2577 void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd) 2578 { 2579 struct mrsas_softc *sc = cmd->sc; 2580 2581 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE); 2582 cmd->ccb_ptr = NULL; 2583 cmd->cmd_id.frame_count = 0; 2584 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next); 2585 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE); 2586 2587 return; 2588 } 2589 2590 /** 2591 * mrsas_get_controller_info - Returns FW's controller structure 2592 * input: Adapter soft state 2593 * Controller information structure 2594 * 2595 * Issues an internal command (DCMD) to get the FW's controller structure. 2596 * This information is mainly used to find out the maximum IO transfer per 2597 * command supported by the FW. 2598 */ 2599 static int mrsas_get_ctrl_info(struct mrsas_softc *sc, 2600 struct mrsas_ctrl_info *ctrl_info) 2601 { 2602 int retcode = 0; 2603 struct mrsas_mfi_cmd *cmd; 2604 struct mrsas_dcmd_frame *dcmd; 2605 2606 cmd = mrsas_get_mfi_cmd(sc); 2607 2608 if (!cmd) { 2609 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); 2610 return -ENOMEM; 2611 } 2612 dcmd = &cmd->frame->dcmd; 2613 2614 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { 2615 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); 2616 mrsas_release_mfi_cmd(cmd); 2617 return -ENOMEM; 2618 } 2619 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2620 2621 dcmd->cmd = MFI_CMD_DCMD; 2622 dcmd->cmd_status = 0xFF; 2623 dcmd->sge_count = 1; 2624 dcmd->flags = MFI_FRAME_DIR_READ; 2625 dcmd->timeout = 0; 2626 dcmd->pad_0 = 0; 2627 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info); 2628 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 2629 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr; 2630 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info); 2631 2632 if (!mrsas_issue_polled(sc, cmd)) 2633 memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); 2634 else 2635 retcode = 1; 2636 2637 mrsas_free_ctlr_info_cmd(sc); 2638 mrsas_release_mfi_cmd(cmd); 2639 
return(retcode); 2640 } 2641 2642 /** 2643 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command 2644 * input: Adapter soft state 2645 * 2646 * Allocates DMAable memory for the controller info internal command. 2647 */ 2648 int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) 2649 { 2650 int ctlr_info_size; 2651 2652 /* Allocate get controller info command */ 2653 ctlr_info_size = sizeof(struct mrsas_ctrl_info); 2654 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent 2655 1, 0, // algnmnt, boundary 2656 BUS_SPACE_MAXADDR_32BIT,// lowaddr 2657 BUS_SPACE_MAXADDR, // highaddr 2658 NULL, NULL, // filter, filterarg 2659 ctlr_info_size, // maxsize 2660 1, // msegments 2661 ctlr_info_size, // maxsegsize 2662 BUS_DMA_ALLOCNOW, // flags 2663 &sc->ctlr_info_tag)) { 2664 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); 2665 return (ENOMEM); 2666 } 2667 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, 2668 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) { 2669 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); 2670 return (ENOMEM); 2671 } 2672 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, 2673 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb, 2674 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { 2675 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n"); 2676 return (ENOMEM); 2677 } 2678 2679 memset(sc->ctlr_info_mem, 0, ctlr_info_size); 2680 return (0); 2681 } 2682 2683 /** 2684 * mrsas_free_ctlr_info_cmd: Free memory for controller info command 2685 * input: Adapter soft state 2686 * 2687 * Deallocates memory of the get controller info cmd. 
 */
void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
	/* Unload, free, destroy — each guarded for partial allocations */
	if (sc->ctlr_info_phys_addr)
		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_mem != NULL)
		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_tag != NULL)
		bus_dma_tag_destroy(sc->ctlr_info_tag);
}

/**
 * mrsas_issue_polled:  Issues a polling command
 * inputs:              Adapter soft state
 *                      Command packet to be issued
 *
 * This function is for posting of internal commands to Firmware.  MFI
 * requires the cmd_status to be set to 0xFF before posting.  The maximum
 * wait time of the poll response timer is MRSAS_INTERNAL_CMD_WAIT_TIME
 * (180) seconds.  Returns 0 on success, 1 on timeout or FW error.
 */
int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	int i, retcode = 0;

	/* 0xFF = pending; FW overwrites it on completion.  The frame must
	 * not be posted to the reply queue since we poll it directly. */
	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* Issue the frame using inbound queue port */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return(1);
	}

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (frame_hdr->cmd_status == 0xFF) {
		for (i=0; i < (max_wait * 1000); i++){
			if (frame_hdr->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (frame_hdr->cmd_status != 0)
	{
		/* Still 0xFF = timeout; anything else = FW error code */
		if (frame_hdr->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
		retcode = 1;
	}
	return(retcode);
}

/**
 * mrsas_issue_dcmd:  Issues a MFI Pass thru cmd
 * input:             Adapter soft state
 *                    mfi cmd pointer
 *
 * This function is called by mrsas_issue_blocked_cmd() and
 * mrsas_issue_polled(), to build the MPT command and then fire the
 * command to Firmware.  Returns 0 on success, 1 if the MPT command
 * could not be built.
 */
int
mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	req_desc = mrsas_build_mpt_cmd(sc, cmd);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
		return(1);
	}

	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);

	return(0);
}

/**
 * mrsas_build_mpt_cmd:  Calls helper function to build Passthru cmd
 * input:                Adapter soft state
 *                       mfi cmd to build
 *
 * This function is called by mrsas_issue_dcmd() to build the MPT-MFI
 * passthru command and prepares the MPT command to send to Firmware.
 */
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u_int16_t index;

	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
		return NULL;
	}

	/* SMID assigned by mrsas_build_mptmfi_passthru() above */
	index = cmd->cmd_id.context.smid;

	/* Request descriptors are indexed from 0; SMIDs start at 1 */
	req_desc = mrsas_get_request_desc(sc, index-1);
	if(!req_desc)
		return NULL;

	req_desc->addr.Words = 0;
	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	req_desc->SCSIIO.SMID = index;

	return(req_desc);
}

/**
 * mrsas_build_mptmfi_passthru:  Builds a MPT MFI Passthru command
 * input:                        Adapter soft state
 *                               mfi cmd pointer
 *
 * The MPT command and the io_request are setup as a passthru command.
 * The SGE chain address is set to frame_phys_addr of the MFI command.
 * Returns 0 on success, 1 if no MPT command is available.
 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return(1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link so the MPT completion can find the wrapped MFI cmd */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check
	 * on completion.  For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	/* Invader/Fury require the last in-frame SGE's flags cleared */
	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}

	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* Chain the whole MFI frame as the pass-through payload */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;

	return(0);
}

/**
 * mrsas_issue_blocked_cmd:  Synchronous wrapper around regular FW cmds
 * input:                    Adapter soft state
 *                           Command to be issued
 *
 * This function waits on an event for the command to be returned
 * from the ISR.  Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
 * Used for issuing internal and ioctl commands.  Returns 0 on
 * completion, 1 on issue failure or timeout.
 */
int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = 0;

	/* Initialize cmd_status; ISR completion path overwrites it */
	cmd->cmd_status = ECONNREFUSED;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)){
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return(1);
	}

	/* NOTE(review): stores the address of the local 'cmd' variable,
	 * but the sleep/wakeup channel actually used is &sc->chan itself
	 * (mrsas_wakeup() calls wakeup_one(&sc->chan)) — the stored value
	 * appears to be debug-only; confirm before relying on it. */
	sc->chan = (void*)&cmd;

	/* Sleep in 1s slices until completed or max_wait seconds elapse */
	while (1) {
		if (cmd->cmd_status == ECONNREFUSED){
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		}
		else
			break;
		total_time++;
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
			retcode = 1;
			break;
		}
	}
	return(retcode);
}

/**
 * mrsas_complete_mptmfi_passthru:  Completes a command
 * input:  sc:     Adapter soft state
 *         cmd:    Command to be completed
 *         status: cmd completion status
 *
 * This function is called from mrsas_complete_cmd() after an interrupt
 * is received from Firmware, and io_request->Function is
 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path.  If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* fallthrough -- non-sync SCSI IO handled like DCMD below */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update (mbox.b[1]==1 marks sync-map reply) */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			sc->fast_path_io = 0;
			lockmgr(&sc->raidmap_lock, LK_EXCLUSIVE);
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
				else {
					mrsas_release_mfi_cmd(cmd);
					lockmgr(&sc->raidmap_lock, LK_RELEASE);
					break;
				}
			}
			else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			/* fast-path IO only while the new map validates */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Immediately re-arm the next map-sync command */
			mrsas_sync_map_info(sc);
			lockmgr(&sc->raidmap_lock, LK_RELEASE);
			break;
		}
#if 0 //currently not supporting event handling, so commenting out
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			mrsas_poll_wait_aen = 0;
		}
#endif
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}

/**
 * mrsas_wakeup:  Completes an internal command
 * input:         Adapter soft state
 *                Command to be completed
 *
 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
 * a wait timer is started.  This function is called from
 * mrsas_complete_mptmfi_passthru() as it completes the command,
 * to wake up from the command wait.
3007 */ 3008 void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3009 { 3010 cmd->cmd_status = cmd->frame->io.cmd_status; 3011 3012 if (cmd->cmd_status == ECONNREFUSED) 3013 cmd->cmd_status = 0; 3014 3015 /* For debug only ... */ 3016 //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan); 3017 3018 sc->chan = (void*)&cmd; 3019 wakeup_one((void *)&sc->chan); 3020 return; 3021 } 3022 3023 /** 3024 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller 3025 * input: Adapter soft state 3026 * Shutdown/Hibernate 3027 * 3028 * This function issues a DCMD internal command to Firmware to initiate 3029 * shutdown of the controller. 3030 */ 3031 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) 3032 { 3033 struct mrsas_mfi_cmd *cmd; 3034 struct mrsas_dcmd_frame *dcmd; 3035 3036 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 3037 return; 3038 3039 cmd = mrsas_get_mfi_cmd(sc); 3040 if (!cmd) { 3041 device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n"); 3042 return; 3043 } 3044 3045 if (sc->aen_cmd) 3046 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); 3047 3048 if (sc->map_update_cmd) 3049 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); 3050 3051 dcmd = &cmd->frame->dcmd; 3052 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3053 3054 dcmd->cmd = MFI_CMD_DCMD; 3055 dcmd->cmd_status = 0x0; 3056 dcmd->sge_count = 0; 3057 dcmd->flags = MFI_FRAME_DIR_NONE; 3058 dcmd->timeout = 0; 3059 dcmd->pad_0 = 0; 3060 dcmd->data_xfer_len = 0; 3061 dcmd->opcode = opcode; 3062 3063 device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n"); 3064 3065 mrsas_issue_blocked_cmd(sc, cmd); 3066 mrsas_release_mfi_cmd(cmd); 3067 3068 return; 3069 } 3070 3071 /** 3072 * mrsas_flush_cache: Requests FW to flush all its caches 3073 * input: Adapter soft state 3074 * 3075 * This function is issues a DCMD internal command to Firmware to initiate 3076 * flushing of all caches. 
3077 */ 3078 static void mrsas_flush_cache(struct mrsas_softc *sc) 3079 { 3080 struct mrsas_mfi_cmd *cmd; 3081 struct mrsas_dcmd_frame *dcmd; 3082 3083 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 3084 return; 3085 3086 cmd = mrsas_get_mfi_cmd(sc); 3087 if (!cmd) { 3088 device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n"); 3089 return; 3090 } 3091 3092 dcmd = &cmd->frame->dcmd; 3093 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3094 3095 dcmd->cmd = MFI_CMD_DCMD; 3096 dcmd->cmd_status = 0x0; 3097 dcmd->sge_count = 0; 3098 dcmd->flags = MFI_FRAME_DIR_NONE; 3099 dcmd->timeout = 0; 3100 dcmd->pad_0 = 0; 3101 dcmd->data_xfer_len = 0; 3102 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 3103 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 3104 3105 mrsas_issue_blocked_cmd(sc, cmd); 3106 mrsas_release_mfi_cmd(cmd); 3107 3108 return; 3109 } 3110 3111 /** 3112 * mrsas_get_map_info: Load and validate RAID map 3113 * input: Adapter instance soft state 3114 * 3115 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() 3116 * to load and validate RAID map. It returns 0 if successful, 1 other- 3117 * wise. 3118 */ 3119 static int mrsas_get_map_info(struct mrsas_softc *sc) 3120 { 3121 uint8_t retcode = 0; 3122 3123 sc->fast_path_io = 0; 3124 if (!mrsas_get_ld_map_info(sc)) { 3125 retcode = MR_ValidateMapInfo(sc); 3126 if (retcode == 0) { 3127 sc->fast_path_io = 1; 3128 return 0; 3129 } 3130 } 3131 return 1; 3132 } 3133 3134 /** 3135 * mrsas_get_ld_map_info: Get FW's ld_map structure 3136 * input: Adapter instance soft state 3137 * 3138 * Issues an internal command (DCMD) to get the FW's controller PD 3139 * list structure. 
3140 */ 3141 static int mrsas_get_ld_map_info(struct mrsas_softc *sc) 3142 { 3143 int retcode = 0; 3144 struct mrsas_mfi_cmd *cmd; 3145 struct mrsas_dcmd_frame *dcmd; 3146 MR_FW_RAID_MAP_ALL *map; 3147 bus_addr_t map_phys_addr = 0; 3148 3149 cmd = mrsas_get_mfi_cmd(sc); 3150 if (!cmd) { 3151 device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n"); 3152 return 1; 3153 } 3154 3155 dcmd = &cmd->frame->dcmd; 3156 3157 map = sc->raidmap_mem[(sc->map_id & 1)]; 3158 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)]; 3159 if (!map) { 3160 device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n"); 3161 mrsas_release_mfi_cmd(cmd); 3162 return (ENOMEM); 3163 } 3164 memset(map, 0, sizeof(*map)); 3165 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3166 3167 dcmd->cmd = MFI_CMD_DCMD; 3168 dcmd->cmd_status = 0xFF; 3169 dcmd->sge_count = 1; 3170 dcmd->flags = MFI_FRAME_DIR_READ; 3171 dcmd->timeout = 0; 3172 dcmd->pad_0 = 0; 3173 dcmd->data_xfer_len = sc->map_sz; 3174 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 3175 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 3176 dcmd->sgl.sge32[0].length = sc->map_sz; 3177 if (!mrsas_issue_polled(sc, cmd)) 3178 retcode = 0; 3179 else 3180 { 3181 device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n"); 3182 retcode = 1; 3183 } 3184 mrsas_release_mfi_cmd(cmd); 3185 return(retcode); 3186 } 3187 3188 /** 3189 * mrsas_sync_map_info: Get FW's ld_map structure 3190 * input: Adapter instance soft state 3191 * 3192 * Issues an internal command (DCMD) to get the FW's controller PD 3193 * list structure. 
3194 */ 3195 static int mrsas_sync_map_info(struct mrsas_softc *sc) 3196 { 3197 int retcode = 0, i; 3198 struct mrsas_mfi_cmd *cmd; 3199 struct mrsas_dcmd_frame *dcmd; 3200 uint32_t size_sync_info, num_lds; 3201 MR_LD_TARGET_SYNC *target_map = NULL; 3202 MR_FW_RAID_MAP_ALL *map; 3203 MR_LD_RAID *raid; 3204 MR_LD_TARGET_SYNC *ld_sync; 3205 bus_addr_t map_phys_addr = 0; 3206 3207 cmd = mrsas_get_mfi_cmd(sc); 3208 if (!cmd) { 3209 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n"); 3210 return 1; 3211 } 3212 3213 map = sc->raidmap_mem[sc->map_id & 1]; 3214 num_lds = map->raidMap.ldCount; 3215 3216 dcmd = &cmd->frame->dcmd; 3217 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds; 3218 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3219 3220 target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1]; 3221 memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL)); 3222 3223 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; 3224 3225 ld_sync = (MR_LD_TARGET_SYNC *)target_map; 3226 3227 for (i = 0; i < num_lds; i++, ld_sync++) { 3228 raid = MR_LdRaidGet(i, map); 3229 ld_sync->targetId = MR_GetLDTgtId(i, map); 3230 ld_sync->seqNum = raid->seqNum; 3231 } 3232 3233 dcmd->cmd = MFI_CMD_DCMD; 3234 dcmd->cmd_status = 0xFF; 3235 dcmd->sge_count = 1; 3236 dcmd->flags = MFI_FRAME_DIR_WRITE; 3237 dcmd->timeout = 0; 3238 dcmd->pad_0 = 0; 3239 dcmd->data_xfer_len = sc->map_sz; 3240 dcmd->mbox.b[0] = num_lds; 3241 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; 3242 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 3243 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 3244 dcmd->sgl.sge32[0].length = sc->map_sz; 3245 3246 sc->map_update_cmd = cmd; 3247 if (mrsas_issue_dcmd(sc, cmd)) { 3248 device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n"); 3249 return(1); 3250 } 3251 return(retcode); 3252 } 3253 3254 /** 3255 * mrsas_get_pd_list: Returns FW's PD list structure 3256 * input: Adapter soft state 3257 * 3258 * Issues an internal command (DCMD) to get the 
FW's controller PD 3259 * list structure. This information is mainly used to find out about 3260 * system supported by Firmware. 3261 */ 3262 static int mrsas_get_pd_list(struct mrsas_softc *sc) 3263 { 3264 int retcode = 0, pd_index = 0, pd_count=0, pd_list_size; 3265 struct mrsas_mfi_cmd *cmd; 3266 struct mrsas_dcmd_frame *dcmd; 3267 struct MR_PD_LIST *pd_list_mem; 3268 struct MR_PD_ADDRESS *pd_addr; 3269 bus_addr_t pd_list_phys_addr = 0; 3270 struct mrsas_tmp_dcmd *tcmd; 3271 3272 cmd = mrsas_get_mfi_cmd(sc); 3273 if (!cmd) { 3274 device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n"); 3275 return 1; 3276 } 3277 3278 dcmd = &cmd->frame->dcmd; 3279 3280 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 3281 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3282 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { 3283 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n"); 3284 mrsas_release_mfi_cmd(cmd); 3285 return(ENOMEM); 3286 } 3287 else { 3288 pd_list_mem = tcmd->tmp_dcmd_mem; 3289 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 3290 } 3291 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3292 3293 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 3294 dcmd->mbox.b[1] = 0; 3295 dcmd->cmd = MFI_CMD_DCMD; 3296 dcmd->cmd_status = 0xFF; 3297 dcmd->sge_count = 1; 3298 dcmd->flags = MFI_FRAME_DIR_READ; 3299 dcmd->timeout = 0; 3300 dcmd->pad_0 = 0; 3301 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3302 dcmd->opcode = MR_DCMD_PD_LIST_QUERY; 3303 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; 3304 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 3305 3306 if (!mrsas_issue_polled(sc, cmd)) 3307 retcode = 0; 3308 else 3309 retcode = 1; 3310 3311 /* Get the instance PD list */ 3312 pd_count = MRSAS_MAX_PD; 3313 pd_addr = pd_list_mem->addr; 3314 if (retcode == 0 && pd_list_mem->count < pd_count) { 3315 memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct 
mrsas_pd_list)); 3316 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { 3317 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; 3318 sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType; 3319 sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM; 3320 pd_addr++; 3321 } 3322 } 3323 3324 /* Use mutext/spinlock if pd_list component size increase more than 32 bit. */ 3325 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); 3326 mrsas_free_tmp_dcmd(tcmd); 3327 mrsas_release_mfi_cmd(cmd); 3328 kfree(tcmd, M_MRSAS); 3329 return(retcode); 3330 } 3331 3332 /** 3333 * mrsas_get_ld_list: Returns FW's LD list structure 3334 * input: Adapter soft state 3335 * 3336 * Issues an internal command (DCMD) to get the FW's controller PD 3337 * list structure. This information is mainly used to find out about 3338 * supported by the FW. 3339 */ 3340 static int mrsas_get_ld_list(struct mrsas_softc *sc) 3341 { 3342 int ld_list_size, retcode = 0, ld_index = 0, ids = 0; 3343 struct mrsas_mfi_cmd *cmd; 3344 struct mrsas_dcmd_frame *dcmd; 3345 struct MR_LD_LIST *ld_list_mem; 3346 bus_addr_t ld_list_phys_addr = 0; 3347 struct mrsas_tmp_dcmd *tcmd; 3348 3349 cmd = mrsas_get_mfi_cmd(sc); 3350 if (!cmd) { 3351 device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n"); 3352 return 1; 3353 } 3354 3355 dcmd = &cmd->frame->dcmd; 3356 3357 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 3358 ld_list_size = sizeof(struct MR_LD_LIST); 3359 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { 3360 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n"); 3361 mrsas_release_mfi_cmd(cmd); 3362 return(ENOMEM); 3363 } 3364 else { 3365 ld_list_mem = tcmd->tmp_dcmd_mem; 3366 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 3367 } 3368 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3369 3370 dcmd->cmd = MFI_CMD_DCMD; 3371 dcmd->cmd_status = 0xFF; 3372 dcmd->sge_count = 1; 3373 dcmd->flags = 
MFI_FRAME_DIR_READ; 3374 dcmd->timeout = 0; 3375 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); 3376 dcmd->opcode = MR_DCMD_LD_GET_LIST; 3377 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; 3378 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); 3379 dcmd->pad_0 = 0; 3380 3381 if (!mrsas_issue_polled(sc, cmd)) 3382 retcode = 0; 3383 else 3384 retcode = 1; 3385 3386 /* Get the instance LD list */ 3387 if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){ 3388 sc->CurLdCount = ld_list_mem->ldCount; 3389 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD); 3390 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { 3391 if (ld_list_mem->ldList[ld_index].state != 0) { 3392 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 3393 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 3394 } 3395 } 3396 } 3397 3398 mrsas_free_tmp_dcmd(tcmd); 3399 mrsas_release_mfi_cmd(cmd); 3400 kfree(tcmd, M_MRSAS); 3401 return(retcode); 3402 } 3403 3404 /** 3405 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command 3406 * input: Adapter soft state 3407 * Temp command 3408 * Size of alloction 3409 * 3410 * Allocates DMAable memory for a temporary internal command. The allocated 3411 * memory is initialized to all zeros upon successful loading of the dma 3412 * mapped memory. 
3413 */ 3414 int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, 3415 int size) 3416 { 3417 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent 3418 1, 0, // algnmnt, boundary 3419 BUS_SPACE_MAXADDR_32BIT,// lowaddr 3420 BUS_SPACE_MAXADDR, // highaddr 3421 NULL, NULL, // filter, filterarg 3422 size, // maxsize 3423 1, // msegments 3424 size, // maxsegsize 3425 BUS_DMA_ALLOCNOW, // flags 3426 &tcmd->tmp_dcmd_tag)) { 3427 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); 3428 return (ENOMEM); 3429 } 3430 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, 3431 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { 3432 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); 3433 return (ENOMEM); 3434 } 3435 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, 3436 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, 3437 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { 3438 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); 3439 return (ENOMEM); 3440 } 3441 3442 memset(tcmd->tmp_dcmd_mem, 0, size); 3443 return (0); 3444 } 3445 3446 /** 3447 * mrsas_free_tmp_dcmd: Free memory for temporary command 3448 * input: temporary dcmd pointer 3449 * 3450 * Deallocates memory of the temporary command for use in the construction 3451 * of the internal DCMD. 3452 */ 3453 void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) 3454 { 3455 if (tmp->tmp_dcmd_phys_addr) 3456 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); 3457 if (tmp->tmp_dcmd_mem != NULL) 3458 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); 3459 if (tmp->tmp_dcmd_tag != NULL) 3460 bus_dma_tag_destroy(tmp->tmp_dcmd_tag); 3461 } 3462 3463 /** 3464 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd 3465 * input: Adapter soft state 3466 * Previously issued cmd to be aborted 3467 * 3468 * This function is used to abort previously issued commands, such as AEN and 3469 * RAID map sync map commands. 
The abort command is sent as a DCMD internal 3470 * command and subsequently the driver will wait for a return status. The 3471 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. 3472 */ 3473 static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 3474 struct mrsas_mfi_cmd *cmd_to_abort) 3475 { 3476 struct mrsas_mfi_cmd *cmd; 3477 struct mrsas_abort_frame *abort_fr; 3478 u_int8_t retcode = 0; 3479 unsigned long total_time = 0; 3480 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; 3481 3482 cmd = mrsas_get_mfi_cmd(sc); 3483 if (!cmd) { 3484 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); 3485 return(1); 3486 } 3487 3488 abort_fr = &cmd->frame->abort; 3489 3490 /* Prepare and issue the abort frame */ 3491 abort_fr->cmd = MFI_CMD_ABORT; 3492 abort_fr->cmd_status = 0xFF; 3493 abort_fr->flags = 0; 3494 abort_fr->abort_context = cmd_to_abort->index; 3495 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 3496 abort_fr->abort_mfi_phys_addr_hi = 0; 3497 3498 cmd->sync_cmd = 1; 3499 cmd->cmd_status = 0xFF; 3500 3501 if (mrsas_issue_dcmd(sc, cmd)) { 3502 device_printf(sc->mrsas_dev, "Fail to send abort command.\n"); 3503 return(1); 3504 } 3505 3506 /* Wait for this cmd to complete */ 3507 sc->chan = (void*)&cmd; 3508 while (1) { 3509 if (cmd->cmd_status == 0xFF){ 3510 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); 3511 } 3512 else 3513 break; 3514 total_time++; 3515 if (total_time >= max_wait) { 3516 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); 3517 retcode = 1; 3518 break; 3519 } 3520 } 3521 3522 cmd->sync_cmd = 0; 3523 mrsas_release_mfi_cmd(cmd); 3524 return(retcode); 3525 } 3526 3527 /** 3528 * mrsas_complete_abort: Completes aborting a command 3529 * input: Adapter soft state 3530 * Cmd that was issued to abort another cmd 3531 * 3532 * The mrsas_issue_blocked_abort_cmd() function waits for the command status 3533 * to change after sending the command. 
This function is called from 3534 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 3535 */ 3536 void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3537 { 3538 if (cmd->sync_cmd) { 3539 cmd->sync_cmd = 0; 3540 cmd->cmd_status = 0; 3541 sc->chan = (void*)&cmd; 3542 wakeup_one((void *)&sc->chan); 3543 } 3544 return; 3545 } 3546 3547 /** 3548 * mrsas_aen_handler: Callback function for AEN processing from thread context. 3549 * input: Adapter soft state 3550 * 3551 */ 3552 void mrsas_aen_handler(struct mrsas_softc *sc) 3553 { 3554 union mrsas_evt_class_locale class_locale; 3555 int doscan = 0; 3556 u_int32_t seq_num; 3557 int error; 3558 3559 if (!sc) { 3560 device_printf(sc->mrsas_dev, "invalid instance!\n"); 3561 return; 3562 } 3563 3564 if (sc->evt_detail_mem) { 3565 switch (sc->evt_detail_mem->code) { 3566 case MR_EVT_PD_INSERTED: 3567 mrsas_get_pd_list(sc); 3568 mrsas_bus_scan_sim(sc, sc->sim_1); 3569 doscan = 0; 3570 break; 3571 case MR_EVT_PD_REMOVED: 3572 mrsas_get_pd_list(sc); 3573 mrsas_bus_scan_sim(sc, sc->sim_1); 3574 doscan = 0; 3575 break; 3576 case MR_EVT_LD_OFFLINE: 3577 case MR_EVT_CFG_CLEARED: 3578 case MR_EVT_LD_DELETED: 3579 mrsas_bus_scan_sim(sc, sc->sim_0); 3580 doscan = 0; 3581 break; 3582 case MR_EVT_LD_CREATED: 3583 mrsas_get_ld_list(sc); 3584 mrsas_bus_scan_sim(sc, sc->sim_0); 3585 doscan = 0; 3586 break; 3587 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 3588 case MR_EVT_FOREIGN_CFG_IMPORTED: 3589 case MR_EVT_LD_STATE_CHANGE: 3590 doscan = 1; 3591 break; 3592 default: 3593 doscan = 0; 3594 break; 3595 } 3596 } else { 3597 device_printf(sc->mrsas_dev, "invalid evt_detail\n"); 3598 return; 3599 } 3600 if (doscan) { 3601 mrsas_get_pd_list(sc); 3602 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); 3603 mrsas_bus_scan_sim(sc, sc->sim_1); 3604 mrsas_get_ld_list(sc); 3605 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); 3606 mrsas_bus_scan_sim(sc, sc->sim_0); 3607 } 3608 3609 seq_num = 
sc->evt_detail_mem->seq_num + 1; 3610 3611 // Register AEN with FW for latest sequence number plus 1 3612 class_locale.members.reserved = 0; 3613 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3614 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3615 3616 if (sc->aen_cmd != NULL ) 3617 return ; 3618 3619 lockmgr(&sc->aen_lock, LK_EXCLUSIVE); 3620 error = mrsas_register_aen(sc, seq_num, 3621 class_locale.word); 3622 lockmgr(&sc->aen_lock, LK_RELEASE); 3623 3624 if (error) 3625 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); 3626 3627 } 3628 3629 3630 /** 3631 * mrsas_complete_aen: Completes AEN command 3632 * input: Adapter soft state 3633 * Cmd that was issued to abort another cmd 3634 * 3635 * This function will be called from ISR and will continue 3636 * event processing from thread context by enqueuing task 3637 * in ev_tq (callback function "mrsas_aen_handler"). 3638 */ 3639 void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) 3640 { 3641 /* 3642 * Don't signal app if it is just an aborted previously registered aen 3643 */ 3644 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { 3645 /* TO DO (?) 
*/ 3646 } 3647 else 3648 cmd->abort_aen = 0; 3649 3650 sc->aen_cmd = NULL; 3651 mrsas_release_mfi_cmd(cmd); 3652 3653 if (!sc->remove_in_progress) 3654 taskqueue_enqueue(sc->ev_tq, &sc->ev_task); 3655 3656 return; 3657 } 3658 3659 static device_method_t mrsas_methods[] = { 3660 DEVMETHOD(device_probe, mrsas_probe), 3661 DEVMETHOD(device_attach, mrsas_attach), 3662 DEVMETHOD(device_detach, mrsas_detach), 3663 DEVMETHOD(device_suspend, mrsas_suspend), 3664 DEVMETHOD(device_resume, mrsas_resume), 3665 DEVMETHOD(bus_print_child, bus_generic_print_child), 3666 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 3667 { 0, 0 } 3668 }; 3669 3670 static driver_t mrsas_driver = { 3671 "mrsas", 3672 mrsas_methods, 3673 sizeof(struct mrsas_softc) 3674 }; 3675 3676 static devclass_t mrsas_devclass; 3677 DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, NULL, NULL); 3678 MODULE_VERSION(mrsas, 1); 3679 MODULE_DEPEND(mrsas, cam, 1, 1, 1); 3680 3681