/*
 * megaraid_sas.c: source for mega_sas driver
 *
 * MegaRAID device driver for SAS controllers
 * Copyright (c) 2005-2008, LSI Logic Corporation.
 * All rights reserved.
 *
 * Version:
 * Author:
 *	Rajesh Prabhakaran<Rajesh.Prabhakaran@lsil.com>
 *	Seokmann Ju
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the author nor the names of its contributors may be
 *    used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/pci.h>
#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/atomic.h>
#include <sys/signal.h>

#include "megaraid_sas.h"

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>

/*
 * Local static data
 */
static void	*megasas_state = NULL;
static int	debug_level_g = CL_ANN;

#pragma weak scsi_hba_open
#pragma weak scsi_hba_close
#pragma weak scsi_hba_ioctl

static ddi_dma_attr_t megasas_generic_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xFFFFFFFFU,		/* high DMA address range */
	0xFFFFFFFFU,		/* DMA counter register */
	8,			/* DMA address alignment */
	0x07,			/* DMA burstsizes */
	1,			/* min DMA size */
	0xFFFFFFFFU,		/* max DMA size */
	0xFFFFFFFFU,		/* segment boundary */
	MEGASAS_MAX_SGE_CNT,	/* dma_attr_sglen */
	512,			/* granularity of device */
	0			/* bus specific DMA flags */
};

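/*
 * Upper bound on a single data transfer (16 MB); reported to target
 * drivers through the SCSI_CAP_DMA_MAX capability in tran_getcap().
 */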
int32_t megasas_max_cap_maxxfer = 0x1000000;

/*
 * cb_ops contains base level routines
 */
static struct cb_ops megasas_cb_ops = {
	megasas_open,		/* open */
	megasas_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	megasas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	nodev,			/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * dev_ops contains configuration routines
 */
static struct dev_ops megasas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	megasas_getinfo,	/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	megasas_attach,		/* attach */
	megasas_detach,		/* detach */
	megasas_reset,		/* reset */
	&megasas_cb_ops,	/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MEGASAS_VERSION,
	&megasas_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev - must be MODREV_1 */
	&modldrv,		/* ml_linkage */
	NULL			/* end of driver linkage */
};

static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};


/*
 * ************************************************************************** *
 *                                                                            *
 *            common entry points - for loadable kernel modules              *
 *                                                                            *
 * ************************************************************************** *
 */

/*
 * _init - initialize a loadable module
 * @void
 *
 * The driver should perform any one-time resource allocation or data
 * initialization during driver loading in _init(). For example, the driver
 * should initialize any mutexes global to the driver in this routine.
 * The driver should not, however, use _init() to allocate or initialize
 * anything that has to do with a particular instance of the device.
 * Per-instance initialization must be done in attach().
 */
int
_init(void)
{
	int ret;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	ret = ddi_soft_state_init(&megasas_state,
	    sizeof (struct megasas_instance), 0);

	if (ret != 0) {
		con_log(CL_ANN, (CE_WARN, "megaraid: could not init state"));
		return (ret);
	}

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		con_log(CL_ANN, (CE_WARN, "megaraid: could not init scsi hba"));
		ddi_soft_state_fini(&megasas_state);
		return (ret);
	}

	ret = mod_install(&modlinkage);

	if (ret != 0) {
		con_log(CL_ANN, (CE_WARN, "megaraid: mod_install failed"));
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&megasas_state);
	}

	return (ret);
}

/*
 * _info - returns information about a loadable module.
 * @void
 *
 * _info() is called to return module information. This is a typical entry
 * point that performs a predefined role: it simply calls mod_info().
 */
int
_info(struct modinfo *modinfop)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (mod_info(&modlinkage, modinfop));
}

/*
 * _fini - prepare a loadable module for unloading
 * @void
 *
 * In _fini(), the driver should release any resources that were allocated in
 * _init(). The driver must remove itself from the system module list.
 */
int
_fini(void)
{
	int ret;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if ((ret = mod_remove(&modlinkage)) != 0)
		return (ret);

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini(&megasas_state);

	return (ret);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *               common entry points - for autoconfiguration                 *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * attach - adds a device to the system as part of initialization
 * @dip:
 * @cmd:
 *
 * The kernel calls a driver's attach() entry point to attach an instance of
 * a device (for MegaRAID, an instance of a controller) or to resume
 * operation for an instance of a device that has been suspended or has been
 * shut down by the power management framework.
 * The attach() entry point typically includes the following types of
 * processing:
 * - allocate a soft-state structure for the device instance (for MegaRAID,
 *   controller instance)
 * - initialize per-instance mutexes
 * - initialize condition variables
 * - register the device's interrupts (for MegaRAID, controller's interrupts)
 * - map the registers and memory of the device instance (for MegaRAID,
 *   controller instance)
 * - create minor device nodes for the device instance (for MegaRAID,
 *   controller instance)
 * - report that the device instance (for MegaRAID, controller instance) has
 *   attached
 */
static int
megasas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance_no;
	int		nregs;
	uint8_t		added_isr_f = 0;
	uint8_t		added_soft_isr_f = 0;
	uint8_t		create_devctl_node_f = 0;
	uint8_t		create_scsi_node_f = 0;
	uint8_t		create_ioc_node_f = 0;
	uint8_t		tran_alloc_f = 0;
	uint8_t		irq;
	uint16_t	vendor_id;
	uint16_t	device_id;
	uint16_t	subsysvid;
	uint16_t	subsysid;
	uint16_t	command;

	scsi_hba_tran_t		*tran;
	ddi_dma_attr_t		tran_dma_attr;
	struct megasas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	/*
	 * Since we know that some instantiations of this device can be
	 * plugged into slave-only SBus slots, check to see whether this is
	 * one such.
	 */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN,
		    "mega%d: Device in slave-only slot, unused", instance_no));
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_ATTACH:
		con_log(CL_DLEVEL1, (CE_NOTE, "megasas: DDI_ATTACH"));
		/* allocate the soft state for the instance */
		if (ddi_soft_state_zalloc(megasas_state, instance_no)
		    != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "mega%d: Failed to allocate soft state",
			    instance_no));

			return (DDI_FAILURE);
		}

		instance = (struct megasas_instance *)ddi_get_soft_state
		    (megasas_state, instance_no);

		if (instance == NULL) {
			con_log(CL_ANN, (CE_WARN,
			    "mega%d: Bad soft state", instance_no));

			ddi_soft_state_free(megasas_state, instance_no);

			return (DDI_FAILURE);
		}

		bzero((caddr_t)instance,
		    sizeof (struct megasas_instance));

		instance->func_ptr = kmem_zalloc(
		    sizeof (struct megasas_func_ptr), KM_SLEEP);
		ASSERT(instance->func_ptr);

		/* Setup the PCI configuration space handles */
		if (pci_config_setup(dip, &instance->pci_handle) !=
		    DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "mega%d: pci config setup failed ",
			    instance_no));

			kmem_free(instance->func_ptr,
			    sizeof (struct megasas_func_ptr));
			ddi_soft_state_free(megasas_state, instance_no);

			return (DDI_FAILURE);
		}

		if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "megaraid: failed to get registers."));

			pci_config_teardown(&instance->pci_handle);
			kmem_free(instance->func_ptr,
			    sizeof (struct megasas_func_ptr));
			ddi_soft_state_free(megasas_state, instance_no);

			return (DDI_FAILURE);
		}

		vendor_id = pci_config_get16(instance->pci_handle,
		    PCI_CONF_VENID);
		device_id = pci_config_get16(instance->pci_handle,
		    PCI_CONF_DEVID);

		subsysvid = pci_config_get16(instance->pci_handle,
		    PCI_CONF_SUBVENID);
		subsysid = pci_config_get16(instance->pci_handle,
		    PCI_CONF_SUBSYSID);

		pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
		    (pci_config_get16(instance->pci_handle,
		    PCI_CONF_COMM) | PCI_COMM_ME));
		irq = pci_config_get8(instance->pci_handle,
		    PCI_CONF_ILINE);

		con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
		    "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
		    instance_no, vendor_id, device_id, subsysvid,
		    subsysid, irq, MEGASAS_VERSION));

		/* enable bus-mastering */
		command = pci_config_get16(instance->pci_handle,
		    PCI_CONF_COMM);

		if (!(command & PCI_COMM_ME)) {
			command |= PCI_COMM_ME;

			pci_config_put16(instance->pci_handle,
			    PCI_CONF_COMM, command);

			con_log(CL_ANN, (CE_CONT, "megaraid%d: "
			    "enable bus-mastering\n", instance_no));
		} else {
			con_log(CL_DLEVEL1, (CE_CONT, "megaraid%d: "
			    "bus-mastering already set\n", instance_no));
		}

		/* initialize function pointers */
		if ((device_id == PCI_DEVICE_ID_LSI_1078) ||
		    (device_id == PCI_DEVICE_ID_LSI_1078DE)) {
			con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
			    "1078R/DE detected\n", instance_no));
			instance->func_ptr->read_fw_status_reg =
			    read_fw_status_reg_ppc;
			instance->func_ptr->issue_cmd = issue_cmd_ppc;
			instance->func_ptr->issue_cmd_in_sync_mode =
			    issue_cmd_in_sync_mode_ppc;
			instance->func_ptr->issue_cmd_in_poll_mode =
			    issue_cmd_in_poll_mode_ppc;
			instance->func_ptr->enable_intr =
			    enable_intr_ppc;
			instance->func_ptr->disable_intr =
			    disable_intr_ppc;
			instance->func_ptr->intr_ack = intr_ack_ppc;
		} else {
			con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
			    "1064/8R detected\n", instance_no));
			instance->func_ptr->read_fw_status_reg =
			    read_fw_status_reg_xscale;
			instance->func_ptr->issue_cmd =
			    issue_cmd_xscale;
			instance->func_ptr->issue_cmd_in_sync_mode =
			    issue_cmd_in_sync_mode_xscale;
			instance->func_ptr->issue_cmd_in_poll_mode =
			    issue_cmd_in_poll_mode_xscale;
			instance->func_ptr->enable_intr =
			    enable_intr_xscale;
			instance->func_ptr->disable_intr =
			    disable_intr_xscale;
			instance->func_ptr->intr_ack =
			    intr_ack_xscale;
		}

		instance->baseaddress = pci_config_get32(
		    instance->pci_handle, PCI_CONF_BASE0);
		instance->baseaddress &= 0x0fffc;

		instance->dip		= dip;
		instance->vendor_id	= vendor_id;
		instance->device_id	= device_id;
		instance->subsysvid	= subsysvid;
		instance->subsysid	= subsysid;

		/* Initialize FMA */
		instance->fm_capabilities = ddi_prop_get_int(
		    DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
		    "fm-capable", DDI_FM_EREPORT_CAPABLE |
		    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
		    | DDI_FM_ERRCB_CAPABLE);

		megasas_fm_init(instance);

		/* setup the mfi based low level driver */
		if (init_mfi(instance) != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN, "megaraid: "
			    "could not initialize the low level driver"));

			goto fail_attach;
		}

		/*
		 * Allocate the interrupt blocking cookie.
		 * It represents the information the framework
		 * needs to block interrupts. This cookie will
		 * be used by the locks shared across our ISR.
		 * These locks must be initialized before we
		 * register our ISR.
		 * ddi_add_intr(9F)
		 */
		if (ddi_get_iblock_cookie(dip, 0,
		    &instance->iblock_cookie) != DDI_SUCCESS) {

			goto fail_attach;
		}

		if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH,
		    &instance->soft_iblock_cookie) != DDI_SUCCESS) {

			goto fail_attach;
		}

		/*
		 * Initialize the driver mutexes common to
		 * normal/high level isr
		 */
		if (ddi_intr_hilevel(dip, 0)) {
			instance->isr_level = HIGH_LEVEL_INTR;
			mutex_init(&instance->cmd_pool_mtx,
			    "cmd_pool_mtx", MUTEX_DRIVER,
			    instance->soft_iblock_cookie);
			mutex_init(&instance->cmd_pend_mtx,
			    "cmd_pend_mtx", MUTEX_DRIVER,
			    instance->soft_iblock_cookie);
		} else {
			/*
			 * Initialize the driver mutexes
			 * specific to soft-isr
			 */
			instance->isr_level = NORMAL_LEVEL_INTR;
			mutex_init(&instance->cmd_pool_mtx,
			    "cmd_pool_mtx", MUTEX_DRIVER,
			    instance->iblock_cookie);
			mutex_init(&instance->cmd_pend_mtx,
			    "cmd_pend_mtx", MUTEX_DRIVER,
			    instance->iblock_cookie);
		}

		mutex_init(&instance->completed_pool_mtx,
		    "completed_pool_mtx", MUTEX_DRIVER,
		    instance->iblock_cookie);
		mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
		    MUTEX_DRIVER, instance->iblock_cookie);
		mutex_init(&instance->aen_cmd_mtx, "aen_cmd_mtx",
		    MUTEX_DRIVER, instance->iblock_cookie);
		mutex_init(&instance->abort_cmd_mtx, "abort_cmd_mtx",
		    MUTEX_DRIVER, instance->iblock_cookie);

		cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
		cv_init(&instance->abort_cmd_cv, NULL, CV_DRIVER, NULL);

		INIT_LIST_HEAD(&instance->completed_pool_list);

		/* Register our isr. */
		if (ddi_add_intr(dip, 0, NULL, NULL, megasas_isr,
		    (caddr_t)instance) != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    " ISR did not register"));

			goto fail_attach;
		}

		added_isr_f = 1;

		/* Register our soft-isr for highlevel interrupts. */
		if (instance->isr_level == HIGH_LEVEL_INTR) {
			if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH,
			    &instance->soft_intr_id, NULL, NULL,
			    megasas_softintr, (caddr_t)instance) !=
			    DDI_SUCCESS) {
				con_log(CL_ANN, (CE_WARN,
				    " Software ISR did not register"));

				goto fail_attach;
			}

			added_soft_isr_f = 1;
		}

		/* Allocate a transport structure */
		tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

		if (tran == NULL) {
			con_log(CL_ANN, (CE_WARN,
			    "scsi_hba_tran_alloc failed"));
			goto fail_attach;
		}

		tran_alloc_f = 1;

		instance->tran = tran;

		tran->tran_hba_private	= instance;
		tran->tran_tgt_private	= NULL;
		tran->tran_tgt_init	= megasas_tran_tgt_init;
		tran->tran_tgt_probe	= scsi_hba_probe;
		tran->tran_tgt_free	= (void (*)())NULL;
		tran->tran_init_pkt	= megasas_tran_init_pkt;
		tran->tran_start	= megasas_tran_start;
		tran->tran_abort	= megasas_tran_abort;
		tran->tran_reset	= megasas_tran_reset;
		tran->tran_bus_reset	= megasas_tran_bus_reset;
		tran->tran_getcap	= megasas_tran_getcap;
		tran->tran_setcap	= megasas_tran_setcap;
		tran->tran_destroy_pkt	= megasas_tran_destroy_pkt;
		tran->tran_dmafree	= megasas_tran_dmafree;
		tran->tran_sync_pkt	= megasas_tran_sync_pkt;
		tran->tran_reset_notify	= NULL;
		tran->tran_quiesce	= megasas_tran_quiesce;
		tran->tran_unquiesce	= megasas_tran_unquiesce;

		tran_dma_attr = megasas_generic_dma_attr;
		tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;

		/* Attach this instance of the hba */
		if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
		    != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "scsi_hba_attach failed\n"));

			goto fail_attach;
		}

		/* create devctl node for cfgadm command */
		if (ddi_create_minor_node(dip, "devctl",
		    S_IFCHR, INST2DEVCTL(instance_no),
		    DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
			con_log(CL_ANN, (CE_WARN,
			    "megaraid: failed to create devctl node."));

			goto fail_attach;
		}

		create_devctl_node_f = 1;

		/* create scsi node for cfgadm command */
		if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
		    INST2SCSI(instance_no),
		    DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_WARN,
			    "megaraid: failed to create scsi node."));

			goto fail_attach;
		}

		create_scsi_node_f = 1;

		(void) sprintf(instance->iocnode, "%d:lsirdctl",
		    instance_no);

		/*
		 * Create a node for applications
		 * for issuing ioctl to the driver.
		 */
		if (ddi_create_minor_node(dip, instance->iocnode,
		    S_IFCHR, INST2LSIRDCTL(instance_no),
		    DDI_PSEUDO, 0) == DDI_FAILURE) {
			con_log(CL_ANN, (CE_WARN,
			    "megaraid: failed to create ioctl node."));

			goto fail_attach;
		}

		create_ioc_node_f = 1;

		/* enable interrupt */
		instance->func_ptr->enable_intr(instance);

		/* initiate AEN */
		if (start_mfi_aen(instance)) {
			con_log(CL_ANN, (CE_WARN,
			    "megaraid: failed to initiate AEN."));
			goto fail_initiate_aen;
		}

		con_log(CL_DLEVEL1, (CE_NOTE,
		    "AEN started for instance %d.", instance_no));

		/* Finally! We are on the air. */
		ddi_report_dev(dip);

		if (megasas_check_acc_handle(instance->regmap_handle) !=
		    DDI_SUCCESS) {
			goto fail_attach;
		}
		if (megasas_check_acc_handle(instance->pci_handle) !=
		    DDI_SUCCESS) {
			goto fail_attach;
		}
		break;
	case DDI_PM_RESUME:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas: DDI_PM_RESUME"));
		break;
	case DDI_RESUME:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas: DDI_RESUME"));
		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "megasas: invalid attach cmd=%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

fail_initiate_aen:
fail_attach:
	if (create_devctl_node_f) {
		ddi_remove_minor_node(dip, "devctl");
	}

	if (create_scsi_node_f) {
		ddi_remove_minor_node(dip, "scsi");
	}

	if (create_ioc_node_f) {
		ddi_remove_minor_node(dip, instance->iocnode);
	}

	if (tran_alloc_f) {
		scsi_hba_tran_free(tran);
	}


	if (added_soft_isr_f) {
		ddi_remove_softintr(instance->soft_intr_id);
	}

	if (added_isr_f) {
		ddi_remove_intr(dip, 0, instance->iblock_cookie);
	}

	megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
	ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

	megasas_fm_fini(instance);

	pci_config_teardown(&instance->pci_handle);

	ddi_soft_state_free(megasas_state, instance_no);

	con_log(CL_ANN, (CE_NOTE,
	    "megasas: return failure from mega_attach\n"));

	return (DDI_FAILURE);
}

/*
 * getinfo - gets device information
 * @dip:
 * @cmd:
 * @arg:
 * @resultp:
 *
 * The system calls getinfo() to obtain configuration information that only
 * the driver knows. The mapping of minor numbers to device instances is
 * entirely under the control of the driver. The system sometimes needs to ask
 * the driver which device a particular dev_t represents.
 * Given the device number, return the devinfo pointer of the corresponding
 * device instance.
 */
/*ARGSUSED*/
static int
megasas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
{
	int	rval;
	int	megasas_minor = getminor((dev_t)arg);

	struct megasas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		instance = (struct megasas_instance *)
		    ddi_get_soft_state(megasas_state,
		    MINOR2INST(megasas_minor));

		if (instance == NULL) {
			*resultp = NULL;
			rval = DDI_FAILURE;
		} else {
			*resultp = instance->dip;
			rval = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*resultp = (void *)(uintptr_t)MINOR2INST(megasas_minor);
		rval = DDI_SUCCESS;
		break;
	default:
		*resultp = NULL;
		rval = DDI_FAILURE;
	}

	return (rval);
}

/*
 * detach - detaches a device from the system
 * @dip: pointer to the device's dev_info structure
 * @cmd: type of detach
 *
 * A driver's detach() entry point is called to detach an instance of a device
 * that is bound to the driver. The entry point is called with the instance of
 * the device node to be detached and with DDI_DETACH, which is specified as
 * the cmd argument to the entry point.
 * This routine is called during driver unload. We free all the allocated
 * resources and call the corresponding LLD so that it can also release all
 * its resources.
 */
static int
megasas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int	instance_no;

	struct megasas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	instance = (struct megasas_instance *)ddi_get_soft_state(megasas_state,
	    instance_no);

	if (!instance) {
		con_log(CL_ANN, (CE_WARN,
		    "megasas:%d could not get instance in detach",
		    instance_no));

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "megasas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas_detach: DDI_DETACH\n"));

		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "megasas:%d failed to detach",
			    instance_no));

			return (DDI_FAILURE);
		}

		scsi_hba_tran_free(instance->tran);

		if (abort_aen_cmd(instance, instance->aen_cmd)) {
			con_log(CL_ANN, (CE_WARN, "megasas_detach: "
			    "failed to abort previous AEN command\n"));

			return (DDI_FAILURE);
		}

		instance->func_ptr->disable_intr(instance);

		if (instance->isr_level == HIGH_LEVEL_INTR) {
			ddi_remove_softintr(instance->soft_intr_id);
		}

		ddi_remove_intr(dip, 0, instance->iblock_cookie);

		free_space_for_mfi(instance);

		megasas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);

		kmem_free(instance->func_ptr,
		    sizeof (struct megasas_func_ptr));

		ddi_soft_state_free(megasas_state, instance_no);
		break;
	case DDI_PM_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas_detach: DDI_PM_SUSPEND\n"));

		break;
	case DDI_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas_detach: DDI_SUSPEND\n"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * ************************************************************************** *
 *                                                                            *
 *             common entry points - for character driver types              *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * open - gets access to a device
 * @dev:
 * @openflags:
 * @otyp:
 * @credp:
 *
 * Access to a device by one or more application programs is controlled
 * through the open() and close() entry points. The primary function of
 * open() is to verify that the open request is allowed.
 */
static int
megasas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
{
	int	rval = 0;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* Check root permissions */
	if (drv_priv(credp) != 0) {
		con_log(CL_ANN, (CE_WARN,
		    "megaraid: Non-root ioctl access tried!"));
		return (EPERM);
	}

	/* Verify we are being opened as a character device */
	if (otyp != OTYP_CHR) {
		con_log(CL_ANN, (CE_WARN,
		    "megaraid: ioctl node must be a char node\n"));
		return (EINVAL);
	}

	if (ddi_get_soft_state(megasas_state, MINOR2INST(getminor(*dev)))
	    == NULL) {
		return (ENXIO);
	}

	if (scsi_hba_open) {
		rval = scsi_hba_open(dev, openflags, otyp, credp);
	}

	return (rval);
}

/*
 * close - gives up access to a device
 * @dev:
 * @openflags:
 * @otyp:
 * @credp:
 *
 * close() should perform any cleanup necessary to finish using the minor
 * device, and prepare the device (and driver) to be opened again.
 */
static int
megasas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
{
	int	rval = 0;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* no need for locks! */

	if (scsi_hba_close) {
		rval = scsi_hba_close(dev, openflags, otyp, credp);
	}

	return (rval);
}

/*
 * ioctl - performs a range of I/O commands for character drivers
 * @dev:
 * @cmd:
 * @arg:
 * @mode:
 * @credp:
 * @rvalp:
 *
 * The ioctl() routine must make sure that user data is copied into or out of
 * the kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
 * and ddi_copyout(), as appropriate.
 * This is a wrapper routine to serialize access to the actual ioctl routine.
 * ioctl() should return 0 on success, or the appropriate error number. The
 * driver may also set the value returned to the calling process through rvalp.
 */
static int
megasas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	int	rval = 0;

	struct megasas_instance	*instance;
	struct megasas_ioctl	ioctl;
	struct megasas_aen	aen;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ddi_get_soft_state(megasas_state, MINOR2INST(getminor(dev)));

	if (instance == NULL) {
		/* invalid minor number */
		con_log(CL_ANN, (CE_WARN, "megaraid: adapter not found."));
		return (ENXIO);
	}

	switch ((uint_t)cmd) {
	case MEGASAS_IOCTL_FIRMWARE:
		if (ddi_copyin((void *) arg, &ioctl,
		    sizeof (struct megasas_ioctl), mode)) {
			con_log(CL_ANN, (CE_WARN, "megasas_ioctl: "
			    "ERROR IOCTL copyin"));
			return (EFAULT);
		}

		if (ioctl.control_code == MR_DRIVER_IOCTL_COMMON) {
			rval = handle_drv_ioctl(instance, &ioctl, mode);
		} else {
			rval = handle_mfi_ioctl(instance, &ioctl, mode);
		}

		if (ddi_copyout((void *) &ioctl, (void *)arg,
		    (sizeof (struct megasas_ioctl) - 1), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "megasas_ioctl: copy_to_user failed\n"));
			rval = 1;
		}

		break;
	case MEGASAS_IOCTL_AEN:
		if (ddi_copyin((void *) arg, &aen,
		    sizeof (struct megasas_aen), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "megasas_ioctl: ERROR AEN copyin"));
			return (EFAULT);
		}

		rval = handle_mfi_aen(instance, &aen);

		if (ddi_copyout((void *) &aen, (void *)arg,
		    sizeof (struct megasas_aen), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "megasas_ioctl: copy_to_user failed\n"));
			rval = 1;
		}

		break;
	default:
		rval = scsi_hba_ioctl(dev, cmd, arg,
		    mode, credp, rvalp);

		con_log(CL_DLEVEL1, (CE_NOTE, "megasas_ioctl: "
		    "scsi_hba_ioctl called, ret = %x.", rval));
	}

	return (rval);
}
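
/*
 * Note on the ioctl dispatch above: MEGASAS_IOCTL_FIRMWARE requests with
 * control_code MR_DRIVER_IOCTL_COMMON are handled by handle_drv_ioctl(),
 * all other firmware requests by handle_mfi_ioctl(), and MEGASAS_IOCTL_AEN
 * requests by handle_mfi_aen(); anything else falls through to
 * scsi_hba_ioctl().
 */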

/*
 * ************************************************************************** *
 *                                                                            *
 *               common entry points - for block driver types                *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * reset - TBD
 * @dip:
 * @cmd:
 *
 * TBD
 */
/*ARGSUSED*/
static int
megasas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	int	instance_no;

	struct megasas_instance	*instance;

	instance_no = ddi_get_instance(dip);
	instance = (struct megasas_instance *)ddi_get_soft_state
	    (megasas_state, instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (!instance) {
		con_log(CL_ANN, (CE_WARN,
		    "megaraid:%d could not get adapter in reset",
		    instance_no));
		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE, "flushing cache for instance %d ..",
	    instance_no));

	flush_cache(instance);

	return (DDI_SUCCESS);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *                          entry points (SCSI HBA)                          *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * tran_tgt_init - initialize a target device instance
 * @hba_dip:
 * @tgt_dip:
 * @tran:
 * @sd:
 *
 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
 * the device's address as valid and supportable for that particular HBA.
 * By returning DDI_FAILURE, the instance of the target driver for that device
 * is not probed or attached.
 */
/*ARGSUSED*/
static int
megasas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (DDI_SUCCESS);
}

/*
 * tran_init_pkt - allocate & initialize a scsi_pkt structure
 * @ap:
 * @pkt:
 * @bp:
 * @cmdlen:
 * @statuslen:
 * @tgtlen:
 * @flags:
 * @callback:
 *
 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
 * structure and DMA resources for a target driver request. The
 * tran_init_pkt() entry point is called when the target driver calls the
 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
 * is a request to perform one or more of three possible services:
 * - allocation and initialization of a scsi_pkt structure
 * - allocation of DMA resources for data transfer
 * - reallocation of DMA resources for the next portion of the data transfer
 */
static struct scsi_pkt *
megasas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd	*acmd;
	struct megasas_instance	*instance;
	struct scsi_pkt	*new_pkt;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MEGA(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt		= pkt;
		acmd->cmd_flags		= 0;
		acmd->cmd_scblen	= statuslen;
		acmd->cmd_cdblen	= cmdlen;
		acmd->cmd_dmahandle	= NULL;
		acmd->cmd_ncookies	= 0;
		acmd->cmd_cookie	= 0;
		acmd->cmd_cookiecnt	= 0;
		acmd->cmd_nwin		= 0;

		pkt->pkt_address	= *ap;
		pkt->pkt_comp		= (void (*)())NULL;
		pkt->pkt_flags		= 0;
		pkt->pkt_time		= 0;
		pkt->pkt_resid		= 0;
		pkt->pkt_state		= 0;
		pkt->pkt_statistics	= 0;
		pkt->pkt_reason		= 0;
		new_pkt			= pkt;
	} else {
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			if (megasas_dma_alloc(instance, pkt, bp, flags,
			    callback) == -1) {
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}

				return ((struct scsi_pkt *)NULL);
			}
		} else {
			if (megasas_dma_move(instance, pkt, bp) == -1) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}

/*
 * tran_start - transport a SCSI command to the addressed target
 * @ap:
 * @pkt:
 *
 * The tran_start() entry point for a SCSI HBA driver is called to transport a
 * SCSI command to the addressed target. The SCSI command is described
 * entirely within the scsi_pkt structure, which the target driver allocated
 * through the HBA driver's tran_init_pkt() entry point. If the command
 * involves a data transfer, DMA resources must also have been allocated for
 * the scsi_pkt structure.
 *
 * Return Values :
 *	TRAN_BUSY - request queue is full, no more free scbs
 *	TRAN_ACCEPT - pkt has been submitted to the instance
 */
static int
megasas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t		cmd_done = 0;

	struct megasas_instance	*instance = ADDR2MEGA(ap);
	struct megasas_cmd	*cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0]));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check whether the command has already been completed by the
	 * build_cmd() routine. In that case cmd_done is set, no command
	 * is returned, and the appropriate reason is provided in the
	 * pkt_reason field.
	 */
	if (cmd_done) {
		if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
			scsi_hba_pkt_comp(pkt);
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "megasas:Firmware busy"));
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		struct megasas_header *hdr = &cmd->frame->hdr;

		cmd->sync_cmd = MEGASAS_TRUE;

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		switch (hdr->cmd_status) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		return_mfi_pkt(instance, cmd);
		(void) megasas_common_check(instance, cmd);

		scsi_hba_pkt_comp(pkt);

	}

	return (TRAN_ACCEPT);
}

/*
 * tran_abort - Abort any commands that are currently in transport
 * @ap:
 * @pkt:
 *
 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
 * commands that are currently in transport for a particular target. This entry
 * point is called when a target driver calls scsi_abort(). The tran_abort()
 * entry point should attempt to abort the command denoted by the pkt
 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
 * abort all outstanding commands in the transport layer for the particular
 * target or logical unit.
 */
/*ARGSUSED*/
static int
megasas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* aborting command not supported by H/W */

	return (DDI_FAILURE);
}

/*
 * tran_reset - reset either the SCSI bus or target
 * @ap:
 * @level:
 *
 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
 * the SCSI bus or a particular SCSI target device. This entry point is called
 * when a target driver calls scsi_reset(). The tran_reset() entry point must
 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
 * particular target or logical unit must be reset.
 */
/*ARGSUSED*/
static int
megasas_tran_reset(struct scsi_address *ap, int level)
{
	struct megasas_instance *instance = ADDR2MEGA(ap);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (wait_for_outstanding(instance)) {
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}

/*
 * tran_bus_reset - reset the SCSI bus
 * @dip:
 * @level:
 *
 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
 * initialized during the HBA driver's attach(). The vector should point to
 * an HBA entry point that is to be called when a user initiates a bus reset.
 * Implementation is hardware specific. If the HBA driver cannot reset the
 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
 * or not initialize this vector.
 */
/*ARGSUSED*/
static int
megasas_tran_bus_reset(dev_info_t *dip, int level)
{
	int	instance_no = ddi_get_instance(dip);

	struct megasas_instance	*instance = ddi_get_soft_state(megasas_state,
	    instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (wait_for_outstanding(instance)) {
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}

/*
 * tran_getcap - get one of a set of SCSA-defined capabilities
 * @ap:
 * @cap:
 * @whom:
 *
 * The target driver can request the current setting of the capability for a
 * particular target by setting the whom parameter to nonzero. A whom value of
 * zero indicates a request for the current setting of the general capability
 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
 * for undefined capabilities or the current value of the requested capability.
 */
/*ARGSUSED*/
static int
megasas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int	rval = 0;

	struct megasas_instance	*instance = ADDR2MEGA(ap);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* we do allow inquiring about capabilities for other targets */
	if (cap == NULL) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		rval = megasas_max_cap_maxxfer;
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 0;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 0;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = instance->init_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		rval = 0;
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;
	case SCSI_CAP_GEOMETRY:
		rval = -1;

		break;
	default:
		con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
		    scsi_hba_lookup_capstr(cap)));
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * tran_setcap - set one of a set of SCSA-defined capabilities
 * @ap:
 * @cap:
 * @value:
 * @whom:
 *
 * The target driver might request that the new value be set for a particular
 * target by setting the whom parameter to nonzero. A whom value of zero
 * means that request is to set the new value for the SCSI bus or for adapter
 * hardware in general.
 * The tran_setcap() should return the following values as appropriate:
 * - -1 for undefined capabilities
 * - 0 if the HBA driver cannot set the capability to the requested value
 * - 1 if the HBA driver is able to set the capability to the requested value
 */
/*ARGSUSED*/
static int
megasas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int	rval = 1;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* We don't allow setting capabilities for other targets */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_INITIATOR_ID:
	case SCSI_CAP_ARQ:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = 1;
		break;

	case SCSI_CAP_TOTAL_SECTORS:
		rval = 1;
		break;
	default:
		rval = -1;
		break;
	}

	return (rval);
}

/*
 * tran_destroy_pkt - deallocate scsi_pkt structure
 * @ap:
 * @pkt:
 *
 * The tran_destroy_pkt() entry point is the HBA driver function that
 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
 * called when the target driver calls scsi_destroy_pkt(). The
 * tran_destroy_pkt() entry point must free any DMA resources that have been
 * allocated for the packet. An implicit DMA synchronization occurs if the
 * DMA resources are freed and any cached data remains after the completion
 * of the transfer.
 */
static void
megasas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}

	/* free the pkt */
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * tran_dmafree - deallocates DMA resources
 * @ap:
 * @pkt:
 *
 * The tran_dmafree() entry point deallocates DMA resources that have been
 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
 * free only DMA resources allocated for a scsi_pkt structure, not the
 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
 * implicitly performed.
 */
/*ARGSUSED*/
static void
megasas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	register struct scsa_cmd *acmd = PKT2CMD(pkt);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		acmd->cmd_flags &= ~CFLAG_DMAVALID;

		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);

		ddi_dma_free_handle(&acmd->cmd_dmahandle);

		acmd->cmd_dmahandle = NULL;
	}
}

/*
 * tran_sync_pkt - synchronize the DMA object allocated
 * @ap:
 * @pkt:
 *
 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
 * entry point is called when the target driver calls scsi_sync_pkt(). If the
 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
 * must synchronize the CPU's view of the data. If the data transfer direction
 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
 * device's view of the data.
 */
/*ARGSUSED*/
static void
megasas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/*
	 * Nothing to do here: the ddi_dma_sync() call below is already
	 * made for each I/O in the ISR.
	 */
#if 0
	int	i;

	register struct scsa_cmd	*acmd = PKT2CMD(pkt);

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
		    acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
#endif
}

/*ARGSUSED*/
static int
megasas_tran_quiesce(dev_info_t *dip)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (1);
}

/*ARGSUSED*/
static int
megasas_tran_unquiesce(dev_info_t *dip)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (1);
}

/*
 * megasas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 */
static uint_t
megasas_isr(struct megasas_instance *instance)
{
	int		need_softintr;
	uint32_t	producer;
	uint32_t	consumer;
	uint32_t	context;

	struct megasas_cmd	*cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	ASSERT(instance);
	if (!instance->func_ptr->intr_ack(instance)) {
		return (DDI_INTR_UNCLAIMED);
	}

	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (DDI_INTR_UNCLAIMED);
	}

	producer = *instance->producer;
	consumer = *instance->consumer;

	con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
	    producer, consumer));

	mutex_enter(&instance->completed_pool_mtx);

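	/*
	 * Walk the reply queue from consumer to producer: each entry is
	 * the context (index) of a completed command, which is moved to
	 * completed_pool_list for the soft interrupt handler to finish.
	 */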
	while (consumer != producer) {
		context = instance->reply_queue[consumer];
		cmd = instance->cmd_list[context];
		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}

	mutex_exit(&instance->completed_pool_mtx);

	*instance->consumer = consumer;
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) megasas_softintr(instance);
	}

	return (DDI_INTR_CLAIMED);
}


/*
 * ************************************************************************** *
 *                                                                            *
 *                                 libraries                                  *
 *                                                                            *
 * ************************************************************************** *
 */
/*
 * get_mfi_pkt : Get a command from the free pool
 */
static struct megasas_cmd *
get_mfi_pkt(struct megasas_instance *instance)
{
	mlist_t			*head = &instance->cmd_pool_list;
	struct megasas_cmd	*cmd = NULL;

	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	if (!mlist_empty(head)) {
		cmd = mlist_entry(head->next, struct megasas_cmd, list);
		mlist_del_init(head->next);
	}
	if (cmd != NULL)
		cmd->pkt = NULL;
	mutex_exit(&instance->cmd_pool_mtx);

	return (cmd);
}

/*
 * return_mfi_pkt : Return a cmd to free command pool
 */
static void
return_mfi_pkt(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	mlist_add(&cmd->list, &instance->cmd_pool_list);

	mutex_exit(&instance->cmd_pool_mtx);
}
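
/*
 * get_mfi_pkt() and return_mfi_pkt() are used as a pair: every command
 * taken from the free pool must eventually be handed back with
 * return_mfi_pkt() (after firmware completion or on a submission
 * failure), otherwise the command pool would be exhausted.
 */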

/*
 * destroy_mfi_frame_pool
 */
static void
destroy_mfi_frame_pool(struct megasas_instance *instance)
{
	int		i;
	uint32_t	max_cmd = instance->max_fw_cmds;

	struct megasas_cmd	*cmd;

	/* return all frames to pool */
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
			(void) mega_free_dma_obj(instance, cmd->frame_dma_obj);

		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
	}

}

/*
 * create_mfi_frame_pool
 */
static int
create_mfi_frame_pool(struct megasas_instance *instance)
{
	int		i = 0;
	int		cookie_cnt;
	uint16_t	max_cmd;
	uint16_t	sge_sz;
	uint32_t	sgl_sz;
	uint32_t	tot_frame_size;

	struct megasas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;

	sge_sz = sizeof (struct megasas_sge64);

	/* calculate the number of 64-byte frames required for the SGL */
	sgl_sz = sge_sz * instance->max_num_sge;
	tot_frame_size = sgl_sz + MEGAMFI_FRAME_SIZE + SENSE_LENGTH;

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
	    "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));

	while (i < max_cmd) {
		cmd = instance->cmd_list[i];

		cmd->frame_dma_obj.size = tot_frame_size;
		cmd->frame_dma_obj.dma_attr = megasas_generic_dma_attr;
		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;


		cookie_cnt = mega_alloc_dma_obj(instance, &cmd->frame_dma_obj);

		if (cookie_cnt == -1 || cookie_cnt > 1) {
			con_log(CL_ANN, (CE_WARN,
			    "create_mfi_frame_pool: could not alloc."));
			return (DDI_FAILURE);
		}

		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);

		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
		cmd->frame = (union megasas_frame *)cmd->frame_dma_obj.buffer;
		cmd->frame_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;

		cmd->sense = (uint8_t *)(((unsigned long)
		    cmd->frame_dma_obj.buffer) +
		    tot_frame_size - SENSE_LENGTH);
		cmd->sense_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
		    tot_frame_size - SENSE_LENGTH;

		if (!cmd->frame || !cmd->sense) {
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: pci_pool_alloc failed \n"));

			return (-ENOMEM);
		}

		cmd->frame->io.context = cmd->index;
		i++;

		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
		    cmd->frame->io.context, cmd->frame_phys_addr));
	}

	return (DDI_SUCCESS);
}
1905
1906 /*
1907 * free_additional_dma_buffer
1908 */
1909 static void
free_additional_dma_buffer(struct megasas_instance * instance)1910 free_additional_dma_buffer(struct megasas_instance *instance)
1911 {
1912 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
1913 (void) mega_free_dma_obj(instance,
1914 instance->mfi_internal_dma_obj);
1915 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
1916 }
1917
1918 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
1919 (void) mega_free_dma_obj(instance,
1920 instance->mfi_evt_detail_obj);
1921 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
1922 }
1923 }
1924
1925 /*
1926 * alloc_additional_dma_buffer
1927 */
1928 static int
1929 alloc_additional_dma_buffer(struct megasas_instance *instance)
1930 {
1931 uint32_t reply_q_sz;
1932 uint32_t internal_buf_size = PAGESIZE*2;
1933
1934 /* max cmds plus 1 + producer & consumer */
1935 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
1936
1937 instance->mfi_internal_dma_obj.size = internal_buf_size;
1938 instance->mfi_internal_dma_obj.dma_attr = megasas_generic_dma_attr;
1939 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1940 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
1941 0xFFFFFFFFU;
1942 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
1943
1944 if (mega_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj)
1945 != 1) {
1946 con_log(CL_ANN, (CE_WARN, "megaraid: could not alloc reply Q"));
1947 return (DDI_FAILURE);
1948 }
1949
1950 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
1951
1952 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
1953
1954 instance->producer = (uint32_t *)((unsigned long)
1955 instance->mfi_internal_dma_obj.buffer);
1956 instance->consumer = (uint32_t *)((unsigned long)
1957 instance->mfi_internal_dma_obj.buffer + 4);
1958 instance->reply_queue = (uint32_t *)((unsigned long)
1959 instance->mfi_internal_dma_obj.buffer + 8);
1960 instance->internal_buf = (caddr_t)(((unsigned long)
1961 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
1962 instance->internal_buf_dmac_add =
1963 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
1964 	    (reply_q_sz + 8);
1965 instance->internal_buf_size = internal_buf_size -
1966 (reply_q_sz + 8);
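	/*
	 * Layout sketch of the internal DMA object carved up above:
	 *
	 *	+0			producer index (uint32_t)
	 *	+4			consumer index (uint32_t)
	 *	+8			reply queue (max_fw_cmds + 1 entries)
	 *	+reply_q_sz + 8		internal_buf, a scratch buffer for
	 *				internal DCMDs (e.g. MR_DCMD_CTRL_GET_INFO)
	 */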
1967
1968 /* allocate evt_detail */
1969 instance->mfi_evt_detail_obj.size = sizeof (struct megasas_evt_detail);
1970 instance->mfi_evt_detail_obj.dma_attr = megasas_generic_dma_attr;
1971 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1972 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1973 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
1974 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
1975
1976 if (mega_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj) != 1) {
1977 con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
1978 		    "could not allocate data transfer buffer."));
1979 return (DDI_FAILURE);
1980 }
1981
1982 bzero(instance->mfi_evt_detail_obj.buffer,
1983 sizeof (struct megasas_evt_detail));
1984
1985 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
1986
1987 return (DDI_SUCCESS);
1988 }
1989
1990 /*
1991 * free_space_for_mfi
1992 */
1993 static void
1994 free_space_for_mfi(struct megasas_instance *instance)
1995 {
1996 int i;
1997 uint32_t max_cmd = instance->max_fw_cmds;
1998
1999 /* already freed */
2000 if (instance->cmd_list == NULL) {
2001 return;
2002 }
2003
2004 free_additional_dma_buffer(instance);
2005
2006 /* first free the MFI frame pool */
2007 destroy_mfi_frame_pool(instance);
2008
2009 /* free all the commands in the cmd_list */
2010 for (i = 0; i < instance->max_fw_cmds; i++) {
2011 kmem_free(instance->cmd_list[i],
2012 sizeof (struct megasas_cmd));
2013
2014 instance->cmd_list[i] = NULL;
2015 }
2016
2017 /* free the cmd_list buffer itself */
2018 kmem_free(instance->cmd_list,
2019 sizeof (struct megasas_cmd *) * max_cmd);
2020
2021 instance->cmd_list = NULL;
2022
2023 INIT_LIST_HEAD(&instance->cmd_pool_list);
2024 }
2025
2026 /*
2027 * alloc_space_for_mfi
2028 */
2029 static int
2030 alloc_space_for_mfi(struct megasas_instance *instance)
2031 {
2032 int i;
2033 uint32_t max_cmd;
2034 size_t sz;
2035
2036 struct megasas_cmd *cmd;
2037
2038 max_cmd = instance->max_fw_cmds;
2039 sz = sizeof (struct megasas_cmd *) * max_cmd;
2040
2041 /*
2042 * instance->cmd_list is an array of struct megasas_cmd pointers.
2043 * Allocate the dynamic array first and then allocate individual
2044 * commands.
2045 */
2046 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
2047 ASSERT(instance->cmd_list);
2048
2049 for (i = 0; i < max_cmd; i++) {
2050 instance->cmd_list[i] = kmem_zalloc(sizeof (struct megasas_cmd),
2051 KM_SLEEP);
2052 ASSERT(instance->cmd_list[i]);
2053 }
2054
2055 INIT_LIST_HEAD(&instance->cmd_pool_list);
2056
2057 /* add all the commands to command pool (instance->cmd_pool) */
2058 for (i = 0; i < max_cmd; i++) {
2059 cmd = instance->cmd_list[i];
2060 cmd->index = i;
2061
2062 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2063 }
2064
2065 /* create a frame pool and assign one frame to each cmd */
2066 if (create_mfi_frame_pool(instance)) {
2067 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
2068 return (DDI_FAILURE);
2069 }
2070
2071 	/* allocate additional DMA buffers (reply queue & event detail) */
2072 	if (alloc_additional_dma_buffer(instance)) {
2073 		con_log(CL_ANN, (CE_NOTE, "error allocating additional DMA buffer\n"));
2074 return (DDI_FAILURE);
2075 }
2076
2077 return (DDI_SUCCESS);
2078 }
2079
2080 /*
2081 * get_ctrl_info
2082 */
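/*
 * Issues the MR_DCMD_CTRL_GET_INFO DCMD in polled mode. The firmware DMAs
 * the controller-info structure into the pre-allocated internal_buf, and the
 * result is then copied into the caller's ctrl_info.
 */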
2083 static int
2084 get_ctrl_info(struct megasas_instance *instance,
2085 struct megasas_ctrl_info *ctrl_info)
2086 {
2087 int ret = 0;
2088
2089 struct megasas_cmd *cmd;
2090 struct megasas_dcmd_frame *dcmd;
2091 struct megasas_ctrl_info *ci;
2092
2093 cmd = get_mfi_pkt(instance);
2094
2095 if (!cmd) {
2096 con_log(CL_ANN, (CE_WARN,
2097 "Failed to get a cmd for ctrl info\n"));
2098 return (DDI_FAILURE);
2099 }
2100
2101 dcmd = &cmd->frame->dcmd;
2102
2103 ci = (struct megasas_ctrl_info *)instance->internal_buf;
2104
2105 if (!ci) {
2106 con_log(CL_ANN, (CE_WARN,
2107 "Failed to alloc mem for ctrl info\n"));
2108 return_mfi_pkt(instance, cmd);
2109 return (DDI_FAILURE);
2110 }
2111
2112 (void) memset(ci, 0, sizeof (struct megasas_ctrl_info));
2113
2114 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
2115 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2116
2117 dcmd->cmd = MFI_CMD_OP_DCMD;
2118 dcmd->cmd_status = MFI_CMD_STATUS_POLL_MODE;
2119 dcmd->sge_count = 1;
2120 dcmd->flags = MFI_FRAME_DIR_READ;
2121 dcmd->timeout = 0;
2122 dcmd->data_xfer_len = sizeof (struct megasas_ctrl_info);
2123 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2124 dcmd->sgl.sge32[0].phys_addr = instance->internal_buf_dmac_add;
2125 dcmd->sgl.sge32[0].length = sizeof (struct megasas_ctrl_info);
2126
2127 cmd->frame_count = 1;
2128
2129 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2130 ret = 0;
2131 (void) memcpy(ctrl_info, ci, sizeof (struct megasas_ctrl_info));
2132 } else {
2133 con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed\n"));
2134 ret = -1;
2135 }
2136
2137 return_mfi_pkt(instance, cmd);
2138 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2139 ret = -1;
2140 }
2141
2142 return (ret);
2143 }
2144
2145 /*
2146 * abort_aen_cmd
2147 */
2148 static int
2149 abort_aen_cmd(struct megasas_instance *instance,
2150 struct megasas_cmd *cmd_to_abort)
2151 {
2152 int ret = 0;
2153
2154 struct megasas_cmd *cmd;
2155 struct megasas_abort_frame *abort_fr;
2156
2157 cmd = get_mfi_pkt(instance);
2158
2159 if (!cmd) {
2160 con_log(CL_ANN, (CE_WARN,
2161 		    "Failed to get a cmd to abort AEN\n"));
2162 return (DDI_FAILURE);
2163 }
2164
2165 abort_fr = &cmd->frame->abort;
2166
2167 /* prepare and issue the abort frame */
2168 abort_fr->cmd = MFI_CMD_OP_ABORT;
2169 abort_fr->cmd_status = MFI_CMD_STATUS_SYNC_MODE;
2170 abort_fr->flags = 0;
2171 abort_fr->abort_context = cmd_to_abort->index;
2172 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
2173 abort_fr->abort_mfi_phys_addr_hi = 0;
2174
2175 instance->aen_cmd->abort_aen = 1;
2176
2177 cmd->sync_cmd = MEGASAS_TRUE;
2178 cmd->frame_count = 1;
2179
2180 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2181 con_log(CL_ANN, (CE_WARN,
2182 "abort_aen_cmd: issue_cmd_in_sync_mode failed\n"));
2183 ret = -1;
2184 } else {
2185 ret = 0;
2186 }
2187
2188 instance->aen_cmd->abort_aen = 1;
2189 instance->aen_cmd = 0;
2190
2191 return_mfi_pkt(instance, cmd);
2192 (void) megasas_common_check(instance, cmd);
2193
2194 return (ret);
2195 }
2196
2197 /*
2198 * init_mfi
2199 */
2200 static int
2201 init_mfi(struct megasas_instance *instance)
2202 {
2203 off_t reglength;
2204 struct megasas_cmd *cmd;
2205 struct megasas_ctrl_info ctrl_info;
2206 struct megasas_init_frame *init_frame;
2207 struct megasas_init_queue_info *initq_info;
2208
2209 	if ((ddi_dev_regsize(instance->dip, REGISTER_SET_IO, &reglength)
2210 != DDI_SUCCESS) || reglength < MINIMUM_MFI_MEM_SZ) {
2211 return (DDI_FAILURE);
2212 }
2213
2214 if (reglength > DEFAULT_MFI_MEM_SZ) {
2215 reglength = DEFAULT_MFI_MEM_SZ;
2216 con_log(CL_DLEVEL1, (CE_NOTE,
2217 "mega: register length to map is 0x%lx bytes", reglength));
2218 }
2219
2220 if (ddi_regs_map_setup(instance->dip, REGISTER_SET_IO,
2221 &instance->regmap, 0, reglength, &endian_attr,
2222 &instance->regmap_handle) != DDI_SUCCESS) {
2223 con_log(CL_ANN, (CE_NOTE,
2224 "megaraid: couldn't map control registers"));
2225
2226 goto fail_mfi_reg_setup;
2227 }
2228
2229 /* we expect the FW state to be READY */
2230 if (mfi_state_transition_to_ready(instance)) {
2231 con_log(CL_ANN, (CE_WARN, "megaraid: F/W is not ready"));
2232 goto fail_ready_state;
2233 }
2234
2235 /* get various operational parameters from status register */
2236 instance->max_num_sge =
2237 (instance->func_ptr->read_fw_status_reg(instance) &
2238 0xFF0000) >> 0x10;
2239 /*
2240 * Reduce the max supported cmds by 1. This is to ensure that the
2241 * reply_q_sz (1 more than the max cmd that driver may send)
2242 * does not exceed max cmds that the FW can support
2243 */
2244 instance->max_fw_cmds =
2245 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
2246 instance->max_fw_cmds = instance->max_fw_cmds - 1;
2247
2248 instance->max_num_sge =
2249 (instance->max_num_sge > MEGASAS_MAX_SGE_CNT) ?
2250 MEGASAS_MAX_SGE_CNT : instance->max_num_sge;
2251
2252 /* create a pool of commands */
2253 if (alloc_space_for_mfi(instance))
2254 goto fail_alloc_fw_space;
2255
2256 /* disable interrupt for initial preparation */
2257 instance->func_ptr->disable_intr(instance);
2258
2259 /*
2260 	 * Prepare an init frame. Note that the init frame points to the queue
2261 	 * info structure. Each frame has its SGL allocated after the first 64
2262 	 * bytes. Since this frame does not need an SGL, its SGL space is used
2263 	 * for the queue info structure.
2264 */
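	/*
	 * Frame layout used below (a sketch): the first 64 bytes hold the
	 * struct megasas_init_frame; the SGL area that follows at
	 * frame_phys_addr + 64 is reused for struct megasas_init_queue_info,
	 * which is what queue_info_new_phys_addr_lo points at.
	 */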
2265 cmd = get_mfi_pkt(instance);
2266
2267 init_frame = (struct megasas_init_frame *)cmd->frame;
2268 initq_info = (struct megasas_init_queue_info *)
2269 ((unsigned long)init_frame + 64);
2270
2271 (void) memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
2272 (void) memset(initq_info, 0, sizeof (struct megasas_init_queue_info));
2273
2274 initq_info->init_flags = 0;
2275
2276 initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
2277
2278 initq_info->producer_index_phys_addr_hi = 0;
2279 initq_info->producer_index_phys_addr_lo =
2280 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
2281
2282 initq_info->consumer_index_phys_addr_hi = 0;
2283 initq_info->consumer_index_phys_addr_lo =
2284 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4;
2285
2286 initq_info->reply_queue_start_phys_addr_hi = 0;
2287 initq_info->reply_queue_start_phys_addr_lo =
2288 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8;
2289
2290 init_frame->cmd = MFI_CMD_OP_INIT;
2291 init_frame->cmd_status = MFI_CMD_STATUS_POLL_MODE;
2292 init_frame->flags = 0;
2293 init_frame->queue_info_new_phys_addr_lo =
2294 cmd->frame_phys_addr + 64;
2295 init_frame->queue_info_new_phys_addr_hi = 0;
2296
2297 init_frame->data_xfer_len = sizeof (struct megasas_init_queue_info);
2298
2299 cmd->frame_count = 1;
2300
2301 /* issue the init frame in polled mode */
2302 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2303 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
2304 goto fail_fw_init;
2305 }
2306
2307 return_mfi_pkt(instance, cmd);
2308 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2309 goto fail_fw_init;
2310 }
2311
2312 /* gather misc FW related information */
2313 if (!get_ctrl_info(instance, &ctrl_info)) {
2314 instance->max_sectors_per_req = ctrl_info.max_request_size;
2315 con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d",
2316 ctrl_info.product_name, ctrl_info.ld_present_count));
2317 } else {
2318 instance->max_sectors_per_req = instance->max_num_sge *
2319 PAGESIZE / 512;
2320 }
2321
2322 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2323 goto fail_fw_init;
2324 }
2325
2326 return (0);
2327
2328 fail_fw_init:
2329 fail_alloc_fw_space:
2330
2331 free_space_for_mfi(instance);
2332
2333 fail_ready_state:
2334 ddi_regs_map_free(&instance->regmap_handle);
2335
2336 fail_mfi_reg_setup:
2337 return (DDI_FAILURE);
2338 }
2339
2340 /*
2341 * mfi_state_transition_to_ready : Move the FW to READY state
2342 *
2343  * @instance : Adapter soft state
2344 */
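/*
 * Polling sketch: read the firmware state from the outbound status register,
 * nudge the firmware with the inbound doorbell write appropriate for the
 * current state (handshake, hotplug, operational, ...), then poll for up to
 * max_wait seconds for the state to change, repeating until MFI_STATE_READY.
 */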
2345 static int
2346 mfi_state_transition_to_ready(struct megasas_instance *instance)
2347 {
2348 int i;
2349 uint8_t max_wait;
2350 uint32_t fw_ctrl;
2351 uint32_t fw_state;
2352 uint32_t cur_state;
2353
2354 fw_state =
2355 instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK;
2356 con_log(CL_ANN1, (CE_NOTE,
2357 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
2358
2359 while (fw_state != MFI_STATE_READY) {
2360 con_log(CL_ANN, (CE_NOTE,
2361 "mfi_state_transition_to_ready:FW state%x", fw_state));
2362
2363 switch (fw_state) {
2364 case MFI_STATE_FAULT:
2365 con_log(CL_ANN, (CE_NOTE,
2366 "megasas: FW in FAULT state!!"));
2367
2368 return (-ENODEV);
2369 case MFI_STATE_WAIT_HANDSHAKE:
2370 /* set the CLR bit in IMR0 */
2371 con_log(CL_ANN, (CE_NOTE,
2372 "megasas: FW waiting for HANDSHAKE"));
2373 /*
2374 * PCI_Hot Plug: MFI F/W requires
2375 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2376 * to be set
2377 */
2378 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
2379 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
2380 MFI_INIT_HOTPLUG, instance);
2381
2382 max_wait = 2;
2383 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2384 break;
2385 case MFI_STATE_BOOT_MESSAGE_PENDING:
2386 /* set the CLR bit in IMR0 */
2387 con_log(CL_ANN, (CE_NOTE,
2388 "megasas: FW state boot message pending"));
2389 /*
2390 * PCI_Hot Plug: MFI F/W requires
2391 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2392 * to be set
2393 */
2394 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
2395
2396 max_wait = 10;
2397 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2398 break;
2399 case MFI_STATE_OPERATIONAL:
2400 /* bring it to READY state; assuming max wait 2 secs */
2401 instance->func_ptr->disable_intr(instance);
2402 con_log(CL_ANN1, (CE_NOTE,
2403 "megasas: FW in OPERATIONAL state"));
2404 /*
2405 * PCI_Hot Plug: MFI F/W requires
2406 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
2407 * to be set
2408 */
2409 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
2410 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
2411
2412 max_wait = 10;
2413 cur_state = MFI_STATE_OPERATIONAL;
2414 break;
2415 case MFI_STATE_UNDEFINED:
2416 /* this state should not last for more than 2 seconds */
2417 con_log(CL_ANN, (CE_NOTE, "FW state undefined\n"));
2418
2419 max_wait = 2;
2420 cur_state = MFI_STATE_UNDEFINED;
2421 break;
2422 case MFI_STATE_BB_INIT:
2423 max_wait = 2;
2424 cur_state = MFI_STATE_BB_INIT;
2425 break;
2426 case MFI_STATE_FW_INIT:
2427 max_wait = 2;
2428 cur_state = MFI_STATE_FW_INIT;
2429 break;
2430 case MFI_STATE_DEVICE_SCAN:
2431 max_wait = 10;
2432 cur_state = MFI_STATE_DEVICE_SCAN;
2433 break;
2434 default:
2435 con_log(CL_ANN, (CE_NOTE,
2436 "megasas: Unknown state 0x%x\n", fw_state));
2437 return (-ENODEV);
2438 }
2439
2440 /* the cur_state should not last for more than max_wait secs */
2441 for (i = 0; i < (max_wait * MILLISEC); i++) {
2442 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
2443 fw_state =
2444 instance->func_ptr->read_fw_status_reg(instance) &
2445 MFI_STATE_MASK;
2446
2447 if (fw_state == cur_state) {
2448 delay(1 * drv_usectohz(MILLISEC));
2449 } else {
2450 break;
2451 }
2452 }
2453
2454 /* return error if fw_state hasn't changed after max_wait */
2455 if (fw_state == cur_state) {
2456 con_log(CL_ANN, (CE_NOTE,
2457 "FW state hasn't changed in %d secs\n", max_wait));
2458 return (-ENODEV);
2459 }
2460 	}
2461
2462 fw_ctrl = RD_IB_DOORBELL(instance);
2463
2464 con_log(CL_ANN1, (CE_NOTE,
2465 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
2466
2467 /*
2468 * Write 0xF to the doorbell register to do the following.
2469 * - Abort all outstanding commands (bit 0).
2470 * - Transition from OPERATIONAL to READY state (bit 1).
2471 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
2472 * - Set to release FW to continue running (i.e. BIOS handshake
2473 * (bit 3).
2474 */
2475 WR_IB_DOORBELL(0xF, instance);
2476
2477 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2478 return (-ENODEV);
2479 }
2480 return (0);
2481 }
2482
2483 /*
2484 * get_seq_num
2485 */
2486 static int
2487 get_seq_num(struct megasas_instance *instance,
2488 struct megasas_evt_log_info *eli)
2489 {
2490 int ret = 0;
2491
2492 dma_obj_t dcmd_dma_obj;
2493 struct megasas_cmd *cmd;
2494 struct megasas_dcmd_frame *dcmd;
2495
2496 cmd = get_mfi_pkt(instance);
2497
2498 if (!cmd) {
2499 cmn_err(CE_WARN, "megasas: failed to get a cmd\n");
2500 return (-ENOMEM);
2501 }
2502
2503 dcmd = &cmd->frame->dcmd;
2504
2505 /* allocate the data transfer buffer */
2506 dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info);
2507 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
2508 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2509 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2510 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
2511 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
2512
2513 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
2514 con_log(CL_ANN, (CE_WARN,
2515 		    "get_seq_num: could not allocate data transfer buffer."));
2516 return (DDI_FAILURE);
2517 }
2518
2519 (void) memset(dcmd_dma_obj.buffer, 0,
2520 sizeof (struct megasas_evt_log_info));
2521
2522 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2523
2524 dcmd->cmd = MFI_CMD_OP_DCMD;
2525 dcmd->cmd_status = 0;
2526 dcmd->sge_count = 1;
2527 dcmd->flags = MFI_FRAME_DIR_READ;
2528 dcmd->timeout = 0;
2529 dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info);
2530 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
2531 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info);
2532 dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
2533
2534 cmd->sync_cmd = MEGASAS_TRUE;
2535 cmd->frame_count = 1;
2536
2537 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2538 cmn_err(CE_WARN, "get_seq_num: "
2539 "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n");
2540 ret = -1;
2541 } else {
2542 /* copy the data back into callers buffer */
2543 bcopy(dcmd_dma_obj.buffer, eli,
2544 sizeof (struct megasas_evt_log_info));
2545 ret = 0;
2546 }
2547
2548 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
2549 ret = -1;
2550
2551 return_mfi_pkt(instance, cmd);
2552 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2553 ret = -1;
2554 }
2555 return (ret);
2556 }
2557
2558 /*
2559 * start_mfi_aen
2560 */
2561 static int
2562 start_mfi_aen(struct megasas_instance *instance)
2563 {
2564 int ret = 0;
2565
2566 struct megasas_evt_log_info eli;
2567 union megasas_evt_class_locale class_locale;
2568
2569 /* get the latest sequence number from FW */
2570 (void) memset(&eli, 0, sizeof (struct megasas_evt_log_info));
2571
2572 if (get_seq_num(instance, &eli)) {
2573 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num\n");
2574 return (-1);
2575 }
2576
2577 /* register AEN with FW for latest sequence number plus 1 */
2578 class_locale.members.reserved = 0;
2579 class_locale.members.locale = MR_EVT_LOCALE_ALL;
2580 class_locale.members.class = MR_EVT_CLASS_CRITICAL;
2581
2582 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
2583 class_locale.word);
2584
2585 if (ret) {
2586 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed\n");
2587 return (-1);
2588 }
2589
2590 return (ret);
2591 }
2592
2593 /*
2594 * flush_cache
2595 */
2596 static void
2597 flush_cache(struct megasas_instance *instance)
2598 {
2599 struct megasas_cmd *cmd;
2600 struct megasas_dcmd_frame *dcmd;
2601
2602 if (!(cmd = get_mfi_pkt(instance)))
2603 return;
2604
2605 dcmd = &cmd->frame->dcmd;
2606
2607 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2608
2609 dcmd->cmd = MFI_CMD_OP_DCMD;
2610 dcmd->cmd_status = 0x0;
2611 dcmd->sge_count = 0;
2612 dcmd->flags = MFI_FRAME_DIR_NONE;
2613 dcmd->timeout = 0;
2614 dcmd->data_xfer_len = 0;
2615 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
2616 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
2617
2618 cmd->frame_count = 1;
2619
2620 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2621 cmn_err(CE_WARN,
2622 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH\n");
2623 }
2624 con_log(CL_DLEVEL1, (CE_NOTE, "done"));
2625 return_mfi_pkt(instance, cmd);
2626 (void) megasas_common_check(instance, cmd);
2627 }
2628
2629 /*
2630 * service_mfi_aen- Completes an AEN command
2631 * @instance: Adapter soft state
2632 * @cmd: Command to be completed
2633 *
2634 */
2635 static void
2636 service_mfi_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2637 {
2638 uint32_t seq_num;
2639 struct megasas_evt_detail *evt_detail =
2640 (struct megasas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
2641
2642 cmd->cmd_status = cmd->frame->io.cmd_status;
2643
2644 if (cmd->cmd_status == ENODATA) {
2645 cmd->cmd_status = 0;
2646 }
2647
2648 /*
2649 * log the MFI AEN event to the sysevent queue so that
2650 	 * applications will be notified
2651 */
2652 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
2653 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
2654 int instance_no = ddi_get_instance(instance->dip);
2655 con_log(CL_ANN, (CE_WARN,
2656 "mega%d: Failed to log AEN event", instance_no));
2657 }
2658
2659 /* get copy of seq_num and class/locale for re-registration */
2660 seq_num = evt_detail->seq_num;
2661 seq_num++;
2662 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
2663 sizeof (struct megasas_evt_detail));
2664
2665 cmd->frame->dcmd.cmd_status = 0x0;
2666 cmd->frame->dcmd.mbox.w[0] = seq_num;
2667
2668 instance->aen_seq_num = seq_num;
2669
2670 cmd->frame_count = 1;
2671
2672 /* Issue the aen registration frame */
2673 instance->func_ptr->issue_cmd(cmd, instance);
2674 }
2675
2676 /*
2677 * complete_cmd_in_sync_mode - Completes an internal command
2678 * @instance: Adapter soft state
2679 * @cmd: Command to be completed
2680 *
2681  * The issue_cmd_in_sync_mode() function waits for a command to complete
2682  * after it issues a command. This function wakes up that waiting thread by
2683  * broadcasting on the int_cmd_cv condition variable.
2684 */
2685 static void
2686 complete_cmd_in_sync_mode(struct megasas_instance *instance,
2687 struct megasas_cmd *cmd)
2688 {
2689 cmd->cmd_status = cmd->frame->io.cmd_status;
2690
2691 cmd->sync_cmd = MEGASAS_FALSE;
2692
2693 if (cmd->cmd_status == ENODATA) {
2694 cmd->cmd_status = 0;
2695 }
2696
2697 cv_broadcast(&instance->int_cmd_cv);
2698 }
2699
2700 /*
2701 * megasas_softintr - The Software ISR
2702  * @param instance : HBA soft state
2703  *
2704  * Called directly from the hardware interrupt handler when high-level
2705  * interrupts are not in use; otherwise triggered as a soft interrupt.
2706 */
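/*
 * Completion flow sketch: the hardware interrupt handler places completed
 * commands on instance->completed_pool_list; this routine splices that list
 * onto a private list under completed_pool_mtx and then finishes each command
 * according to its frame type (LD/PD SCSI and LD read/write, DCMD, SMP/STP,
 * ABORT).
 */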
2707 static uint_t
2708 megasas_softintr(struct megasas_instance *instance)
2709 {
2710 struct scsi_pkt *pkt;
2711 struct scsa_cmd *acmd;
2712 struct megasas_cmd *cmd;
2713 struct mlist_head *pos, *next;
2714 mlist_t process_list;
2715 struct megasas_header *hdr;
2716 struct scsi_arq_status *arqstat;
2717
2718 con_log(CL_ANN1, (CE_CONT, "megasas_softintr called"));
2719
2720 ASSERT(instance);
2721 mutex_enter(&instance->completed_pool_mtx);
2722
2723 if (mlist_empty(&instance->completed_pool_list)) {
2724 mutex_exit(&instance->completed_pool_mtx);
2725 return (DDI_INTR_UNCLAIMED);
2726 }
2727
2728 instance->softint_running = 1;
2729
2730 INIT_LIST_HEAD(&process_list);
2731 mlist_splice(&instance->completed_pool_list, &process_list);
2732 INIT_LIST_HEAD(&instance->completed_pool_list);
2733
2734 mutex_exit(&instance->completed_pool_mtx);
2735
2736 /* perform all callbacks first, before releasing the SCBs */
2737 mlist_for_each_safe(pos, next, &process_list) {
2738 cmd = mlist_entry(pos, struct megasas_cmd, list);
2739
2740 		/* synchronize the Cmd frame for the controller */
2741 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
2742 0, 0, DDI_DMA_SYNC_FORCPU);
2743
2744 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
2745 DDI_SUCCESS) {
2746 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2747 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2748 return (DDI_INTR_UNCLAIMED);
2749 }
2750
2751 hdr = &cmd->frame->hdr;
2752
2753 /* remove the internal command from the process list */
2754 mlist_del_init(&cmd->list);
2755
2756 switch (hdr->cmd) {
2757 case MFI_CMD_OP_PD_SCSI:
2758 case MFI_CMD_OP_LD_SCSI:
2759 case MFI_CMD_OP_LD_READ:
2760 case MFI_CMD_OP_LD_WRITE:
2761 /*
2762 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
2763 * could have been issued either through an
2764 * IO path or an IOCTL path. If it was via IOCTL,
2765 * we will send it to internal completion.
2766 */
2767 if (cmd->sync_cmd == MEGASAS_TRUE) {
2768 complete_cmd_in_sync_mode(instance, cmd);
2769 break;
2770 }
2771
2772 /* regular commands */
2773 acmd = cmd->cmd;
2774 pkt = CMD2PKT(acmd);
2775
2776 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2777 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2778 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2779 acmd->cmd_dma_offset,
2780 acmd->cmd_dma_len,
2781 DDI_DMA_SYNC_FORCPU);
2782 }
2783 }
2784
2785 pkt->pkt_reason = CMD_CMPLT;
2786 pkt->pkt_statistics = 0;
2787 pkt->pkt_state = STATE_GOT_BUS
2788 | STATE_GOT_TARGET | STATE_SENT_CMD
2789 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2790
2791 con_log(CL_ANN1, (CE_CONT,
2792 "CDB[0] = %x completed for %s: size %lx context %x",
2793 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
2794 acmd->cmd_dmacount, hdr->context));
2795
2796 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2797 struct scsi_inquiry *inq;
2798
2799 if (acmd->cmd_dmacount != 0) {
2800 bp_mapin(acmd->cmd_buf);
2801 inq = (struct scsi_inquiry *)
2802 acmd->cmd_buf->b_un.b_addr;
2803
2804 /* don't expose physical drives to OS */
2805 if (acmd->islogical &&
2806 (hdr->cmd_status == MFI_STAT_OK)) {
2807 display_scsi_inquiry(
2808 (caddr_t)inq);
2809 } else if ((hdr->cmd_status ==
2810 MFI_STAT_OK) && inq->inq_dtype ==
2811 DTYPE_DIRECT) {
2812
2813 display_scsi_inquiry(
2814 (caddr_t)inq);
2815
2816 /* for physical disk */
2817 hdr->cmd_status =
2818 MFI_STAT_DEVICE_NOT_FOUND;
2819 }
2820 }
2821 }
2822
2823 switch (hdr->cmd_status) {
2824 case MFI_STAT_OK:
2825 pkt->pkt_scbp[0] = STATUS_GOOD;
2826 break;
2827 case MFI_STAT_LD_CC_IN_PROGRESS:
2828 case MFI_STAT_LD_RECON_IN_PROGRESS:
2829 /* SJ - these are not correct way */
2830 pkt->pkt_scbp[0] = STATUS_GOOD;
2831 break;
2832 case MFI_STAT_LD_INIT_IN_PROGRESS:
2833 con_log(CL_ANN,
2834 (CE_WARN, "Initialization in Progress"));
2835 pkt->pkt_reason = CMD_TRAN_ERR;
2836
2837 break;
2838 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2839 con_log(CL_ANN1, (CE_CONT, "scsi_done error"));
2840
2841 pkt->pkt_reason = CMD_CMPLT;
2842 ((struct scsi_status *)
2843 pkt->pkt_scbp)->sts_chk = 1;
2844
2845 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2846
2847 con_log(CL_ANN,
2848 (CE_WARN, "TEST_UNIT_READY fail"));
2849
2850 } else {
2851 pkt->pkt_state |= STATE_ARQ_DONE;
2852 arqstat = (void *)(pkt->pkt_scbp);
2853 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2854 arqstat->sts_rqpkt_resid = 0;
2855 arqstat->sts_rqpkt_state |=
2856 STATE_GOT_BUS | STATE_GOT_TARGET
2857 | STATE_SENT_CMD
2858 | STATE_XFERRED_DATA;
2859 *(uint8_t *)&arqstat->sts_rqpkt_status =
2860 STATUS_GOOD;
2861
2862 bcopy(cmd->sense,
2863 &(arqstat->sts_sensedata),
2864 acmd->cmd_scblen -
2865 offsetof(struct scsi_arq_status,
2866 sts_sensedata));
2867 }
2868 break;
2869 case MFI_STAT_LD_OFFLINE:
2870 case MFI_STAT_DEVICE_NOT_FOUND:
2871 con_log(CL_ANN1, (CE_CONT,
2872 "device not found error"));
2873 pkt->pkt_reason = CMD_DEV_GONE;
2874 pkt->pkt_statistics = STAT_DISCON;
2875 break;
2876 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2877 pkt->pkt_state |= STATE_ARQ_DONE;
2878 pkt->pkt_reason = CMD_CMPLT;
2879 ((struct scsi_status *)
2880 pkt->pkt_scbp)->sts_chk = 1;
2881
2882 arqstat = (void *)(pkt->pkt_scbp);
2883 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2884 arqstat->sts_rqpkt_resid = 0;
2885 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2886 | STATE_GOT_TARGET | STATE_SENT_CMD
2887 | STATE_XFERRED_DATA;
2888 *(uint8_t *)&arqstat->sts_rqpkt_status =
2889 STATUS_GOOD;
2890
2891 arqstat->sts_sensedata.es_valid = 1;
2892 arqstat->sts_sensedata.es_key =
2893 KEY_ILLEGAL_REQUEST;
2894 arqstat->sts_sensedata.es_class =
2895 CLASS_EXTENDED_SENSE;
2896
2897 /*
2898 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2899 * ASC: 0x21h; ASCQ: 0x00h;
2900 */
2901 arqstat->sts_sensedata.es_add_code = 0x21;
2902 arqstat->sts_sensedata.es_qual_code = 0x00;
2903
2904 break;
2905
2906 default:
2907 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
2908 pkt->pkt_reason = CMD_TRAN_ERR;
2909
2910 break;
2911 }
2912
2913 atomic_add_16(&instance->fw_outstanding, (-1));
2914
2915 return_mfi_pkt(instance, cmd);
2916
2917 (void) megasas_common_check(instance, cmd);
2918
2919 if (acmd->cmd_dmahandle) {
2920 if (megasas_check_dma_handle(
2921 acmd->cmd_dmahandle) != DDI_SUCCESS) {
2922 ddi_fm_service_impact(instance->dip,
2923 DDI_SERVICE_UNAFFECTED);
2924 pkt->pkt_reason = CMD_TRAN_ERR;
2925 pkt->pkt_statistics = 0;
2926 }
2927 }
2928
2929 /* Call the callback routine */
2930 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2931 scsi_hba_pkt_comp(pkt);
2932 }
2933
2934 break;
2935 case MFI_CMD_OP_SMP:
2936 case MFI_CMD_OP_STP:
2937 complete_cmd_in_sync_mode(instance, cmd);
2938 break;
2939 case MFI_CMD_OP_DCMD:
2940 /* see if got an event notification */
2941 if (cmd->frame->dcmd.opcode ==
2942 MR_DCMD_CTRL_EVENT_WAIT) {
2943 if ((instance->aen_cmd == cmd) &&
2944 (instance->aen_cmd->abort_aen)) {
2945 con_log(CL_ANN, (CE_WARN,
2946 "megasas_softintr: "
2947 "aborted_aen returned"));
2948 } else {
2949 service_mfi_aen(instance, cmd);
2950
2951 atomic_add_16(&instance->fw_outstanding,
2952 (-1));
2953 }
2954 } else {
2955 complete_cmd_in_sync_mode(instance, cmd);
2956 }
2957
2958 break;
2959 case MFI_CMD_OP_ABORT:
2960 con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
2961 /*
2962 * MFI_CMD_OP_ABORT successfully completed
2963 * in the synchronous mode
2964 */
2965 complete_cmd_in_sync_mode(instance, cmd);
2966 break;
2967 default:
2968 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2969 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2970
2971 if (cmd->pkt != NULL) {
2972 pkt = cmd->pkt;
2973 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2974 scsi_hba_pkt_comp(pkt);
2975 }
2976 }
2977 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !!"));
2978 break;
2979 }
2980 }
2981
2982 instance->softint_running = 0;
2983
2984 return (DDI_INTR_CLAIMED);
2985 }
2986
2987 /*
2988 * mega_alloc_dma_obj
2989 *
2990  * Allocate the memory and other resources for a DMA object.
2991 */
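/*
 * The allocation follows the standard DDI sequence: ddi_dma_alloc_handle()
 * constrained by obj->dma_attr, ddi_dma_mem_alloc() for the kernel buffer,
 * then ddi_dma_addr_bind_handle() to obtain the DMA cookie(s). The cookie
 * count is returned so that callers can insist on a single cookie.
 * Typical usage elsewhere in this file (a sketch):
 *
 *	dma_obj_t obj;
 *	obj.size = xferlen;
 *	obj.dma_attr = megasas_generic_dma_attr;
 *	obj.dma_attr.dma_attr_sgllen = 1;
 *	if (mega_alloc_dma_obj(instance, &obj) != 1)
 *		return (DDI_FAILURE);
 *	...
 *	(void) mega_free_dma_obj(instance, obj);
 */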
2992 static int
2993 mega_alloc_dma_obj(struct megasas_instance *instance, dma_obj_t *obj)
2994 {
2995 int i;
2996 size_t alen = 0;
2997 uint_t cookie_cnt;
2998 struct ddi_device_acc_attr tmp_endian_attr;
2999
3000 tmp_endian_attr = endian_attr;
3001 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
3002 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
3003 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
3004 if (i != DDI_SUCCESS) {
3005
3006 switch (i) {
3007 case DDI_DMA_BADATTR :
3008 con_log(CL_ANN, (CE_WARN,
3009 "Failed ddi_dma_alloc_handle- Bad atrib"));
3010 break;
3011 case DDI_DMA_NORESOURCES :
3012 con_log(CL_ANN, (CE_WARN,
3013 "Failed ddi_dma_alloc_handle- No Resources"));
3014 break;
3015 default :
3016 con_log(CL_ANN, (CE_WARN,
3017 "Failed ddi_dma_alloc_handle :unknown %d", i));
3018 break;
3019 }
3020
3021 return (-1);
3022 }
3023
3024 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
3025 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
3026 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
3027 alen < obj->size) {
3028
3029 ddi_dma_free_handle(&obj->dma_handle);
3030
3031 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
3032
3033 return (-1);
3034 }
3035
3036 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
3037 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
3038 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
3039
3040 ddi_dma_mem_free(&obj->acc_handle);
3041 ddi_dma_free_handle(&obj->dma_handle);
3042
3043 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
3044
3045 return (-1);
3046 }
3047
3048 if (megasas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
3049 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3050 return (-1);
3051 }
3052
3053 if (megasas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
3054 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3055 return (-1);
3056 }
3057
3058 return (cookie_cnt);
3059 }
3060
3061 /*
3062 * mega_free_dma_obj(struct megasas_instance *, dma_obj_t)
3063 *
3064  * De-allocate the memory and other resources for a DMA object, which must
3065  * have been allocated by a previous call to mega_alloc_dma_obj()
3066 */
3067 static int
3068 mega_free_dma_obj(struct megasas_instance *instance, dma_obj_t obj)
3069 {
3070
3071 if (megasas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
3072 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3073 return (DDI_FAILURE);
3074 }
3075
3076 if (megasas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
3077 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3078 return (DDI_FAILURE);
3079 }
3080
3081 (void) ddi_dma_unbind_handle(obj.dma_handle);
3082 ddi_dma_mem_free(&obj.acc_handle);
3083 ddi_dma_free_handle(&obj.dma_handle);
3084
3085 return (DDI_SUCCESS);
3086 }
3087
3088 /*
3089 * megasas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
3090 * int, int (*)())
3091 *
3092 * Allocate dma resources for a new scsi command
3093 */
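/*
 * Partial-mapping sketch: when the target driver passes PKT_DMA_PARTIAL and
 * ddi_dma_buf_bind_handle() returns DDI_DMA_PARTIAL_MAP, the transfer is
 * split into DMA windows. This routine selects the first window with
 * ddi_dma_getwin() and gathers up to max_num_sge cookies;
 * megasas_dma_move() walks the remaining cookies/windows on later calls.
 */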
3094 static int
3095 megasas_dma_alloc(struct megasas_instance *instance, struct scsi_pkt *pkt,
3096 struct buf *bp, int flags, int (*callback)())
3097 {
3098 int dma_flags;
3099 int (*cb)(caddr_t);
3100 int i;
3101
3102 ddi_dma_attr_t tmp_dma_attr = megasas_generic_dma_attr;
3103 struct scsa_cmd *acmd = PKT2CMD(pkt);
3104
3105 acmd->cmd_buf = bp;
3106
3107 if (bp->b_flags & B_READ) {
3108 acmd->cmd_flags &= ~CFLAG_DMASEND;
3109 dma_flags = DDI_DMA_READ;
3110 } else {
3111 acmd->cmd_flags |= CFLAG_DMASEND;
3112 dma_flags = DDI_DMA_WRITE;
3113 }
3114
3115 if (flags & PKT_CONSISTENT) {
3116 acmd->cmd_flags |= CFLAG_CONSISTENT;
3117 dma_flags |= DDI_DMA_CONSISTENT;
3118 }
3119
3120 if (flags & PKT_DMA_PARTIAL) {
3121 dma_flags |= DDI_DMA_PARTIAL;
3122 }
3123
3124 dma_flags |= DDI_DMA_REDZONE;
3125
3126 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
3127
3128 tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
3129 tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
3130
3131 if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
3132 cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
3133 switch (i) {
3134 case DDI_DMA_BADATTR:
3135 bioerror(bp, EFAULT);
3136 return (-1);
3137
3138 case DDI_DMA_NORESOURCES:
3139 bioerror(bp, 0);
3140 return (-1);
3141
3142 default:
3143 con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
3144 "0x%x impossible\n", i));
3145 bioerror(bp, EFAULT);
3146 return (-1);
3147 }
3148 }
3149
3150 i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
3151 cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);
3152
3153 switch (i) {
3154 case DDI_DMA_PARTIAL_MAP:
3155 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
3156 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
3157 "DDI_DMA_PARTIAL_MAP impossible\n"));
3158 goto no_dma_cookies;
3159 }
3160
3161 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
3162 DDI_FAILURE) {
3163 con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed\n"));
3164 goto no_dma_cookies;
3165 }
3166
3167 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
3168 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
3169 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
3170 DDI_FAILURE) {
3171
3172 con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed\n"));
3173 goto no_dma_cookies;
3174 }
3175
3176 goto get_dma_cookies;
3177 case DDI_DMA_MAPPED:
3178 acmd->cmd_nwin = 1;
3179 acmd->cmd_dma_len = 0;
3180 acmd->cmd_dma_offset = 0;
3181
3182 get_dma_cookies:
3183 i = 0;
3184 acmd->cmd_dmacount = 0;
3185 for (;;) {
3186 acmd->cmd_dmacount +=
3187 acmd->cmd_dmacookies[i++].dmac_size;
3188
3189 if (i == instance->max_num_sge ||
3190 i == acmd->cmd_ncookies)
3191 break;
3192
3193 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3194 &acmd->cmd_dmacookies[i]);
3195 }
3196
3197 acmd->cmd_cookie = i;
3198 acmd->cmd_cookiecnt = i;
3199
3200 acmd->cmd_flags |= CFLAG_DMAVALID;
3201
3202 if (bp->b_bcount >= acmd->cmd_dmacount) {
3203 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
3204 } else {
3205 pkt->pkt_resid = 0;
3206 }
3207
3208 return (0);
3209 case DDI_DMA_NORESOURCES:
3210 bioerror(bp, 0);
3211 break;
3212 case DDI_DMA_NOMAPPING:
3213 bioerror(bp, EFAULT);
3214 break;
3215 case DDI_DMA_TOOBIG:
3216 bioerror(bp, EINVAL);
3217 break;
3218 case DDI_DMA_INUSE:
3219 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
3220 " DDI_DMA_INUSE impossible\n"));
3221 break;
3222 default:
3223 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
3224 "0x%x impossible\n", i));
3225 break;
3226 }
3227
3228 no_dma_cookies:
3229 ddi_dma_free_handle(&acmd->cmd_dmahandle);
3230 acmd->cmd_dmahandle = NULL;
3231 acmd->cmd_flags &= ~CFLAG_DMAVALID;
3232 return (-1);
3233 }
3234
3235 /*
3236 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *)
3237 *
3238 * move dma resources to next dma window
3239 *
3240 */
3241 static int
3242 megasas_dma_move(struct megasas_instance *instance, struct scsi_pkt *pkt,
3243 struct buf *bp)
3244 {
3245 int i = 0;
3246
3247 struct scsa_cmd *acmd = PKT2CMD(pkt);
3248
3249 /*
3250 * If there are no more cookies remaining in this window,
3251 * must move to the next window first.
3252 */
3253 if (acmd->cmd_cookie == acmd->cmd_ncookies) {
3254 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
3255 return (0);
3256 }
3257
3258 /* at last window, cannot move */
3259 if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
3260 return (-1);
3261 }
3262
3263 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
3264 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
3265 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
3266 DDI_FAILURE) {
3267 return (-1);
3268 }
3269
3270 acmd->cmd_cookie = 0;
3271 } else {
3272 /* still more cookies in this window - get the next one */
3273 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3274 &acmd->cmd_dmacookies[0]);
3275 }
3276
3277 /* get remaining cookies in this window, up to our maximum */
3278 for (;;) {
3279 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
3280 acmd->cmd_cookie++;
3281
3282 if (i == instance->max_num_sge ||
3283 acmd->cmd_cookie == acmd->cmd_ncookies) {
3284 break;
3285 }
3286
3287 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3288 &acmd->cmd_dmacookies[i]);
3289 }
3290
3291 acmd->cmd_cookiecnt = i;
3292
3293 if (bp->b_bcount >= acmd->cmd_dmacount) {
3294 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
3295 } else {
3296 pkt->pkt_resid = 0;
3297 }
3298
3299 return (0);
3300 }
3301
3302 /*
3303 * build_cmd
3304 */
3305 static struct megasas_cmd *
3306 build_cmd(struct megasas_instance *instance, struct scsi_address *ap,
3307 struct scsi_pkt *pkt, uchar_t *cmd_done)
3308 {
3309 uint16_t flags = 0;
3310 uint32_t i;
3311 uint32_t context;
3312 uint32_t sge_bytes;
3313
3314 struct megasas_cmd *cmd;
3315 struct megasas_sge64 *mfi_sgl;
3316 struct scsa_cmd *acmd = PKT2CMD(pkt);
3317 struct megasas_pthru_frame *pthru;
3318 struct megasas_io_frame *ldio;
3319
3320 /* find out if this is logical or physical drive command. */
3321 acmd->islogical = MEGADRV_IS_LOGICAL(ap);
3322 acmd->device_id = MAP_DEVICE_ID(instance, ap);
3323 *cmd_done = 0;
3324
3325 /* get the command packet */
3326 if (!(cmd = get_mfi_pkt(instance))) {
3327 return (NULL);
3328 }
3329
3330 cmd->pkt = pkt;
3331 cmd->cmd = acmd;
3332
3333 /* lets get the command directions */
3334 if (acmd->cmd_flags & CFLAG_DMASEND) {
3335 flags = MFI_FRAME_DIR_WRITE;
3336
3337 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3338 (void) ddi_dma_sync(acmd->cmd_dmahandle,
3339 acmd->cmd_dma_offset, acmd->cmd_dma_len,
3340 DDI_DMA_SYNC_FORDEV);
3341 }
3342 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
3343 flags = MFI_FRAME_DIR_READ;
3344
3345 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3346 (void) ddi_dma_sync(acmd->cmd_dmahandle,
3347 acmd->cmd_dma_offset, acmd->cmd_dma_len,
3348 DDI_DMA_SYNC_FORCPU);
3349 }
3350 } else {
3351 flags = MFI_FRAME_DIR_NONE;
3352 }
3353
3354 flags |= MFI_FRAME_SGL64;
3355
3356 switch (pkt->pkt_cdbp[0]) {
3357
3358 /*
3359 * case SCMD_SYNCHRONIZE_CACHE:
3360 * flush_cache(instance);
3361 * return_mfi_pkt(instance, cmd);
3362 * *cmd_done = 1;
3363 *
3364 * return (NULL);
3365 */
3366
3367 case SCMD_READ:
3368 case SCMD_WRITE:
3369 case SCMD_READ_G1:
3370 case SCMD_WRITE_G1:
3371 if (acmd->islogical) {
3372 ldio = (struct megasas_io_frame *)cmd->frame;
3373
3374 /*
3375 			 * prepare the Logical IO frame:
3376 * 2nd bit is zero for all read cmds
3377 */
3378 ldio->cmd = (pkt->pkt_cdbp[0] & 0x02) ?
3379 MFI_CMD_OP_LD_WRITE : MFI_CMD_OP_LD_READ;
3380 ldio->cmd_status = 0x0;
3381 ldio->scsi_status = 0x0;
3382 ldio->target_id = acmd->device_id;
3383 ldio->timeout = 0;
3384 ldio->reserved_0 = 0;
3385 ldio->pad_0 = 0;
3386 ldio->flags = flags;
3387
3388 /* Initialize sense Information */
3389 bzero(cmd->sense, SENSE_LENGTH);
3390 ldio->sense_len = SENSE_LENGTH;
3391 ldio->sense_buf_phys_addr_hi = 0;
3392 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3393
3394 ldio->start_lba_hi = 0;
3395 ldio->access_byte = (acmd->cmd_cdblen != 6) ?
3396 pkt->pkt_cdbp[1] : 0;
3397 ldio->sge_count = acmd->cmd_cookiecnt;
3398 mfi_sgl = (struct megasas_sge64 *)&ldio->sgl;
3399
3400 context = ldio->context;
3401
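			/*
			 * Decode the LBA and block count from the CDB.
			 * For example, a 10-byte READ(10)/WRITE(10) CDB
			 * (the CDB_GROUP1 case below) carries a big-endian
			 * LBA in bytes 2-5 and the block count in bytes 7-8.
			 */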
3402 if (acmd->cmd_cdblen == CDB_GROUP0) {
3403 ldio->lba_count = host_to_le16(
3404 (uint16_t)(pkt->pkt_cdbp[4]));
3405
3406 ldio->start_lba_lo = host_to_le32(
3407 ((uint32_t)(pkt->pkt_cdbp[3])) |
3408 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
3409 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
3410 << 16));
3411 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
3412 ldio->lba_count = host_to_le16(
3413 ((uint16_t)(pkt->pkt_cdbp[8])) |
3414 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
3415
3416 ldio->start_lba_lo = host_to_le32(
3417 ((uint32_t)(pkt->pkt_cdbp[5])) |
3418 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3419 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3420 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3421 } else if (acmd->cmd_cdblen == CDB_GROUP2) {
3422 ldio->lba_count = host_to_le16(
3423 ((uint16_t)(pkt->pkt_cdbp[9])) |
3424 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) |
3425 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) |
3426 ((uint16_t)(pkt->pkt_cdbp[6]) << 24));
3427
3428 ldio->start_lba_lo = host_to_le32(
3429 ((uint32_t)(pkt->pkt_cdbp[5])) |
3430 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3431 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3432 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3433 } else if (acmd->cmd_cdblen == CDB_GROUP3) {
3434 ldio->lba_count = host_to_le16(
3435 ((uint16_t)(pkt->pkt_cdbp[13])) |
3436 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) |
3437 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) |
3438 ((uint16_t)(pkt->pkt_cdbp[10]) << 24));
3439
3440 ldio->start_lba_lo = host_to_le32(
3441 ((uint32_t)(pkt->pkt_cdbp[9])) |
3442 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
3443 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
3444 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
3445
3446 				ldio->start_lba_hi = host_to_le32(
3447 ((uint32_t)(pkt->pkt_cdbp[5])) |
3448 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3449 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3450 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3451 }
3452
3453 break;
3454 }
3455 	/* FALLTHROUGH: for all non-read/write cmds */
3456 default:
3457 pthru = (struct megasas_pthru_frame *)cmd->frame;
3458
3459 /* prepare the DCDB frame */
3460 pthru->cmd = (acmd->islogical) ?
3461 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI;
3462 pthru->cmd_status = 0x0;
3463 pthru->scsi_status = 0x0;
3464 pthru->target_id = acmd->device_id;
3465 pthru->lun = 0;
3466 pthru->cdb_len = acmd->cmd_cdblen;
3467 pthru->timeout = 0;
3468 pthru->flags = flags;
3469 pthru->data_xfer_len = acmd->cmd_dmacount;
3470 pthru->sge_count = acmd->cmd_cookiecnt;
3471 mfi_sgl = (struct megasas_sge64 *)&pthru->sgl;
3472
3473 bzero(cmd->sense, SENSE_LENGTH);
3474 pthru->sense_len = SENSE_LENGTH;
3475 pthru->sense_buf_phys_addr_hi = 0;
3476 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3477
3478 context = pthru->context;
3479
3480 bcopy(pkt->pkt_cdbp, pthru->cdb, acmd->cmd_cdblen);
3481
3482 break;
3483 }
3484 #ifdef lint
3485 context = context;
3486 #endif
3487 /* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */
3488
3489 /* prepare the scatter-gather list for the firmware */
3490 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
3491 mfi_sgl->phys_addr = acmd->cmd_dmacookies[i].dmac_laddress;
3492 mfi_sgl->length = acmd->cmd_dmacookies[i].dmac_size;
3493 }
3494
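	/*
	 * frame_count is the fixed 64-byte header frame plus however many
	 * additional 64-byte frames are needed to hold the SGL (sge_bytes
	 * rounded up), capped at 8 frames.
	 */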
3495 sge_bytes = sizeof (struct megasas_sge64)*acmd->cmd_cookiecnt;
3496
3497 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
3498 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
3499
3500 if (cmd->frame_count >= 8) {
3501 cmd->frame_count = 8;
3502 }
3503
3504 return (cmd);
3505 }
3506
3507 /*
3508 * wait_for_outstanding - Wait for all outstanding cmds
3509 * @instance: Adapter soft state
3510 *
3511  * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for the FW to
3512 * complete all its outstanding commands. Returns error if one or more IOs
3513 * are pending after this time period.
3514 */
3515 static int
3516 wait_for_outstanding(struct megasas_instance *instance)
3517 {
3518 int i;
3519 uint32_t wait_time = 90;
3520
3521 for (i = 0; i < wait_time; i++) {
3522 if (!instance->fw_outstanding) {
3523 break;
3524 }
3525
3526 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3527 }
3528
3529 if (instance->fw_outstanding) {
3530 return (1);
3531 }
3532
3533 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VERSION);
3534
3535 return (0);
3536 }
3537
3538 /*
3539 * issue_mfi_pthru
3540 */
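/*
 * The user-land MFI frame embedded in the ioctl carries the user buffer
 * address in its first SGE (sge32 or sge64, depending on the caller's data
 * model). The data is staged through a kernel DMA buffer with
 * ddi_copyin()/ddi_copyout() rather than mapping the user address directly.
 */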
3541 static int
3542 issue_mfi_pthru(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3543 struct megasas_cmd *cmd, int mode)
3544 {
3545 void *ubuf;
3546 uint32_t kphys_addr = 0;
3547 uint32_t xferlen = 0;
3548 uint_t model;
3549
3550 dma_obj_t pthru_dma_obj;
3551 struct megasas_pthru_frame *kpthru;
3552 struct megasas_pthru_frame *pthru;
3553
3554 pthru = &cmd->frame->pthru;
3555 kpthru = (struct megasas_pthru_frame *)&ioctl->frame[0];
3556
3557 model = ddi_model_convert_from(mode & FMODELS);
3558 if (model == DDI_MODEL_ILP32) {
3559 		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_ILP32"));
3560
3561 xferlen = kpthru->sgl.sge32[0].length;
3562
3563 /* SJ! - ubuf needs to be virtual address. */
3564 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3565 } else {
3566 #ifdef _ILP32
3567 		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_ILP32"));
3568 xferlen = kpthru->sgl.sge32[0].length;
3569 /* SJ! - ubuf needs to be virtual address. */
3570 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3571 #else
3572 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64"));
3573 xferlen = kpthru->sgl.sge64[0].length;
3574 /* SJ! - ubuf needs to be virtual address. */
3575 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
3576 #endif
3577 }
3578
3579 if (xferlen) {
3580 /* means IOCTL requires DMA */
3581 /* allocate the data transfer buffer */
3582 pthru_dma_obj.size = xferlen;
3583 pthru_dma_obj.dma_attr = megasas_generic_dma_attr;
3584 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3585 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3586 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
3587 pthru_dma_obj.dma_attr.dma_attr_align = 1;
3588
3589 /* allocate kernel buffer for DMA */
3590 if (mega_alloc_dma_obj(instance, &pthru_dma_obj) != 1) {
3591 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3592 			    "could not allocate data transfer buffer."));
3593 return (DDI_FAILURE);
3594 }
3595
3596 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3597 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
3598 if (ddi_copyin(ubuf, (void *)pthru_dma_obj.buffer,
3599 xferlen, mode)) {
3600 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3601 "copy from user space failed\n"));
3602 return (1);
3603 }
3604 }
3605
3606 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
3607 }
3608
3609 pthru->cmd = kpthru->cmd;
3610 pthru->sense_len = kpthru->sense_len;
3611 pthru->cmd_status = kpthru->cmd_status;
3612 pthru->scsi_status = kpthru->scsi_status;
3613 pthru->target_id = kpthru->target_id;
3614 pthru->lun = kpthru->lun;
3615 pthru->cdb_len = kpthru->cdb_len;
3616 pthru->sge_count = kpthru->sge_count;
3617 pthru->timeout = kpthru->timeout;
3618 pthru->data_xfer_len = kpthru->data_xfer_len;
3619
3620 pthru->sense_buf_phys_addr_hi = 0;
3621 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
3622 pthru->sense_buf_phys_addr_lo = 0;
3623
3624 bcopy((void *)kpthru->cdb, (void *)pthru->cdb, pthru->cdb_len);
3625
3626 pthru->flags = kpthru->flags & ~MFI_FRAME_SGL64;
3627 pthru->sgl.sge32[0].length = xferlen;
3628 pthru->sgl.sge32[0].phys_addr = kphys_addr;
3629
3630 cmd->sync_cmd = MEGASAS_TRUE;
3631 cmd->frame_count = 1;
3632
3633 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3634 con_log(CL_ANN, (CE_WARN,
3635 "issue_mfi_pthru: fw_ioctl failed\n"));
3636 } else {
3637 if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) {
3638
3639 if (ddi_copyout(pthru_dma_obj.buffer, ubuf,
3640 xferlen, mode)) {
3641 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3642 "copy to user space failed\n"));
3643 return (1);
3644 }
3645 }
3646 }
3647
3648 kpthru->cmd_status = pthru->cmd_status;
3649 kpthru->scsi_status = pthru->scsi_status;
3650
3651 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, "
3652 "scsi_status %x\n", pthru->cmd_status, pthru->scsi_status));
3653
3654 if (xferlen) {
3655 /* free kernel buffer */
3656 if (mega_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
3657 return (1);
3658 }
3659
3660 return (0);
3661 }
3662
3663 /*
3664 * issue_mfi_dcmd
3665 */
3666 static int
3667 issue_mfi_dcmd(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3668 struct megasas_cmd *cmd, int mode)
3669 {
3670 void *ubuf;
3671 uint32_t kphys_addr = 0;
3672 uint32_t xferlen = 0;
3673 uint32_t model;
3674 dma_obj_t dcmd_dma_obj;
3675 struct megasas_dcmd_frame *kdcmd;
3676 struct megasas_dcmd_frame *dcmd;
3677
3678 dcmd = &cmd->frame->dcmd;
3679 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
3680
3681 model = ddi_model_convert_from(mode & FMODELS);
3682 if (model == DDI_MODEL_ILP32) {
3683 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3684
3685 xferlen = kdcmd->sgl.sge32[0].length;
3686
3687 /* SJ! - ubuf needs to be virtual address. */
3688 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3689 	} else {
3692 #ifdef _ILP32
3693 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3694 xferlen = kdcmd->sgl.sge32[0].length;
3695 /* SJ! - ubuf needs to be virtual address. */
3696 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3697 #else
3698 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64"));
3699 xferlen = kdcmd->sgl.sge64[0].length;
3700 /* SJ! - ubuf needs to be virtual address. */
3701 		ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
3702 #endif
3703 }
3704 if (xferlen) {
3705 /* means IOCTL requires DMA */
3706 /* allocate the data transfer buffer */
3707 dcmd_dma_obj.size = xferlen;
3708 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
3709 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3710 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3711 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3712 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3713
3714 /* allocate kernel buffer for DMA */
3715 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
3716 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3717 			    "could not allocate data transfer buffer."));
3718 return (DDI_FAILURE);
3719 }
3720
3721 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3722 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
3723 if (ddi_copyin(ubuf, (void *)dcmd_dma_obj.buffer,
3724 xferlen, mode)) {
3725 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3726 "copy from user space failed\n"));
3727 return (1);
3728 }
3729 }
3730
3731 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
3732 }
3733
3734 dcmd->cmd = kdcmd->cmd;
3735 dcmd->cmd_status = kdcmd->cmd_status;
3736 dcmd->sge_count = kdcmd->sge_count;
3737 dcmd->timeout = kdcmd->timeout;
3738 dcmd->data_xfer_len = kdcmd->data_xfer_len;
3739 dcmd->opcode = kdcmd->opcode;
3740
3741 bcopy((void *)kdcmd->mbox.b, (void *)dcmd->mbox.b, DCMD_MBOX_SZ);
3742
3743 dcmd->flags = kdcmd->flags & ~MFI_FRAME_SGL64;
3744 dcmd->sgl.sge32[0].length = xferlen;
3745 dcmd->sgl.sge32[0].phys_addr = kphys_addr;
3746
3747 cmd->sync_cmd = MEGASAS_TRUE;
3748 cmd->frame_count = 1;
3749
3750 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3751 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed\n"));
3752 } else {
3753 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
3754
3755 if (ddi_copyout(dcmd_dma_obj.buffer, ubuf,
3756 xferlen, mode)) {
3757 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3758 "copy to user space failed\n"));
3759 return (1);
3760 }
3761 }
3762 }
3763
3764 kdcmd->cmd_status = dcmd->cmd_status;
3765
3766 if (xferlen) {
3767 /* free kernel buffer */
3768 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
3769 return (1);
3770 }
3771
3772 return (0);
3773 }
3774
3775 /*
3776 * issue_mfi_smp
3777 */
3778 static int
3779 issue_mfi_smp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3780 struct megasas_cmd *cmd, int mode)
3781 {
3782 void *request_ubuf;
3783 void *response_ubuf;
3784 uint32_t request_xferlen = 0;
3785 uint32_t response_xferlen = 0;
3786 uint_t model;
3787 dma_obj_t request_dma_obj;
3788 dma_obj_t response_dma_obj;
3789 struct megasas_smp_frame *ksmp;
3790 struct megasas_smp_frame *smp;
3791 struct megasas_sge32 *sge32;
3792 #ifndef _ILP32
3793 struct megasas_sge64 *sge64;
3794 #endif
3795
3796 smp = &cmd->frame->smp;
3797 ksmp = (struct megasas_smp_frame *)&ioctl->frame[0];
3798
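	/*
	 * sge[0] of the user-supplied SMP frame is treated as the response
	 * buffer and sge[1] as the request buffer.
	 */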
3799 model = ddi_model_convert_from(mode & FMODELS);
3800 if (model == DDI_MODEL_ILP32) {
3801 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3802
3803 sge32 = &ksmp->sgl[0].sge32[0];
3804 response_xferlen = sge32[0].length;
3805 request_xferlen = sge32[1].length;
3806 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3807 "response_xferlen = %x, request_xferlen = %x",
3808 response_xferlen, request_xferlen));
3809
3810 /* SJ! - ubuf needs to be virtual address. */
3811
3812 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
3813 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
3814 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3815 "response_ubuf = %p, request_ubuf = %p",
3816 response_ubuf, request_ubuf));
3817 } else {
3818 #ifdef _ILP32
3819 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3820
3821 sge32 = &ksmp->sgl[0].sge32[0];
3822 response_xferlen = sge32[0].length;
3823 request_xferlen = sge32[1].length;
3824 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3825 "response_xferlen = %x, request_xferlen = %x",
3826 response_xferlen, request_xferlen));
3827
3828 /* SJ! - ubuf needs to be virtual address. */
3829
3830 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
3831 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
3832 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3833 "response_ubuf = %p, request_ubuf = %p",
3834 response_ubuf, request_ubuf));
3835 #else
3836 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64"));
3837
3838 sge64 = &ksmp->sgl[0].sge64[0];
3839 response_xferlen = sge64[0].length;
3840 request_xferlen = sge64[1].length;
3841
3842 /* SJ! - ubuf needs to be virtual address. */
3843 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
3844 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
3845 #endif
3846 }
3847 if (request_xferlen) {
3848 /* means IOCTL requires DMA */
3849 /* allocate the data transfer buffer */
3850 request_dma_obj.size = request_xferlen;
3851 request_dma_obj.dma_attr = megasas_generic_dma_attr;
3852 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3853 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3854 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
3855 request_dma_obj.dma_attr.dma_attr_align = 1;
3856
3857 /* allocate kernel buffer for DMA */
3858 if (mega_alloc_dma_obj(instance, &request_dma_obj) != 1) {
3859 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3860 			    "could not allocate data transfer buffer."));
3861 return (DDI_FAILURE);
3862 }
3863
3864 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3865 if (ddi_copyin(request_ubuf, (void *) request_dma_obj.buffer,
3866 request_xferlen, mode)) {
3867 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3868 "copy from user space failed\n"));
3869 return (1);
3870 }
3871 }
3872
3873 if (response_xferlen) {
3874 /* means IOCTL requires DMA */
3875 /* allocate the data transfer buffer */
3876 response_dma_obj.size = response_xferlen;
3877 response_dma_obj.dma_attr = megasas_generic_dma_attr;
3878 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3879 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3880 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
3881 response_dma_obj.dma_attr.dma_attr_align = 1;
3882
3883 /* allocate kernel buffer for DMA */
3884 if (mega_alloc_dma_obj(instance, &response_dma_obj) != 1) {
3885 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3886 			    "could not allocate data transfer buffer."));
3887 return (DDI_FAILURE);
3888 }
3889
3890 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3891 if (ddi_copyin(response_ubuf, (void *) response_dma_obj.buffer,
3892 response_xferlen, mode)) {
3893 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3894 "copy from user space failed\n"));
3895 return (1);
3896 }
3897 }
3898
3899 smp->cmd = ksmp->cmd;
3900 smp->cmd_status = ksmp->cmd_status;
3901 smp->connection_status = ksmp->connection_status;
3902 smp->sge_count = ksmp->sge_count;
3903 /* smp->context = ksmp->context; */
3904 smp->timeout = ksmp->timeout;
3905 smp->data_xfer_len = ksmp->data_xfer_len;
3906
3907 bcopy((void *)&ksmp->sas_addr, (void *)&smp->sas_addr,
3908 sizeof (uint64_t));
3909
3910 smp->flags = ksmp->flags & ~MFI_FRAME_SGL64;
3911
3912 model = ddi_model_convert_from(mode & FMODELS);
3913 if (model == DDI_MODEL_ILP32) {
3914 con_log(CL_ANN1, (CE_NOTE,
3915 "handle_drv_ioctl: DDI_MODEL_ILP32"));
3916
3917 sge32 = &smp->sgl[0].sge32[0];
3918 sge32[0].length = response_xferlen;
3919 sge32[0].phys_addr =
3920 response_dma_obj.dma_cookie[0].dmac_address;
3921 sge32[1].length = request_xferlen;
3922 sge32[1].phys_addr =
3923 request_dma_obj.dma_cookie[0].dmac_address;
3924 } else {
3925 #ifdef _ILP32
3926 con_log(CL_ANN1, (CE_NOTE,
3927 "handle_drv_ioctl: DDI_MODEL_ILP32"));
3928 sge32 = &smp->sgl[0].sge32[0];
3929 sge32[0].length = response_xferlen;
3930 sge32[0].phys_addr =
3931 response_dma_obj.dma_cookie[0].dmac_address;
3932 sge32[1].length = request_xferlen;
3933 sge32[1].phys_addr =
3934 request_dma_obj.dma_cookie[0].dmac_address;
3935 #else
3936 con_log(CL_ANN1, (CE_NOTE,
3937 "issue_mfi_smp: DDI_MODEL_LP64"));
3938 sge64 = &smp->sgl[0].sge64[0];
3939 sge64[0].length = response_xferlen;
3940 sge64[0].phys_addr =
3941 response_dma_obj.dma_cookie[0].dmac_address;
3942 sge64[1].length = request_xferlen;
3943 sge64[1].phys_addr =
3944 request_dma_obj.dma_cookie[0].dmac_address;
3945 #endif
3946 }
3947 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3948 "smp->response_xferlen = %d, smp->request_xferlen = %d "
3949 	    "smp->data_xfer_len = %d", response_xferlen, request_xferlen,
3950 smp->data_xfer_len));
3951
3952 cmd->sync_cmd = MEGASAS_TRUE;
3953 cmd->frame_count = 1;
3954
3955 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3956 con_log(CL_ANN, (CE_WARN,
3957 "issue_mfi_smp: fw_ioctl failed\n"));
3958 } else {
3959 con_log(CL_ANN1, (CE_NOTE,
3960 "issue_mfi_smp: copy to user space\n"));
3961
3962 if (request_xferlen) {
3963 if (ddi_copyout(request_dma_obj.buffer, request_ubuf,
3964 request_xferlen, mode)) {
3965 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3966 "copy to user space failed\n"));
3967 return (1);
3968 }
3969 }
3970
3971 if (response_xferlen) {
3972 if (ddi_copyout(response_dma_obj.buffer, response_ubuf,
3973 response_xferlen, mode)) {
3974 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3975 "copy to user space failed\n"));
3976 return (1);
3977 }
3978 }
3979 }
3980
3981 ksmp->cmd_status = smp->cmd_status;
3982 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
3983 smp->cmd_status));
3984
3985
3986 if (request_xferlen) {
3987 /* free kernel buffer */
3988 if (mega_free_dma_obj(instance, request_dma_obj) != DDI_SUCCESS)
3989 return (1);
3990 }
3991
3992 if (response_xferlen) {
3993 /* free kernel buffer */
3994 if (mega_free_dma_obj(instance, response_dma_obj) !=
3995 DDI_SUCCESS)
3996 return (1);
3997 }
3998
3999 return (0);
4000 }
4001
4002 /*
4003 * issue_mfi_stp
4004 */
4005 static int
4006 issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4007 struct megasas_cmd *cmd, int mode)
4008 {
4009 void *fis_ubuf;
4010 void *data_ubuf;
4011 uint32_t fis_xferlen = 0;
4012 uint32_t data_xferlen = 0;
4013 uint_t model;
4014 dma_obj_t fis_dma_obj;
4015 dma_obj_t data_dma_obj;
4016 struct megasas_stp_frame *kstp;
4017 struct megasas_stp_frame *stp;
4018
4019 stp = &cmd->frame->stp;
4020 kstp = (struct megasas_stp_frame *)&ioctl->frame[0];
4021
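	/*
	 * sge[0] of the user-supplied STP frame describes the FIS buffer and
	 * sge[1] the data buffer.
	 */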
4022 model = ddi_model_convert_from(mode & FMODELS);
4023 if (model == DDI_MODEL_ILP32) {
4024 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4025
4026 fis_xferlen = kstp->sgl.sge32[0].length;
4027 data_xferlen = kstp->sgl.sge32[1].length;
4028
4029 /* SJ! - ubuf needs to be virtual address. */
4030 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4031 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4032 }
4033 else
4034 {
4035 #ifdef _ILP32
4036 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4037
4038 fis_xferlen = kstp->sgl.sge32[0].length;
4039 data_xferlen = kstp->sgl.sge32[1].length;
4040
4041 /* SJ! - ubuf needs to be virtual address. */
4042 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4043 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4044 #else
4045 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));
4046
4047 fis_xferlen = kstp->sgl.sge64[0].length;
4048 data_xferlen = kstp->sgl.sge64[1].length;
4049
4050 /* SJ! - ubuf needs to be virtual address. */
4051 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
4052 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
4053 #endif
4054 }
4055
4056
4057 if (fis_xferlen) {
4058 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
4059 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
4060
4061 /* means IOCTL requires DMA */
4062 /* allocate the data transfer buffer */
4063 fis_dma_obj.size = fis_xferlen;
4064 fis_dma_obj.dma_attr = megasas_generic_dma_attr;
4065 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4066 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4067 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
4068 fis_dma_obj.dma_attr.dma_attr_align = 1;
4069
4070 /* allocate kernel buffer for DMA */
4071 if (mega_alloc_dma_obj(instance, &fis_dma_obj) != 1) {
4072 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4073 			    "could not allocate data transfer buffer."));
4074 return (DDI_FAILURE);
4075 }
4076
4077 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4078 if (ddi_copyin(fis_ubuf, (void *)fis_dma_obj.buffer,
4079 fis_xferlen, mode)) {
4080 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4081 "copy from user space failed\n"));
4082 return (1);
4083 }
4084 }
4085
4086 if (data_xferlen) {
4087 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
4088 "data_xferlen = %x", data_ubuf, data_xferlen));
4089
4090 /* means IOCTL requires DMA */
4091 /* allocate the data transfer buffer */
4092 data_dma_obj.size = data_xferlen;
4093 data_dma_obj.dma_attr = megasas_generic_dma_attr;
4094 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4095 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4096 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
4097 data_dma_obj.dma_attr.dma_attr_align = 1;
4098
4099 /* allocate kernel buffer for DMA */
4100 if (mega_alloc_dma_obj(instance, &data_dma_obj) != 1) {
4101 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4102 			    "could not allocate data transfer buffer."));
4103 return (DDI_FAILURE);
4104 }
4105
4106 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4107 if (ddi_copyin(data_ubuf, (void *) data_dma_obj.buffer,
4108 data_xferlen, mode)) {
4109 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4110 "copy from user space failed\n"));
4111 return (1);
4112 }
4113 }
4114
4115 stp->cmd = kstp->cmd;
4116 stp->cmd_status = kstp->cmd_status;
4117 stp->connection_status = kstp->connection_status;
4118 stp->target_id = kstp->target_id;
4119 stp->sge_count = kstp->sge_count;
4120 /* stp->context = kstp->context; */
4121 stp->timeout = kstp->timeout;
4122 stp->data_xfer_len = kstp->data_xfer_len;
4123
4124 	bcopy((void *)kstp->fis, (void *)stp->fis, sizeof (stp->fis));
4125
4126 stp->flags = kstp->flags & ~MFI_FRAME_SGL64;
4127 stp->stp_flags = kstp->stp_flags;
4128 stp->sgl.sge32[0].length = fis_xferlen;
4129 stp->sgl.sge32[0].phys_addr = fis_dma_obj.dma_cookie[0].dmac_address;
4130 stp->sgl.sge32[1].length = data_xferlen;
4131 stp->sgl.sge32[1].phys_addr = data_dma_obj.dma_cookie[0].dmac_address;
4132
4133 cmd->sync_cmd = MEGASAS_TRUE;
4134 cmd->frame_count = 1;
4135
4136 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4137 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed\n"));
4138 } else {
4139
4140 if (fis_xferlen) {
4141 if (ddi_copyout(fis_dma_obj.buffer, fis_ubuf,
4142 fis_xferlen, mode)) {
4143 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4144 "copy to user space failed\n"));
4145 return (1);
4146 }
4147 }
4148
4149 if (data_xferlen) {
4150 if (ddi_copyout(data_dma_obj.buffer, data_ubuf,
4151 data_xferlen, mode)) {
4152 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4153 "copy to user space failed\n"));
4154 return (1);
4155 }
4156 }
4157 }
4158
4159 kstp->cmd_status = stp->cmd_status;
4160
4161 if (fis_xferlen) {
4162 /* free kernel buffer */
4163 if (mega_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
4164 return (1);
4165 }
4166
4167 if (data_xferlen) {
4168 /* free kernel buffer */
4169 if (mega_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
4170 return (1);
4171 }
4172
4173 return (0);
4174 }
4175
4176 /*
4177 * fill_up_drv_ver
4178 */
4179 static void
4180 fill_up_drv_ver(struct megasas_drv_ver *dv)
4181 {
4182 (void) memset(dv, 0, sizeof (struct megasas_drv_ver));
4183
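	/*
	 * The structure is zero-filled above, so copying strlen() bytes
	 * leaves each field NUL-terminated provided the field is larger
	 * than the string (assumed here).
	 */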
4184 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
4185 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
4186 (void) memcpy(dv->drv_name, "megaraid_sas", strlen("megaraid_sas"));
4187 (void) memcpy(dv->drv_ver, MEGASAS_VERSION, strlen(MEGASAS_VERSION));
4188 (void) memcpy(dv->drv_rel_date, MEGASAS_RELDATE,
4189 strlen(MEGASAS_RELDATE));
4190 }
4191
4192 /*
4193 * handle_drv_ioctl
4194 */
4195 static int
4196 handle_drv_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4197 int mode)
4198 {
4199 int i;
4200 int rval = 0;
4201 int *props = NULL;
4202 void *ubuf;
4203
4204 uint8_t *pci_conf_buf;
4205 uint32_t xferlen;
4206 uint32_t num_props;
4207 uint_t model;
4208 struct megasas_dcmd_frame *kdcmd;
4209 struct megasas_drv_ver dv;
4210 struct megasas_pci_information pi;
4211
4212 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
4213
4214 model = ddi_model_convert_from(mode & FMODELS);
4215 if (model == DDI_MODEL_ILP32) {
4216 con_log(CL_ANN1, (CE_NOTE,
4217 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4218
4219 xferlen = kdcmd->sgl.sge32[0].length;
4220
4221 /* SJ! - ubuf needs to be virtual address. */
4222 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4223 } else {
4224 #ifdef _ILP32
4225 con_log(CL_ANN1, (CE_NOTE,
4226 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4227 xferlen = kdcmd->sgl.sge32[0].length;
4228 /* SJ! - ubuf needs to be virtual address. */
4229 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4230 #else
4231 con_log(CL_ANN1, (CE_NOTE,
4232 "handle_drv_ioctl: DDI_MODEL_LP64"));
4233 xferlen = kdcmd->sgl.sge64[0].length;
4234 /* SJ! - ubuf needs to be virtual address. */
4235 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
4236 #endif
4237 }
4238 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4239 "dataBuf=%p size=%d bytes", ubuf, xferlen));
4240
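	/*
	 * Driver-private opcodes are serviced entirely within the driver;
	 * no frame is issued to the firmware for these.
	 */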
4241 switch (kdcmd->opcode) {
4242 case MR_DRIVER_IOCTL_DRIVER_VERSION:
4243 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4244 "MR_DRIVER_IOCTL_DRIVER_VERSION"));
4245
4246 fill_up_drv_ver(&dv);
4247
4248 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
4249 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4250 "MR_DRIVER_IOCTL_DRIVER_VERSION : "
4251 "copy to user space failed\n"));
4252 kdcmd->cmd_status = 1;
4253 rval = 1;
4254 } else {
4255 kdcmd->cmd_status = 0;
4256 }
4257 break;
4258 case MR_DRIVER_IOCTL_PCI_INFORMATION:
4259 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4260 		    "MR_DRIVER_IOCTL_PCI_INFORMATION"));
4261
4262 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
4263 0, "reg", &props, &num_props)) {
4264 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4265 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4266 			    "ddi_prop_lookup_int_array failed\n"));
4267 rval = 1;
4268 } else {
4269
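			/*
			 * props[0] is the phys.hi cell of the first "reg"
			 * entry; per the PCI bus binding it carries the bus
			 * number in bits 23:16, the device number in bits
			 * 15:11 and the function number in bits 10:8, e.g.
			 * 0x00028100 -> bus 2, device 16, function 1.
			 */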
4270 pi.busNumber = (props[0] >> 16) & 0xFF;
4271 pi.deviceNumber = (props[0] >> 11) & 0x1f;
4272 pi.functionNumber = (props[0] >> 8) & 0x7;
4273 ddi_prop_free((void *)props);
4274 }
4275
4276 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
4277
4278 for (i = 0; i < (sizeof (struct megasas_pci_information) -
4279 offsetof(struct megasas_pci_information, pciHeaderInfo));
4280 i++) {
4281 pci_conf_buf[i] =
4282 pci_config_get8(instance->pci_handle, i);
4283 }
4284
4285 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
4286 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4287 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4288 "copy to user space failed\n"));
4289 kdcmd->cmd_status = 1;
4290 rval = 1;
4291 } else {
4292 kdcmd->cmd_status = 0;
4293 }
4294 break;
4295 default:
4296 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4297 "invalid driver specific IOCTL opcode = 0x%x",
4298 kdcmd->opcode));
4299 kdcmd->cmd_status = 1;
4300 rval = 1;
4301 break;
4302 }
4303
4304 return (rval);
4305 }
4306
4307 /*
4308 * handle_mfi_ioctl
4309 */
4310 static int
4311 handle_mfi_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4312 int mode)
4313 {
4314 int rval = 0;
4315
4316 struct megasas_header *hdr;
4317 struct megasas_cmd *cmd;
4318
4319 cmd = get_mfi_pkt(instance);
4320
4321 if (!cmd) {
4322 con_log(CL_ANN, (CE_WARN, "megasas: "
4323 "failed to get a cmd packet\n"));
4324 return (1);
4325 }
4326
4327 hdr = (struct megasas_header *)&ioctl->frame[0];
4328
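	/*
	 * Dispatch on the opcode of the user-built frame; each issue_mfi_*
	 * helper rebuilds the frame in a driver-owned command and remaps the
	 * user buffers onto kernel DMA buffers before issuing it.
	 */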
4329 switch (hdr->cmd) {
4330 case MFI_CMD_OP_DCMD:
4331 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
4332 break;
4333 case MFI_CMD_OP_SMP:
4334 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
4335 break;
4336 case MFI_CMD_OP_STP:
4337 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
4338 break;
4339 case MFI_CMD_OP_LD_SCSI:
4340 case MFI_CMD_OP_PD_SCSI:
4341 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
4342 break;
4343 default:
4344 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
4345 "invalid mfi ioctl hdr->cmd = %d\n", hdr->cmd));
4346 rval = 1;
4347 break;
4348 }
4349
4350
4351 return_mfi_pkt(instance, cmd);
4352 if (megasas_common_check(instance, cmd) != DDI_SUCCESS)
4353 rval = 1;
4354 return (rval);
4355 }
4356
4357 /*
4358 * AEN
4359 */
4360 static int
4361 handle_mfi_aen(struct megasas_instance *instance, struct megasas_aen *aen)
4362 {
4363 int rval = 0;
4364
4365 rval = register_mfi_aen(instance, instance->aen_seq_num,
4366 aen->class_locale_word);
4367
4368 aen->cmd_status = (uint8_t)rval;
4369
4370 return (rval);
4371 }
4372
4373 static int
4374 register_mfi_aen(struct megasas_instance *instance, uint32_t seq_num,
4375 uint32_t class_locale_word)
4376 {
4377 int ret_val;
4378
4379 struct megasas_cmd *cmd;
4380 struct megasas_dcmd_frame *dcmd;
4381 union megasas_evt_class_locale curr_aen;
4382 union megasas_evt_class_locale prev_aen;
4383
4384 /*
4385 	 * If there is an AEN pending already (aen_cmd), check if the
4386 * class_locale of that pending AEN is inclusive of the new
4387 * AEN request we currently have. If it is, then we don't have
4388 * to do anything. In other words, whichever events the current
4389 * AEN request is subscribing to, have already been subscribed
4390 	 * AEN request is subscribing to have already been subscribed
4391 *
4392 * If the old_cmd is _not_ inclusive, then we have to abort
4393 * that command, form a class_locale that is superset of both
4394 * old and current and re-issue to the FW
4395 	 * old and current and re-issue to the FW.
4396 	 */
4397 curr_aen.word = class_locale_word;
4398
4399 if (instance->aen_cmd) {
4400 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
4401
4402 /*
4403 * A class whose enum value is smaller is inclusive of all
4404 * higher values. If a PROGRESS (= -1) was previously
4405 		 * registered, then new registration requests for higher
4406 		 * classes need not be sent to the FW. They are automatically
4407 * included.
4408 *
4409 * Locale numbers don't have such hierarchy. They are bitmap
4410 * values
4411 */
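		/*
		 * Worked example (illustrative values only): with a pending
		 * registration of class PROGRESS (-1), locale 0x0001, a new
		 * request for a higher class with locale 0x0001 is already
		 * covered, so nothing is re-issued; a request adding locale
		 * 0x0002 is not, so the pending command is aborted and
		 * re-issued with
		 *     locale = 0x0001 | 0x0002 = 0x0003
		 *     class  = min(-1, new class) = -1 (the more inclusive)
		 */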
4412 if ((prev_aen.members.class <= curr_aen.members.class) &&
4413 !((prev_aen.members.locale & curr_aen.members.locale) ^
4414 curr_aen.members.locale)) {
4415 /*
4416 * Previously issued event registration includes
4417 * current request. Nothing to do.
4418 */
4419
4420 return (0);
4421 } else {
4422 curr_aen.members.locale |= prev_aen.members.locale;
4423
4424 if (prev_aen.members.class < curr_aen.members.class)
4425 curr_aen.members.class = prev_aen.members.class;
4426
4427 ret_val = abort_aen_cmd(instance, instance->aen_cmd);
4428
4429 if (ret_val) {
4430 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
4431 				    "failed to abort previous AEN command\n"));
4432
4433 return (ret_val);
4434 }
4435 }
4436 } else {
4437 curr_aen.word = class_locale_word;
4438 }
4439
4440 cmd = get_mfi_pkt(instance);
4441
4442 if (!cmd)
4443 return (-ENOMEM);
4444
4445 dcmd = &cmd->frame->dcmd;
4446
4447 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
4448 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4449
4450 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
4451 sizeof (struct megasas_evt_detail));
4452
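	/*
	 * MR_DCMD_CTRL_EVENT_WAIT is not expected to complete right away:
	 * the firmware holds the frame until an event at or beyond seq_num
	 * matching the class/locale occurs, then writes the event detail
	 * into mfi_evt_detail_obj and completes the command.
	 */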
4453 /* Prepare DCMD for aen registration */
4454 dcmd->cmd = MFI_CMD_OP_DCMD;
4455 dcmd->cmd_status = 0x0;
4456 dcmd->sge_count = 1;
4457 dcmd->flags = MFI_FRAME_DIR_READ;
4458 dcmd->timeout = 0;
4459 dcmd->data_xfer_len = sizeof (struct megasas_evt_detail);
4460 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
4461 dcmd->mbox.w[0] = seq_num;
4462 dcmd->mbox.w[1] = curr_aen.word;
4463 dcmd->sgl.sge32[0].phys_addr =
4464 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address;
4465 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_detail);
4466
4467 instance->aen_seq_num = seq_num;
4468
4469 /*
4470 * Store reference to the cmd used to register for AEN. When an
4471 * application wants us to register for AEN, we have to abort this
4472 * cmd and re-register with a new EVENT LOCALE supplied by that app
4473 */
4474 instance->aen_cmd = cmd;
4475
4476 cmd->frame_count = 1;
4477
4478 /* Issue the aen registration frame */
4479 /* atomic_add_16 (&instance->fw_outstanding, 1); */
4480 instance->func_ptr->issue_cmd(cmd, instance);
4481
4482 return (0);
4483 }
4484
4485 static void
4486 display_scsi_inquiry(caddr_t scsi_inq)
4487 {
4488 #define MAX_SCSI_DEVICE_CODE 14
4489 int i;
4490 char inquiry_buf[256] = {0};
4491 int len;
4492 const char *const scsi_device_types[] = {
4493 "Direct-Access ",
4494 "Sequential-Access",
4495 "Printer ",
4496 "Processor ",
4497 "WORM ",
4498 "CD-ROM ",
4499 "Scanner ",
4500 "Optical Device ",
4501 "Medium Changer ",
4502 "Communications ",
4503 "Unknown ",
4504 "Unknown ",
4505 "Unknown ",
4506 "Enclosure ",
4507 };
4508
4509 len = 0;
4510
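	/*
	 * Offsets follow the standard INQUIRY data layout: vendor id in
	 * bytes 8-15, product id in bytes 16-31, product revision in bytes
	 * 32-35, peripheral device type in bits 4:0 of byte 0, ANSI version
	 * in bits 2:0 of byte 2.
	 */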
4511 	len += snprintf(inquiry_buf + len, 256 - len, "  Vendor: ");
4512 for (i = 8; i < 16; i++) {
4513 		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4514 scsi_inq[i]);
4515 }
4516
4517 	len += snprintf(inquiry_buf + len, 256 - len, "  Model: ");
4518
4519 for (i = 16; i < 32; i++) {
4520 		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4521 scsi_inq[i]);
4522 }
4523
4524 	len += snprintf(inquiry_buf + len, 256 - len, "  Rev: ");
4525
4526 for (i = 32; i < 36; i++) {
4527 		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4528 scsi_inq[i]);
4529 }
4530
4531 	len += snprintf(inquiry_buf + len, 256 - len, "\n");
4532
4533
4534 i = scsi_inq[0] & 0x1f;
4535
4536
4537 	len += snprintf(inquiry_buf + len, 256 - len, "  Type:   %s ",
4538 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
4539 "Unknown ");
4540
4541
4542 	len += snprintf(inquiry_buf + len, 256 - len,
4543 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
4544
4545 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
4546 		len += snprintf(inquiry_buf + len, 256 - len, " CCS\n");
4547 } else {
4548 		len += snprintf(inquiry_buf + len, 256 - len, "\n");
4549 }
4550
4551 	con_log(CL_ANN1, (CE_CONT, "%s", inquiry_buf));
4552 }
4553
4554 static int
4555 read_fw_status_reg_xscale(struct megasas_instance *instance)
4556 {
4557 return ((int)RD_OB_MSG_0(instance));
4558 }
4559
4560 static int
4561 read_fw_status_reg_ppc(struct megasas_instance *instance)
4562 {
4563 return ((int)RD_OB_SCRATCH_PAD_0(instance));
4564 }
4565
4566 static void
4567 issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance)
4568 {
4569 atomic_add_16(&instance->fw_outstanding, 1);
4570
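	/*
	 * The xscale inbound queue port takes the frame address shifted
	 * right by three bits with (frame_count - 1) packed into the
	 * low-order bits, e.g. frame_phys_addr 0x1000 and frame_count 2
	 * are written as 0x201.
	 */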
4571 /* Issue the command to the FW */
4572 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4573 (cmd->frame_count - 1), instance);
4574 }
4575
4576 static void
4577 issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance)
4578 {
4579 atomic_add_16(&instance->fw_outstanding, 1);
4580
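	/*
	 * The ppc inbound queue port takes the frame address with bit 0 set
	 * and (frame_count - 1) encoded starting at bit 1, e.g.
	 * frame_phys_addr 0x1000 and frame_count 2 are written as 0x1003.
	 */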
4581 /* Issue the command to the FW */
4582 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4583 (((cmd->frame_count - 1) << 1) | 1), instance);
4584 }
4585
4586 /*
4587 * issue_cmd_in_sync_mode
4588 */
4589 static int
4590 issue_cmd_in_sync_mode_xscale(struct megasas_instance *instance,
4591 struct megasas_cmd *cmd)
4592 {
4593 int i;
4594 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
4595
4596 cmd->cmd_status = ENODATA;
4597
4598 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4599 (cmd->frame_count - 1), instance);
4600
4601 mutex_enter(&instance->int_cmd_mtx);
4602
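	/*
	 * The completion path is expected to update cmd_status and signal
	 * int_cmd_cv; note that the loop bounds the number of
	 * condition-variable wake-ups at msecs rather than measuring
	 * elapsed time.
	 */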
4603 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
4604 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
4605 }
4606
4607 mutex_exit(&instance->int_cmd_mtx);
4608
4609 	if (i < (msecs - 1)) {
4610 return (0);
4611 } else {
4612 return (1);
4613 }
4614 }
4615
4616 static int
4617 issue_cmd_in_sync_mode_ppc(struct megasas_instance *instance,
4618 struct megasas_cmd *cmd)
4619 {
4620 int i;
4621 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
4622
4623 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called\n"));
4624
4625 cmd->cmd_status = ENODATA;
4626
4627 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4628 (((cmd->frame_count - 1) << 1) | 1), instance);
4629
4630 mutex_enter(&instance->int_cmd_mtx);
4631
4632 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
4633 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
4634 }
4635
4636 mutex_exit(&instance->int_cmd_mtx);
4637
4638 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done\n"));
4639
4640 	if (i < (msecs - 1)) {
4641 return (0);
4642 } else {
4643 return (1);
4644 }
4645 }
4646
4647 /*
4648 * issue_cmd_in_poll_mode
4649 */
4650 static int
4651 issue_cmd_in_poll_mode_xscale(struct megasas_instance *instance,
4652 struct megasas_cmd *cmd)
4653 {
4654 int i;
4655 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4656 struct megasas_header *frame_hdr;
4657
4658 frame_hdr = (struct megasas_header *)cmd->frame;
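	/*
	 * MFI_CMD_STATUS_POLL_MODE marks the frame as not yet completed and
	 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE asks the firmware to complete
	 * it in place rather than through the reply queue, so the loop
	 * below can poll cmd_status directly.
	 */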
4659 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
4660 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4661
4662 /* issue the frame using inbound queue port */
4663 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4664 (cmd->frame_count - 1), instance);
4665
4666 /* wait for cmd_status to change from 0xFF */
4667 for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4668 MFI_CMD_STATUS_POLL_MODE); i++) {
4669 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4670 }
4671
4672 if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4673 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4674 "cmd polling timed out"));
4675 return (DDI_FAILURE);
4676 }
4677
4678 return (DDI_SUCCESS);
4679 }
4680
4681 static int
4682 issue_cmd_in_poll_mode_ppc(struct megasas_instance *instance,
4683 struct megasas_cmd *cmd)
4684 {
4685 int i;
4686 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4687 struct megasas_header *frame_hdr;
4688
4689 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called\n"));
4690
4691 frame_hdr = (struct megasas_header *)cmd->frame;
4692 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
4693 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4694
4695 /* issue the frame using inbound queue port */
4696 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4697 (((cmd->frame_count - 1) << 1) | 1), instance);
4698
4699 /* wait for cmd_status to change from 0xFF */
4700 for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4701 MFI_CMD_STATUS_POLL_MODE); i++) {
4702 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4703 }
4704
4705 if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4706 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4707 "cmd polling timed out"));
4708 return (DDI_FAILURE);
4709 }
4710
4711 return (DDI_SUCCESS);
4712 }
4713
4714 static void
4715 enable_intr_xscale(struct megasas_instance *instance)
4716 {
4717 MFI_ENABLE_INTR(instance);
4718 }
4719
4720 static void
4721 enable_intr_ppc(struct megasas_instance *instance)
4722 {
4723 uint32_t mask;
4724
4725 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called\n"));
4726
4727 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
4728 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
4729
4730 /*
4731 * As 1078DE is same as 1078 chip, the interrupt mask
4732 * remains the same.
4733 */
4734 /* WR_OB_INTR_MASK(~0x80000000, instance); */
4735 WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR), instance);
4736
4737 /* dummy read to force PCI flush */
4738 mask = RD_OB_INTR_MASK(instance);
4739
4740 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
4741 "outbound_intr_mask = 0x%x\n", mask));
4742 }
4743
4744 static void
4745 disable_intr_xscale(struct megasas_instance *instance)
4746 {
4747 MFI_DISABLE_INTR(instance);
4748 }
4749
4750 static void
4751 disable_intr_ppc(struct megasas_instance *instance)
4752 {
4753 uint32_t mask;
4754
4755 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called\n"));
4756
4757 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
4758 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4759
4760 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
4761 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
4762
4763 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
4764 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4765
4766 /* dummy read to force PCI flush */
4767 mask = RD_OB_INTR_MASK(instance);
4768 #ifdef lint
4769 mask = mask;
4770 #endif
4771 }
4772
4773 static int
4774 intr_ack_xscale(struct megasas_instance *instance)
4775 {
4776 uint32_t status;
4777
4778 /* check if it is our interrupt */
4779 status = RD_OB_INTR_STATUS(instance);
4780
4781 if (!(status & MFI_OB_INTR_STATUS_MASK)) {
4782 return (DDI_INTR_UNCLAIMED);
4783 }
4784
4785 /* clear the interrupt by writing back the same value */
4786 WR_OB_INTR_STATUS(status, instance);
4787
4788 return (DDI_INTR_CLAIMED);
4789 }
4790
4791 static int
4792 intr_ack_ppc(struct megasas_instance *instance)
4793 {
4794 uint32_t status;
4795
4796 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called\n"));
4797
4798 /* check if it is our interrupt */
4799 status = RD_OB_INTR_STATUS(instance);
4800
4801 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x\n", status));
4802
4803 /*
4804 * As 1078DE is same as 1078 chip, the status field
4805 * remains the same.
4806 */
4807 if (!(status & MFI_REPLY_1078_MESSAGE_INTR)) {
4808 return (DDI_INTR_UNCLAIMED);
4809 }
4810
4811 /* clear the interrupt by writing back the same value */
4812 WR_OB_DOORBELL_CLEAR(status, instance);
4813
4814 /* dummy READ */
4815 status = RD_OB_INTR_STATUS(instance);
4816
4817 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared\n"));
4818
4819 return (DDI_INTR_CLAIMED);
4820 }
4821
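/*
 * megasas_common_check
 * Post-command FMA check: verify every DMA and register access handle
 * the command may have touched; on error, report the service impact and,
 * if a SCSI packet is attached, fail it with CMD_TRAN_ERR.
 */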
4822 static int
4823 megasas_common_check(struct megasas_instance *instance,
4824 struct megasas_cmd *cmd)
4825 {
4826 int ret = DDI_SUCCESS;
4827
4828 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4829 DDI_SUCCESS) {
4830 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4831 if (cmd->pkt != NULL) {
4832 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4833 cmd->pkt->pkt_statistics = 0;
4834 }
4835 ret = DDI_FAILURE;
4836 }
4837 if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
4838 != DDI_SUCCESS) {
4839 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4840 if (cmd->pkt != NULL) {
4841 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4842 cmd->pkt->pkt_statistics = 0;
4843 }
4844 ret = DDI_FAILURE;
4845 }
4846 if (megasas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
4847 DDI_SUCCESS) {
4848 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4849 if (cmd->pkt != NULL) {
4850 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4851 cmd->pkt->pkt_statistics = 0;
4852 }
4853 ret = DDI_FAILURE;
4854 }
4855 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4856 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4857 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
4858 if (cmd->pkt != NULL) {
4859 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4860 cmd->pkt->pkt_statistics = 0;
4861 }
4862 ret = DDI_FAILURE;
4863 }
4864
4865 return (ret);
4866 }
4867
4868 /*ARGSUSED*/
4869 static int
4870 megasas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4871 {
4872 /*
4873 * as the driver can always deal with an error in any dma or
4874 * access handle, we can just return the fme_status value.
4875 */
4876 pci_ereport_post(dip, err, NULL);
4877 return (err->fme_status);
4878 }
4879
4880 static void
4881 megasas_fm_init(struct megasas_instance *instance)
4882 {
4883 /* Need to change iblock to priority for new MSI intr */
4884 ddi_iblock_cookie_t fm_ibc;
4885
4886 /* Only register with IO Fault Services if we have some capability */
4887 if (instance->fm_capabilities) {
4888 /* Adjust access and dma attributes for FMA */
4889 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4890 megasas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
4891
4892 /*
4893 * Register capabilities with IO Fault Services.
4894 * fm_capabilities will be updated to indicate
4895 * capabilities actually supported (not requested.)
4896 */
4897
4898 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
4899
4900 /*
4901 * Initialize pci ereport capabilities if ereport
4902 * capable (should always be.)
4903 */
4904
4905 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4906 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4907 pci_ereport_setup(instance->dip);
4908 }
4909
4910 /*
4911 * Register error callback if error callback capable.
4912 */
4913 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4914 ddi_fm_handler_register(instance->dip,
4915 megasas_fm_error_cb, (void*) instance);
4916 }
4917 } else {
4918 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4919 megasas_generic_dma_attr.dma_attr_flags = 0;
4920 }
4921 }
4922
4923 static void
4924 megasas_fm_fini(struct megasas_instance *instance)
4925 {
4926 /* Only unregister FMA capabilities if registered */
4927 if (instance->fm_capabilities) {
4928 /*
4929 * Un-register error callback if error callback capable.
4930 */
4931 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4932 ddi_fm_handler_unregister(instance->dip);
4933 }
4934
4935 /*
4936 * Release any resources allocated by pci_ereport_setup()
4937 */
4938 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4939 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4940 pci_ereport_teardown(instance->dip);
4941 }
4942
4943 /* Unregister from IO Fault Services */
4944 ddi_fm_fini(instance->dip);
4945
4946 /* Adjust access and dma attributes for FMA */
4947 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4948 megasas_generic_dma_attr.dma_attr_flags = 0;
4949 }
4950 }
4951
4952 int
4953 megasas_check_acc_handle(ddi_acc_handle_t handle)
4954 {
4955 ddi_fm_error_t de;
4956
4957 if (handle == NULL) {
4958 return (DDI_FAILURE);
4959 }
4960
4961 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4962
4963 return (de.fme_status);
4964 }
4965
4966 int
4967 megasas_check_dma_handle(ddi_dma_handle_t handle)
4968 {
4969 ddi_fm_error_t de;
4970
4971 if (handle == NULL) {
4972 return (DDI_FAILURE);
4973 }
4974
4975 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4976
4977 return (de.fme_status);
4978 }
4979
4980 void
4981 megasas_fm_ereport(struct megasas_instance *instance, char *detail)
4982 {
4983 uint64_t ena;
4984 char buf[FM_MAX_CLASS];
4985
4986 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4987 ena = fm_ena_generate(0, FM_ENA_FMT1);
4988 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
4989 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
4990 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
4991 }
4992 }
4993