/*
 * O.S        : Solaris
 * FILE NAME  : arcmsr.c
 * BY         : Erich Chen
 * Description: SCSI RAID Device Driver for
 *              ARECA RAID Host adapter
 *
 * Copyright (C) 2002,2007 Areca Technology Corporation All rights reserved.
 * Copyright (C) 2002,2007 Erich Chen
 *     Web site: www.areca.com.tw
 *       E-mail: erich@areca.com.tw
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The party using or redistributing the source code and binary forms
 *    agrees to the disclaimer below and the terms and conditions set forth
 *    herein.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/disp.h>
#include <sys/signal.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/policy.h>
#include <sys/atomic.h>

#include "arcmsr.h"

static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp);
static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_reset(struct scsi_address *ap, int level);
static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
    dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
    struct scsi_device *sd);
static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);

static int arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
    dev_info_t **ldip);
static uint_t arcmsr_interrupt(caddr_t arg);
static int arcmsr_initialize(struct ACB *acb);
static int arcmsr_dma_alloc(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
static int arcmsr_dma_move(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp);
static void arcmsr_pcidev_disattach(struct ACB *acb);
static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
static void arcmsr_iop_init(struct ACB *acb);
static void arcmsr_iop_parking(struct ACB *acb);
static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
static void arcmsr_flush_hba_cache(struct ACB *acb);
static void arcmsr_flush_hbb_cache(struct ACB *acb);
static void arcmsr_stop_hba_bgrb(struct ACB *acb);
static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
static void arcmsr_start_hba_bgrb(struct ACB *acb);
static void arcmsr_start_hbb_bgrb(struct ACB *acb);
static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_build_ccb(struct CCB *ccb);
static int arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
static dev_info_t *arcmsr_find_child(struct ACB *acb, uint16_t tgt,
    uint8_t lun);

static struct ACB *ArcMSRHBA[ARCMSR_MAX_ADAPTER];
static int arcmsr_hba_count;
static void *arcmsr_soft_state = NULL;
static kmutex_t arcmsr_global_mutex;

#define	MSR_MINOR	32
#define	INST2MSR(x)	(((x) << INST_MINOR_SHIFT) | MSR_MINOR)

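/*
 * Two DMA attribute sets are kept below: arcmsr_dma_attr describes what
 * the adapter accepts for data transfers (scatter/gather capable, bounded
 * by ARCMSR_MAX_XFER_LEN), while arcmsr_ccb_attr describes the single
 * physically contiguous slab used for the driver's CCB pool
 * (scatter/gather count of 1). The counter and segment limits reflect
 * the Intel IOP's restrictions as understood by this driver.
 */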
static ddi_dma_attr_t arcmsr_dma_attr = {
    DMA_ATTR_V0,		/* ddi_dma_attr version */
    0,				/* low DMA address range */
    0xffffffff,			/* high DMA address range */
    0x00ffffff,			/* DMA counter register upper bound */
    1,				/* DMA address alignment requirements */
    DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
    1,				/* minimum effective DMA size */
    ARCMSR_MAX_XFER_LEN,	/* maximum DMA xfer size */
    /*
     * The dma_attr_seg field supplies the limit of each Scatter/Gather
     * list element's "address+length". The Intel IOP331 cannot use
     * segments over the 4G boundary due to segment boundary restrictions.
     */
    0x00ffffff,
    ARCMSR_MAX_SG_ENTRIES,	/* scatter/gather list count */
    1,				/* device granularity */
    DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};

static ddi_dma_attr_t arcmsr_ccb_attr = {
    DMA_ATTR_V0,	/* ddi_dma_attr version */
    0,			/* low DMA address range */
    0xffffffff,		/* high DMA address range */
    0x00ffffff,		/* DMA counter register upper bound */
    1,			/* default byte alignment */
    DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
    1,			/* minimum effective DMA size */
    0xffffffff,		/* maximum DMA xfer size */
    0x00ffffff,		/* max segment size, segment boundary restrictions */
    1,			/* scatter/gather list count */
    1,			/* device granularity */
    DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};

static struct cb_ops arcmsr_cb_ops = {
    scsi_hba_open,	/* open(9E) */
    scsi_hba_close,	/* close(9E) */
    nodev,		/* strategy(9E), returns ENXIO */
    nodev,		/* print(9E) */
    nodev,		/* dump(9E) Cannot be used as a dump device */
    nodev,		/* read(9E) */
    nodev,		/* write(9E) */
    arcmsr_cb_ioctl,	/* ioctl(9E) */
    nodev,		/* devmap(9E) */
    nodev,		/* mmap(9E) */
    nodev,		/* segmap(9E) */
    NULL,		/* chpoll(9E) returns ENXIO */
    nodev,		/* prop_op(9E) */
    NULL,		/* streamtab(9S) */
#ifdef _LP64
    /*
     * cb_ops cb_flag:
     *	D_NEW | D_MP	compatibility flags, see conf.h
     *	D_MP		flag indicates that the driver is safe for
     *			multi-threaded operation
     *	D_64BIT		flag driver properly handles 64-bit offsets
     */
    D_HOTPLUG | D_MP | D_64BIT,
#else
    D_HOTPLUG | D_MP,
#endif
    CB_REV,
    nodev,		/* aread(9E) */
    nodev		/* awrite(9E) */
};

static struct dev_ops arcmsr_ops = {
    DEVO_REV,		/* devo_rev */
    0,			/* reference count */
    nodev,		/* getinfo */
    nulldev,		/* identify */
    nulldev,		/* probe */
    arcmsr_attach,	/* attach */
    arcmsr_detach,	/* detach */
    arcmsr_reset,	/* reset, shutdown, reboot notify */
    &arcmsr_cb_ops,	/* driver operations */
    NULL,		/* bus operations */
    nulldev		/* power */
};

char _depends_on[] = "misc/scsi";

static struct modldrv arcmsr_modldrv = {
    &mod_driverops,		/* Type of module. This is a driver. */
    ARCMSR_DRIVER_VERSION,	/* module description string, from arcmsr.h */
    &arcmsr_ops,		/* driver ops */
};

static struct modlinkage arcmsr_modlinkage = {
    MODREV_1,
    &arcmsr_modldrv,
    NULL
};

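/*
 * Loadable-module entry points. The order in _init() matters: the soft
 * state list and the SCSI HBA framework must be set up before
 * mod_install() makes the driver visible, and _fini() tears the same
 * state down only after mod_remove() confirms nothing holds the module.
 */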
int
_init(void) {
    int ret;

    mutex_init(&arcmsr_global_mutex, "arcmsr global mutex",
        MUTEX_DRIVER, NULL);
    ret = ddi_soft_state_init(&arcmsr_soft_state,
        sizeof (struct ACB), ARCMSR_MAX_ADAPTER);
    if (ret != 0) {
        /* undo the mutex_init() above on failure */
        mutex_destroy(&arcmsr_global_mutex);
        return (ret);
    }
    if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
        mutex_destroy(&arcmsr_global_mutex);
        ddi_soft_state_fini(&arcmsr_soft_state);
        return (ret);
    }

    if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
        mutex_destroy(&arcmsr_global_mutex);
        scsi_hba_fini(&arcmsr_modlinkage);
        if (arcmsr_soft_state != NULL) {
            ddi_soft_state_fini(&arcmsr_soft_state);
        }
    }
    return (ret);
}


int
_fini(void) {
    int ret;

    ret = mod_remove(&arcmsr_modlinkage);
    if (ret == 0) {
        /* a return value of 0 means the driver can be removed */
        mutex_destroy(&arcmsr_global_mutex);
        scsi_hba_fini(&arcmsr_modlinkage);
        if (arcmsr_soft_state != NULL) {
            ddi_soft_state_fini(&arcmsr_soft_state);
        }
    }
    return (ret);
}


int
_info(struct modinfo *modinfop) {
    return (mod_info(&arcmsr_modlinkage, modinfop));
}



#if defined(ARCMSR_DEBUG)
static void
arcmsr_dump_scsi_cdb(struct scsi_address *ap, struct scsi_pkt *pkt) {

    static char hex[] = "0123456789abcdef";
    struct ACB *acb =
        (struct ACB *)ap->a_hba_tran->tran_hba_private;
    struct CCB *ccb =
        (struct CCB *)pkt->pkt_ha_private;
    uint8_t *cdb = pkt->pkt_cdbp;
    char buf[256];
    char *p;
    int i;

    (void) sprintf(buf, "arcmsr%d: sgcount=%d <%d, %d> "
        "cdb ",
        ddi_get_instance(acb->dev_info), ccb->arcmsr_cdb.sgcount,
        ap->a_target, ap->a_lun);

    p = buf + strlen(buf);
    *p++ = '[';

    for (i = 0; i < ccb->arcmsr_cdb.CdbLength; i++, cdb++) {
        if (i != 0) {
            *p++ = ' ';
        }
        *p++ = hex[(*cdb >> 4) & 0x0f];
        *p++ = hex[*cdb & 0x0f];
    }
    *p++ = ']';
    *p++ = '.';
    *p = 0;
    /* pass buf as an argument, never as the format string itself */
    cmn_err(CE_CONT, "%s", buf);
}
#endif /* ARCMSR_DEBUG */

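/*
 * Periodic device-map poll: every 5 seconds this timer asks the IOP to
 * report its configuration (GET_CONFIG message) so that hotplugged or
 * removed volumes can be noticed. The reply is handled in the interrupt
 * path; this routine only issues the request and re-arms itself.
 */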
static void
arcmsr_devmap_req_timeout(void *arg) {

    struct ACB *acb = (struct ACB *)arg;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
    {
        struct HBA_msgUnit *phbamu;

        phbamu = (struct HBA_msgUnit *)acb->pmu;
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbamu->inbound_msgaddr0,
            ARCMSR_INBOUND_MESG0_GET_CONFIG);
    }
    break;
    case ACB_ADAPTER_TYPE_B:
    {
        struct HBB_msgUnit *phbbmu;

        phbbmu = (struct HBB_msgUnit *)acb->pmu;
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbbmu->hbb_doorbell->drv2iop_doorbell,
            ARCMSR_MESSAGE_GET_CONFIG);
    }
    break;
    }

    if ((acb->timeout_sc_id != 0) &&
        ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
        /*
         * re-arm the device-map poll for 5 secs from now; this timer
         * uses timeout_sc_id, not the CCB watchdog's timeout_id
         */
        acb->timeout_sc_id = timeout(arcmsr_devmap_req_timeout,
            (void *)acb, (5 * drv_usectohz(1000000)));
    }
}


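/*
 * CCB watchdog: every 60 seconds walk the CCB pool looking for commands
 * that have been started but whose per-packet timeout (pkt_time) has
 * expired. Expired started commands are completed with CMD_TIMEOUT and
 * the target is marked gone; CCBs past their deadline that were never
 * started simply have their ccb_time deadline pushed forward.
 */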
static void
arcmsr_ccbs_timeout(void *arg) {

    struct ACB *acb = (struct ACB *)arg;
    struct CCB *ccb;
    int i;
    time_t current_time = ddi_get_time();

    if (acb->ccboutstandingcount != 0) {
        /* check each ccb */
        i = ddi_dma_sync(acb->ccbs_pool_handle, 0,
            acb->dma_sync_size, DDI_DMA_SYNC_FORKERNEL);
        if (i != DDI_SUCCESS) {
            if ((acb->timeout_id != 0) &&
                ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
                /* do pkt timeout check each 60 secs */
                acb->timeout_id = timeout(arcmsr_ccbs_timeout,
                    (void *)acb,
                    (60 * drv_usectohz(1000000)));
            }
            return;
        }
        for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
            ccb = acb->pccb_pool[i];
            if (ccb->acb != acb) {
                break;
            }
            if (ccb->startdone == ARCMSR_CCB_DONE) {
                continue;
            }
            if (ccb->pkt == NULL) {
                continue;
            }
            if (ccb->pkt->pkt_time == 0) {
                continue;
            }
            if (ccb->ccb_time >= current_time) {
                continue;
            }
            if (ccb->startdone == ARCMSR_CCB_START) {
                int id = ccb->pkt->pkt_address.a_target;
                int lun = ccb->pkt->pkt_address.a_lun;

                /*
                 * handle the outstanding command of this
                 * timed-out ccb
                 */
                ccb->pkt->pkt_reason = CMD_TIMEOUT;
                ccb->pkt->pkt_statistics = STAT_TIMEOUT;

                cmn_err(CE_CONT,
                    "arcmsr%d: scsi target %d lun %d "
                    "outstanding command timeout",
                    ddi_get_instance(acb->dev_info),
                    id, lun);
                cmn_err(CE_CONT,
                    "arcmsr%d: scsi target %d lun %d "
                    "fatal error on target, device is gone",
                    ddi_get_instance(acb->dev_info),
                    id, lun);
                acb->devstate[id][lun] = ARECA_RAID_GONE;
                arcmsr_ccb_complete(ccb, 1);
                acb->timeout_count++;
                continue;
            }
            /* adjust ccb_time of pending ccb */
            ccb->ccb_time = (time_t)(ccb->pkt->pkt_time +
                current_time);
        }
    }
    if ((acb->timeout_id != 0) &&
        ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
        /* do pkt timeout check each 60 secs */
        acb->timeout_id = timeout(arcmsr_ccbs_timeout,
            (void *)acb, (60 * drv_usectohz(1000000)));
    }
}


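/*
 * Interrupt masking helpers: arcmsr_disable_allintr() masks every
 * outbound interrupt source and returns the previous mask so the caller
 * can hand it back to arcmsr_enable_allintr() afterwards. Note that the
 * two controller families invert the sense of the mask register: on
 * type A a set bit disables a source, on type B a set bit enables it.
 */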
static uint32_t
arcmsr_disable_allintr(struct ACB *acb) {

    uint32_t intmask_org;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct HBA_msgUnit *phbamu =
            (struct HBA_msgUnit *)acb->pmu;

        /* disable all outbound interrupts */
        intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
            &phbamu->outbound_intmask);
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbamu->outbound_intmask,
            intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
    }
    break;
    case ACB_ADAPTER_TYPE_B: {
        struct HBB_msgUnit *phbbmu =
            (struct HBB_msgUnit *)acb->pmu;

        /* save the current outbound interrupt mask */
        intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
            &phbbmu->hbb_doorbell->iop2drv_doorbell_mask);
        /* disable all interrupts */
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
    }
    break;
    }
    return (intmask_org);
}


static void
arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org) {

    int mask;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct HBA_msgUnit *phbamu =
            (struct HBA_msgUnit *)acb->pmu;

        /*
         * enable outbound Post Queue, outbound doorbell message0
         * Interrupt
         */
        mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
            ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
            ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbamu->outbound_intmask, intmask_org & mask);
        acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
    }
    break;
    case ACB_ADAPTER_TYPE_B: {
        struct HBB_msgUnit *phbbmu =
            (struct HBB_msgUnit *)acb->pmu;

        mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
            ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE |
            ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
        /* 1=interrupt enable, 0=interrupt disable */
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
            intmask_org | mask);
        acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
    }
    break;
    }
}


static void
arcmsr_iop_parking(struct ACB *acb) {

    if (acb != NULL) {
        /* stop adapter background rebuild */
        if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
            uint32_t intmask_org;

            acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
            /* disable all outbound interrupts */
            intmask_org = arcmsr_disable_allintr(acb);
            if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
                arcmsr_stop_hba_bgrb(acb);
                arcmsr_flush_hba_cache(acb);
            } else {
                arcmsr_stop_hbb_bgrb(acb);
                arcmsr_flush_hbb_cache(acb);
            }
            /*
             * enable outbound Post Queue
             * enable outbound doorbell Interrupt
             */
            arcmsr_enable_allintr(acb, intmask_org);
        }
    }
}



static int
arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd) {

    struct ACB *acb;
    scsi_hba_tran_t *scsi_hba_transport;

    scsi_hba_transport = (scsi_hba_tran_t *)
        ddi_get_driver_private(resetdev);

    if (!scsi_hba_transport)
        return (DDI_FAILURE);

    acb = (struct ACB *)
        scsi_hba_transport->tran_hba_private;

    if (!acb)
        return (DDI_FAILURE);

    if ((cmd == RESET_LUN) ||
        (cmd == RESET_BUS) ||
        (cmd == RESET_TARGET))
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: reset op (%d) not supported",
            ddi_get_instance(resetdev), cmd);

    arcmsr_pcidev_disattach(acb);

    return (DDI_SUCCESS);
}

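/*
 * The real work of attach(9E). The sequence is: allocate soft state,
 * sanity-check the slot and PCI IDs, map the hardware and disable its
 * interrupts (arcmsr_initialize), set up mutexes from the iblock
 * cookie, build and register the scsi_hba_tran_t, re-enable the IOP,
 * add the interrupt handler, and finally create the minor node, power
 * management properties, taskq and watchdog timers. The error_level_*
 * labels at the bottom unwind these steps in reverse order.
 */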
static int
arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance) {

    scsi_hba_tran_t *hba_trans;
    ddi_device_acc_attr_t dev_acc_attr;
    struct ACB *acb;
    static char buf[256];
    uint16_t wval;
    int raid6 = 1;
    char *type;

    /*
     * Soft State Structure
     * The driver should allocate the per-device-instance
     * soft state structure, being careful to clean up properly if
     * an error occurs.
     */
    if (ddi_soft_state_zalloc(arcmsr_soft_state, instance)
        != DDI_SUCCESS) {
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: ddi_soft_state_zalloc failed",
            instance);
        return (DDI_FAILURE);
    }

    acb = ddi_get_soft_state(arcmsr_soft_state, instance);
    if (acb == NULL) {
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: ddi_get_soft_state failed",
            instance);
        goto error_level_1;
    }

    /* acb is already zalloc()d so we don't need to bzero() it */
    dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
    dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
    dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;

    acb->dev_info = dev_info;
    acb->dev_acc_attr = dev_acc_attr;

    /*
     * The driver, if providing DMA, should also check that its hardware
     * is installed in a DMA-capable slot
     */
    if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: hardware is not installed in a "
            "DMA-capable slot",
            instance);
        goto error_level_0;
    }
    /* We do not support adapter drivers with high-level interrupts */
    if (ddi_intr_hilevel(dev_info, 0) != 0) {
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: high-level interrupt not supported",
            instance);
        goto error_level_0;
    }

    if (pci_config_setup(dev_info, &acb->pci_acc_handle)
        != DDI_SUCCESS) {
        arcmsr_log(NULL, CE_NOTE,
            "arcmsr%d: pci_config_setup() failed, attach failed",
            instance);
        return (DDI_PROBE_FAILURE);
    }

    wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
    if (wval != PCI_VENDOR_ID_ARECA) {
        arcmsr_log(NULL, CE_NOTE,
            "arcmsr%d: failing attach: vendor id (0x%04x) "
            "does not match 0x%04x (PCI_VENDOR_ID_ARECA)",
            instance, wval, PCI_VENDOR_ID_ARECA);
        return (DDI_PROBE_FAILURE);
    }

    wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
    switch (wval) {
    case PCI_DEVICE_ID_ARECA_1110:
    case PCI_DEVICE_ID_ARECA_1210:
    case PCI_DEVICE_ID_ARECA_1201:
        raid6 = 0;
        /*FALLTHRU*/
    case PCI_DEVICE_ID_ARECA_1120:
    case PCI_DEVICE_ID_ARECA_1130:
    case PCI_DEVICE_ID_ARECA_1160:
    case PCI_DEVICE_ID_ARECA_1170:
    case PCI_DEVICE_ID_ARECA_1220:
    case PCI_DEVICE_ID_ARECA_1230:
    case PCI_DEVICE_ID_ARECA_1260:
    case PCI_DEVICE_ID_ARECA_1270:
    case PCI_DEVICE_ID_ARECA_1280:
        type = "SATA";
        break;
    case PCI_DEVICE_ID_ARECA_1380:
    case PCI_DEVICE_ID_ARECA_1381:
    case PCI_DEVICE_ID_ARECA_1680:
    case PCI_DEVICE_ID_ARECA_1681:
        type = "SAS";
        break;
    default:
        type = "X-TYPE";
        break;
    }

    (void) sprintf(buf, "Areca %s Host Adapter RAID Controller%s",
        type, raid6 ? " (RAID6 capable)" : "");
    cmn_err(CE_CONT, "arcmsr%d: %s", instance, buf);
    cmn_err(CE_CONT, "arcmsr%d: %s", instance, ARCMSR_DRIVER_VERSION);


    /* we disable iop interrupts here */
    if (arcmsr_initialize(acb) == DDI_FAILURE) {
        arcmsr_log(NULL, CE_WARN, "arcmsr%d: arcmsr_initialize "
            "failed", instance);
        goto error_level_1;
    }

    /*
     * The driver must first obtain the iblock cookie to initialize
     * mutexes used in the driver handler. Only after those mutexes
     * have been initialized can the interrupt handler be added.
     */
    if (ddi_get_iblock_cookie(dev_info, 0, &acb->iblock_cookie)
        != DDI_SUCCESS) {
        arcmsr_log(NULL, CE_WARN, "arcmsr%d: "
            "ddi_get_iblock_cookie failed", instance);
        goto error_level_2;
    }
    mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER,
        (void *)acb->iblock_cookie);
    mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER,
        (void *)acb->iblock_cookie);
    mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER,
        (void *)acb->iblock_cookie);
    mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER,
        (void *)acb->iblock_cookie);

    /* Allocate a transport structure */
    hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
    if (hba_trans == NULL) {
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: scsi_hba_tran_alloc failed",
            instance);
        goto error_level_3;
    }
    acb->scsi_hba_transport = hba_trans;
    acb->dev_info = dev_info;
    /* init scsi host adapter transport entry */
    hba_trans->tran_hba_private = acb;
    hba_trans->tran_tgt_private = NULL;
    /*
     * If no per-target initialization is required, the HBA can leave
     * tran_tgt_init set to NULL.
     */
    hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
    hba_trans->tran_tgt_probe = scsi_hba_probe;
    hba_trans->tran_tgt_free = NULL;
    hba_trans->tran_start = arcmsr_tran_start;
    hba_trans->tran_abort = arcmsr_tran_abort;
    hba_trans->tran_reset = arcmsr_tran_reset;
    hba_trans->tran_getcap = arcmsr_tran_getcap;
    hba_trans->tran_setcap = arcmsr_tran_setcap;
    hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
    hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
    hba_trans->tran_dmafree = arcmsr_tran_dmafree;
    hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;

    hba_trans->tran_reset_notify = NULL;
    hba_trans->tran_get_bus_addr = NULL;
    hba_trans->tran_get_name = NULL;
    hba_trans->tran_quiesce = NULL;
    hba_trans->tran_unquiesce = NULL;
    hba_trans->tran_bus_reset = NULL;
    hba_trans->tran_bus_config = arcmsr_tran_bus_config;
    hba_trans->tran_add_eventcall = NULL;
    hba_trans->tran_get_eventcookie = NULL;
    hba_trans->tran_post_event = NULL;
    hba_trans->tran_remove_eventcall = NULL;

    /* iop init and enable interrupts here */
    mutex_enter(&arcmsr_global_mutex);
    arcmsr_iop_init(acb);
    mutex_exit(&arcmsr_global_mutex);

    /* Add the interrupt handler */
    if (ddi_add_intr(dev_info, 0, &acb->iblock_cookie, 0,
        arcmsr_interrupt, (caddr_t)acb) != DDI_SUCCESS) {
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: failed to add interrupt handler",
            instance);
        goto error_level_4;
    }
    /*
     * The driver should attach this instance of the device, and
     * perform error cleanup if necessary
     */
    if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
        hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: scsi_hba_attach_setup failed",
            instance);
        goto error_level_5;
    }

    if (ddi_create_minor_node(dev_info, "arcmsr",
        S_IFCHR, INST2MSR(instance), DDI_PSEUDO, 0) == DDI_FAILURE) {
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: ddi_create_minor_node failed", instance);
        goto error_level_6;
    }

    /* Initialize power management bookkeeping. */
    if (pm_create_components(dev_info, 1) == DDI_SUCCESS) {
        if (pm_idle_component(dev_info, 0) == DDI_FAILURE) {
            arcmsr_log(NULL, CE_WARN,
                "arcmsr%d: pm_idle_component failed",
                instance);
            goto error_level_8;
        }
        pm_set_normal_power(dev_info, 0, 1);
        /* acb->power_level = 1; */
    } else {
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: pm_create_components failed",
            instance);
        goto error_level_7;
    }

    /*
     * Since this driver manages devices with "remote" hardware,
     * i.e. the devices themselves have no "reg" property, the SUSPEND/
     * RESUME commands in detach/attach will not be called by the power
     * management framework unless we request it by creating a
     * "pm-hardware-state" property and setting it to value
     * "needs-suspend-resume".
     */
    if (ddi_prop_update_string(DDI_DEV_T_NONE, dev_info,
        "pm-hardware-state", "needs-suspend-resume")
        != DDI_PROP_SUCCESS) {
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: ddi_prop_update(\"pm-hardware-state\") failed",
            instance);
        goto error_level_8;
    }

    /* Create a taskq for dealing with dr events */
    if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
        TASKQ_DEFAULTPRI, 0)) == NULL) {
        cmn_err(CE_WARN, "ddi_taskq_create failed");
        goto error_level_8;
    }

    acb->timeout_count = 0;
    /* active ccbs "timeout" watchdog */
    acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
        (60 * drv_usectohz(1000000)));
    acb->timeout_sc_id = timeout(arcmsr_devmap_req_timeout, (caddr_t)acb,
        (5 * drv_usectohz(1000000)));

    /* report device info */
    ddi_report_dev(dev_info);
    ArcMSRHBA[arcmsr_hba_count] = acb;
    arcmsr_hba_count++;

    return (DDI_SUCCESS);

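/*
 * Error unwind: each label below undoes the step acquired just before
 * the corresponding goto, so execution falls through the labels in
 * reverse order of acquisition.
 */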
error_level_8:
    pm_destroy_components(dev_info);

error_level_7:
    /* Remove any previously allocated minor nodes */
    ddi_remove_minor_node(dev_info, NULL);

error_level_6:
    /*
     * undo scsi_hba_attach_setup(); hba_trans itself is freed exactly
     * once, at error_level_4 below, rather than a second time here
     */
    (void) scsi_hba_detach(dev_info);

error_level_5:
    ddi_remove_intr(dev_info, 0, (void *)acb->iblock_cookie);

error_level_4:
    scsi_hba_tran_free(hba_trans);

error_level_3:
    mutex_destroy(&acb->acb_mutex);
    mutex_destroy(&acb->postq_mutex);
    mutex_destroy(&acb->workingQ_mutex);
    mutex_destroy(&acb->ioctl_mutex);

error_level_2:
    ddi_dma_mem_free(&acb->ccbs_acc_handle);
    ddi_dma_free_handle(&acb->ccbs_pool_handle);

error_level_1:
    ddi_soft_state_free(arcmsr_soft_state, instance);

error_level_0:
    return (DDI_FAILURE);
}


/*
 * Function: arcmsr_attach(9E)
 * Description: Set up all device state and allocate data structures,
 *		mutexes, condition variables, etc. for device operation.
 *		Set mt_attr property for driver to indicate MT-safety.
 *		Add interrupts needed.
 * Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
 * Output: Return DDI_SUCCESS if device is ready,
 *	   else return DDI_FAILURE
 */
static int
arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd) {

    scsi_hba_tran_t *hba_trans;
    struct ACB *acb;

#if defined(ARCMSR_DEBUG)
    arcmsr_log(NULL, CE_NOTE,
        "arcmsr_attach called for device %p (instance %d)",
        (void *)dev_info, ddi_get_instance(dev_info));
#endif
    switch (cmd) {
    case DDI_ATTACH:
        return (arcmsr_do_ddi_attach(dev_info,
            ddi_get_instance(dev_info)));
    case DDI_RESUME:
    case DDI_PM_RESUME:
        /*
         * There is no hardware state to restore and no timeouts to
         * restart, since we didn't PM_SUSPEND with active commands or
         * active timeouts. We just need to unblock waiting threads
         * and restart I/O. The code for DDI_RESUME is almost
         * identical, except that it uses the suspend flag rather
         * than the pm_suspend flag.
         */
        hba_trans = (scsi_hba_tran_t *)ddi_get_driver_private(dev_info);
        if (!hba_trans) {
            return (DDI_FAILURE);
        }
        acb = (struct ACB *)
            hba_trans->tran_hba_private;
        mutex_enter(&acb->acb_mutex);
        arcmsr_iop_init(acb);

        /* restart ccbs "timeout" watchdog */
        acb->timeout_count = 0;
        acb->timeout_id = timeout(arcmsr_ccbs_timeout,
            (caddr_t)acb, (60 * drv_usectohz(1000000)));
        acb->timeout_sc_id = timeout(arcmsr_devmap_req_timeout,
            (caddr_t)acb, (5 * drv_usectohz(1000000)));
        mutex_exit(&acb->acb_mutex);
        return (DDI_SUCCESS);

    default:
        arcmsr_log(NULL, CE_WARN,
            "arcmsr%d: ddi attach cmd (%d) unsupported",
            ddi_get_instance(dev_info), cmd);
        return (DDI_FAILURE);
    }
}

/*
 * Function: arcmsr_detach(9E)
 * Description: Remove all device allocation and system resources, disable
 *		device interrupts.
 * Input: dev_info_t *dev_info
 *	  ddi_detach_cmd_t cmd
 * Output: Return DDI_SUCCESS if done,
 *	   else return DDI_FAILURE
 */
static int
arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd) {

    int instance;
    struct ACB *acb;

    instance = ddi_get_instance(dev_info);
    acb = (struct ACB *)ddi_get_soft_state(arcmsr_soft_state,
        instance);
    if (!acb) {
        return (DDI_FAILURE);
    }

    switch (cmd) {
    case DDI_DETACH:
        mutex_enter(&acb->acb_mutex);
        if (acb->timeout_id != 0) {
            mutex_exit(&acb->acb_mutex);
            (void) untimeout(acb->timeout_id);
            mutex_enter(&acb->acb_mutex);
            acb->timeout_id = 0;
        }
        if (acb->timeout_sc_id != 0) {
            mutex_exit(&acb->acb_mutex);
            (void) untimeout(acb->timeout_sc_id);
            mutex_enter(&acb->acb_mutex);
            acb->timeout_sc_id = 0;
        }
        arcmsr_pcidev_disattach(acb);
        /* Remove interrupt set up by ddi_add_intr */
        ddi_remove_intr(dev_info, 0, acb->iblock_cookie);
        /* unbind mapping object to handle */
        (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
        /* Free ccb pool memory */
        ddi_dma_mem_free(&acb->ccbs_acc_handle);
        /* Free DMA handle */
        ddi_dma_free_handle(&acb->ccbs_pool_handle);
        ddi_regs_map_free(&acb->reg_mu_acc_handle0);
        if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
            arcmsr_log(NULL, CE_WARN,
                "arcmsr%d: Unable to detach instance cleanly "
                "(should not happen)",
                ddi_get_instance(dev_info));
        /* free scsi_hba_transport from scsi_hba_tran_alloc */
        scsi_hba_tran_free(acb->scsi_hba_transport);
        ddi_remove_minor_node(dev_info, NULL);
        ddi_taskq_destroy(acb->taskq);
        ddi_prop_remove_all(dev_info);
        mutex_exit(&acb->acb_mutex);
        mutex_destroy(&acb->acb_mutex);
        mutex_destroy(&acb->postq_mutex);
        mutex_destroy(&acb->workingQ_mutex);
        mutex_destroy(&acb->ioctl_mutex);
        pci_config_teardown(&acb->pci_acc_handle);
        ddi_set_driver_private(dev_info, NULL);
        ddi_soft_state_free(arcmsr_soft_state, instance);
        pm_destroy_components(dev_info);
        return (DDI_SUCCESS);
    case DDI_SUSPEND:
    case DDI_PM_SUSPEND:
        mutex_enter(&acb->acb_mutex);
        if (acb->timeout_id != 0) {
            acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
            mutex_exit(&acb->acb_mutex);
            (void) untimeout(acb->timeout_id);
            mutex_enter(&acb->acb_mutex);
            acb->timeout_id = 0;
        }

        if (acb->timeout_sc_id != 0) {
            acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
            mutex_exit(&acb->acb_mutex);
            (void) untimeout(acb->timeout_sc_id);
            mutex_enter(&acb->acb_mutex);
            acb->timeout_sc_id = 0;
        }

        /* disable all outbound interrupts */
        (void) arcmsr_disable_allintr(acb);
        /* stop adapter background rebuild */
        if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
            arcmsr_stop_hba_bgrb(acb);
            arcmsr_flush_hba_cache(acb);
        } else {
            arcmsr_stop_hbb_bgrb(acb);
            arcmsr_flush_hbb_cache(acb);
        }
        mutex_exit(&acb->acb_mutex);
        return (DDI_SUCCESS);
    default:
        return (DDI_FAILURE);
    }
}



/*
 * Function: arcmsr_tran_tgt_init
 * Description: Called when initializing a target device instance. If
 *		no per-target initialization is required, the HBA
 *		may leave tran_tgt_init set to NULL
 * Input:
 *		dev_info_t *host_dev_info,
 *		dev_info_t *target_dev_info,
 *		scsi_hba_tran_t *tran,
 *		struct scsi_device *sd
 *
 * Return: DDI_SUCCESS if success, else return DDI_FAILURE
 *
 * This entry point enables the HBA to allocate and/or initialize any
 * per-target resources.
 * It also enables the HBA to qualify the device's address as valid and
 * supportable for that particular HBA.
 * By returning DDI_FAILURE, the instance of the target driver for that
 * device will not be probed or attached.
 * This entry point is not required, and if none is supplied,
 * the framework will attempt to probe and attach all possible instances
 * of the appropriate target drivers.
 */
static int
arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
    scsi_hba_tran_t *hosttran, struct scsi_device *sd) {
#ifndef __lock_lint
    _NOTE(ARGUNUSED(hosttran))
#endif
    uint16_t target;
    uint8_t lun;
    struct ACB *acb = (struct ACB *)sd->sd_address.a_hba_tran->
        tran_hba_private;

    target = sd->sd_address.a_target;
    lun = sd->sd_address.a_lun;
    if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
        cmn_err(CE_WARN,
            "arcmsr%d: (target %d, lun %d) exceeds "
            "maximum supported values (%d, %d)",
            ddi_get_instance(host_dev_info),
            target, lun, ARCMSR_MAX_TARGETID, ARCMSR_MAX_TARGETLUN);
        return (DDI_FAILURE);
    }

    if (ndi_dev_is_persistent_node(target_dev_info) == 0) {
        /*
         * If no persistent node exists, we don't allow a .conf node
         * to be created.
         */
        if (arcmsr_find_child(acb, target, lun) != NULL) {
            if ((ndi_merge_node(target_dev_info,
                arcmsr_name_node) != DDI_SUCCESS)) {
                return (DDI_SUCCESS);
            }
        }
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}

/*
 * Function: arcmsr_tran_getcap(9E)
 * Description: Get the capability named, and return its value.
 * Return Values: current value of capability, if defined
 *		  -1 if capability is not defined
 * ------------------------------------------------------
 * Common Capability Strings Array
 * ------------------------------------------------------
 *	#define	SCSI_CAP_DMA_MAX		0
 *	#define	SCSI_CAP_MSG_OUT		1
 *	#define	SCSI_CAP_DISCONNECT		2
 *	#define	SCSI_CAP_SYNCHRONOUS		3
 *	#define	SCSI_CAP_WIDE_XFER		4
 *	#define	SCSI_CAP_PARITY			5
 *	#define	SCSI_CAP_INITIATOR_ID		6
 *	#define	SCSI_CAP_UNTAGGED_QING		7
 *	#define	SCSI_CAP_TAGGED_QING		8
 *	#define	SCSI_CAP_ARQ			9
 *	#define	SCSI_CAP_LINKED_CMDS		10 a
 *	#define	SCSI_CAP_SECTOR_SIZE		11 b
 *	#define	SCSI_CAP_TOTAL_SECTORS		12 c
 *	#define	SCSI_CAP_GEOMETRY		13 d
 *	#define	SCSI_CAP_RESET_NOTIFICATION	14 e
 *	#define	SCSI_CAP_QFULL_RETRIES		15 f
 *	#define	SCSI_CAP_QFULL_RETRY_INTERVAL	16 10
 *	#define	SCSI_CAP_SCSI_VERSION		17 11
 *	#define	SCSI_CAP_INTERCONNECT_TYPE	18 12
 *	#define	SCSI_CAP_LUN_RESET		19 13
 */
static int
arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom) {

    int capability = 0;
    struct ACB *acb =
        (struct ACB *)ap->a_hba_tran->tran_hba_private;

    if (cap == NULL || whom == 0) {
        return (-1);
    }

    mutex_enter(&arcmsr_global_mutex);
    switch (scsi_hba_lookup_capstr(cap)) {
    case SCSI_CAP_MSG_OUT:
    case SCSI_CAP_DISCONNECT:
    case SCSI_CAP_SYNCHRONOUS:
    case SCSI_CAP_WIDE_XFER:
    case SCSI_CAP_TAGGED_QING:
    case SCSI_CAP_UNTAGGED_QING:
    case SCSI_CAP_PARITY:
    case SCSI_CAP_ARQ:
        capability = acb->tgt_scsi_opts[ap->a_target];
        break;
    case SCSI_CAP_SECTOR_SIZE:
        capability = ARCMSR_DEV_SECTOR_SIZE;
        break;
    case SCSI_CAP_DMA_MAX:
        /* Limit to 16MB max transfer */
        capability = ARCMSR_MAX_XFER_LEN;
        break;
    case SCSI_CAP_INITIATOR_ID:
        capability = ARCMSR_SCSI_INITIATOR_ID;
        break;
    case SCSI_CAP_GEOMETRY:
        /* (heads << 16) | sectors per track: 255 heads, 63 sectors */
        capability = (255 << 16) | 63;
        break;
    default:
        capability = -1;
        break;
    }
    mutex_exit(&arcmsr_global_mutex);
    return (capability);
}

/*
 * Function: arcmsr_tran_setcap(9E)
 * Description: Set the specific capability.
 * Return Values: 1 - capability exists and can be set to new value
 *		  0 - capability could not be set to new value
 *		 -1 - no such capability
 */
static int
arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom) {
#ifndef __lock_lint
    _NOTE(ARGUNUSED(value))
#endif

    int supported = 0;
    struct ACB *acb =
        (struct ACB *)ap->a_hba_tran->tran_hba_private;

    if (cap == NULL || whom == 0) {
        return (-1);
    }

    mutex_enter(&arcmsr_global_mutex);
    switch (supported = scsi_hba_lookup_capstr(cap)) {
    case SCSI_CAP_DISCONNECT:		/* 2 */
    case SCSI_CAP_SYNCHRONOUS:		/* 3 */
    case SCSI_CAP_TAGGED_QING:		/* 8 */
    case SCSI_CAP_WIDE_XFER:		/* 4 */
    case SCSI_CAP_ARQ:			/* 9 auto request sense */
    case SCSI_CAP_TOTAL_SECTORS:	/* c */
        acb->tgt_scsi_opts[ap->a_target] |= supported;
        supported = 1;
        break;
    case SCSI_CAP_UNTAGGED_QING:	/* 7 */
    case SCSI_CAP_INITIATOR_ID:		/* 6 */
    case SCSI_CAP_DMA_MAX:		/* 0 */
    case SCSI_CAP_MSG_OUT:		/* 1 */
    case SCSI_CAP_PARITY:		/* 5 */
    case SCSI_CAP_LINKED_CMDS:		/* a */
    case SCSI_CAP_RESET_NOTIFICATION:	/* e */
    case SCSI_CAP_SECTOR_SIZE:		/* b */
        supported = 0;
        break;
    default:
        supported = -1;
        break;
    }
    mutex_exit(&arcmsr_global_mutex);
    return (supported);
}



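/*
 * CCB pool bookkeeping: free CCBs live on a fixed-size ring
 * (acb->ccbworkingQ). arcmsr_free_ccb() pushes a completed CCB back at
 * the done index; arcmsr_get_freeccb() (defined elsewhere in this file)
 * pops from the corresponding get index. Both indices wrap modulo
 * ARCMSR_MAX_FREECCB_NUM and are protected by workingQ_mutex.
 */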
static void
arcmsr_free_ccb(struct CCB *ccb) {

    struct ACB *acb = ccb->acb;

    ccb->startdone = ARCMSR_CCB_DONE;
    ccb->pkt = NULL;
    ccb->ccb_flags = 0;
    mutex_enter(&acb->workingQ_mutex);
    acb->ccbworkingQ[acb->workingccb_doneindex] = ccb;
    acb->workingccb_doneindex++;
    acb->workingccb_doneindex %= ARCMSR_MAX_FREECCB_NUM;
    mutex_exit(&acb->workingQ_mutex);
}

/*
 * Function: arcmsr_tran_init_pkt
 * Return Values: pointer to scsi_pkt, or NULL
 * Description: simultaneously allocate both a scsi_pkt(9S) structure and
 *		DMA resources for that pkt.
 *		Called by kernel on behalf of a target driver
 *		calling scsi_init_pkt(9F).
 *		Refer to tran_init_pkt(9E) man page
 * Context: Can be called from different kernel process threads.
 *	    Can be called by interrupt thread.
 * Allocates SCSI packet and DMA resources
 */
static struct scsi_pkt *
arcmsr_tran_init_pkt(struct scsi_address *ap,
    register struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg) {

    struct CCB *ccb;
    struct ARCMSR_CDB *arcmsr_cdb;
    struct ACB *acb;
    int old_pkt_flag = 1;

    acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

    if (pkt == NULL) {
        /* get free CCB */
        ccb = arcmsr_get_freeccb(acb);
        if (ccb == (struct CCB *)NULL) {
            return (NULL);
        }

        if (ccb->pkt != NULL) {
            /*
             * If kmem_flags are turned on, expect to
             * see a message
             */
            cmn_err(CE_WARN, "arcmsr%d: invalid pkt",
                ddi_get_instance(acb->dev_info));
            return (NULL);
        }
        pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
            statuslen, tgtlen, sizeof (struct scsi_pkt),
            callback, arg);
        if (pkt == NULL) {
            cmn_err(CE_WARN,
                "arcmsr%d: scsi pkt allocation failed",
                ddi_get_instance(acb->dev_info));
            arcmsr_free_ccb(ccb);
            return (NULL);
        }
        /* Initialize CCB */
        ccb->pkt = pkt;
        ccb->pkt_dma_handle = NULL;
        /* record how many sg are needed to xfer on this pkt */
        ccb->pkt_ncookies = 0;
        /* record how many sg we got from this window */
        ccb->pkt_cookie = 0;
        /* record how many windows have partial dma map set */
        ccb->pkt_nwin = 0;
        /* record current sg window position */
        ccb->pkt_curwin = 0;
        ccb->pkt_dma_len = 0;
        ccb->pkt_dma_offset = 0;
        ccb->resid_dmacookie.dmac_size = 0;

        /*
         * keep a pointer to the buf; tran_start still needs it to
         * fake up some of the packet information
         */
        ccb->bp = bp;

        /* Initialize arcmsr_cdb */
        arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
        bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
        arcmsr_cdb->Bus = 0;
        arcmsr_cdb->Function = 1;
        arcmsr_cdb->LUN = ap->a_lun;
        arcmsr_cdb->TargetID = ap->a_target;
        arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
        arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;

        /* Fill in the rest of the structure */
        pkt->pkt_ha_private = ccb;
        pkt->pkt_address = *ap;
        pkt->pkt_comp = (void (*)())NULL;
        pkt->pkt_flags = 0;
        pkt->pkt_time = 0;
        pkt->pkt_resid = 0;
        pkt->pkt_statistics = 0;
        pkt->pkt_reason = 0;
        old_pkt_flag = 0;
    } else {
        ccb = (struct CCB *)pkt->pkt_ha_private;
        /*
         * you cannot update CdbLength with cmdlen here, it would
         * cause a data compare error
         */
        ccb->startdone = ARCMSR_CCB_UNBUILD;
    }

    /* Second step: dma allocation/move */
    if (bp && bp->b_bcount != 0) {
        /*
         * The system may ask us to transfer a large amount of data in
         * chunks, anywhere from roughly 20 bytes up to 819200 bytes.
         * arcmsr_dma_alloc() keeps pkt_dma_handle (non-NULL) until the
         * whole transfer is done; the transfer is carried out by a
         * series of continuing READ or WRITE scsi commands until the
         * full data length has been moved. arcmsr_dma_move() repeats
         * the action, reusing the same ccb, until the transfer
         * completes. After arcmsr_tran_init_pkt() returns, the kernel
         * uses pkt_resid and b_bcount to decide which type of scsi
         * command descriptor to use for the data length of the
         * following arcmsr_tran_start() scsi cdb.
         *
         * Each transfer should be aligned on a 512 byte boundary.
         */
        if (ccb->pkt_dma_handle == NULL) {
            if (arcmsr_dma_alloc(acb, pkt, bp, flags,
                callback) == DDI_FAILURE) {
                /*
                 * the HBA driver is unable to allocate DMA
                 * resources, it must free the allocated
                 * scsi_pkt(9S) before returning
                 */
                cmn_err(CE_WARN, "arcmsr%d: dma allocation "
                    "failure",
                    ddi_get_instance(acb->dev_info));
                if (old_pkt_flag == 0) {
                    cmn_err(CE_WARN, "arcmsr%d: dma "
                        "allocation failed; freeing scsi "
                        "hba pkt",
                        ddi_get_instance(acb->dev_info));
                    arcmsr_free_ccb(ccb);
                    scsi_hba_pkt_free(ap, pkt);
                }
                return ((struct scsi_pkt *)NULL);
            }
        } else {
            /* DMA resources to next DMA window, for old pkt */
            if (arcmsr_dma_move(acb, pkt, bp) == -1) {
                cmn_err(CE_WARN, "arcmsr%d: dma move "
                    "failed",
                    ddi_get_instance(acb->dev_info));
                return ((struct scsi_pkt *)NULL);
            }
        }
    } else {
        pkt->pkt_resid = 0;
    }
    return (pkt);
}

/*
 * Function name: arcmsr_dma_alloc
 * Return Values: 0 if successful, -1 if failure
 * Description: allocate DMA resources
 * Context: Can only be called from arcmsr_tran_init_pkt()
 *	register struct scsi_address *ap = &((pkt)->pkt_address);
 */
static int
arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)()) {

    struct CCB *ccb = pkt->pkt_ha_private;
    int alloc_result, map_method, dma_flags;
    int resid = 0;
    int total_ccb_xferlen = 0;
    int (*cb)(caddr_t);
    uint8_t i;

    /*
     * at this point the PKT SCSI CDB is empty, and dma xfer length
     * is bp->b_bcount
     */

    if (bp->b_flags & B_READ) {
        ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
        dma_flags = DDI_DMA_READ;
    } else {
        ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
        dma_flags = DDI_DMA_WRITE;
    }

    if (flags & PKT_CONSISTENT) {
        ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
        dma_flags |= DDI_DMA_CONSISTENT;
    }
    if (flags & PKT_DMA_PARTIAL) {
        dma_flags |= DDI_DMA_PARTIAL;
    }

    dma_flags |= DDI_DMA_REDZONE;
    cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

    if ((alloc_result = ddi_dma_alloc_handle(acb->dev_info,
        &arcmsr_dma_attr, cb, 0, &ccb->pkt_dma_handle))
        != DDI_SUCCESS) {
        switch (alloc_result) {
        case DDI_DMA_BADATTR:
            /*
             * If the system does not support physical DMA,
             * the return value from ddi_dma_alloc_handle
             * will be DDI_DMA_BADATTR
             */
            cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
                "'bad attribute'",
                ddi_get_instance(acb->dev_info));
            bioerror(bp, EFAULT);
            return (DDI_FAILURE);
        case DDI_DMA_NORESOURCES:
            cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
                "'no resources'",
                ddi_get_instance(acb->dev_info));
            bioerror(bp, 0);
            return (DDI_FAILURE);
        default:
            cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
                "'unknown failure'",
                ddi_get_instance(acb->dev_info));
            return (DDI_FAILURE);
        }
    }

    map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle, bp,
        dma_flags, cb, 0,
        &ccb->pkt_dmacookies[0],	/* SG List pointer */
        &ccb->pkt_ncookies);		/* number of sgl cookies */

    switch (map_method) {
    case DDI_DMA_PARTIAL_MAP:
        /*
         * When main memory is larger than 4 GB, the buffer may not
         * fit a single window and DDI_DMA_PARTIAL_MAP is returned.
         *
         * We've already set DDI_DMA_PARTIAL in dma_flags,
         * so if it's now missing, there's something screwy
         * happening. We plow on....
         */
        if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
            cmn_err(CE_WARN, "arcmsr%d: dma partial mapping lost "
                "...impossible case!",
                ddi_get_instance(acb->dev_info));
        }
        if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
            DDI_FAILURE) {
            cmn_err(CE_WARN, "arcmsr%d: ddi_dma_numwin() failed",
                ddi_get_instance(acb->dev_info));
        }

        if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
            &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
            &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
            DDI_FAILURE) {
            cmn_err(CE_WARN, "arcmsr%d: ddi_dma_getwin failed",
                ddi_get_instance(acb->dev_info));
        }

        i = 0;
        /* first cookie is accessed from ccb->pkt_dmacookies[0] */
        total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
        for (;;) {
            i++;
            if (i == ARCMSR_MAX_SG_ENTRIES ||
                i == ccb->pkt_ncookies ||
                total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
                break;
            }
            /*
             * next cookie will be retrieved from
             * ccb->pkt_dmacookies[i]
             */
            ddi_dma_nextcookie(ccb->pkt_dma_handle,
                &ccb->pkt_dmacookies[i]);
            total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
        }
        ccb->pkt_cookie = i;
        ccb->arcmsr_cdb.sgcount = i;
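        /*
         * Trim the mapped length down to a multiple of 512 bytes (the
         * device sector size): any leftover tail is parked in
         * resid_dmacookie so that the next call into arcmsr_dma_move()
         * can resume the transfer exactly where this one stopped. The
         * same trimming appears again below for the DDI_DMA_MAPPED
         * case and in arcmsr_dma_move().
         */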
        if (total_ccb_xferlen > 512) {
            resid = total_ccb_xferlen % 512;
            if (resid != 0) {
                i--;
                total_ccb_xferlen -= resid;
                /* modify last sg length */
                ccb->pkt_dmacookies[i].dmac_size =
                    ccb->pkt_dmacookies[i].dmac_size - resid;
                ccb->resid_dmacookie.dmac_size = resid;
                ccb->resid_dmacookie.dmac_laddress =
                    ccb->pkt_dmacookies[i].dmac_laddress +
                    ccb->pkt_dmacookies[i].dmac_size;
            }
        }
        ccb->total_dmac_size = total_ccb_xferlen;
        ccb->ccb_flags |= CCB_FLAG_DMAVALID;
        pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;

        return (DDI_SUCCESS);

    case DDI_DMA_MAPPED:
        ccb->pkt_nwin = 1;	/* all mapped, so only one window */
        ccb->pkt_dma_len = 0;
        ccb->pkt_dma_offset = 0;
        i = 0;
        /* first cookie is accessed from ccb->pkt_dmacookies[0] */
        total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
        for (;;) {
            i++;
            if (i == ARCMSR_MAX_SG_ENTRIES ||
                i == ccb->pkt_ncookies ||
                total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
                break;
            }
            /*
             * next cookie will be retrieved from
             * ccb->pkt_dmacookies[i]
             */
            ddi_dma_nextcookie(ccb->pkt_dma_handle,
                &ccb->pkt_dmacookies[i]);
            total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
        }
        ccb->pkt_cookie = i;
        ccb->arcmsr_cdb.sgcount = i;
        /* trim to a 512-byte multiple, as in the partial-map case */
        if (total_ccb_xferlen > 512) {
            resid = total_ccb_xferlen % 512;
            if (resid != 0) {
                i--;
                total_ccb_xferlen -= resid;
                /* modify last sg length */
                ccb->pkt_dmacookies[i].dmac_size =
                    ccb->pkt_dmacookies[i].dmac_size - resid;
                ccb->resid_dmacookie.dmac_size = resid;
                ccb->resid_dmacookie.dmac_laddress =
                    ccb->pkt_dmacookies[i].dmac_laddress +
                    ccb->pkt_dmacookies[i].dmac_size;
            }
        }
        ccb->total_dmac_size = total_ccb_xferlen;
        ccb->ccb_flags |= CCB_FLAG_DMAVALID;
        pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
        return (DDI_SUCCESS);

    case DDI_DMA_NORESOURCES:
        cmn_err(CE_WARN, "arcmsr%d: dma map got 'no resources'",
            ddi_get_instance(acb->dev_info));
        bioerror(bp, ENOMEM);
        break;

    case DDI_DMA_NOMAPPING:
        cmn_err(CE_WARN, "arcmsr%d: dma map got 'no mapping'",
            ddi_get_instance(acb->dev_info));
        bioerror(bp, EFAULT);
        break;

    case DDI_DMA_TOOBIG:
        cmn_err(CE_WARN, "arcmsr%d: dma map got 'too big'",
            ddi_get_instance(acb->dev_info));
        bioerror(bp, EINVAL);
        break;

    case DDI_DMA_INUSE:
        cmn_err(CE_WARN, "arcmsr%d: dma map got 'in use' "
            "(should not happen)",
            ddi_get_instance(acb->dev_info));
        break;
    default:
        /* report the bind result, not the loop counter */
        cmn_err(CE_WARN,
            "arcmsr%d: dma map got 'unknown failure 0x%x' "
            "(should not happen)",
            ddi_get_instance(acb->dev_info), map_method);
#ifdef ARCMSR_DEBUG
        arcmsr_dump_scsi_cdb(&pkt->pkt_address, pkt);
#endif
        break;
    }

    ddi_dma_free_handle(&ccb->pkt_dma_handle);
    ccb->pkt_dma_handle = NULL;
    ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
    return (DDI_FAILURE);
}


/*
 * Function name: arcmsr_dma_move
 * Return Values: 0 if successful, -1 if failure
 * Description: move DMA resources to next DMA window
 * Context: Can only be called from arcmsr_tran_init_pkt()
 */
static int
arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt,
    struct buf *bp) {

    struct CCB *ccb = pkt->pkt_ha_private;
    uint8_t i = 0;
    int resid = 0;
    int total_ccb_xferlen = 0;

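    /*
     * If the previous chunk was trimmed to a 512-byte multiple, its
     * leftover tail was stashed in resid_dmacookie; replay it as the
     * first scatter/gather element of this chunk before pulling any
     * new cookies.
     */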
    if (ccb->resid_dmacookie.dmac_size != 0) {
        total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
        ccb->pkt_dmacookies[i].dmac_size =
            ccb->resid_dmacookie.dmac_size;
        ccb->pkt_dmacookies[i].dmac_laddress =
            ccb->resid_dmacookie.dmac_laddress;
        i++;
        ccb->resid_dmacookie.dmac_size = 0;
    }
    /*
     * If there are no more cookies remaining in this window,
     * move to the next window.
     */
    if (ccb->pkt_cookie == ccb->pkt_ncookies) {
        /*
         * only the "partial" dma map case arrives here
         */
        if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
            (ccb->pkt_nwin == 1)) {
            cmn_err(CE_CONT,
                "arcmsr%d: dma partial set, but only "
                "one window allocated",
                ddi_get_instance(acb->dev_info));
            return (DDI_SUCCESS);
        }

        /* At last window, cannot move */
        if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
            cmn_err(CE_WARN,
                "arcmsr%d: dma partial set, numwin exceeded",
                ddi_get_instance(acb->dev_info));
            return (DDI_FAILURE);
        }
        if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
            &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
            &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
            DDI_FAILURE) {
            cmn_err(CE_WARN,
                "arcmsr%d: dma partial set, "
                "ddi_dma_getwin failure",
                ddi_get_instance(acb->dev_info));
            return (DDI_FAILURE);
        }
        /* reset cookie pointer */
        ccb->pkt_cookie = 0;
    } else {
        /*
         * only the "all mapped" dma case arrives here.
         * We still have more cookies in this window; get the next
         * one from pkt_dma_handle and record it in the
         * ccb->pkt_dmacookies array.
         */
        ddi_dma_nextcookie(ccb->pkt_dma_handle,
            &ccb->pkt_dmacookies[i]);
    }

    /* Get remaining cookies in this window, up to our maximum */
    total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;

    /* retrieve and store cookies, starting at ccb->pkt_dmacookies[0] */
    for (;;) {
        i++;
        /* handled cookies count level indicator */
        ccb->pkt_cookie++;
        if (i == ARCMSR_MAX_SG_ENTRIES ||
            ccb->pkt_cookie == ccb->pkt_ncookies ||
            total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
            break;
        }
        ddi_dma_nextcookie(ccb->pkt_dma_handle,
            &ccb->pkt_dmacookies[i]);
        total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
    }

    ccb->arcmsr_cdb.sgcount = i;
    if (total_ccb_xferlen > 512) {
        resid = total_ccb_xferlen % 512;
        if (resid != 0) {
            i--;
            total_ccb_xferlen -= resid;
            /* modify last sg length */
            ccb->pkt_dmacookies[i].dmac_size =
                ccb->pkt_dmacookies[i].dmac_size - resid;
            ccb->resid_dmacookie.dmac_size = resid;
            ccb->resid_dmacookie.dmac_laddress =
                ccb->pkt_dmacookies[i].dmac_laddress +
                ccb->pkt_dmacookies[i].dmac_size;
        }
    }
    ccb->total_dmac_size += total_ccb_xferlen;
    pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;

    return (DDI_SUCCESS);
}

/*
 * Function name: arcmsr_tran_destroy_pkt
 * Return Values: none
 * Description: Called by kernel on behalf of a target driver
 *		calling scsi_destroy_pkt(9F).
 *		Refer to tran_destroy_pkt(9E) man page
 * Context: Can be called from different kernel process threads.
 *	    Can be called by interrupt thread.
 */
static void
arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) {

    struct CCB *ccb = pkt->pkt_ha_private;

    if ((ccb != NULL) && (ccb->pkt == pkt)) {
        struct ACB *acb = ccb->acb;

        if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
            if (ddi_dma_unbind_handle(ccb->pkt_dma_handle)
                != DDI_SUCCESS) {
                cmn_err(CE_WARN,
                    "arcmsr%d: ddi_dma_unbind_handle() failed",
                    ddi_get_instance(acb->dev_info));
            }
            ddi_dma_free_handle(&ccb->pkt_dma_handle);
            ccb->pkt_dma_handle = NULL;
        }
        arcmsr_free_ccb(ccb);
    }

    scsi_hba_pkt_free(ap, pkt);
}

/*
 * Function name: arcmsr_tran_dmafree()
 * Return Values: none
 * Description: free dvma resources
 * Context: Can be called from different kernel process threads.
 *	    Can be called by interrupt thread.
 */
static void
arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) {

    struct CCB *ccb = pkt->pkt_ha_private;

    if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
        ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
        if (ddi_dma_unbind_handle(ccb->pkt_dma_handle)
            != DDI_SUCCESS) {
            cmn_err(CE_WARN,
                "arcmsr%d: ddi_dma_unbind_handle() failed "
                "(target %d lun %d)",
                ddi_get_instance(ccb->acb->dev_info),
                ap->a_target, ap->a_lun);
        }
        ddi_dma_free_handle(&ccb->pkt_dma_handle);
        ccb->pkt_dma_handle = NULL;
    }
}
1784
1785 /*
1786 * Function name: arcmsr_tran_sync_pkt()
1787 * Return Values: none
1788 * Description: sync dma
1789 * Context: Can be called from different kernel process threads.
1790 * Can be called by interrupt thread.
1791 */
1792 static void
1793 arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) {
1794
1795 struct CCB *ccb;
1796
1797 ccb = pkt->pkt_ha_private;
1798
1799 if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1800 if (ddi_dma_sync(ccb->pkt_dma_handle,
1801 ccb->pkt_dma_offset, ccb->pkt_dma_len,
1802 (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
1803 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU)
1804 != DDI_SUCCESS) {
1805 cmn_err(CE_WARN, "arcmsr%d: sync pkt failed "
1806 "for target %d lun %d",
1807 ddi_get_instance(ccb->acb->dev_info),
1808 ap->a_target, ap->a_lun);
1809 }
1810 }
1811 }
1812
1813
1814 static uint8_t
1815 arcmsr_hba_wait_msgint_ready(struct ACB *acb) {
1816
1817 uint32_t i;
1818 uint8_t retries = 0x00;
1819 struct HBA_msgUnit *phbamu;
1820
1821
1822 phbamu = (struct HBA_msgUnit *)acb->pmu;
1823
1824 do {
1825 for (i = 0; i < 100; i++) {
1826 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
1827 &phbamu->outbound_intstatus) &
1828 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1829 /* clear interrupt */
1830 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1831 &phbamu->outbound_intstatus,
1832 ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1833 return (TRUE);
1834 }
1835 drv_usecwait(10000);
1836 if (ddi_in_panic()) {
1837 /* clear interrupts */
1838 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1839 &phbamu->outbound_intstatus,
1840 ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1841 return (TRUE);
1842 }
1843 } /* max 1 second */
1844 } while (retries++ < 20); /* max 20 seconds */
1845 return (FALSE);
1846 }
1847
1848
1849
1850 static uint8_t
1851 arcmsr_hbb_wait_msgint_ready(struct ACB *acb) {
1852
1853 struct HBB_msgUnit *phbbmu;
1854 uint32_t i;
1855 uint8_t retries = 0x00;
1856
1857 phbbmu = (struct HBB_msgUnit *)acb->pmu;
1858
1859 do {
1860 for (i = 0; i < 100; i++) {
1861 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
1862 &phbbmu->hbb_doorbell->iop2drv_doorbell) &
1863 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1864 /* clear interrupt */
1865 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1866 &phbbmu->hbb_doorbell->iop2drv_doorbell,
1867 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1868 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1869 &phbbmu->hbb_doorbell->drv2iop_doorbell,
1870 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1871 return (TRUE);
1872 }
1873 drv_usecwait(10000);
1874 if (ddi_in_panic()) {
1875 /* clear interrupts */
1876 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1877 &phbbmu->hbb_doorbell->iop2drv_doorbell,
1878 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1879 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1880 &phbbmu->hbb_doorbell->drv2iop_doorbell,
1881 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1882 return (TRUE);
1883 }
1884 } /* max 1 second */
1885 } while (retries++ < 20); /* max 20 seconds */
1886
1887 return (FALSE);
1888 }
1889
1890
1891 static void
1892 arcmsr_flush_hba_cache(struct ACB *acb) {
1893
1894 struct HBA_msgUnit *phbamu;
1895 int retry_count = 30;
1896
1897 /* allow up to 10 minutes (30 retries x 20 seconds) for the cache flush */
1898
1899 phbamu = (struct HBA_msgUnit *)acb->pmu;
1900
1901 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
1902 ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
1903
1904 do {
1905 if (arcmsr_hba_wait_msgint_ready(acb)) {
1906 break;
1907 } else {
1908 retry_count--;
1909 }
1910 } while (retry_count != 0);
1911 }
1912
1913
1914
1915 static void
1916 arcmsr_flush_hbb_cache(struct ACB *acb) {
1917
1918 struct HBB_msgUnit *phbbmu;
1919 int retry_count = 30;
1920
1921 /* allow up to 10 minutes (30 retries x 20 seconds) for the cache flush */
1922
1923 phbbmu = (struct HBB_msgUnit *)acb->pmu;
1924 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1925 &phbbmu->hbb_doorbell->drv2iop_doorbell,
1926 ARCMSR_MESSAGE_FLUSH_CACHE);
1927
1928 do {
1929 if (arcmsr_hbb_wait_msgint_ready(acb)) {
1930 break;
1931 } else {
1932 retry_count--;
1933 }
1934 } while (retry_count != 0);
1935 }
1936
1937
1938 static void
1939 arcmsr_ccb_complete(struct CCB *ccb, int flag) {
1940
1941 struct ACB *acb = ccb->acb;
1942 struct scsi_pkt *pkt = ccb->pkt;
1943
1944 if (flag == 1) {
1945 atomic_add_32((volatile uint32_t *)
1946 &acb->ccboutstandingcount, -1);
1947 }
1948 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1949 STATE_SENT_CMD | STATE_GOT_STATUS);
1950
1951 if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1952 (pkt->pkt_state & STATE_XFERRED_DATA)) {
1953 (void) ddi_dma_sync(ccb->pkt_dma_handle,
1954 ccb->pkt_dma_offset, ccb->pkt_dma_len,
1955 DDI_DMA_SYNC_FORCPU);
1956 }
1957
1958 scsi_hba_pkt_comp(pkt);
1959 }
1960
1961
1962 static void
1963 arcmsr_report_sense_info(struct CCB *ccb) {
1964
1965 struct scsi_pkt *pkt = ccb->pkt;
1966 struct scsi_arq_status *arq_status;
1967
1968
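/*
 * With auto request sense, pkt_scbp points at a scsi_arq_status:
 * synthesize a successful REQUEST SENSE "inner" packet, then copy
 * the controller's sense bytes into sts_sensedata field by field.
 */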
1969 arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
1970 bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
1971 arq_status->sts_rqpkt_reason = CMD_CMPLT;
1972 arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
1973 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
1974 arq_status->sts_rqpkt_statistics = pkt->pkt_statistics;
1975 arq_status->sts_rqpkt_resid = 0;
1976
1977 pkt->pkt_reason = CMD_CMPLT;
1978 /* auto rqsense took place */
1979 pkt->pkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
1980 STATE_GOT_STATUS | STATE_ARQ_DONE);
1981
1982 { /* sts_sensedata is an embedded struct; always copy the sense data */
1983 struct SENSE_DATA *cdb_sensedata;
1984 struct scsi_extended_sense *sts_sensedata;
1985
1986 cdb_sensedata =
1987 (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
1988 sts_sensedata = &arq_status->sts_sensedata;
1989
1990 sts_sensedata->es_code = cdb_sensedata->ErrorCode;
1991 /* must eq CLASS_EXTENDED_SENSE (0x07) */
1992 sts_sensedata->es_class = cdb_sensedata->ErrorClass;
1993 sts_sensedata->es_valid = cdb_sensedata->Valid;
1994 sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
1995 sts_sensedata->es_key = cdb_sensedata->SenseKey;
1996 sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
1997 sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
1998 sts_sensedata->es_filmk = cdb_sensedata->FileMark;
1999 sts_sensedata->es_info_1 = cdb_sensedata->Information[0];
2000 sts_sensedata->es_info_2 = cdb_sensedata->Information[1];
2001 sts_sensedata->es_info_3 = cdb_sensedata->Information[2];
2002 sts_sensedata->es_info_4 = cdb_sensedata->Information[3];
2003 sts_sensedata->es_add_len =
2004 cdb_sensedata->AdditionalSenseLength;
2005 sts_sensedata->es_cmd_info[0] =
2006 cdb_sensedata->CommandSpecificInformation[0];
2007 sts_sensedata->es_cmd_info[1] =
2008 cdb_sensedata->CommandSpecificInformation[1];
2009 sts_sensedata->es_cmd_info[2] =
2010 cdb_sensedata->CommandSpecificInformation[2];
2011 sts_sensedata->es_cmd_info[3] =
2012 cdb_sensedata->CommandSpecificInformation[3];
2013 sts_sensedata->es_add_code =
2014 cdb_sensedata->AdditionalSenseCode;
2015 sts_sensedata->es_qual_code =
2016 cdb_sensedata->AdditionalSenseCodeQualifier;
2017 sts_sensedata->es_fru_code =
2018 cdb_sensedata->FieldReplaceableUnitCode;
2019 }
2020 }
2021
2022
2023
2024 static void
2025 arcmsr_abort_hba_allcmd(struct ACB *acb) {
2026
2027 struct HBA_msgUnit *phbamu;
2028
2029 phbamu = (struct HBA_msgUnit *)acb->pmu;
2030
2031 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2032 &phbamu->inbound_msgaddr0,
2033 ARCMSR_INBOUND_MESG0_ABORT_CMD);
2034
2035 if (!arcmsr_hba_wait_msgint_ready(acb)) {
2036 cmn_err(CE_WARN,
2037 "arcmsr%d: timeout while waiting for 'abort all "
2038 "outstanding commands'",
2039 ddi_get_instance(acb->dev_info));
2040 }
2041 }
2042
2043
2044
2045 static void
2046 arcmsr_abort_hbb_allcmd(struct ACB *acb) {
2047
2048 struct HBB_msgUnit *phbbmu =
2049 (struct HBB_msgUnit *)acb->pmu;
2050
2051 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2052 &phbbmu->hbb_doorbell->drv2iop_doorbell,
2053 ARCMSR_MESSAGE_ABORT_CMD);
2054
2055 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2056 cmn_err(CE_WARN,
2057 "arcmsr%d: timeout while waiting for 'abort all "
2058 "outstanding commands'",
2059 ddi_get_instance(acb->dev_info));
2060 }
2061 }
2062
2063 static void
2064 arcmsr_report_ccb_state(struct ACB *acb,
2065 struct CCB *ccb, uint32_t flag_ccb) {
2066
2067 int id, lun;
2068
2069 id = ccb->pkt->pkt_address.a_target;
2070 lun = ccb->pkt->pkt_address.a_lun;
2071
2072 if ((flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR) == 0) {
2073 if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
2074 acb->devstate[id][lun] = ARECA_RAID_GOOD;
2075 }
2076 ccb->pkt->pkt_reason = CMD_CMPLT;
2077 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
2078 arcmsr_ccb_complete(ccb, 1);
2079 } else {
2080 switch (ccb->arcmsr_cdb.DeviceStatus) {
2081 case ARCMSR_DEV_SELECT_TIMEOUT:
2082 if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
2083 cmn_err(CE_CONT,
2084 "arcmsr%d: raid volume was kicked out ",
2085 ddi_get_instance(acb->dev_info));
2086 }
2087 acb->devstate[id][lun] = ARECA_RAID_GONE;
2088 ccb->pkt->pkt_reason = CMD_TIMEOUT;
2089 ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
2090 arcmsr_ccb_complete(ccb, 1);
2091 break;
2092 case ARCMSR_DEV_ABORTED:
2093 case ARCMSR_DEV_INIT_FAIL:
2094 cmn_err(CE_CONT,
2095 "arcmsr%d: isr got "
2096 "'ARCMSR_DEV_ABORTED' 'ARCMSR_DEV_INIT_FAIL'",
2097 ddi_get_instance(acb->dev_info));
2098 cmn_err(CE_CONT, "arcmsr%d: raid volume was kicked "
2099 "out", ddi_get_instance(acb->dev_info));
2100 acb->devstate[id][lun] = ARECA_RAID_GONE;
2101 ccb->pkt->pkt_reason = CMD_DEV_GONE;
2102 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2103 arcmsr_ccb_complete(ccb, 1);
2104 break;
2105 case SCSISTAT_CHECK_CONDITION:
2106 acb->devstate[id][lun] = ARECA_RAID_GOOD;
2107 arcmsr_report_sense_info(ccb);
2108 arcmsr_ccb_complete(ccb, 1);
2109 break;
2110 default:
2111 cmn_err(CE_WARN, "arcmsr%d: target %d lun %d "
2112 "isr received CMD_DONE with unknown "
2113 "DeviceStatus (0x%x)",
2114 ddi_get_instance(acb->dev_info), id, lun,
2115 ccb->arcmsr_cdb.DeviceStatus);
2116 cmn_err(CE_CONT, "arcmsr%d: raid volume was kicked "
2117 "out ", ddi_get_instance(acb->dev_info));
2118 acb->devstate[id][lun] = ARECA_RAID_GONE;
2119 /* unknown or CRC error; flag as a transport error so it can be retried */
2120 ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2121 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2122 arcmsr_ccb_complete(ccb, 1);
2123 break;
2124 }
2125 }
2126 }
2127
2128
2129 static void
2130 arcmsr_drain_donequeue(struct ACB *acb, uint32_t flag_ccb) {
2131
2132 struct CCB *ccb;
2133
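/*
 * flag_ccb holds the CCB frame's bus address shifted right by 5
 * bits (frames are 32-byte aligned); shifting it back and adding
 * acb->vir2phy_offset converts it to the kernel virtual address
 * of the CCB.
 */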
2134 /* check if command completed without error */
2135 ccb = (struct CCB *)(acb->vir2phy_offset +
2136 (flag_ccb << 5)); /* frame must be aligned on 32 byte boundary */
2137
2138 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2139 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
2140 cmn_err(CE_CONT,
2141 "arcmsr%d: isr got aborted command "
2142 "while draining doneq",
2143 ddi_get_instance(acb->dev_info));
2144 ccb->pkt->pkt_reason = CMD_ABORTED;
2145 ccb->pkt->pkt_statistics |= STAT_ABORTED;
2146 arcmsr_ccb_complete(ccb, 1);
2147 return;
2148 }
2149
2150 if (ccb->startdone == ARCMSR_CCB_RESET) {
2151 cmn_err(CE_CONT,
2152 "arcmsr%d: isr got command reset "
2153 "while draining doneq",
2154 ddi_get_instance(acb->dev_info));
2155 ccb->pkt->pkt_reason = CMD_RESET;
2156 ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
2157 arcmsr_ccb_complete(ccb, 1);
2158 return;
2159 }
2160
2161 cmn_err(CE_WARN, "arcmsr%d: isr got an illegal ccb command "
2162 "done while draining doneq",
2163 ddi_get_instance(acb->dev_info));
2164 return;
2165 }
2166 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
2167 }
2168
2169
2170 static void
2171 arcmsr_done4abort_postqueue(struct ACB *acb) {
2172
2173 int i = 0;
2174 uint32_t flag_ccb;
2175
2176 switch (acb->adapter_type) {
2177 case ACB_ADAPTER_TYPE_A:
2178 {
2179 struct HBA_msgUnit *phbamu;
2180 uint32_t outbound_intstatus;
2181
2182 phbamu = (struct HBA_msgUnit *)acb->pmu;
2183 /* clear and abort all outbound posted Q */
2184 outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
2185 &phbamu->outbound_intstatus) & acb->outbound_int_enable;
2186 /* clear interrupt */
2187 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2188 &phbamu->outbound_intstatus, outbound_intstatus);
2189 while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
2190 &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
2191 (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
2192 arcmsr_drain_donequeue(acb, flag_ccb);
2193 }
2194 }
2195 break;
2196
2197 case ACB_ADAPTER_TYPE_B:
2198 {
2199 struct HBB_msgUnit *phbbmu;
2200
2201 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2202
2203 /* clear all outbound posted Q */
2204 /* clear doorbell interrupt */
2205 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2206 &phbbmu->hbb_doorbell->iop2drv_doorbell,
2207 ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
2208 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
2209 if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
2210 phbbmu->done_qbuffer[i] = 0;
2211 arcmsr_drain_donequeue(acb, flag_ccb);
2212 }
2213 phbbmu->post_qbuffer[i] = 0;
2214 } /* drain reply FIFO */
2215 phbbmu->doneq_index = 0;
2216 phbbmu->postq_index = 0;
2217 break;
2218 }
2219 }
2220 }
2221
2222 /*
2223  * Routine Description: Reset the 80331 iop.
2224  * Arguments: acb - the adapter control block
2225  * Return Value: Nothing.
2226  */
2227 static void
2228 arcmsr_iop_reset(struct ACB *acb) {
2229
2230 struct CCB *ccb;
2231 uint32_t intmask_org;
2232 int i = 0;
2233
2234 if (acb->ccboutstandingcount > 0) {
2235 /* disable all outbound interrupt */
2236 intmask_org = arcmsr_disable_allintr(acb);
2237 /* talk to iop 331 outstanding command aborted */
2238 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
2239 arcmsr_abort_hba_allcmd(acb);
2240 } else {
2241 arcmsr_abort_hbb_allcmd(acb);
2242 }
2243 /* clear and abort all outbound posted Q */
2244 arcmsr_done4abort_postqueue(acb);
2245
2246 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2247 ccb = acb->pccb_pool[i];
2248 if (ccb->startdone == ARCMSR_CCB_START) {
2249 ccb->startdone = ARCMSR_CCB_RESET;
2250 ccb->pkt->pkt_reason = CMD_RESET;
2251 ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
2252 arcmsr_ccb_complete(ccb, 1);
2253 }
2254 }
2255 /* enable all outbound interrupt */
2256 arcmsr_enable_allintr(acb, intmask_org);
2257 }
2258 }
2259
2260 /*
2261 * You can access the DMA address through the #defines:
2262 * dmac_address for 32-bit addresses and dmac_laddress for 64-bit addresses.
2263 * These macros are defined as follows:
2264 *
2265 * #define dmac_laddress _dmu._dmac_ll
2266 * #ifdef _LONG_LONG_HTOL
2267 * #define dmac_notused _dmu._dmac_la[0]
2268 * #define dmac_address _dmu._dmac_la[1]
2269 * #else
2270 * #define dmac_address _dmu._dmac_la[0]
2271 * #define dmac_notused _dmu._dmac_la[1]
2272 * #endif
2273 */
2274 /*ARGSUSED*/
2275 static void
2276 arcmsr_build_ccb(struct CCB *ccb) {
2277
2278 struct scsi_pkt *pkt = ccb->pkt;
2279 struct ARCMSR_CDB *arcmsr_cdb;
2280 char *psge;
2281 uint32_t address_lo, address_hi;
2282 int arccdbsize = 0x30;
2283 uint8_t sgcount;
2284
2285 arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
2286 psge = (char *)&arcmsr_cdb->sgu;
2287
2288 /* compute the command deadline: now + pkt_time timeout (seconds) */
2289 ccb->ccb_time = (time_t)(pkt->pkt_time + ddi_get_time());
2290 bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb,
2291 arcmsr_cdb->CdbLength);
2292 sgcount = ccb->arcmsr_cdb.sgcount;
2293
2294 if (sgcount) {
2295 int length, i;
2296 int cdb_sgcount = 0;
2297 int total_xfer_length = 0;
2298
2299 /* map stor port SG list to our iop SG List. */
2300 for (i = 0; i < sgcount; i++) {
2301 /* Get physaddr of the current data pointer */
2302 length = ccb->pkt_dmacookies[i].dmac_size;
2303 total_xfer_length += length;
2304 address_lo = dma_addr_lo32(
2305 ccb->pkt_dmacookies[i].dmac_laddress);
2306 address_hi = dma_addr_hi32(
2307 ccb->pkt_dmacookies[i].dmac_laddress);
2308
2309 if (address_hi == 0) {
2310 struct SG32ENTRY *dma_sg;
2311
2312 dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
2313
2314 dma_sg->address = address_lo;
2315 dma_sg->length = length;
2316 psge += sizeof (struct SG32ENTRY);
2317 arccdbsize += sizeof (struct SG32ENTRY);
2318 } else {
2319 int sg64s_size = 0;
2320 int tmplength = length;
2321 int64_t span4G, length0;
2322 struct SG64ENTRY *dma_sg;
2323
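/*
 * Split any cookie that crosses a 4 GiB boundary so that no
 * single SG entry spans it.  For example, a 16 KiB cookie at
 * 0xFFFFF000 becomes a 4 KiB entry ending at the 4 GiB line and
 * a 12 KiB entry starting at 0x1_00000000.
 */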
2324 /*LINTED*/
2325 while (1) {
2326 dma_sg =
2327 (struct SG64ENTRY *)(intptr_t)psge;
2328 span4G =
2329 (int64_t)address_lo + tmplength;
2330
2331 dma_sg->addresshigh = address_hi;
2332 dma_sg->address = address_lo;
2333 if (span4G > 0x100000000ULL) {
2334 /* see if we cross 4G */
2335 length0 = 0x100000000ULL -
2336 address_lo;
2337 dma_sg->length =
2338 (uint32_t)length0 |
2339 IS_SG64_ADDR;
2340 address_hi = address_hi + 1;
2341 address_lo = 0;
2342 tmplength = tmplength-
2343 (int32_t)length0;
2344 sg64s_size +=
2345 sizeof (struct SG64ENTRY);
2346 psge +=
2347 sizeof (struct SG64ENTRY);
2348 cdb_sgcount++;
2349 } else {
2350 dma_sg->length = tmplength |
2351 IS_SG64_ADDR;
2352 sg64s_size +=
2353 sizeof (struct SG64ENTRY);
2354 psge +=
2355 sizeof (struct SG64ENTRY);
2356 break;
2357 }
2358 }
2359 arccdbsize += sg64s_size;
2360 }
2361 cdb_sgcount++;
2362 }
2363 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
2364 arcmsr_cdb->DataLength = total_xfer_length;
2365 if (arccdbsize > 256) {
2366 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
2367 }
2368 } else {
2369 arcmsr_cdb->DataLength = 0;
2370 }
2371
2372 if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
2373 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
2374 }
2375
2376 /*
2377 * arcmsr_post_ccb - Send a protocol-specific ARC send postcard to an AIOC.
2378 *
2379 * handle: Handle of registered ARC protocol driver
2380 * adapter_id: AIOC unique identifier(integer)
2381 * pPOSTCARD_SEND: Pointer to ARC send postcard
2382 *
2383 * This routine posts an ARC send postcard to the request post FIFO of a
2384 * specific ARC adapter.
2385 */
2386 static int
2387 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb) {
2388
2389 uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
2390 struct scsi_pkt *pkt = ccb->pkt;
2391 struct ARCMSR_CDB *arcmsr_cdb;
2392
2393 arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
2394
2395 /* Use correct offset and size for syncing */
2396 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
2397 DDI_DMA_SYNC_FORDEV) == DDI_FAILURE)
2398 return (DDI_FAILURE);
2399
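/* count the ccb as outstanding before the IOP can see it */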
2400 atomic_add_32((volatile uint32_t *)&acb->ccboutstandingcount, 1);
2401 ccb->startdone = ARCMSR_CCB_START;
2402
2403 switch (acb->adapter_type) {
2404 case ACB_ADAPTER_TYPE_A:
2405 {
2406 struct HBA_msgUnit *phbamu;
2407
2408 phbamu = (struct HBA_msgUnit *)acb->pmu;
2409
2410 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
2411 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2412 &phbamu->inbound_queueport,
2413 cdb_shifted_phyaddr |
2414 ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
2415 } else {
2416 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2417 &phbamu->inbound_queueport, cdb_shifted_phyaddr);
2418 }
2419 if (pkt->pkt_flags & FLAG_NOINTR)
2420 arcmsr_polling_hba_ccbdone(acb, ccb);
2421 }
2422 break;
2423 case ACB_ADAPTER_TYPE_B:
2424 {
2425 struct HBB_msgUnit *phbbmu;
2426 int ending_index, index;
2427
2428 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2429 mutex_enter(&acb->postq_mutex);
2430 index = phbbmu->postq_index;
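/*
 * post_qbuffer is a circular ring shared with the IOP: zero the
 * slot after the one about to be filled, which presumably marks
 * the current end of the posted entries for the IOP.
 */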
2431 ending_index = ((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
2432 phbbmu->post_qbuffer[ending_index] = 0;
2433 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
2434 phbbmu->post_qbuffer[index] =
2435 (cdb_shifted_phyaddr|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
2436 } else {
2437 phbbmu->post_qbuffer[index] = cdb_shifted_phyaddr;
2438 }
2439 index++;
2440 /* if last index number set it to 0 */
2441 index %= ARCMSR_MAX_HBB_POSTQUEUE;
2442 phbbmu->postq_index = index;
2443 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2444 &phbbmu->hbb_doorbell->drv2iop_doorbell,
2445 ARCMSR_DRV2IOP_CDB_POSTED);
2446 mutex_exit(&acb->postq_mutex);
2447 if (pkt->pkt_flags & FLAG_NOINTR)
2448 arcmsr_polling_hbb_ccbdone(acb, ccb);
2449 }
2450 break;
2451 }
2452
2453 return (DDI_SUCCESS);
2454 }
2455
2456
2457
2458
2459 static struct QBUFFER *
2460 arcmsr_get_iop_rqbuffer(struct ACB *acb) {
2461
2462 struct QBUFFER *qb;
2463
2464 switch (acb->adapter_type) {
2465 case ACB_ADAPTER_TYPE_A:
2466 {
2467 struct HBA_msgUnit *phbamu;
2468
2469 phbamu = (struct HBA_msgUnit *)acb->pmu;
2470 qb = (struct QBUFFER *)&phbamu->message_rbuffer;
2471 }
2472 break;
2473 case ACB_ADAPTER_TYPE_B:
2474 {
2475 struct HBB_msgUnit *phbbmu;
2476
2477 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2478 qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
2479 }
2480 break;
2481 }
2482
2483 return (qb);
2484 }
2485
2486
2487
2488 static struct QBUFFER *
2489 arcmsr_get_iop_wqbuffer(struct ACB *acb) {
2490
2491 struct QBUFFER *qbuffer = NULL;
2492
2493 switch (acb->adapter_type) {
2494 case ACB_ADAPTER_TYPE_A:
2495 {
2496 struct HBA_msgUnit *phbamu;
2497
2498 phbamu = (struct HBA_msgUnit *)acb->pmu;
2499 qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
2500 }
2501 break;
2502 case ACB_ADAPTER_TYPE_B:
2503 {
2504 struct HBB_msgUnit *phbbmu;
2505
2506 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2507 qbuffer =
2508 (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
2509 }
2510 break;
2511 }
2512 return (qbuffer);
2513 }
2514
2515
2516
2517 static void
2518 arcmsr_iop_message_read(struct ACB *acb) {
2519
2520 switch (acb->adapter_type) {
2521 case ACB_ADAPTER_TYPE_A:
2522 {
2523 struct HBA_msgUnit *phbamu;
2524
2525 phbamu = (struct HBA_msgUnit *)acb->pmu;
2526 /* let IOP know the data has been read */
2527 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2528 &phbamu->inbound_doorbell,
2529 ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
2530 }
2531 break;
2532 case ACB_ADAPTER_TYPE_B:
2533 {
2534 struct HBB_msgUnit *phbbmu;
2535
2536 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2537 /* let IOP know the data has been read */
2538 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2539 &phbbmu->hbb_doorbell->drv2iop_doorbell,
2540 ARCMSR_DRV2IOP_DATA_READ_OK);
2541 }
2542 break;
2543 }
2544 }
2545
2546
2547
2548 static void
2549 arcmsr_iop_message_wrote(struct ACB *acb) {
2550
2551 switch (acb->adapter_type) {
2552 case ACB_ADAPTER_TYPE_A:
2553 {
2554 struct HBA_msgUnit *phbamu;
2555
2556 phbamu = (struct HBA_msgUnit *)acb->pmu;
2557 /*
2558 * push the inbound doorbell to tell the iop the driver data write is ok,
2559 * and wait for the reply on the next hwinterrupt for the next Qbuffer post
2560 */
2561 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2562 &phbamu->inbound_doorbell,
2563 ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
2564 }
2565 break;
2566 case ACB_ADAPTER_TYPE_B:
2567 {
2568 struct HBB_msgUnit *phbbmu;
2569
2570 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2571 /*
2572 * push the inbound doorbell to tell the iop the driver data was written
2573 * successfully, then await reply on next hwinterrupt for
2574 * next Qbuffer post
2575 */
2576 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2577 &phbbmu->hbb_doorbell->drv2iop_doorbell,
2578 ARCMSR_DRV2IOP_DATA_WRITE_OK);
2579 }
2580 break;
2581 }
2582 }
2583
2584
2585
2586 static void
2587 arcmsr_post_ioctldata2iop(struct ACB *acb) {
2588
2589 uint8_t *pQbuffer;
2590 struct QBUFFER *pwbuffer;
2591 uint8_t *iop_data;
2592 int32_t allxfer_len = 0;
2593
2594 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2595 iop_data = (uint8_t *)pwbuffer->data;
2596 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
2597 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
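/*
 * Drain the driver's circular wqbuffer into the IOP message
 * write buffer, at most 124 bytes (apparently the QBUFFER data
 * capacity) per doorbell exchange.
 */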
2598 while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
2599 (allxfer_len < 124)) {
2600 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
2601 (void) memcpy(iop_data, pQbuffer, 1);
2602 acb->wqbuf_firstidx++;
2603 /* if last index number set it to 0 */
2604 acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
2605 iop_data++;
2606 allxfer_len++;
2607 }
2608 pwbuffer->data_len = allxfer_len;
2609 /*
2610 * push inbound doorbell and wait reply at hwinterrupt
2611 * routine for next Qbuffer post
2612 */
2613 arcmsr_iop_message_wrote(acb);
2614 }
2615 }
2616
2617
2618
2619 static void
2620 arcmsr_stop_hba_bgrb(struct ACB *acb) {
2621
2622 struct HBA_msgUnit *phbamu;
2623
2624 phbamu = (struct HBA_msgUnit *)acb->pmu;
2625
2626 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2627 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2628 &phbamu->inbound_msgaddr0,
2629 ARCMSR_INBOUND_MESG0_STOP_BGRB);
2630 if (!arcmsr_hba_wait_msgint_ready(acb))
2631 cmn_err(CE_WARN,
2632 "arcmsr%d: timeout while waiting for background "
2633 "rebuild completion",
2634 ddi_get_instance(acb->dev_info));
2635 }
2636
2637
2638 static void
2639 arcmsr_stop_hbb_bgrb(struct ACB *acb) {
2640
2641 struct HBB_msgUnit *phbbmu;
2642
2643 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2644
2645 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2646 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2647 &phbbmu->hbb_doorbell->drv2iop_doorbell,
2648 ARCMSR_MESSAGE_STOP_BGRB);
2649
2650 if (!arcmsr_hbb_wait_msgint_ready(acb))
2651 cmn_err(CE_WARN,
2652 "arcmsr%d: timeout while waiting for background "
2653 "rebuild completion",
2654 ddi_get_instance(acb->dev_info));
2655 }
2656
2657 static int
2658 arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt) {
2659
2660 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2661 struct CCB *ccb = pkt->pkt_ha_private;
2662 struct buf *bp = ccb->bp;
2663 uint8_t *pQbuffer;
2664 int retvalue = 0, transfer_len = 0;
2665 char *buffer;
2666 uint32_t controlcode;
2667
2668
2669 /* 4 bytes: Areca io control code, big-endian in CDB bytes 5..8 */
2670 controlcode = (uint32_t)pkt->pkt_cdbp[5] << 24 |
2671 (uint32_t)pkt->pkt_cdbp[6] << 16 |
2672 (uint32_t)pkt->pkt_cdbp[7] << 8 |
2673 (uint32_t)pkt->pkt_cdbp[8];
2674
2675 if (bp->b_flags & (B_PHYS | B_PAGEIO))
2676 bp_mapin(bp);
2677
2678
2679 buffer = bp->b_un.b_addr;
2680 transfer_len = bp->b_bcount;
2681 if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
2682 retvalue = ARCMSR_MESSAGE_FAIL;
2683 goto message_out;
2684 }
2685
2686 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
2687
2688 switch (controlcode) {
2689 case ARCMSR_MESSAGE_READ_RQBUFFER:
2690 {
2691 unsigned long *ver_addr;
2692 uint8_t *ptmpQbuffer;
2693 int32_t allxfer_len = 0;
2694
2695 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2696 if (!ver_addr) {
2697 retvalue = ARCMSR_MESSAGE_FAIL;
2698 goto message_out;
2699 }
2700
2701 ptmpQbuffer = (uint8_t *)ver_addr;
2702 while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
2703 (allxfer_len < (MSGDATABUFLEN - 1))) {
2704 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
2705 (void) memcpy(ptmpQbuffer, pQbuffer, 1);
2706 acb->rqbuf_firstidx++;
2707 acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
2708 ptmpQbuffer++;
2709 allxfer_len++;
2710 }
2711
2712 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2713 struct QBUFFER *prbuffer;
2714 uint8_t *iop_data;
2715 int32_t iop_len;
2716
2717 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2718 prbuffer = arcmsr_get_iop_rqbuffer(acb);
2719 iop_data = (uint8_t *)prbuffer->data;
2720 iop_len = (int32_t)prbuffer->data_len;
2721
2722 while (iop_len > 0) {
2723 pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
2724 (void) memcpy(pQbuffer, iop_data, 1);
2725 acb->rqbuf_lastidx++;
2726 acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
2727 iop_data++;
2728 iop_len--;
2729 }
2730 arcmsr_iop_message_read(acb);
2731 }
2732
2733 (void) memcpy(pcmdmessagefld->messagedatabuffer,
2734 (uint8_t *)ver_addr, allxfer_len);
2735 pcmdmessagefld->cmdmessage.Length = allxfer_len;
2736 pcmdmessagefld->cmdmessage.ReturnCode =
2737 ARCMSR_MESSAGE_RETURNCODE_OK;
2738 kmem_free(ver_addr, MSGDATABUFLEN);
2739 }
2740 break;
2741 case ARCMSR_MESSAGE_WRITE_WQBUFFER:
2742 {
2743 unsigned long *ver_addr;
2744 int32_t my_empty_len, user_len, wqbuf_firstidx, wqbuf_lastidx;
2745 uint8_t *ptmpuserbuffer;
2746
2747 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2748 if (!ver_addr) {
2749 retvalue = ARCMSR_MESSAGE_FAIL;
2750 goto message_out;
2751 }
2752 ptmpuserbuffer = (uint8_t *)ver_addr;
2753 user_len = pcmdmessagefld->cmdmessage.Length;
2754 (void) memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer,
2755 user_len);
2756 wqbuf_lastidx = acb->wqbuf_lastidx;
2757 wqbuf_firstidx = acb->wqbuf_firstidx;
2758 if (wqbuf_lastidx != wqbuf_firstidx) {
2759 struct scsi_arq_status *arq_status;
2760
2761 arcmsr_post_ioctldata2iop(acb);
2762 arq_status =
2763 (struct scsi_arq_status *)(intptr_t)
2764 (pkt->pkt_scbp);
2765 bzero((caddr_t)arq_status,
2766 sizeof (struct scsi_arq_status));
2767 arq_status->sts_rqpkt_reason = CMD_CMPLT;
2768 arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
2769 STATE_GOT_TARGET | STATE_SENT_CMD |
2770 STATE_XFERRED_DATA | STATE_GOT_STATUS);
2771
2772 arq_status->sts_rqpkt_statistics = pkt->pkt_statistics;
2773 arq_status->sts_rqpkt_resid = 0;
2774 { /* sts_sensedata is embedded; always report the sense data */
2775 struct scsi_extended_sense *sts_sensedata;
2776
2777 sts_sensedata = &arq_status->sts_sensedata;
2778
2779 /* has error report sensedata */
2780 sts_sensedata->es_code = 0x0;
2781 sts_sensedata->es_valid = 0x01;
2782 sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
2783 /* AdditionalSenseLength */
2784 sts_sensedata->es_add_len = 0x0A;
2785 /* AdditionalSenseCode */
2786 sts_sensedata->es_add_code = 0x20;
2787 }
2788 retvalue = ARCMSR_MESSAGE_FAIL;
2789 } else {
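/*
 * Standard free-space arithmetic for a power-of-two circular
 * buffer: (first - last - 1) masked by ARCMSR_MAX_QBUFFER - 1.
 * With first == last (an empty ring) this yields
 * ARCMSR_MAX_QBUFFER - 1 usable bytes.
 */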
2790 my_empty_len = (wqbuf_firstidx-wqbuf_lastidx - 1) &
2791 (ARCMSR_MAX_QBUFFER - 1);
2792 if (my_empty_len >= user_len) {
2793 while (user_len > 0) {
2794 pQbuffer =
2795 &acb->wqbuffer[acb->wqbuf_lastidx];
2796 (void) memcpy(pQbuffer,
2797 ptmpuserbuffer, 1);
2798 acb->wqbuf_lastidx++;
2799 acb->wqbuf_lastidx %=
2800 ARCMSR_MAX_QBUFFER;
2801 ptmpuserbuffer++;
2802 user_len--;
2803 }
2804 if (acb->acb_flags &
2805 ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2806 acb->acb_flags &=
2807 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2808 arcmsr_post_ioctldata2iop(acb);
2809 }
2810 } else {
2811 struct scsi_arq_status *arq_status;
2812
2813 /* has error report sensedata */
2814 arq_status =
2815 (struct scsi_arq_status *)
2816 (intptr_t)(pkt->pkt_scbp);
2817 bzero((caddr_t)arq_status,
2818 sizeof (struct scsi_arq_status));
2819 arq_status->sts_rqpkt_reason = CMD_CMPLT;
2820 arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
2821 STATE_GOT_TARGET | STATE_SENT_CMD |
2822 STATE_XFERRED_DATA | STATE_GOT_STATUS);
2823 arq_status->sts_rqpkt_statistics =
2824 pkt->pkt_statistics;
2825 arq_status->sts_rqpkt_resid = 0;
2826 { /* sts_sensedata is embedded; always report the sense data */
2827 struct scsi_extended_sense
2828 *sts_sensedata;
2829
2830 sts_sensedata =
2831 &arq_status->sts_sensedata;
2832
2833 /* has error report sensedata */
2834 sts_sensedata->es_code = 0x0;
2835 sts_sensedata->es_valid = 0x01;
2836 sts_sensedata->es_key =
2837 KEY_ILLEGAL_REQUEST;
2838 /* AdditionalSenseLength */
2839 sts_sensedata->es_add_len = 0x0A;
2840 /* AdditionalSenseCode */
2841 sts_sensedata->es_add_code = 0x20;
2842 }
2843 retvalue = ARCMSR_MESSAGE_FAIL;
2844 }
2845 }
2846 kmem_free(ver_addr, MSGDATABUFLEN);
2847 }
2848 break;
2849 case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
2850 {
2851 pQbuffer = acb->rqbuffer;
2852
2853 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2854 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2855 arcmsr_iop_message_read(acb);
2856 }
2857 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2858 acb->rqbuf_firstidx = 0;
2859 acb->rqbuf_lastidx = 0;
2860 (void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2861 pcmdmessagefld->cmdmessage.ReturnCode =
2862 ARCMSR_MESSAGE_RETURNCODE_OK;
2863 }
2864 break;
2865 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
2866 {
2867 pQbuffer = acb->wqbuffer;
2868
2869 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2870 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2871 arcmsr_iop_message_read(acb);
2872 }
2873 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2874 ACB_F_MESSAGE_WQBUFFER_READ);
2875 acb->wqbuf_firstidx = 0;
2876 acb->wqbuf_lastidx = 0;
2877 (void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2878 pcmdmessagefld->cmdmessage.ReturnCode =
2879 ARCMSR_MESSAGE_RETURNCODE_OK;
2880 }
2881 break;
2882 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
2883 {
2884
2885 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2886 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2887 arcmsr_iop_message_read(acb);
2888 }
2889 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2890 ACB_F_MESSAGE_RQBUFFER_CLEARED |
2891 ACB_F_MESSAGE_WQBUFFER_READ);
2892 acb->rqbuf_firstidx = 0;
2893 acb->rqbuf_lastidx = 0;
2894 acb->wqbuf_firstidx = 0;
2895 acb->wqbuf_lastidx = 0;
2896 pQbuffer = acb->rqbuffer;
2897 (void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
2898 pQbuffer = acb->wqbuffer;
2899 (void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
2900 pcmdmessagefld->cmdmessage.ReturnCode =
2901 ARCMSR_MESSAGE_RETURNCODE_OK;
2902 }
2903 break;
2904 case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
2905 pcmdmessagefld->cmdmessage.ReturnCode =
2906 ARCMSR_MESSAGE_RETURNCODE_3F;
2907 break;
2908 /*
2909 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
2910 */
2911 case ARCMSR_MESSAGE_SAY_GOODBYE:
2912 arcmsr_iop_parking(acb);
2913 break;
2914 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2915 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
2916 arcmsr_flush_hba_cache(acb);
2917 } else {
2918 arcmsr_flush_hbb_cache(acb);
2919 }
2920 break;
2921 default:
2922 retvalue = ARCMSR_MESSAGE_FAIL;
2923 }
2924
2925 message_out:
2926
2927 return (retvalue);
2928 }
2929
2930
2931
2932 static int
2933 arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
2934 cred_t *credp, int *rvalp) {
2935 #ifndef __lock_lint
2936 _NOTE(ARGUNUSED(rvalp))
2937 #endif
2938
2939 struct ACB *acb;
2940 struct CMD_MESSAGE_FIELD *pktioctlfld;
2941 int retvalue = 0;
2942 int instance = MINOR2INST(getminor(dev));
2943
2944 if (instance < 0)
2945 return (ENXIO);
2946
2947 if (secpolicy_sys_config(credp, B_FALSE) != 0)
2948 return (EPERM);
2949
2950 acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2951 if (acb == NULL)
2952 return (ENXIO);
2953
2954 pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD),
2955 KM_SLEEP);
2956 if (pktioctlfld == NULL)
2957 return (ENXIO);
2958
2959 /*
2960 * if we got here, we either are a 64-bit app in a 64-bit kernel
2961 * or a 32-bit app in a 32-bit kernel. Either way, we can just
2962 * copy in the args without any special conversions.
2963 */
2964
2965 mutex_enter(&acb->ioctl_mutex);
2966 if (ddi_copyin((void *)arg, pktioctlfld,
2967 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
2968 retvalue = ENXIO;
2969 goto ioctl_out;
2970 }
2971
2972 if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
2973 /* validity check */
2974 retvalue = ENXIO;
2975 goto ioctl_out;
2976 }
2977
2978 switch ((unsigned int)ioctl_cmd) {
2979 case ARCMSR_MESSAGE_READ_RQBUFFER:
2980 {
2981 unsigned long *ver_addr;
2982 uint8_t *pQbuffer, *ptmpQbuffer;
2983 int32_t allxfer_len = 0;
2984
2985 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2986 if (ver_addr == NULL) {
2987 retvalue = ENXIO;
2988 goto ioctl_out;
2989 }
2990
2991 ptmpQbuffer = (uint8_t *)ver_addr;
2992 while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
2993 (allxfer_len < (MSGDATABUFLEN - 1))) {
2994 /* copy READ QBUFFER to srb */
2995 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
2996 (void) memcpy(ptmpQbuffer, pQbuffer, 1);
2997 acb->rqbuf_firstidx++;
2998 /* if last index number set it to 0 */
2999 acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
3000 ptmpQbuffer++;
3001 allxfer_len++;
3002 }
3003
3004 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3005 struct QBUFFER *prbuffer;
3006 uint8_t *pQbuffer;
3007 uint8_t *iop_data;
3008 int32_t iop_len;
3009
3010 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3011 prbuffer = arcmsr_get_iop_rqbuffer(acb);
3012 iop_data = (uint8_t *)prbuffer->data;
3013 iop_len = (int32_t)prbuffer->data_len;
3014 /*
3015  * this iop data has no chance to overflow the buffer
3016  * again here, so just copy it in
3017  */
3018 while (iop_len > 0) {
3019 pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
3020 (void) memcpy(pQbuffer, iop_data, 1);
3021 acb->rqbuf_lastidx++;
3022 /* if last index number set it to 0 */
3023 acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
3024 iop_data++;
3025 iop_len--;
3026 }
3027 /* let IOP know data has been read */
3028 arcmsr_iop_message_read(acb);
3029 }
3030 (void) memcpy(pktioctlfld->messagedatabuffer,
3031 (uint8_t *)ver_addr, allxfer_len);
3032 pktioctlfld->cmdmessage.Length = allxfer_len;
3033 pktioctlfld->cmdmessage.ReturnCode =
3034 ARCMSR_MESSAGE_RETURNCODE_OK;
3035
3036 if (ddi_copyout(pktioctlfld, (void *)arg,
3037 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3038 retvalue = ENXIO;
3039
3040 kmem_free(ver_addr, MSGDATABUFLEN);
3041 }
3042 break;
3043 case ARCMSR_MESSAGE_WRITE_WQBUFFER:
3044 {
3045 unsigned long *ver_addr;
3046 int32_t my_empty_len, user_len;
3047 int32_t wqbuf_firstidx, wqbuf_lastidx;
3048 uint8_t *pQbuffer, *ptmpuserbuffer;
3049
3050 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
3051
3052 if (ver_addr == NULL) {
3053 retvalue = ENXIO;
3054 goto ioctl_out;
3055 }
3056
3057 ptmpuserbuffer = (uint8_t *)ver_addr;
3058 user_len = pktioctlfld->cmdmessage.Length;
3059 (void) memcpy(ptmpuserbuffer,
3060 pktioctlfld->messagedatabuffer, user_len);
3061 /*
3062  * check if the data transfer length of this request
3063  * would overflow our qbuffer array
3064  */
3065 wqbuf_lastidx = acb->wqbuf_lastidx;
3066 wqbuf_firstidx = acb->wqbuf_firstidx;
3067 if (wqbuf_lastidx != wqbuf_firstidx) {
3068 arcmsr_post_ioctldata2iop(acb);
3069 pktioctlfld->cmdmessage.ReturnCode =
3070 ARCMSR_MESSAGE_RETURNCODE_ERROR;
3071 } else {
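/*
 * Same power-of-two ring arithmetic as the in-band message
 * path: (first - last - 1) & (ARCMSR_MAX_QBUFFER - 1) is the
 * number of free bytes.
 */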
3072 my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
3073 & (ARCMSR_MAX_QBUFFER - 1);
3074 if (my_empty_len >= user_len) {
3075 while (user_len > 0) {
3076 /* copy srb data to wqbuffer */
3077 pQbuffer =
3078 &acb->wqbuffer[acb->wqbuf_lastidx];
3079 (void) memcpy(pQbuffer,
3080 ptmpuserbuffer, 1);
3081 acb->wqbuf_lastidx++;
3082 /* if last index, wrap to 0 */
3083 acb->wqbuf_lastidx %=
3084 ARCMSR_MAX_QBUFFER;
3085 ptmpuserbuffer++;
3086 user_len--;
3087 }
3088 /* post first Qbuffer */
3089 if (acb->acb_flags &
3090 ACB_F_MESSAGE_WQBUFFER_CLEARED) {
3091 acb->acb_flags &=
3092 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
3093 arcmsr_post_ioctldata2iop(acb);
3094 }
3095 pktioctlfld->cmdmessage.ReturnCode =
3096 ARCMSR_MESSAGE_RETURNCODE_OK;
3097 } else {
3098 pktioctlfld->cmdmessage.ReturnCode =
3099 ARCMSR_MESSAGE_RETURNCODE_ERROR;
3100 }
3101 }
3102 if (ddi_copyout(pktioctlfld, (void *)arg,
3103 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3104 retvalue = ENXIO;
3105
3106 kmem_free(ver_addr, MSGDATABUFLEN);
3107 }
3108 break;
3109 case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
3110 {
3111 uint8_t *pQbuffer = acb->rqbuffer;
3112
3113 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3114 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3115 arcmsr_iop_message_read(acb);
3116 }
3117 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
3118 acb->rqbuf_firstidx = 0;
3119 acb->rqbuf_lastidx = 0;
3120 bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
3121 /* report success */
3122 pktioctlfld->cmdmessage.ReturnCode =
3123 ARCMSR_MESSAGE_RETURNCODE_OK;
3124 if (ddi_copyout(pktioctlfld, (void *)arg,
3125 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3126 retvalue = ENXIO;
3127
3128 }
3129 break;
3130 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
3131 {
3132 uint8_t *pQbuffer = acb->wqbuffer;
3133
3134 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3135 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3136 arcmsr_iop_message_read(acb);
3137 }
3138 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3139 ACB_F_MESSAGE_WQBUFFER_READ);
3140 acb->wqbuf_firstidx = 0;
3141 acb->wqbuf_lastidx = 0;
3142 bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
3143 /* report success */
3144 pktioctlfld->cmdmessage.ReturnCode =
3145 ARCMSR_MESSAGE_RETURNCODE_OK;
3146 if (ddi_copyout(pktioctlfld, (void *)arg,
3147 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3148 retvalue = ENXIO;
3149
3150 }
3151 break;
3152 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
3153 {
3154 uint8_t *pQbuffer;
3155
3156 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3157 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3158 arcmsr_iop_message_read(acb);
3159 }
3160 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3161 ACB_F_MESSAGE_RQBUFFER_CLEARED |
3162 ACB_F_MESSAGE_WQBUFFER_READ);
3163 acb->rqbuf_firstidx = 0;
3164 acb->rqbuf_lastidx = 0;
3165 acb->wqbuf_firstidx = 0;
3166 acb->wqbuf_lastidx = 0;
3167 pQbuffer = acb->rqbuffer;
3168 bzero(pQbuffer, sizeof (struct QBUFFER));
3169 pQbuffer = acb->wqbuffer;
3170 bzero(pQbuffer, sizeof (struct QBUFFER));
3171 /* report success */
3172 pktioctlfld->cmdmessage.ReturnCode =
3173 ARCMSR_MESSAGE_RETURNCODE_OK;
3174 if (ddi_copyout(pktioctlfld, (void *)arg,
3175 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3176 retvalue = ENXIO;
3177
3178 }
3179 break;
3180 case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
3181 {
3182 pktioctlfld->cmdmessage.ReturnCode =
3183 ARCMSR_MESSAGE_RETURNCODE_3F;
3184 if (ddi_copyout(pktioctlfld, (void *)arg,
3185 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3186 retvalue = ENXIO;
3187 }
3188 break;
3189 /* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
3190 case ARCMSR_MESSAGE_SAY_GOODBYE:
3191 arcmsr_iop_parking(acb);
3192 break;
3193 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
3194 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3195 arcmsr_flush_hba_cache(acb);
3196 } else {
3197 arcmsr_flush_hbb_cache(acb);
3198 }
3199 break;
3200 default:
3201 retvalue = ENOTTY;
3202 }
3203
3204 ioctl_out:
3205 kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
3206 mutex_exit(&acb->ioctl_mutex);
3207
3208 return (retvalue);
3209 }
3210
3211
3212
3213 static struct CCB *
3214 arcmsr_get_freeccb(struct ACB *acb) {
3215
3216 struct CCB *ccb;
3217 int workingccb_startindex, workingccb_doneindex;
3218
3219
3220 mutex_enter(&acb->workingQ_mutex);
3221 workingccb_doneindex = acb->workingccb_doneindex;
3222 workingccb_startindex = acb->workingccb_startindex;
3223 ccb = acb->ccbworkingQ[workingccb_startindex];
3224 workingccb_startindex++;
3225 workingccb_startindex %= ARCMSR_MAX_FREECCB_NUM;
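/*
 * The working queue is a ring in which startindex chases
 * doneindex; if advancing startindex would make the two meet,
 * every CCB is in use and NULL is returned instead.
 */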
3226 if (workingccb_doneindex != workingccb_startindex) {
3227 acb->workingccb_startindex = workingccb_startindex;
3228 } else {
3229 ccb = NULL;
3230 }
3231
3232 mutex_exit(&acb->workingQ_mutex);
3233 return (ccb);
3234 }
3235
3236
3237
3238 static int
3239 arcmsr_seek_cmd2abort(struct ACB *acb,
3240 struct scsi_pkt *abortpkt) {
3241
3242 struct CCB *ccb;
3243 uint32_t intmask_org = 0;
3244 int i = 0;
3245
3246 acb->num_aborts++;
3247
3248 if (abortpkt == NULL) {
3249 /*
3250 * if abortpkt is NULL, the upper layer needs us
3251 * to abort all commands
3252 */
3253 if (acb->ccboutstandingcount != 0) {
3254 /* disable all outbound interrupt */
3255 intmask_org = arcmsr_disable_allintr(acb);
3256 /* clear and abort all outbound posted Q */
3257 arcmsr_done4abort_postqueue(acb);
3258 /* talk to iop 331 outstanding command aborted */
3259 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3260 arcmsr_abort_hba_allcmd(acb);
3261 } else {
3262 arcmsr_abort_hbb_allcmd(acb);
3263 }
3264
3265 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3266 ccb = acb->pccb_pool[i];
3267 if (ccb->startdone == ARCMSR_CCB_START) {
3268 /*
3269 * this ccb will complete at
3270 * hwinterrupt
3271 */
3272 ccb->startdone = ARCMSR_CCB_ABORTED;
3273 ccb->pkt->pkt_reason = CMD_ABORTED;
3274 ccb->pkt->pkt_statistics |=
3275 STAT_ABORTED;
3276 arcmsr_ccb_complete(ccb, 1);
3277 }
3278 }
3279 /*
3280 * enable outbound Post Queue, outbound
3281 * doorbell Interrupt
3282 */
3283 arcmsr_enable_allintr(acb, intmask_org);
3284 }
3285 return (DDI_SUCCESS);
3286 }
3287
3288 /*
3289  * The upper layer does the abort-command locking
3290  * just prior to calling us.
3291  * First determine if we currently own this command.
3292  * Start by searching the device queue. If the
3293  * command is not found there at all, we do not own
3294  * it, so report failure.
3295  */
3296
3297 if (acb->ccboutstandingcount != 0) {
3298 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3299 ccb = acb->pccb_pool[i];
3300 if (ccb->startdone == ARCMSR_CCB_START) {
3301 if (ccb->pkt == abortpkt) {
3302 ccb->startdone =
3303 ARCMSR_CCB_ABORTED;
3304 goto abort_outstanding_cmd;
3305 }
3306 }
3307 }
3308 }
3309
3310 return (DDI_FAILURE);
3311
3312 abort_outstanding_cmd:
3313 /* disable all outbound interrupts */
3314 intmask_org = arcmsr_disable_allintr(acb);
3315 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3316 arcmsr_polling_hba_ccbdone(acb, ccb);
3317 } else {
3318 arcmsr_polling_hbb_ccbdone(acb, ccb);
3319 }
3320
3321 /* enable outbound Post Queue, outbound doorbell Interrupt */
3322 arcmsr_enable_allintr(acb, intmask_org);
3323 return (DDI_SUCCESS);
3324 }
3325
3326
3327
3328 static void
3329 arcmsr_pcidev_disattach(struct ACB *acb) {
3330
3331 struct CCB *ccb;
3332 int i = 0;
3333
3334 /* disable all outbound interrupts */
3335 (void) arcmsr_disable_allintr(acb);
3336 /* stop adapter background rebuild */
3337 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3338 arcmsr_stop_hba_bgrb(acb);
3339 arcmsr_flush_hba_cache(acb);
3340 } else {
3341 arcmsr_stop_hbb_bgrb(acb);
3342 arcmsr_flush_hbb_cache(acb);
3343 }
3344 /* abort all outstanding commands */
3345 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3346 acb->acb_flags &= ~ACB_F_IOP_INITED;
3347
3348 if (acb->ccboutstandingcount != 0) {
3349 /* clear and abort all outbound posted Q */
3350 arcmsr_done4abort_postqueue(acb);
3351 /* talk to iop 331 outstanding command aborted */
3352 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3353 arcmsr_abort_hba_allcmd(acb);
3354 } else {
3355 arcmsr_abort_hbb_allcmd(acb);
3356 }
3357
3358 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3359 ccb = acb->pccb_pool[i];
3360 if (ccb->startdone == ARCMSR_CCB_START) {
3361 ccb->startdone = ARCMSR_CCB_ABORTED;
3362 ccb->pkt->pkt_reason = CMD_ABORTED;
3363 ccb->pkt->pkt_statistics |= STAT_ABORTED;
3364 arcmsr_ccb_complete(ccb, 1);
3365 }
3366 }
3367 }
3368 }
3369
3370 /* get firmware miscellaneous data */
3371 static void
3372 arcmsr_get_hba_config(struct ACB *acb) {
3373
3374 struct HBA_msgUnit *phbamu;
3375
3376 char *acb_firm_model;
3377 char *acb_firm_version;
3378 char *acb_device_map;
3379 char *iop_firm_model;
3380 char *iop_firm_version;
3381 char *iop_device_map;
3382 int count;
3383
3384 phbamu = (struct HBA_msgUnit *)acb->pmu;
3385 acb_firm_model = acb->firm_model;
3386 acb_firm_version = acb->firm_version;
3387 acb_device_map = acb->device_map;
3388 /* firm_model, 15 */
3389 iop_firm_model = (char *)
3390 (&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
3391 /* firm_version, 17 */
3392 iop_firm_version =
3393 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
3394
3395 /* device_map, 21 */
3396 iop_device_map =
3397 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
3398
3399 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3400 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3401
3402 if (!arcmsr_hba_wait_msgint_ready(acb))
3403 cmn_err(CE_CONT,
3404 "arcmsr%d: timeout while waiting for adapter firmware "
3405 "miscellaneous data",
3406 ddi_get_instance(acb->dev_info));
3407
3408 count = 8;
3409 while (count) {
3410 *acb_firm_model =
3411 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
3412 acb_firm_model++;
3413 iop_firm_model++;
3414 count--;
3415 }
3416
3417 count = 16;
3418 while (count) {
3419 *acb_firm_version =
3420 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
3421 acb_firm_version++;
3422 iop_firm_version++;
3423 count--;
3424 }
3425
3426 count = 16;
3427 while (count) {
3428 *acb_device_map =
3429 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
3430 acb_device_map++;
3431 iop_device_map++;
3432 count--;
3433 }
3434
3435 cmn_err(CE_CONT, "arcmsr%d: ARECA RAID FIRMWARE VERSION %s",
3436 ddi_get_instance(acb->dev_info), acb->firm_version);
3437
3438 /* firm_request_len, 1 */
3439 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3440 &phbamu->msgcode_rwbuffer[1]);
3441 /* firm_numbers_queue, 2 */
3442 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3443 &phbamu->msgcode_rwbuffer[2]);
3444 /* firm_sdram_size, 3 */
3445 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3446 &phbamu->msgcode_rwbuffer[3]);
3447 /* firm_ide_channels, 4 */
3448 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3449 &phbamu->msgcode_rwbuffer[4]);
3450 }
3451
3452 /* get firmware miscellaneous data */
3453 static void
3454 arcmsr_get_hbb_config(struct ACB *acb) {
3455
3456 struct HBB_msgUnit *phbbmu;
3457 char *acb_firm_model;
3458 char *acb_firm_version;
3459 char *acb_device_map;
3460 char *iop_firm_model;
3461 char *iop_firm_version;
3462 char *iop_device_map;
3463 int count;
3464
3465 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3466 acb_firm_model = acb->firm_model;
3467 acb_firm_version = acb->firm_version;
3468 acb_device_map = acb->device_map;
3469 /* firm_model, 15 */
3470 iop_firm_model = (char *)
3471 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
3472 /* firm_version, 17 */
3473 iop_firm_version = (char *)
3474 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
3475 /* device_map, 21 */
3476 iop_device_map = (char *)
3477 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
3478
3479 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3480 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3481 ARCMSR_MESSAGE_GET_CONFIG);
3482
3483 if (!arcmsr_hbb_wait_msgint_ready(acb))
3484 cmn_err(CE_CONT,
3485 "arcmsr%d: timeout while waiting for adapter firmware "
3486 "miscellaneous data",
3487 ddi_get_instance(acb->dev_info));
3488
3489 count = 8;
3490 while (count) {
3491 *acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle1,
3492 iop_firm_model);
3493 acb_firm_model++;
3494 iop_firm_model++;
3495 count--;
3496 }
3497
3498 count = 16;
3499 while (count) {
3500 *acb_firm_version = CHIP_REG_READ8(acb->reg_mu_acc_handle1,
3501 iop_firm_version);
3502 acb_firm_version++;
3503 iop_firm_version++;
3504 count--;
3505 }
3506 count = 16;
3507 while (count) {
3508 *acb_device_map =
3509 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_device_map);
3510 acb_device_map++;
3511 iop_device_map++;
3512 count--;
3513 }
3514
3515 cmn_err(CE_CONT, "arcmsr%d: ARECA RAID FIRMWARE VERSION %s",
3516 ddi_get_instance(acb->dev_info), acb->firm_version);
3517
3518 /* firm_request_len, 1 */
3519 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3520 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
3521 /* firm_numbers_queue, 2 */
3522 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3523 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
3524 /* firm_sdram_size, 3 */
3525 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3526 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
3527 /* firm_ide_channels, 4 */
3528 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3529 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
3530 }
3531
3532
3533
3534 /* start background rebuild */
3535 static void
3536 arcmsr_start_hba_bgrb(struct ACB *acb) {
3537
3538 struct HBA_msgUnit *phbamu;
3539
3540 phbamu = (struct HBA_msgUnit *)acb->pmu;
3541
3542 acb->acb_flags |= ACB_F_MSG_START_BGRB;
3543 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3544 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3545
3546 if (!arcmsr_hba_wait_msgint_ready(acb))
3547 cmn_err(CE_WARN,
3548 "arcmsr%d: timeout while waiting for background "
3549 "rebuild to start",
3550 ddi_get_instance(acb->dev_info));
3551 }
3552
3553
3554 static void
3555 arcmsr_start_hbb_bgrb(struct ACB *acb) {
3556
3557 struct HBB_msgUnit *phbbmu;
3558
3559 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3560
3561 acb->acb_flags |= ACB_F_MSG_START_BGRB;
3562 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3563 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3564 ARCMSR_MESSAGE_START_BGRB);
3565
3566 if (!arcmsr_hbb_wait_msgint_ready(acb))
3567 cmn_err(CE_WARN,
3568 "arcmsr%d: timeout while waiting for background "
3569 "rebuild to start",
3570 ddi_get_instance(acb->dev_info));
3571 }
3572
3573
3574 static void
3575 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb) {
3576
3577 struct HBA_msgUnit *phbamu;
3578 struct CCB *ccb;
3579 uint32_t flag_ccb, outbound_intstatus;
3580 uint32_t poll_ccb_done = 0;
3581 uint32_t poll_count = 0;
3582
3583
3584 phbamu = (struct HBA_msgUnit *)acb->pmu;
3585
3586 polling_ccb_retry:
3587 poll_count++;
3588 outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3589 &phbamu->outbound_intstatus) & acb->outbound_int_enable;
3590
3591 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
3592 outbound_intstatus); /* clear interrupt */
3593
3594 /* Use correct offset and size for syncing */
3595 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
3596 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
3597 return;
3598
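/*
 * Poll the outbound queue; 0xFFFFFFFF means the FIFO is empty.
 * While waiting for a specific ccb, each empty pass sleeps 25 ms
 * and the loop gives up after poll_count exceeds 100 (roughly
 * 2.5 seconds of waiting).
 */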
3599 /*LINTED*/
3600 while (1) {
3601 if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3602 &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
3603 if (poll_ccb_done) {
3604 /* no more completed ccbs in the chip FIFO */
3605 break;
3606 } else {
3607 drv_usecwait(25000);
3608 if ((poll_count > 100) && (poll_ccb != NULL)) {
3609 break;
3610 }
3611 if (acb->ccboutstandingcount == 0) {
3612 break;
3613 }
3614 goto polling_ccb_retry;
3615 }
3616 }
3617
3618 /* check if the command completed without error */
3619 ccb = (struct CCB *)(acb->vir2phy_offset +
3620 (flag_ccb << 5)); /* frame must be 32 bytes aligned */
3621 if (poll_ccb != NULL)
3622 poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
3623
3624 if ((ccb->acb != acb) ||
3625 (ccb->startdone != ARCMSR_CCB_START)) {
3626 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
3627 ccb->pkt->pkt_reason = CMD_ABORTED;
3628 ccb->pkt->pkt_statistics |= STAT_ABORTED;
3629 arcmsr_ccb_complete(ccb, 1);
3630 continue;
3631 }
3632 cmn_err(CE_WARN, "arcmsr%d: polling op got "
3633 "unexpected ccb command done",
3634 ddi_get_instance(acb->dev_info));
3635 continue;
3636 }
3637 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
3638 } /* drain reply FIFO */
3639 }
3640
3641
3642 static void
3643 arcmsr_polling_hbb_ccbdone(struct ACB *acb,
3644 struct CCB *poll_ccb) {
3645
3646 struct HBB_msgUnit *phbbmu;
3647 struct CCB *ccb;
3648 uint32_t flag_ccb;
3649 uint32_t poll_ccb_done = 0;
3650 uint32_t poll_count = 0;
3651 int index;
3652
3653
3654 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3655
3656
3657 polling_ccb_retry:
3658 poll_count++;
3659 /* clear doorbell interrupt */
3660 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3661 &phbbmu->hbb_doorbell->iop2drv_doorbell,
3662 ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
3663
3664 /* Use correct offset and size for syncing */
3665 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
3666 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
3667 return;
3668
3669
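/*
 * Same polling scheme as the HBA path, except completions are
 * pulled from the done_qbuffer ring rather than a hardware FIFO.
 */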
3670 /*LINTED*/
3671 while (1) {
3672 index = phbbmu->doneq_index;
3673 if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
3674 if (poll_ccb_done) {
3675 /* no more completed ccbs in the chip FIFO */
3676 break;
3677 } else {
3678 drv_usecwait(25000);
3679 if ((poll_count > 100) && (poll_ccb != NULL))
3680 break;
3681 if (acb->ccboutstandingcount == 0)
3682 break;
3683 goto polling_ccb_retry;
3684 }
3685 }
3686
3687 phbbmu->done_qbuffer[index] = 0;
3688 index++;
3689 /* if last index number set it to 0 */
3690 index %= ARCMSR_MAX_HBB_POSTQUEUE;
3691 phbbmu->doneq_index = index;
3692 /* check if command done with no error */
3693 /* frame must be 32 bytes aligned */
        ccb = (struct CCB *)(acb->vir2phy_offset +
            (flag_ccb << 5));
        if (poll_ccb != NULL)
            poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
        if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
            if (ccb->startdone == ARCMSR_CCB_ABORTED) {
                ccb->pkt->pkt_reason = CMD_ABORTED;
                ccb->pkt->pkt_statistics |= STAT_ABORTED;
                arcmsr_ccb_complete(ccb, 1);
                continue;
            }
            cmn_err(CE_WARN, "arcmsr%d: polling op got "
                "unexpected ccb command done",
                ddi_get_instance(acb->dev_info));
            continue;
        }
        arcmsr_report_ccb_state(acb, ccb, flag_ccb);
    } /* drain reply FIFO */
}


/*
 * Function: arcmsr_tran_start(9E)
 * Description: Transport the command in pktp to the target device.
 *		The command is not finished when this returns, only
 *		sent to the target; arcmsr_interrupt will call
 *		(*pktp->pkt_comp)(pktp) when the target device is done.
 *
 * Input:	struct scsi_address *ap, struct scsi_pkt *pktp
 * Output:	TRAN_ACCEPT if the pkt is valid and the driver is not busy
 *		TRAN_BUSY if the driver is busy
 *		TRAN_BADPKT if the pkt is invalid
 */
static int
arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) {

    struct ACB *acb;
    struct CCB *ccb;
    int target = ap->a_target;
    int lun = ap->a_lun;


    acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
    ccb = pkt->pkt_ha_private;

    if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
        (ccb->ccb_flags & DDI_DMA_CONSISTENT))
        (void) ddi_dma_sync(ccb->pkt_dma_handle, ccb->pkt_dma_offset,
            ccb->pkt_dma_len, DDI_DMA_SYNC_FORDEV);


    if (ccb->startdone == ARCMSR_CCB_UNBUILD)
        arcmsr_build_ccb(ccb);

    if (acb->acb_flags & ACB_F_BUS_RESET) {
        cmn_err(CE_CONT,
            "arcmsr%d: bus reset in progress, pkt returned "
            "with CMD_RESET",
            ddi_get_instance(acb->dev_info));
        pkt->pkt_reason = CMD_RESET;
        pkt->pkt_statistics |= STAT_BUS_RESET;
        pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
            STATE_SENT_CMD | STATE_GOT_STATUS);
        if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
            (pkt->pkt_state & STATE_XFERRED_DATA))
            (void) ddi_dma_sync(ccb->pkt_dma_handle,
                ccb->pkt_dma_offset, ccb->pkt_dma_len,
                DDI_DMA_SYNC_FORCPU);

        scsi_hba_pkt_comp(pkt);

        return (TRAN_ACCEPT);
    }

    if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
        uint8_t block_cmd;

        block_cmd = pkt->pkt_cdbp[0] & 0x0f;

        if (block_cmd == 0x08 || block_cmd == 0x0a) {
            cmn_err(CE_CONT,
                "arcmsr%d: block read/write command while raid "
                "volume missing (cmd %02x for target %d lun %d)",
                ddi_get_instance(acb->dev_info),
                block_cmd, target, lun);
            pkt->pkt_reason = CMD_TIMEOUT;
            pkt->pkt_statistics |= STAT_TIMEOUT;
            pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
                STATE_SENT_CMD | STATE_GOT_STATUS);

            if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
                (pkt->pkt_state & STATE_XFERRED_DATA))
                (void) ddi_dma_sync(ccb->pkt_dma_handle,
                    ccb->pkt_dma_offset, ccb->pkt_dma_len,
                    DDI_DMA_SYNC_FORCPU);


            if (pkt->pkt_comp)
                (*pkt->pkt_comp)(pkt);


            return (TRAN_ACCEPT);
        }
    }


    /* IMPORTANT: Target 16 is a virtual device for iop message transfer */
    if (target == 16) {

        struct buf *bp = ccb->bp;
        uint8_t scsicmd = pkt->pkt_cdbp[0];

        switch (scsicmd) {
        case SCMD_INQUIRY: {
            if (lun != 0) {
                ccb->pkt->pkt_reason = CMD_TIMEOUT;
                ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
                arcmsr_ccb_complete(ccb, 0);
                return (TRAN_ACCEPT);
            }

            if (bp && bp->b_un.b_addr && bp->b_bcount) {
                uint8_t inqdata[36];

                /* zero so reserved fields read back as 0 */
                bzero(inqdata, sizeof (inqdata));
                /* EVPD and page code are not supported */
                if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
                    inqdata[1] = 0xFF;
                    inqdata[2] = 0x00;
                } else {
                    /* Periph Qualifier & Periph Dev Type */
                    inqdata[0] = DTYPE_PROCESSOR;
                    /* rem media bit & Dev Type Modifier */
                    inqdata[1] = 0;
                    /* ISO, ECMA, & ANSI versions */
                    inqdata[2] = 0;
                    /* length of additional data */
                    inqdata[4] = 31;
                    /* Vendor Identification */
                    bcopy("Areca   ", &inqdata[8], VIDLEN);
                    /* Product Identification */
                    bcopy("RAID controller ", &inqdata[16],
                        PIDLEN);
                    /* Product Revision */
                    bcopy("R001", &inqdata[32], REVLEN);
                }
                if (bp->b_flags & (B_PHYS | B_PAGEIO))
                    bp_mapin(bp);

                (void) memcpy(bp->b_un.b_addr,
                    inqdata, sizeof (inqdata));
                ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
            }
            arcmsr_ccb_complete(ccb, 0);
            return (TRAN_ACCEPT);
        }
        case SCMD_WRITE_BUFFER:
        case SCMD_READ_BUFFER: {
            if (arcmsr_iop_message_xfer(acb, pkt)) {
                /* error just for retry */
                ccb->pkt->pkt_reason = CMD_TRAN_ERR;
                ccb->pkt->pkt_statistics |= STAT_TERMINATED;
            }
            ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
            arcmsr_ccb_complete(ccb, 0);
            return (TRAN_ACCEPT);
        }
        default:
            ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
            arcmsr_ccb_complete(ccb, 0);
            return (TRAN_ACCEPT);
        }
    }

    if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
        cmn_err(CE_CONT,
            "arcmsr%d: too many outstanding commands (%d >= %d)",
            ddi_get_instance(acb->dev_info),
            acb->ccboutstandingcount,
            ARCMSR_MAX_OUTSTANDING_CMD);
        return (TRAN_BUSY);
    } else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
        cmn_err(CE_CONT,
            "arcmsr%d: post failure, ccboutstandingcount = %d",
            ddi_get_instance(acb->dev_info),
            acb->ccboutstandingcount);
        return (TRAN_BUSY);
    }

    return (TRAN_ACCEPT);
}

/*
 * Function: arcmsr_tran_abort(9E)
 *		SCSA interface routine to abort pkt(s) in progress.
 *		Aborts the pkt specified.  If NULL pkt, aborts ALL pkts.
 * Output:	Returns 1 on success
 *		Returns 0 on failure
 */
static int
arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt) {

    struct ACB *acb;
    int return_code;

    acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;


    cmn_err(CE_WARN,
        "arcmsr%d: tran_abort called for target %d lun %d",
        ddi_get_instance(acb->dev_info), ap->a_target, ap->a_lun);

    while (acb->ccboutstandingcount != 0) {
        drv_usecwait(10000);
    }

    mutex_enter(&acb->acb_mutex);
    return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
    mutex_exit(&acb->acb_mutex);

    if (return_code != DDI_SUCCESS) {
        cmn_err(CE_WARN,
            "arcmsr%d: abort command failed for target %d lun %d",
            ddi_get_instance(acb->dev_info),
            ap->a_target, ap->a_lun);
        return (0);
    }

    return (1);
}


/*
 * Function: arcmsr_tran_reset(9E)
 *		SCSA interface routine to perform scsi resets on either
 *		a specified target or the bus (default).
 * Output:	Returns 1 on success
 *		Returns 0 on failure
 */
static int
arcmsr_tran_reset(struct scsi_address *ap, int level) {

    struct ACB *acb;
    int return_code = 1;
    int retry = 0;


    /* Are we in the middle of dumping core? */
    if (ddi_in_panic())
        return (return_code);

    acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

    cmn_err(CE_WARN, "arcmsr%d: tran reset (level 0x%x) called "
        "for target %d lun %d",
        ddi_get_instance(acb->dev_info), level,
        ap->a_target, ap->a_lun);
    mutex_enter(&acb->acb_mutex);

    while ((acb->ccboutstandingcount > 0) && (retry < 400)) {
        (void) arcmsr_interrupt((caddr_t)acb);
        drv_usecwait(25000);
        retry++;
    }

    switch (level) {
    case RESET_ALL:		/* level 1 */
        acb->num_resets++;
        acb->acb_flags |= ACB_F_BUS_RESET;
        if (acb->timeout_count)
            arcmsr_iop_reset(acb);
        acb->acb_flags &= ~ACB_F_BUS_RESET;
        return_code = 0;
        break;
    case RESET_TARGET:	/* level 0 */
        cmn_err(CE_WARN, "arcmsr%d: target reset not supported",
            ddi_get_instance(acb->dev_info));
        return_code = 0;
        break;
    default:
        return_code = 0;
    }

    mutex_exit(&acb->acb_mutex);
    return (return_code);
}


static void
arcmsr_log(struct ACB *acb, int level, char *fmt, ...) {

    char buf[256];
    va_list ap;

    va_start(ap, fmt);
    /* bound the formatted output to the buffer */
    (void) vsnprintf(buf, sizeof (buf), fmt, ap);
    va_end(ap);
    scsi_log(acb ? acb->dev_info : NULL, "arcmsr", level, "%s", buf);
}


static void
arcmsr_iop2drv_data_wrote_handle(struct ACB *acb) {

    struct QBUFFER *prbuffer;
    uint8_t *pQbuffer;
    uint8_t *iop_data;
    int my_empty_len, iop_len;
    int rqbuf_firstidx, rqbuf_lastidx;

    /* check whether this IOP data would overflow our rqbuffer */
    rqbuf_lastidx = acb->rqbuf_lastidx;
    rqbuf_firstidx = acb->rqbuf_firstidx;
    prbuffer = arcmsr_get_iop_rqbuffer(acb);
    iop_data = (uint8_t *)prbuffer->data;
    iop_len = prbuffer->data_len;
    my_empty_len = (rqbuf_firstidx - rqbuf_lastidx - 1) &
        (ARCMSR_MAX_QBUFFER - 1);

    if (my_empty_len >= iop_len) {
        while (iop_len > 0) {
            pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
            (void) memcpy(pQbuffer, iop_data, 1);
            rqbuf_lastidx++;
            /* wrap the index to 0 at the end of the ring */
            rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
            iop_data++;
            iop_len--;
        }
        acb->rqbuf_lastidx = rqbuf_lastidx;
        arcmsr_iop_message_read(acb);
        /* signature, let the IOP know the data has been read */
    } else {
        acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
    }
}
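
/*
 * The free-space test above is standard power-of-two ring-buffer
 * arithmetic: with ARCMSR_MAX_QBUFFER a power of two, the number of free
 * bytes between the read index (firstidx) and the write index (lastidx)
 * is (firstidx - lastidx - 1) & (ARCMSR_MAX_QBUFFER - 1).  A worked
 * example, assuming ARCMSR_MAX_QBUFFER is 4096:
 *
 *	firstidx = 10, lastidx = 4090
 *	(10 - 4090 - 1) & 4095 = (-4081) & 4095 = 15 free bytes
 *
 * One slot is always sacrificed so that "full" and "empty" rings can be
 * told apart.
 */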

static void
arcmsr_iop2drv_data_read_handle(struct ACB *acb) {

    acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
    /*
     * Check if there are any mail packets queued from the user-space
     * program in our post bag; if so, now is the time to send them to
     * Areca's firmware.
     */

    if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {

        uint8_t *pQbuffer;
        struct QBUFFER *pwbuffer;
        uint8_t *iop_data;
        int allxfer_len = 0;

        acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
        pwbuffer = arcmsr_get_iop_wqbuffer(acb);
        iop_data = (uint8_t *)pwbuffer->data;

        while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
            (allxfer_len < 124)) {
            pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
            (void) memcpy(iop_data, pQbuffer, 1);
            acb->wqbuf_firstidx++;
            /* wrap the index to 0 at the end of the ring */
            acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
            iop_data++;
            allxfer_len++;
        }
        pwbuffer->data_len = allxfer_len;
        /*
         * Push the inbound doorbell to tell the IOP the data has
         * been written; the reply arrives on a later hardware
         * interrupt, gating the next Qbuffer post.
         */
        arcmsr_iop_message_wrote(acb);
    }

    if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
        acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
}


static void
arcmsr_hba_doorbell_isr(struct ACB *acb) {

    uint32_t outbound_doorbell;
    struct HBA_msgUnit *phbamu;

    phbamu = (struct HBA_msgUnit *)acb->pmu;

    /*
     * The doorbell has rung: check whether the firmware has any mail
     * for us to collect.  (It may be worth verifying here that
     * wrqbuffer_lock is held.)
     */

    outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
        &phbamu->outbound_doorbell);
    /* clear doorbell interrupt */
    CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
        &phbamu->outbound_doorbell, outbound_doorbell);

    if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
        arcmsr_iop2drv_data_wrote_handle(acb);


    if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
        arcmsr_iop2drv_data_read_handle(acb);
}


static void
arcmsr_hba_postqueue_isr(struct ACB *acb) {

    uint32_t flag_ccb;
    struct HBA_msgUnit *phbamu;


    phbamu = (struct HBA_msgUnit *)acb->pmu;

    /* areca cdb command done */
    /* Use correct offset and size for syncing */
    (void) ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
        DDI_DMA_SYNC_FORKERNEL);

    while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
        &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
        /* check if command done with no error */
        arcmsr_drain_donequeue(acb, flag_ccb);
    } /* drain reply FIFO */
}

static void
arcmsr_dr_handle(struct ACB *acb)
{
    char *acb_dev_map = (char *)acb->device_map;
    char *devicemap;
    int target, lun;
    char diff;
    int circ1;
    dev_info_t *dip;
    ddi_acc_handle_t reg;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
    {
        struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;

        devicemap = (char *)&phbamu->msgcode_rwbuffer[21];
        reg = acb->reg_mu_acc_handle0;
    }
    break;
    case ACB_ADAPTER_TYPE_B:
    {
        struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;

        devicemap = (char *)
            &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[21];
        reg = acb->reg_mu_acc_handle1;
    }
    break;
    default:
        /* unknown adapter type; devicemap/reg would be unset */
        return;
    }

    for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
        diff = (*acb_dev_map) ^ CHIP_REG_READ8(reg, devicemap);
        if (diff != 0) {
            char temp;

            *acb_dev_map = CHIP_REG_READ8(reg, devicemap);
            temp = *acb_dev_map;
            for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
                if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
                    ndi_devi_enter(acb->dev_info, &circ1);
                    (void) arcmsr_config_lun(acb, target,
                        lun, NULL);
                    ndi_devi_exit(acb->dev_info, circ1);
                } else if ((temp & 0x01) == 0 &&
                    (diff & 0x01) == 1) {
                    dip = arcmsr_find_child(acb, target,
                        lun);
                    if (dip != NULL) {
                        (void) ndi_devi_offline(dip,
                            NDI_DEVI_REMOVE);
                        cmn_err(CE_NOTE, "arcmsr%d: "
                            "T%dL%d offlined",
                            ddi_get_instance(
                            acb->dev_info), target,
                            lun);
                    }
                }
                temp >>= 1;
                diff >>= 1;
            }
        }
        devicemap++;
        acb_dev_map++;
    }
}
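
/*
 * A sketch of the bitmap walk above: each byte of device_map holds one
 * LUN-presence bit per LUN of a target, so XORing the cached copy with
 * the copy the firmware just wrote yields the LUNs that changed state.
 * For example, cached 0b0011 vs. firmware 0b0110 gives diff 0b0101:
 * LUN 0 went away (new bit 0, diff bit 1) and LUN 2 appeared (new bit 1,
 * diff bit 1), which is exactly what the (temp & 1)/(diff & 1) tests
 * decide as both values are shifted down one bit per iteration.
 */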

static void
arcmsr_hba_message_isr(struct ACB *acb)
{
    struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
    uint32_t *signature = (&phbamu->msgcode_rwbuffer[0]);
    uint32_t outbound_message;

    CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
        ARCMSR_MU_OUTBOUND_MESSAGE0_INT);

    outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
        signature);
    if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
        if ((ddi_taskq_dispatch(acb->taskq, (void (*)(void *))
            arcmsr_dr_handle, acb, DDI_NOSLEEP)) != DDI_SUCCESS)
            cmn_err(CE_WARN, "DR task start failed");
}

static void
arcmsr_hbb_message_isr(struct ACB *acb)
{
    struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
    uint32_t *signature = (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0]);
    uint32_t outbound_message;

    /* clear interrupts */
    CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
        &phbbmu->hbb_doorbell->iop2drv_doorbell,
        ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
    CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
        &phbbmu->hbb_doorbell->drv2iop_doorbell,
        ARCMSR_DRV2IOP_END_OF_INTERRUPT);

    outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
        signature);
    if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
        if ((ddi_taskq_dispatch(acb->taskq,
            (void (*)(void *))arcmsr_dr_handle, acb,
            DDI_NOSLEEP)) != DDI_SUCCESS) {
            cmn_err(CE_WARN, "DR task start failed");
        }
}

static void
arcmsr_hbb_postqueue_isr(struct ACB *acb) {

    int index;
    uint32_t flag_ccb;
    struct HBB_msgUnit *phbbmu;

    phbbmu = (struct HBB_msgUnit *)acb->pmu;


    /* areca cdb command done */
    index = phbbmu->doneq_index;

    while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
        phbbmu->done_qbuffer[index] = 0;
        index++;
        /* wrap the index to 0 at the end of the ring */
        index %= ARCMSR_MAX_HBB_POSTQUEUE;
        phbbmu->doneq_index = index;
        /* check if command done with no error */
        arcmsr_drain_donequeue(acb, flag_ccb);
    } /* drain reply FIFO */
}


static uint_t
arcmsr_handle_hba_isr(struct ACB *acb) {

    uint32_t outbound_intstatus;
    struct HBA_msgUnit *phbamu;

    phbamu = (struct HBA_msgUnit *)acb->pmu;

    outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
        &phbamu->outbound_intstatus) & acb->outbound_int_enable;

    if (!outbound_intstatus)
        /* it must be a shared irq */
        return (DDI_INTR_UNCLAIMED);

    CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
        outbound_intstatus); /* clear interrupt */


    /* MU doorbell interrupts */
    if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
        arcmsr_hba_doorbell_isr(acb);

    /* MU post queue interrupts */
    if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
        arcmsr_hba_postqueue_isr(acb);

    if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
        arcmsr_hba_message_isr(acb);
    }

    return (DDI_INTR_CLAIMED);
}


static uint_t
arcmsr_handle_hbb_isr(struct ACB *acb) {

    uint32_t outbound_doorbell;
    struct HBB_msgUnit *phbbmu;


    phbbmu = (struct HBB_msgUnit *)acb->pmu;

    outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
        &phbbmu->hbb_doorbell->iop2drv_doorbell) &
        acb->outbound_int_enable;

    if (!outbound_doorbell)
        /* it must be a shared irq */
        return (DDI_INTR_UNCLAIMED);

    /* clear doorbell interrupt */
    CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
        &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
    /* wait a cycle */
    (void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
        &phbbmu->hbb_doorbell->iop2drv_doorbell);
    CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
        &phbbmu->hbb_doorbell->drv2iop_doorbell,
        ARCMSR_DRV2IOP_END_OF_INTERRUPT);

    /* MU ioctl transfer doorbell interrupts */
    if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
        arcmsr_iop2drv_data_wrote_handle(acb);

    if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
        arcmsr_iop2drv_data_read_handle(acb);

    /* MU post queue interrupts */
    if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
        arcmsr_hbb_postqueue_isr(acb);

    /* MU message interrupt */
    if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
        arcmsr_hbb_message_isr(acb);
    }

    return (DDI_INTR_CLAIMED);
}


static uint_t
arcmsr_interrupt(caddr_t arg) {


    struct ACB *acb = (struct ACB *)(intptr_t)arg;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
        return (arcmsr_handle_hba_isr(acb));
    case ACB_ADAPTER_TYPE_B:
        return (arcmsr_handle_hbb_isr(acb));
    default:
        cmn_err(CE_WARN, "arcmsr%d: unknown adapter type (%d)",
            ddi_get_instance(acb->dev_info), acb->adapter_type);
        return (DDI_INTR_UNCLAIMED);
    }
}


static void
arcmsr_wait_firmware_ready(struct ACB *acb) {

    uint32_t firmware_state;

    firmware_state = 0;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
    {
        struct HBA_msgUnit *phbamu;

        phbamu = (struct HBA_msgUnit *)acb->pmu;
        do {
            firmware_state =
                CHIP_REG_READ32(acb->reg_mu_acc_handle0,
                &phbamu->outbound_msgaddr1);
        } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
            == 0);
    }
    break;
    case ACB_ADAPTER_TYPE_B:
    {
        struct HBB_msgUnit *phbbmu;

        phbbmu = (struct HBB_msgUnit *)acb->pmu;
        do {
            firmware_state =
                CHIP_REG_READ32(acb->reg_mu_acc_handle0,
                &phbbmu->hbb_doorbell->iop2drv_doorbell);
        } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbbmu->hbb_doorbell->drv2iop_doorbell,
            ARCMSR_DRV2IOP_END_OF_INTERRUPT);
    }
    break;
    }
}

static void
arcmsr_clear_doorbell_queue_buffer(struct ACB *acb) {

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
    {
        struct HBA_msgUnit *phbamu;
        uint32_t outbound_doorbell;

        phbamu = (struct HBA_msgUnit *)acb->pmu;
        /* empty the doorbell Qbuffer if the doorbell has rung */
        outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
            &phbamu->outbound_doorbell);
        /* clear doorbell interrupt */
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbamu->outbound_doorbell, outbound_doorbell);
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbamu->inbound_doorbell,
            ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
    }
    break;
    case ACB_ADAPTER_TYPE_B:
    {
        struct HBB_msgUnit *phbbmu;

        phbbmu = (struct HBB_msgUnit *)acb->pmu;

        /* clear interrupt and message state */
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbbmu->hbb_doorbell->iop2drv_doorbell,
            ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbbmu->hbb_doorbell->drv2iop_doorbell,
            ARCMSR_DRV2IOP_DATA_READ_OK);
        /* let the IOP know the data has been read */
    }
    break;
    }
}


static uint32_t
arcmsr_iop_confirm(struct ACB *acb) {

    unsigned long ccb_phyaddr;
    uint32_t ccb_phyaddr_hi32;

    /*
     * We need to tell the IOP 331 the high 32 bits of the free CCB
     * area's physical address, if they are non-zero.
     */
    ccb_phyaddr = (unsigned long)acb->ccb_cookie.dmac_address;
    ccb_phyaddr_hi32 = (uint32_t)((ccb_phyaddr >> 16) >> 16);
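    /*
     * The double 16-bit shift (rather than a single >> 32) keeps the
     * expression well-defined when unsigned long is only 32 bits wide,
     * in which case ccb_phyaddr_hi32 simply evaluates to 0.
     */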

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
    {
        if (ccb_phyaddr_hi32 != 0) {
            struct HBA_msgUnit *phbamu;

            phbamu = (struct HBA_msgUnit *)acb->pmu;
            CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
                &phbamu->msgcode_rwbuffer[0],
                ARCMSR_SIGNATURE_SET_CONFIG);
            CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
                &phbamu->msgcode_rwbuffer[1], ccb_phyaddr_hi32);
            CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
                &phbamu->inbound_msgaddr0,
                ARCMSR_INBOUND_MESG0_SET_CONFIG);
            if (!arcmsr_hba_wait_msgint_ready(acb)) {
                cmn_err(CE_WARN,
                    "arcmsr%d: timeout setting ccb high "
                    "physical address",
                    ddi_get_instance(acb->dev_info));
                return (FALSE);
            }
        }
    }
    break;

    /* if adapter is type B, set window of "post command queue" */
    case ACB_ADAPTER_TYPE_B:
    {
        uint32_t post_queue_phyaddr;
        struct HBB_msgUnit *phbbmu;

        phbbmu = (struct HBB_msgUnit *)acb->pmu;
        phbbmu->postq_index = 0;
        phbbmu->doneq_index = 0;
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbbmu->hbb_doorbell->drv2iop_doorbell,
            ARCMSR_MESSAGE_SET_POST_WINDOW);

        if (!arcmsr_hbb_wait_msgint_ready(acb)) {
            cmn_err(CE_WARN,
                "arcmsr%d: timeout setting post command "
                "queue window",
                ddi_get_instance(acb->dev_info));
            return (FALSE);
        }

        post_queue_phyaddr = ccb_phyaddr +
            ARCMSR_MAX_FREECCB_NUM * sizeof (struct CCB) +
            ARCOFFSET(struct HBB_msgUnit, post_qbuffer);
        /* driver "set config" signature */
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
            &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
            ARCMSR_SIGNATURE_SET_CONFIG);
        /* normally zero */
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
            &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
            ccb_phyaddr_hi32);
        /* postQ address; the queue is (256+8)*4 bytes */
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
            &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
            post_queue_phyaddr);
        /* doneQ address; the queue is (256+8)*4 bytes */
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
            &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
            post_queue_phyaddr + 1056);
        /* ccb maxQ size, (256+8)*4 = 1056 bytes */
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
            &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbbmu->hbb_doorbell->drv2iop_doorbell,
            ARCMSR_MESSAGE_SET_CONFIG);

        if (!arcmsr_hbb_wait_msgint_ready(acb)) {
            cmn_err(CE_WARN,
                "arcmsr%d: timeout setting command queue window",
                ddi_get_instance(acb->dev_info));
            return (FALSE);
        }
        CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
            &phbbmu->hbb_doorbell->drv2iop_doorbell,
            ARCMSR_MESSAGE_START_DRIVER_MODE);

        if (!arcmsr_hbb_wait_msgint_ready(acb)) {
            cmn_err(CE_WARN,
                "arcmsr%d: timeout in 'start driver mode'",
                ddi_get_instance(acb->dev_info));
            return (FALSE);
        }
    }
    break;
    }
    return (TRUE);
}
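
/*
 * Layout note for the type B "set config" exchange above: the post queue
 * lives in the same DMA area as the CCB pool, immediately after the
 * ARCMSR_MAX_FREECCB_NUM CCB frames, and the done queue follows 1056
 * bytes later.  1056 is (256 + 8) * 4, i.e. 256 queue entries plus 8
 * words of header at 4 bytes each, which is also the queue size handed
 * to the firmware in msgcode_rwbuffer[4].
 */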

/*
 * ONLY used for adapter type B
 */
static void
arcmsr_enable_eoi_mode(struct ACB *acb) {

    struct HBB_msgUnit *phbbmu;

    phbbmu = (struct HBB_msgUnit *)acb->pmu;

    CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
        &phbbmu->hbb_doorbell->drv2iop_doorbell,
        ARCMSR_MESSAGE_ACTIVE_EOI_MODE);

    if (!arcmsr_hbb_wait_msgint_ready(acb))
        cmn_err(CE_WARN,
            "arcmsr%d (adapter type B): "
            "'iop enable eoi mode' timeout",
            ddi_get_instance(acb->dev_info));
}

/* start background rebuild */
static void
arcmsr_iop_init(struct ACB *acb) {

    uint32_t intmask_org;

    /* disable all outbound interrupts */
    intmask_org = arcmsr_disable_allintr(acb);
    arcmsr_wait_firmware_ready(acb);
    (void) arcmsr_iop_confirm(acb);

    /* start background rebuild */
    if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
        arcmsr_get_hba_config(acb);
        arcmsr_start_hba_bgrb(acb);
    } else {
        arcmsr_get_hbb_config(acb);
        arcmsr_start_hbb_bgrb(acb);
    }

    /* empty the doorbell Qbuffer if the doorbell rang */
    arcmsr_clear_doorbell_queue_buffer(acb);

    if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
        arcmsr_enable_eoi_mode(acb);

    /* enable outbound post queue and outbound doorbell interrupts */
    arcmsr_enable_allintr(acb, intmask_org);
    acb->acb_flags |= ACB_F_IOP_INITED;
}


static int
arcmsr_initialize(struct ACB *acb) {

    struct CCB *pccb_tmp;
    size_t allocated_length;
    uint16_t wval;
    uint32_t wlval;
    uint_t intmask_org, count;
    caddr_t arcmsr_ccbs_area;
    unsigned long ccb_phyaddr;
    int32_t dma_sync_size;
    int i, id, lun;

    acb->irq = pci_config_get8(acb->pci_acc_handle,
        ARCMSR_PCI2PCI_PRIMARY_INTERRUPT_LINE_REG);
    wlval = pci_config_get32(acb->pci_acc_handle, 0);
    wval = (uint16_t)((wlval >> 16) & 0xffff);

    if (wval == PCI_DEVICE_ID_ARECA_1201) {
        uint32_t *iop_mu_regs_map0;
        uint32_t *iop_mu_regs_map1;
        struct CCB *freeccb;
        struct HBB_msgUnit *phbbmu;

        acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
        dma_sync_size = (ARCMSR_MAX_FREECCB_NUM *
            sizeof (struct CCB) + 0x20) +
            sizeof (struct HBB_msgUnit);


        /* Allocate memory for the ccb */
        if ((i = ddi_dma_alloc_handle(acb->dev_info,
            &arcmsr_ccb_attr, DDI_DMA_SLEEP, NULL,
            &acb->ccbs_pool_handle)) != DDI_SUCCESS) {
            switch (i) {
            case DDI_DMA_BADATTR:
                cmn_err(CE_WARN,
                    "arcmsr%d: ddi_dma_alloc_handle got "
                    "DDI_DMA_BADATTR",
                    ddi_get_instance(acb->dev_info));
                return (DDI_FAILURE);

            case DDI_DMA_NORESOURCES:
                cmn_err(CE_WARN, "arcmsr%d: "
                    "ddi_dma_alloc_handle got "
                    "DDI_DMA_NORESOURCES",
                    ddi_get_instance(acb->dev_info));
                return (DDI_FAILURE);
            }
            cmn_err(CE_WARN,
                "arcmsr%d: ddi_dma_alloc_handle got DDI_FAILURE",
                ddi_get_instance(acb->dev_info));
            return (DDI_FAILURE);
        }

        if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
            &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
            DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
            &allocated_length, &acb->ccbs_acc_handle)
            != DDI_SUCCESS) {
            cmn_err(CE_WARN,
                "arcmsr%d: ddi_dma_mem_alloc failed",
                ddi_get_instance(acb->dev_info));
            ddi_dma_free_handle(&acb->ccbs_pool_handle);
            return (DDI_FAILURE);
        }

        if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
            (caddr_t)arcmsr_ccbs_area, dma_sync_size,
            DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
            NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
            cmn_err(CE_WARN,
                "arcmsr%d: ddi_dma_addr_bind_handle failed",
                ddi_get_instance(acb->dev_info));
            ddi_dma_mem_free(&acb->ccbs_acc_handle);
            ddi_dma_free_handle(&acb->ccbs_pool_handle);
            return (DDI_FAILURE);
        }
        bzero(arcmsr_ccbs_area, dma_sync_size);
        freeccb = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
        acb->pmu = (struct msgUnit *)
            &freeccb[ARCMSR_MAX_FREECCB_NUM];
        phbbmu = (struct HBB_msgUnit *)acb->pmu;

        /* setup device register */
        if (ddi_regs_map_setup(acb->dev_info, 1,
            (caddr_t *)&iop_mu_regs_map0, 0,
            sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
            &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
            arcmsr_log(NULL, CE_WARN,
                "arcmsr%d: unable to map PCI device "
                "base0 address registers",
                ddi_get_instance(acb->dev_info));
            (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
            ddi_dma_mem_free(&acb->ccbs_acc_handle);
            ddi_dma_free_handle(&acb->ccbs_pool_handle);
            return (DDI_FAILURE);
        }

        /* ARCMSR_DRV2IOP_DOORBELL */
        phbbmu->hbb_doorbell =
            (struct HBB_DOORBELL *)iop_mu_regs_map0;
        if (ddi_regs_map_setup(acb->dev_info, 2,
            (caddr_t *)&iop_mu_regs_map1, 0,
            sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
            &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
            arcmsr_log(NULL, CE_WARN,
                "arcmsr%d: unable to map PCI device "
                "base1 address registers",
                ddi_get_instance(acb->dev_info));
            (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
            ddi_dma_mem_free(&acb->ccbs_acc_handle);
            ddi_dma_free_handle(&acb->ccbs_pool_handle);
            return (DDI_FAILURE);
        }

        /* ARCMSR_MSGCODE_RWBUFFER */
        phbbmu->hbb_rwbuffer =
            (struct HBB_RWBUFFER *)iop_mu_regs_map1;
    } else {
        uint32_t *iop_mu_regs_map0;

        acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
        dma_sync_size = ARCMSR_MAX_FREECCB_NUM *
            sizeof (struct CCB) + 0x20;
        if (ddi_regs_map_setup(acb->dev_info, 1,
            (caddr_t *)&iop_mu_regs_map0, 0,
            sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
            &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
            arcmsr_log(NULL, CE_WARN,
                "arcmsr%d: unable to map registers",
                ddi_get_instance(acb->dev_info));
            return (DDI_FAILURE);
        }

        if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
            DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
            DDI_SUCCESS) {
            switch (i) {
            case DDI_DMA_BADATTR:
                cmn_err(CE_WARN,
                    "arcmsr%d: ddi_dma_alloc_handle "
                    "got DDI_DMA_BADATTR",
                    ddi_get_instance(acb->dev_info));
                return (DDI_FAILURE);
            case DDI_DMA_NORESOURCES:
                cmn_err(CE_WARN, "arcmsr%d: "
                    "ddi_dma_alloc_handle got "
                    "DDI_DMA_NORESOURCES",
                    ddi_get_instance(acb->dev_info));
                return (DDI_FAILURE);
            }
            cmn_err(CE_WARN,
                "arcmsr%d: ddi_dma_alloc_handle failed",
                ddi_get_instance(acb->dev_info));
            return (DDI_FAILURE);
        }

        if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
            &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
            DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
            &allocated_length, &acb->ccbs_acc_handle)
            != DDI_SUCCESS) {
            cmn_err(CE_WARN, "arcmsr%d: ddi_dma_mem_alloc failed",
                ddi_get_instance(acb->dev_info));
            ddi_dma_free_handle(&acb->ccbs_pool_handle);
            return (DDI_FAILURE);
        }

        if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
            (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
            DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
            &count) != DDI_DMA_MAPPED) {
            cmn_err(CE_WARN, "arcmsr%d: ddi_dma_addr_bind_handle "
                "failed",
                ddi_get_instance(acb->dev_info));
            ddi_dma_mem_free(&acb->ccbs_acc_handle);
            ddi_dma_free_handle(&acb->ccbs_pool_handle);
            return (DDI_FAILURE);
        }
        bzero(arcmsr_ccbs_area, dma_sync_size);
        /* ioport base */
        acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
    }

    /* from this point on we no longer touch PCI configuration space */
    acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
        ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
    acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
    /* physical address of acb->pccb_pool */
    ccb_phyaddr = acb->ccb_cookie.dmac_address;

    if (((unsigned long)arcmsr_ccbs_area & 0x1F) != 0) {
        /* CCB addresses must be aligned on a 32-byte (0x20) boundary */
        arcmsr_ccbs_area = (caddr_t)((unsigned long)arcmsr_ccbs_area +
            (0x20 - ((unsigned long)arcmsr_ccbs_area & 0x1F)));
        ccb_phyaddr = (unsigned long)ccb_phyaddr +
            (0x20 - ((unsigned long)ccb_phyaddr & 0x1F));
    }
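    /*
     * A worked example of the alignment fix-up above, assuming the
     * allocation came back at a (hypothetical) address ending in 0x08:
     * the low five bits are 0x08, so 0x20 - 0x08 = 0x18 is added,
     * moving both the virtual and the physical address up to the next
     * 32-byte boundary while keeping the two in step.
     */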

    pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;

    for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
        pccb_tmp->cdb_shifted_phyaddr = ccb_phyaddr >> 5;
        pccb_tmp->acb = acb;
        acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
        ccb_phyaddr = ccb_phyaddr + sizeof (struct CCB);
        pccb_tmp++;
    }

    acb->vir2phy_offset = (unsigned long)pccb_tmp -
        (unsigned long)ccb_phyaddr;

    /* disable all outbound interrupts */
    intmask_org = arcmsr_disable_allintr(acb);

    if (!arcmsr_iop_confirm(acb)) {
        cmn_err(CE_WARN, "arcmsr%d: arcmsr_iop_confirm error",
            ddi_get_instance(acb->dev_info));
        ddi_dma_mem_free(&acb->ccbs_acc_handle);
        ddi_dma_free_handle(&acb->ccbs_pool_handle);
        return (DDI_FAILURE);
    }

    for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
        for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
            acb->devstate[id][lun] = ARECA_RAID_GONE;
        }
    }

    /* enable outbound post queue and outbound doorbell interrupts */
    arcmsr_enable_allintr(acb, intmask_org);

    return (0);
}

/*
 * Autoconfiguration support
 */
static int
arcmsr_parse_devname(char *devnm, int *tgt, int *lun)
{
    char devbuf[SCSI_MAXNAMELEN];
    char *addr;
    char *p, *tp, *lp;
    long num;

    /* Parse dev name and address */
    (void) strcpy(devbuf, devnm);
    addr = "";
    for (p = devbuf; *p != '\0'; p++) {
        if (*p == '@') {
            addr = p + 1;
            *p = '\0';
        } else if (*p == ':') {
            *p = '\0';
            break;
        }
    }

    /* Parse target and lun */
    for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
        if (*p == ',') {
            lp = p + 1;
            *p = '\0';
            break;
        }
    }
    if (tgt && tp) {
        if (ddi_strtol(tp, NULL, 0x10, &num))
            return (-1);
        *tgt = (int)num;
    }
    if (lun && lp) {
        if (ddi_strtol(lp, NULL, 0x10, &num))
            return (-1);
        *lun = (int)num;
    }
    return (0);
}
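
/*
 * Example of the parse above: a devnm such as "sd@2,1:a" is split at '@'
 * into the node name "sd" and the address "2,1" (the ":a" minor suffix
 * is dropped), and the address is then split at ',' into a hexadecimal
 * target "2" and lun "1".
 */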

static int
arcmsr_name_node(dev_info_t *dip, char *name, int len)
{
    int tgt, lun;

    tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
        DDI_PROP_DONTPASS, "target", -1);
    if (tgt == -1)
        return (DDI_FAILURE);
    lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
        DDI_PROP_DONTPASS, "lun", -1);
    if (lun == -1)
        return (DDI_FAILURE);

    (void) snprintf(name, len, "%x,%x", tgt, lun);
    return (DDI_SUCCESS);
}

static dev_info_t *
arcmsr_find_child(struct ACB *acb, uint16_t tgt, uint8_t lun)
{
    dev_info_t *child = NULL;
    char addr[SCSI_MAXNAMELEN];
    char tmp[MAXNAMELEN];

    (void) snprintf(addr, sizeof (addr), "%x,%x", tgt, lun);
    for (child = ddi_get_child(acb->dev_info);
        child; child = ddi_get_next_sibling(child)) {
        /* we don't care about non-persistent nodes */
        if (ndi_dev_is_persistent_node(child) == 0)
            continue;

        if (arcmsr_name_node(child, tmp, MAXNAMELEN) !=
            DDI_SUCCESS)
            continue;
        if (strcmp(addr, tmp) == 0)
            break;
    }
    return (child);
}

static int
arcmsr_config_child(struct ACB *acb, struct scsi_device *sd,
    dev_info_t **dipp)
{
    char *nodename = NULL;
    char **compatible = NULL;
    int ncompatible = 0;
    dev_info_t *ldip = NULL;
    int tgt = sd->sd_address.a_target;
    int lun = sd->sd_address.a_lun;
    int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
    int rval;

    scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
        NULL, &nodename, &compatible, &ncompatible);
    if (nodename == NULL) {
        cmn_err(CE_WARN,
            "found no compatible driver for T%dL%d", tgt, lun);
        rval = NDI_FAILURE;
        goto finish;
    }

    /* Create dev node */
    rval = ndi_devi_alloc(acb->dev_info, nodename, DEVI_SID_NODEID,
        &ldip);
    if (rval == NDI_SUCCESS) {
        if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
            != DDI_PROP_SUCCESS) {
            cmn_err(CE_WARN, "arcmsr%d: unable to create "
                "property for T%dL%d (target)",
                ddi_get_instance(acb->dev_info), tgt, lun);
            rval = NDI_FAILURE;
            goto finish;
        }
        if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
            != DDI_PROP_SUCCESS) {
            cmn_err(CE_WARN, "arcmsr%d: unable to create "
                "property for T%dL%d (lun)",
                ddi_get_instance(acb->dev_info), tgt, lun);
            rval = NDI_FAILURE;
            goto finish;
        }
        if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
            "compatible", compatible, ncompatible)
            != DDI_PROP_SUCCESS) {
            cmn_err(CE_WARN, "arcmsr%d: unable to create "
                "property for T%dL%d (compatible)",
                ddi_get_instance(acb->dev_info), tgt, lun);
            rval = NDI_FAILURE;
            goto finish;
        }

        rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
        if (rval != NDI_SUCCESS) {
            cmn_err(CE_WARN, "arcmsr%d: unable to online T%dL%d",
                ddi_get_instance(acb->dev_info), tgt, lun);
            ndi_prop_remove_all(ldip);
            (void) ndi_devi_free(ldip);
        } else
            cmn_err(CE_NOTE, "arcmsr%d: T%dL%d onlined",
                ddi_get_instance(acb->dev_info), tgt, lun);
    }
finish:
    if (dipp)
        *dipp = ldip;

    scsi_hba_nodename_compatible_free(nodename, compatible);
    return (rval);
}

static int
arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
    dev_info_t **ldip)
{
    struct scsi_device sd;
    dev_info_t *child;
    int rval;

    if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
        if (ldip)
            *ldip = child;
        return (NDI_SUCCESS);
    }

    bzero(&sd, sizeof (struct scsi_device));
    sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
    sd.sd_address.a_target = (uint16_t)tgt;
    sd.sd_address.a_lun = (uint8_t)lun;
    rval = scsi_hba_probe(&sd, NULL);
    if (rval == SCSIPROBE_EXISTS)
        rval = arcmsr_config_child(acb, &sd, ldip);
    scsi_unprobe(&sd);
    return (rval);
}

static int
arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
    struct ACB *acb;
    int circ = 0;
    int rval;
    int tgt, lun;

    if ((acb = ddi_get_soft_state(arcmsr_soft_state,
        ddi_get_instance(parent))) == NULL)
        return (NDI_FAILURE);

    ndi_devi_enter(parent, &circ);
    switch (op) {
    case BUS_CONFIG_ONE:
        if (arcmsr_parse_devname(arg, &tgt, &lun) != 0) {
            rval = NDI_FAILURE;
            break;
        }
        mutex_enter(&acb->acb_mutex);
        if (acb->device_map[tgt] & (1 << lun)) {
            rval = arcmsr_config_lun(acb, tgt, lun, childp);
        } else {
            /* device not present; don't leave rval unset */
            rval = NDI_FAILURE;
        }
        mutex_exit(&acb->acb_mutex);
        break;

    case BUS_CONFIG_DRIVER:
    case BUS_CONFIG_ALL:
        for (tgt = 0; tgt < ARCMSR_MAX_TARGETID; tgt++)
            for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
                if (acb->device_map[tgt] & (1 << lun))
                    (void) arcmsr_config_lun(acb, tgt,
                        lun, NULL);

        rval = NDI_SUCCESS;
        break;

    default:
        rval = NDI_FAILURE;
        break;
    }
    if (rval == NDI_SUCCESS)
        rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
    ndi_devi_exit(parent, circ);
    return (rval);
}