/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * ISSUES
 *
 * - more consistent error messages
 * - report name of device on errors?
 * - if wide target renegotiates sync, back to narrow?
 * - last_msgout is not accurate ????
 * - resolve XXXX
 * - improve msg reject code (use special msg reject handler)
 * - better use of IDE message
 * - keep track if ATN remains asserted and target not going into
 *   a msg-out phase
 * - improve comments
 * - no slave accesses when start address is odd and dma hasn't started;
 *   this affects asserting ATN
 */

/*
 * fas - QLogic fas366 wide/fast SCSI Processor HBA driver with
 *	tagged and non-tagged queueing support
 */
#if defined(lint) && !defined(DEBUG)
#define	DEBUG	1
#define	FASDEBUG
#endif

#define	DMA_REG_TRACING		/* enable dma register access tracing */


/*
 * standard header files
 */
#include <sys/note.h>
#include <sys/scsi/scsi.h>
#include <sys/file.h>
#include <sys/vtrace.h>

/*
 * private header files
 */
#include <sys/scsi/adapters/fasdma.h>
#include <sys/scsi/adapters/fasreg.h>
#include <sys/scsi/adapters/fasvar.h>
#include <sys/scsi/adapters/fascmd.h>
#include <sys/scsi/impl/scsi_reset_notify.h>

/*
 * tunables
 */
static int fas_selection_timeout = 250;	/* 250 milliseconds */
static uchar_t fas_default_offset = DEFAULT_OFFSET;

/*
 * needed for presto support, do not remove
 */
static int fas_enable_sbus64 = 1;

#ifdef FASDEBUG
int fasdebug = 0;
int fasdebug_instance = -1;		/* debug all instances */
static int fas_burstsizes_limit = -1;
static int fas_no_sync_wide_backoff = 0;
#endif /* FASDEBUG */

/*
 * Local static data protected by global mutex
 */
static kmutex_t fas_global_mutex;	/* to allow concurrent attach */

static int fas_scsi_watchdog_tick;	/* in seconds, for all */
					/* instances */
static clock_t fas_tick;		/* fas_watch() interval in Hz */
static timeout_id_t fas_reset_watch;	/* timeout id for reset watch */
static timeout_id_t fas_timeout_id = 0;
static int fas_timeout_initted = 0;

static krwlock_t fas_global_rwlock;

static void *fas_state;			/* soft state ptr */
static struct fas *fas_head;		/* link all softstate structures */
static struct fas *fas_tail;		/* for fas_watch() */

static kmutex_t fas_log_mutex;
static char fas_log_buf[256];
_NOTE(MUTEX_PROTECTS_DATA(fas_global_mutex, fas_reset_watch))
_NOTE(DATA_READABLE_WITHOUT_LOCK(fas_state fas_head fas_tail \
	fas_scsi_watchdog_tick fas_tick))
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", fas::f_quiesce_timeid))

/*
 * dma attribute structure for scsi engine
 */
static ddi_dma_attr_t dma_fasattr = {
	DMA_ATTR_V0, (unsigned long long)0,
	(unsigned long long)0xffffffff, (unsigned long long)((1<<24)-1),
	1, DEFAULT_BURSTSIZE, 1,
	(unsigned long long)0xffffffff, (unsigned long long)0xffffffff,
	1, 512, 0
};
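
/*
 * For reference (a reading of the positional initializers above against
 * the ddi_dma_attr(9S) field order, not a behavior change): version V0,
 * addr_lo 0, addr_hi 0xffffffff (32-bit addressing), count_max
 * (1<<24)-1, align 1, burstsizes DEFAULT_BURSTSIZE, minxfer 1,
 * maxxfer 0xffffffff, seg 0xffffffff, sgllen 1, granular 512, flags 0.
 */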

/*
 * optional torture test stuff
 */
#ifdef FASDEBUG
#define	FAS_TEST
static int fas_ptest_emsgin;
static int fas_ptest_msgin;
static int fas_ptest_msg = -1;
static int fas_ptest_status;
static int fas_ptest_data_in;
static int fas_atest;
static int fas_atest_disc;
static int fas_atest_reconn;
static void fas_test_abort(struct fas *fas, int slot);
static int fas_rtest;
static int fas_rtest_type;
static void fas_test_reset(struct fas *fas, int slot);
static int fas_force_timeout;
static int fas_btest;
static int fas_test_stop;
static int fas_transport_busy;
static int fas_transport_busy_rqs;
static int fas_transport_reject;
static int fas_arqs_failure;
static int fas_tran_err;
static int fas_test_untagged;
static int fas_enable_untagged;
#endif

/*
 * warlock directives
 */
_NOTE(DATA_READABLE_WITHOUT_LOCK(dma fasdebug))
_NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy))
_NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_busy_rqs))
_NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_transport_reject))
_NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_arqs_failure))
_NOTE(SCHEME_PROTECTS_DATA("just test variables", fas_tran_err))
_NOTE(MUTEX_PROTECTS_DATA(fas_log_mutex, fas_log_buf))

/*
 * function prototypes
 *
 * scsa functions are exported by means of the transport table:
 */
static int fas_scsi_tgt_probe(struct scsi_device *sd,
    int (*waitfunc)(void));
static int fas_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int fas_scsi_reset(struct scsi_address *ap, int level);
static int fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
static int fas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *fas_scsi_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static void fas_scsi_dmafree(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void fas_scsi_sync_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);

/*
 * internal functions:
 */
static int fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp);
static int fas_alloc_tag(struct fas *fas, struct fas_cmd *sp);
static int fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag);
static void fas_empty_waitQ(struct fas *fas);
static void fas_move_waitQ_to_readyQ(struct fas *fas);
static void fas_check_waitQ_and_mutex_exit(struct fas *fas);
static int fas_istart(struct fas *fas);
static int fas_ustart(struct fas *fas);
static int fas_startcmd(struct fas *fas, struct fas_cmd *sp);

static int fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp,
    int cmdlen, int tgtlen, int statuslen, int kf);
static void fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp);
static int fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
static void fas_kmem_cache_destructor(void *buf, void *cdrarg);

static int fas_finish(struct fas *fas);
static void fas_handle_qfull(struct fas *fas, struct fas_cmd *sp);
static void fas_restart_cmd(void *);
static int fas_dopoll(struct fas *fas, int timeout);
static void fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp);
static uint_t fas_intr(caddr_t arg);
static int fas_intr_svc(struct fas *fas);
static int fas_phasemanage(struct fas *fas);
static int fas_handle_unknown(struct fas *fas);
static int fas_handle_cmd_start(struct fas *fas);
static int fas_handle_cmd_done(struct fas *fas);
static int fas_handle_msg_out_start(struct fas *fas);
static int fas_handle_msg_out_done(struct fas *fas);
static int fas_handle_clearing(struct fas *fas);
static int fas_handle_data_start(struct fas *fas);
static int fas_handle_data_done(struct fas *fas);
static int fas_handle_c_cmplt(struct fas *fas);
static int fas_handle_msg_in_start(struct fas *fas);
static int fas_handle_more_msgin(struct fas *fas);
static int fas_handle_msg_in_done(struct fas *fas);
static int fas_onebyte_msg(struct fas *fas);
static int fas_twobyte_msg(struct fas *fas);
static int fas_multibyte_msg(struct fas *fas);
static void fas_revert_to_async(struct fas *fas, int tgt);
static int fas_finish_select(struct fas *fas);
static int fas_reselect_preempt(struct fas *fas);
static int fas_reconnect(struct fas *fas);
static int fas_handle_selection(struct fas *fas);
static void fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp);
static int fas_handle_gross_err(struct fas *fas);
static int fas_illegal_cmd_or_bus_reset(struct fas *fas);
static int fas_check_dma_error(struct fas *fas);

static void fas_make_sdtr(struct fas *fas, int msgout_offset, int target);
static void fas_make_wdtr(struct fas *fas, int msgout_offset, int target,
    int width);
static void fas_update_props(struct fas *fas, int tgt);
static void fas_update_this_prop(struct fas *fas, char *property, int value);

static int fas_commoncap(struct scsi_address *ap, char *cap, int val,
    int tgtonly, int doset);

static void fas_watch(void *arg);
static void fas_watchsubr(struct fas *fas);
static void fas_cmd_timeout(struct fas *fas, int slot);
static void fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
    int slot);
static void fas_reset_sync_wide(struct fas *fas);
static void fas_set_wide_conf3(struct fas *fas, int target, int width);
static void fas_force_renegotiation(struct fas *fas, int target);

static int fas_set_new_window(struct fas *fas, struct fas_cmd *sp);
static int fas_restore_pointers(struct fas *fas, struct fas_cmd *sp);
static int fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end);

/*PRINTFLIKE3*/
static void fas_log(struct fas *fas, int level, const char *fmt, ...);
/*PRINTFLIKE2*/
static void fas_printf(struct fas *fas, const char *fmt, ...);
static void fas_printstate(struct fas *fas, char *msg);
static void fas_dump_cmd(struct fas *fas, struct fas_cmd *sp);
static void fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp);
static char *fas_state_name(ushort_t state);

static void fas_makeproxy_cmd(struct fas_cmd *sp,
    struct scsi_address *ap, struct scsi_pkt *pkt, int nmsg, ...);
static int fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
    struct scsi_address *ap, char *what);

static void fas_internal_reset(struct fas *fas, int reset_action);
static int fas_alloc_active_slots(struct fas *fas, int slot, int flag);

static int fas_abort_curcmd(struct fas *fas);
static int fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot);
static int fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int fas_do_scsi_reset(struct scsi_address *ap, int level);
static int fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp,
    int slot);
static void fas_flush_readyQ(struct fas *fas, int slot);
static void fas_flush_tagQ(struct fas *fas, int slot);
static void fas_flush_cmd(struct fas *fas, struct fas_cmd *sp,
    uchar_t reason, uint_t stat);
static int fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp,
    uchar_t msg);
static int fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
    struct fas_cmd *sp, uchar_t msg, int slot);
static void fas_mark_packets(struct fas *fas, int slot, uchar_t reason,
    uint_t stat);
static void fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp,
    uchar_t reason, uint_t stat);

static int fas_reset_bus(struct fas *fas);
static int fas_reset_recovery(struct fas *fas);
static int fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap);
static int fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap);
static void fas_start_watch_reset_delay(struct fas *);
static void fas_setup_reset_delay(struct fas *fas);
static void fas_watch_reset_delay(void *arg);
static int fas_watch_reset_delay_subr(struct fas *fas);
static void fas_reset_cleanup(struct fas *fas, int slot);
static int fas_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg);
static int fas_scsi_quiesce(dev_info_t *hba_dip);
static int fas_scsi_unquiesce(dev_info_t *hba_dip);

static void fas_set_throttles(struct fas *fas, int slot,
    int n, int what);
static void fas_set_all_lun_throttles(struct fas *fas, int slot, int what);
static void fas_full_throttle(struct fas *fas, int slot);
static void fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int timeout);
static void fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp);

static int fas_quiesce_bus(struct fas *fas);
static int fas_unquiesce_bus(struct fas *fas);
static void fas_ncmds_checkdrain(void *arg);
static int fas_check_outstanding(struct fas *fas);

static int fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap);
static int fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap);
static int fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp);
void fas_complete_arq_pkt(struct scsi_pkt *pkt);

void fas_call_pkt_comp(struct fas *fas, struct fas_cmd *sp);
void fas_empty_callbackQ(struct fas *fas);
int fas_init_callbacks(struct fas *fas);
void fas_destroy_callbacks(struct fas *fas);

static int fas_check_dma_error(struct fas *fas);
static int fas_init_chip(struct fas *fas, uchar_t id);

static void fas_read_fifo(struct fas *fas);
static void fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad);

#ifdef FASDEBUG
static void fas_reg_cmd_write(struct fas *fas, uint8_t cmd);
static void fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what);
static uint8_t fas_reg_read(struct fas *fas, volatile uint8_t *p);

static void fas_dma_reg_write(struct fas *fas, volatile uint32_t *p,
    uint32_t what);
static uint32_t fas_dma_reg_read(struct fas *fas, volatile uint32_t *p);
#else
#define	fas_reg_cmd_write(fas, cmd) \
	fas->f_reg->fas_cmd = (cmd), fas->f_last_cmd = (cmd)
#define	fas_reg_write(fas, p, what) *(p) = (what)
#define	fas_reg_read(fas, p) *(p)
#define	fas_dma_reg_write(fas, p, what) *(p) = (what)
#define	fas_dma_reg_read(fas, p) *(p)
#endif

/*
 * autoconfiguration data and routines.
 */
static int fas_attach(dev_info_t *dev, ddi_attach_cmd_t cmd);
static int fas_detach(dev_info_t *dev, ddi_detach_cmd_t cmd);
static int fas_dr_detach(dev_info_t *dev);

static struct dev_ops fas_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	fas_attach,		/* attach */
	fas_detach,		/* detach */
	nodev,			/* reset */
	NULL,			/* driver operations */
	NULL,			/* bus operations */
	NULL,			/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

char _depends_on[] = "misc/scsi";

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"FAS SCSI HBA Driver",	/* Name of the module. */
	&fas_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

int
_init(void)
{
	int rval;
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	rval = ddi_soft_state_init(&fas_state, sizeof (struct fas),
	    FAS_INITIAL_SOFT_SPACE);
	if (rval != 0) {
		return (rval);
	}

	if ((rval = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&fas_state);
		return (rval);
	}

	mutex_init(&fas_global_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&fas_global_rwlock, NULL, RW_DRIVER, NULL);

	mutex_init(&fas_log_mutex, NULL, MUTEX_DRIVER, NULL);

	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&fas_log_mutex);
		rw_destroy(&fas_global_rwlock);
		mutex_destroy(&fas_global_mutex);
		ddi_soft_state_fini(&fas_state);
		scsi_hba_fini(&modlinkage);
		return (rval);
	}

	return (rval);
}

int
_fini(void)
{
	int rval;
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	if ((rval = mod_remove(&modlinkage)) == 0) {
		ddi_soft_state_fini(&fas_state);
		scsi_hba_fini(&modlinkage);
		mutex_destroy(&fas_log_mutex);
		rw_destroy(&fas_global_rwlock);
		mutex_destroy(&fas_global_mutex);
	}
	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	return (mod_info(&modlinkage, modinfop));
}

static int
fas_scsi_tgt_probe(struct scsi_device *sd,
    int (*waitfunc)(void))
{
	dev_info_t *dip = ddi_get_parent(sd->sd_dev);
	int rval = SCSIPROBE_FAILURE;
	scsi_hba_tran_t *tran;
	struct fas *fas;
	int tgt = sd->sd_address.a_target;

	tran = ddi_get_driver_private(dip);
	ASSERT(tran != NULL);
	fas = TRAN2FAS(tran);

	/*
	 * force renegotiation since inquiry cmds do not cause
	 * check conditions
	 */
	mutex_enter(FAS_MUTEX(fas));
	fas_force_renegotiation(fas, tgt);
	mutex_exit(FAS_MUTEX(fas));
	rval = scsi_hba_probe(sd, waitfunc);

	/*
	 * the scsi-options precedence is:
	 *	target-scsi-options		highest
	 *	device-type-scsi-options
	 *	per bus scsi-options
	 *	global scsi-options		lowest
	 */
	mutex_enter(FAS_MUTEX(fas));
	if ((rval == SCSIPROBE_EXISTS) &&
	    ((fas->f_target_scsi_options_defined & (1 << tgt)) == 0)) {
		int options;

		options = scsi_get_device_type_scsi_options(dip, sd, -1);
		if (options != -1) {
			fas->f_target_scsi_options[tgt] = options;
			fas_log(fas, CE_NOTE,
			    "?target%x-scsi-options = 0x%x\n", tgt,
			    fas->f_target_scsi_options[tgt]);
			fas_force_renegotiation(fas, tgt);
		}
	}
	mutex_exit(FAS_MUTEX(fas));

	IPRINTF2("target%x-scsi-options= 0x%x\n",
	    tgt, fas->f_target_scsi_options[tgt]);

	return (rval);
}


/*ARGSUSED*/
static int
fas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	return (((sd->sd_address.a_target < NTARGETS_WIDE) &&
	    (sd->sd_address.a_lun < NLUNS_PER_TARGET)) ?
	    DDI_SUCCESS : DDI_FAILURE);
}

/*ARGSUSED*/
static int
fas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct fas *fas = NULL;
	volatile struct dma *dmar = NULL;
	volatile struct fasreg *fasreg;
	ddi_dma_attr_t *fas_dma_attr;
	ddi_device_acc_attr_t dev_attr;

	int instance, id, slot, i, hm_rev;
	size_t rlen;
	uint_t count;
	char buf[64];
	scsi_hba_tran_t *tran = NULL;
	char intr_added = 0;
	char mutex_init_done = 0;
	char hba_attached = 0;
	char bound_handle = 0;
	char *prop_template = "target%d-scsi-options";
	char prop_str[32];

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		if ((tran = ddi_get_driver_private(dip)) == NULL)
			return (DDI_FAILURE);

		fas = TRAN2FAS(tran);
		if (!fas) {
			return (DDI_FAILURE);
		}
		/*
		 * Reset hardware and softc to "no outstanding commands".
		 * Note that a check condition can result on first command
		 * to a target.
		 */
		mutex_enter(FAS_MUTEX(fas));
		fas_internal_reset(fas,
		    FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);

		(void) fas_reset_bus(fas);

		fas->f_suspended = 0;

		/* make sure that things get started */
		(void) fas_istart(fas);
		fas_check_waitQ_and_mutex_exit(fas);

		mutex_enter(&fas_global_mutex);
		if (fas_timeout_id == 0) {
			fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
			fas_timeout_initted = 1;
		}
		mutex_exit(&fas_global_mutex);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);

	/*
	 * Since we know that some instantiations of this device can
	 * be plugged into slave-only SBus slots, check to see whether
	 * this is one such.
	 */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "fas%d: device in slave-only slot", instance);
		return (DDI_FAILURE);
	}

	if (ddi_intr_hilevel(dip, 0)) {
		/*
		 * Interrupt number '0' is a high-level interrupt.
		 * At this point you either add a special interrupt
		 * handler that triggers a soft interrupt at a lower level,
		 * or - more simply and appropriately here - you just
		 * fail the attach.
		 */
		cmn_err(CE_WARN,
		    "fas%d: Device is using a hilevel intr", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate softc information.
	 */
	if (ddi_soft_state_zalloc(fas_state, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "fas%d: cannot allocate soft state", instance);
		goto fail;
	}

	fas = (struct fas *)ddi_get_soft_state(fas_state, instance);

	if (fas == NULL) {
		goto fail;
	}

	/*
	 * map in device registers
	 */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_regs_map_setup(dip, (uint_t)0, (caddr_t *)&dmar,
	    (off_t)0, (off_t)sizeof (struct dma),
	    &dev_attr, &fas->f_dmar_acc_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fas%d: cannot map dma", instance);
		goto fail;
	}

	if (ddi_regs_map_setup(dip, (uint_t)1, (caddr_t *)&fasreg,
	    (off_t)0, (off_t)sizeof (struct fasreg),
	    &dev_attr, &fas->f_regs_acc_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "fas%d: unable to map fas366 registers", instance);
		goto fail;
	}

	fas_dma_attr = &dma_fasattr;
	if (ddi_dma_alloc_handle(dip, fas_dma_attr,
	    DDI_DMA_SLEEP, NULL, &fas->f_dmahandle) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "fas%d: cannot alloc dma handle", instance);
		goto fail;
	}

	/*
	 * allocate cmdarea and its dma handle
	 */
	if (ddi_dma_mem_alloc(fas->f_dmahandle,
	    (uint_t)2*FIFOSIZE,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, (caddr_t *)&fas->f_cmdarea, &rlen,
	    &fas->f_cmdarea_acc_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "fas%d: cannot alloc cmd area", instance);
		goto fail;
	}

	fas->f_reg = fasreg;
	fas->f_dma = dmar;
	fas->f_instance = instance;

	if (ddi_dma_addr_bind_handle(fas->f_dmahandle,
	    NULL, (caddr_t)fas->f_cmdarea,
	    rlen, DDI_DMA_RDWR|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &fas->f_dmacookie, &count) != DDI_DMA_MAPPED) {
		cmn_err(CE_WARN,
		    "fas%d: cannot bind cmdarea", instance);
		goto fail;
	}
	bound_handle++;

	ASSERT(count == 1);

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
	scsi_size_clean(dip);		/* SCSI_SIZE_CLEAN_VERIFY ok */

	/*
	 * initialize transport structure
	 */
	fas->f_tran = tran;
	fas->f_dev = dip;
	tran->tran_hba_private = fas;
	tran->tran_tgt_private = NULL;
	tran->tran_tgt_init = fas_scsi_tgt_init;
	tran->tran_tgt_probe = fas_scsi_tgt_probe;
	tran->tran_tgt_free = NULL;
	tran->tran_start = fas_scsi_start;
	tran->tran_abort = fas_scsi_abort;
	tran->tran_reset = fas_scsi_reset;
	tran->tran_getcap = fas_scsi_getcap;
	tran->tran_setcap = fas_scsi_setcap;
	tran->tran_init_pkt = fas_scsi_init_pkt;
	tran->tran_destroy_pkt = fas_scsi_destroy_pkt;
	tran->tran_dmafree = fas_scsi_dmafree;
	tran->tran_sync_pkt = fas_scsi_sync_pkt;
	tran->tran_reset_notify = fas_scsi_reset_notify;
	tran->tran_get_bus_addr = NULL;
	tran->tran_get_name = NULL;
	tran->tran_quiesce = fas_scsi_quiesce;
	tran->tran_unquiesce = fas_scsi_unquiesce;
	tran->tran_bus_reset = NULL;
	tran->tran_add_eventcall = NULL;
	tran->tran_get_eventcookie = NULL;
	tran->tran_post_event = NULL;
	tran->tran_remove_eventcall = NULL;

	fas->f_force_async = 0;

	/*
	 * disable tagged queuing and wide for all targets
	 * (will be enabled by target driver if required)
	 * sync is enabled by default
	 */
	fas->f_nowide = fas->f_notag = ALL_TARGETS;
	fas->f_force_narrow = ALL_TARGETS;

	/*
	 * By default we assume embedded (single-lun) devices, which
	 * lets fas_watch() save time by skipping the remaining luns
	 * when checking for timeouts.  Once we're talking to any
	 * non-embedded device, we can no longer skip over non-zero
	 * luns in fas_watch() and fas_ustart().
	 */
	fas->f_dslot = NLUNS_PER_TARGET;

	/*
	 * f_active is used for saving disconnected cmds.
	 * Allocate only for lun == 0 here; if we probe a lun > 0
	 * we allocate an active structure for it then.
	 * If tagged queueing gets enabled for a target, the size
	 * is increased later to hold 256 cmds.
	 */
	for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET) {
		(void) fas_alloc_active_slots(fas, slot, KM_SLEEP);
	}

	/*
	 * initialize the qfull retry counts
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		fas->f_qfull_retries[i] = QFULL_RETRIES;
		fas->f_qfull_retry_interval[i] =
		    drv_usectohz(QFULL_RETRY_INTERVAL * 1000);

	}

	/*
	 * Initialize throttles.
	 */
	fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);

	/*
	 * Initialize mask of deferred property updates
	 */
	fas->f_props_update = 0;

	/*
	 * set host ID
	 */
	fas->f_fasconf = DEFAULT_HOSTID;
	id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "initiator-id", -1);
	if (id == -1) {
		id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    "scsi-initiator-id", -1);
	}
	if (id != DEFAULT_HOSTID && id >= 0 && id < NTARGETS_WIDE) {
		fas_log(fas, CE_NOTE, "?initiator SCSI ID now %d\n", id);
		fas->f_fasconf = (uchar_t)id;
	}

	/*
	 * find the burstsize and reduce ours if necessary
	 */
	fas->f_dma_attr = fas_dma_attr;
	fas->f_dma_attr->dma_attr_burstsizes &=
	    ddi_dma_burstsizes(fas->f_dmahandle);

#ifdef FASDEBUG
	fas->f_dma_attr->dma_attr_burstsizes &= fas_burstsizes_limit;
	IPRINTF1("dma burstsize=%x\n", fas->f_dma_attr->dma_attr_burstsizes);
#endif
	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, fas->f_dma_attr, tran, 0) !=
	    DDI_SUCCESS) {
		fas_log(fas, CE_WARN, "scsi_hba_attach_setup failed");
		goto fail;
	}
	hba_attached++;

	/*
	 * if scsi-options property exists, use it
	 */
	fas->f_scsi_options = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, 0, "scsi-options", DEFAULT_SCSI_OPTIONS);

	/*
	 * if scsi-selection-timeout property exists, use it
	 */
	fas_selection_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, 0, "scsi-selection-timeout", SCSI_DEFAULT_SELECTION_TIMEOUT);

	/*
	 * if hm-rev property doesn't exist, use old scheme for rev
	 */
	hm_rev = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "hm-rev", -1);

	if (hm_rev == 0xa0 || hm_rev == -1) {
		if (DMAREV(dmar) != 0) {
			fas->f_hm_rev = 0x20;
			fas_log(fas, CE_WARN,
			    "obsolete rev 2.0 FEPS chip, "
			    "possible data corruption");
		} else {
			fas->f_hm_rev = 0x10;
			fas_log(fas, CE_WARN,
			    "obsolete and unsupported rev 1.0 FEPS chip");
			goto fail;
		}
	} else if (hm_rev == 0x20) {
		fas->f_hm_rev = 0x21;
		fas_log(fas, CE_WARN, "obsolete rev 2.1 FEPS chip");
	} else {
		fas->f_hm_rev = (uchar_t)hm_rev;
		fas_log(fas, CE_NOTE, "?rev %x.%x FEPS chip\n",
		    (hm_rev >> 4) & 0xf, hm_rev & 0xf);
	}

	if ((fas->f_scsi_options & SCSI_OPTIONS_SYNC) == 0) {
		fas->f_nosync = ALL_TARGETS;
	}

	if ((fas->f_scsi_options & SCSI_OPTIONS_WIDE) == 0) {
		fas->f_nowide = ALL_TARGETS;
	}

	/*
	 * if target<n>-scsi-options property exists, use it;
	 * otherwise use the f_scsi_options
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		(void) sprintf(prop_str, prop_template, i);
		fas->f_target_scsi_options[i] = ddi_prop_get_int(
		    DDI_DEV_T_ANY, dip, 0, prop_str, -1);

		if (fas->f_target_scsi_options[i] != -1) {
			fas_log(fas, CE_NOTE, "?target%x-scsi-options=0x%x\n",
			    i, fas->f_target_scsi_options[i]);
			fas->f_target_scsi_options_defined |= 1 << i;
		} else {
			fas->f_target_scsi_options[i] = fas->f_scsi_options;
		}
		if (((fas->f_target_scsi_options[i] &
		    SCSI_OPTIONS_DR) == 0) &&
		    (fas->f_target_scsi_options[i] & SCSI_OPTIONS_TAG)) {
			fas->f_target_scsi_options[i] &= ~SCSI_OPTIONS_TAG;
			fas_log(fas, CE_WARN,
			    "Disabled TQ since disconnects are disabled");
		}
	}

	fas->f_scsi_tag_age_limit =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-tag-age-limit",
	    DEFAULT_TAG_AGE_LIMIT);

	fas->f_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
	if (fas->f_scsi_reset_delay == 0) {
		fas_log(fas, CE_NOTE,
		    "scsi_reset_delay of 0 is not recommended,"
		    " resetting to SCSI_DEFAULT_RESET_DELAY\n");
		fas->f_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
	}

	/*
	 * get iblock cookie and initialize mutexes
	 */
	if (ddi_get_iblock_cookie(dip, (uint_t)0, &fas->f_iblock)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fas_attach: cannot get iblock cookie");
		goto fail;
	}

	mutex_init(&fas->f_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
	cv_init(&fas->f_cv, NULL, CV_DRIVER, NULL);

	/*
	 * initialize mutex for waitQ
	 */
	mutex_init(&fas->f_waitQ_mutex, NULL, MUTEX_DRIVER, fas->f_iblock);
	mutex_init_done++;

	/*
	 * initialize callback mechanism (immediate callback)
	 */
	mutex_enter(&fas_global_mutex);
	if (fas_init_callbacks(fas)) {
		mutex_exit(&fas_global_mutex);
		goto fail;
	}
	mutex_exit(&fas_global_mutex);

	/*
	 * kstat_intr support
	 */
	(void) sprintf(buf, "fas%d", instance);
	fas->f_intr_kstat = kstat_create("fas", instance, buf, "controller",
	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
	if (fas->f_intr_kstat)
		kstat_install(fas->f_intr_kstat);

	/*
	 * install interrupt handler
	 */
	mutex_enter(FAS_MUTEX(fas));
	if (ddi_add_intr(dip, (uint_t)0, &fas->f_iblock, NULL,
	    fas_intr, (caddr_t)fas)) {
		cmn_err(CE_WARN, "fas: cannot add intr");
		mutex_exit(FAS_MUTEX(fas));
		goto fail;
	}
	intr_added++;

	/*
	 * initialize fas chip
	 */
	if (fas_init_chip(fas, id)) {
		cmn_err(CE_WARN, "fas: cannot initialize");
		mutex_exit(FAS_MUTEX(fas));
		goto fail;
	}
	mutex_exit(FAS_MUTEX(fas));

	/*
	 * create kmem cache for packets
	 */
	(void) sprintf(buf, "fas%d_cache", instance);
	fas->f_kmem_cache = kmem_cache_create(buf,
	    EXTCMD_SIZE, 8,
	    fas_kmem_cache_constructor, fas_kmem_cache_destructor,
	    NULL, (void *)fas, NULL, 0);
	if (fas->f_kmem_cache == NULL) {
		cmn_err(CE_WARN, "fas: cannot create kmem_cache");
		goto fail;
	}

	/*
	 * at this point, we are not going to fail the attach
	 * so there is no need to undo the rest:
	 *
	 * add this fas to the list, this makes debugging easier
	 * and fas_watch() needs it to walk thru all fas's
	 */
	rw_enter(&fas_global_rwlock, RW_WRITER);
	if (fas_head == NULL) {
		fas_head = fas;
	} else {
		fas_tail->f_next = fas;
	}
	fas_tail = fas;		/* point to last fas in list */
	rw_exit(&fas_global_rwlock);

	/*
	 * there is one watchdog handler for all driver instances.
	 * start the watchdog if it hasn't been done yet
	 */
	mutex_enter(&fas_global_mutex);
	if (fas_scsi_watchdog_tick == 0) {
		fas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
		    dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
		if (fas_scsi_watchdog_tick != DEFAULT_WD_TICK) {
			fas_log(fas, CE_NOTE, "?scsi-watchdog-tick=%d\n",
			    fas_scsi_watchdog_tick);
		}
		fas_tick = drv_usectohz((clock_t)
		    fas_scsi_watchdog_tick * 1000000);
		IPRINTF2("fas scsi watchdog tick=%x, fas_tick=%lx\n",
		    fas_scsi_watchdog_tick, fas_tick);
		if (fas_timeout_id == 0) {
			fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
			fas_timeout_initted = 1;
		}
	}
	mutex_exit(&fas_global_mutex);

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

fail:
	cmn_err(CE_WARN, "fas%d: cannot attach", instance);
	if (fas) {
		for (slot = 0; slot < N_SLOTS; slot++) {
			struct f_slots *active = fas->f_active[slot];
			if (active) {
				kmem_free(active, active->f_size);
				fas->f_active[slot] = NULL;
			}
		}
		if (mutex_init_done) {
			mutex_destroy(&fas->f_mutex);
			mutex_destroy(&fas->f_waitQ_mutex);
			cv_destroy(&fas->f_cv);
		}
		if (intr_added) {
			ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);
		}
		/*
		 * kstat_intr support
		 */
		if (fas->f_intr_kstat) {
			kstat_delete(fas->f_intr_kstat);
		}
		if (hba_attached) {
			(void) scsi_hba_detach(dip);
		}
		if (tran) {
			scsi_hba_tran_free(tran);
		}
		if (fas->f_kmem_cache) {
			kmem_cache_destroy(fas->f_kmem_cache);
		}
		if (fas->f_cmdarea) {
			if (bound_handle) {
				(void) ddi_dma_unbind_handle(fas->f_dmahandle);
			}
			ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
		}
		if (fas->f_dmahandle) {
			ddi_dma_free_handle(&fas->f_dmahandle);
		}
		fas_destroy_callbacks(fas);
		if (fas->f_regs_acc_handle) {
			ddi_regs_map_free(&fas->f_regs_acc_handle);
		}
		if (fas->f_dmar_acc_handle) {
			ddi_regs_map_free(&fas->f_dmar_acc_handle);
		}
		ddi_soft_state_free(fas_state, instance);

		ddi_remove_minor_node(dip, NULL);
	}
	return (DDI_FAILURE);
}

/*ARGSUSED*/
static int
fas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct fas *fas, *nfas;
	scsi_hba_tran_t *tran;

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	switch (cmd) {
	case DDI_DETACH:
		return (fas_dr_detach(dip));

	case DDI_SUSPEND:
		if ((tran = ddi_get_driver_private(dip)) == NULL)
			return (DDI_FAILURE);

		fas = TRAN2FAS(tran);
		if (!fas) {
			return (DDI_FAILURE);
		}

		mutex_enter(FAS_MUTEX(fas));

		fas->f_suspended = 1;

		if (fas->f_ncmds) {
			(void) fas_reset_bus(fas);
			(void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
		}
		/*
		 * disable dma and fas interrupt
		 */
		fas->f_dma_csr &= ~DMA_INTEN;
		fas->f_dma_csr &= ~DMA_ENDVMA;
		fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);

		mutex_exit(FAS_MUTEX(fas));

		if (fas->f_quiesce_timeid) {
			(void) untimeout(fas->f_quiesce_timeid);
			fas->f_quiesce_timeid = 0;
		}

		if (fas->f_restart_cmd_timeid) {
			(void) untimeout(fas->f_restart_cmd_timeid);
			fas->f_restart_cmd_timeid = 0;
		}

		/* Last fas? */
		rw_enter(&fas_global_rwlock, RW_WRITER);
		for (nfas = fas_head; nfas; nfas = nfas->f_next) {
			if (!nfas->f_suspended) {
				rw_exit(&fas_global_rwlock);
				return (DDI_SUCCESS);
			}
		}
		rw_exit(&fas_global_rwlock);

		mutex_enter(&fas_global_mutex);
		if (fas_timeout_id != 0) {
			timeout_id_t tid = fas_timeout_id;
			fas_timeout_id = 0;
			fas_timeout_initted = 0;
			mutex_exit(&fas_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&fas_global_mutex);
		}

		mutex_enter(&fas_global_mutex);
		if (fas_reset_watch) {
			timeout_id_t tid = fas_reset_watch;
			fas_reset_watch = 0;
			mutex_exit(&fas_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&fas_global_mutex);
		}

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
	_NOTE(NOT_REACHED)
	/* NOTREACHED */
}

static int
fas_dr_detach(dev_info_t *dip)
{
	struct fas *fas, *f;
	scsi_hba_tran_t *tran;
	short slot;
	int i, j;

	if ((tran = ddi_get_driver_private(dip)) == NULL)
		return (DDI_FAILURE);

	fas = TRAN2FAS(tran);
	if (!fas) {
		return (DDI_FAILURE);
	}

	/*
	 * disable interrupts
	 */
	fas->f_dma_csr &= ~DMA_INTEN;
	fas->f_dma->dma_csr = fas->f_dma_csr;
	ddi_remove_intr(dip, (uint_t)0, fas->f_iblock);

	/*
	 * Remove device instance from the global linked list
	 */
	rw_enter(&fas_global_rwlock, RW_WRITER);

	if (fas_head == fas) {
		f = fas_head = fas->f_next;
	} else {
		for (f = fas_head; f != (struct fas *)NULL; f = f->f_next) {
			if (f->f_next == fas) {
				f->f_next = fas->f_next;
				break;
			}
		}

		/*
		 * Instance not found in the softc list; don't
		 * re-enable interrupts.  The instance is
		 * effectively unusable.
		 */
		if (f == (struct fas *)NULL) {
			cmn_err(CE_WARN, "fas_dr_detach: fas instance not"
			    " in softc list!");
			rw_exit(&fas_global_rwlock);
			return (DDI_FAILURE);
		}


	}

	if (fas_tail == fas)
		fas_tail = f;

	rw_exit(&fas_global_rwlock);

	if (fas->f_intr_kstat)
		kstat_delete(fas->f_intr_kstat);

	fas_destroy_callbacks(fas);

	scsi_hba_reset_notify_tear_down(fas->f_reset_notify_listf);

	mutex_enter(&fas_global_mutex);
	/*
	 * destroy any outstanding tagged command info
	 */
	for (slot = 0; slot < N_SLOTS; slot++) {
		struct f_slots *active = fas->f_active[slot];
		if (active) {
			ushort_t tag;
			for (tag = 0; tag < active->f_n_slots; tag++) {
				struct fas_cmd *sp = active->f_slot[tag];
				if (sp) {
					struct scsi_pkt *pkt = sp->cmd_pkt;
					if (pkt) {
						(void) fas_scsi_destroy_pkt(
						    &pkt->pkt_address, pkt);
					}
					/* sp freed in fas_scsi_destroy_pkt */
					active->f_slot[tag] = NULL;
				}
			}
			kmem_free(active, active->f_size);
			fas->f_active[slot] = NULL;
		}
		ASSERT(fas->f_tcmds[slot] == 0);
	}

	/*
	 * disallow timeout thread rescheduling
	 */
	fas->f_flags |= FAS_FLG_NOTIMEOUTS;
	mutex_exit(&fas_global_mutex);

	if (fas->f_quiesce_timeid) {
		(void) untimeout(fas->f_quiesce_timeid);
	}

	/*
	 * last fas? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&fas_global_mutex);
	if (fas_head == (struct fas *)NULL) {
		if (fas_timeout_initted) {
			timeout_id_t tid = fas_timeout_id;
			fas_timeout_initted = 0;
			fas_timeout_id = 0;		/* don't resched */
			mutex_exit(&fas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&fas_global_mutex);
		}

		if (fas_reset_watch) {
			mutex_exit(&fas_global_mutex);
			(void) untimeout(fas_reset_watch);
			mutex_enter(&fas_global_mutex);
			fas_reset_watch = 0;
		}
	}
	mutex_exit(&fas_global_mutex);

	if (fas->f_restart_cmd_timeid) {
		(void) untimeout(fas->f_restart_cmd_timeid);
		fas->f_restart_cmd_timeid = 0;
	}

	/*
	 * destroy outstanding ARQ pkts
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		for (j = 0; j < NLUNS_PER_TARGET; j++) {
			int slot = i * NLUNS_PER_TARGET | j;
			if (fas->f_arq_pkt[slot]) {
				struct scsi_address sa;
				sa.a_hba_tran = NULL;	/* not used */
				sa.a_target = (ushort_t)i;
				sa.a_lun = (uchar_t)j;
				(void) fas_delete_arq_pkt(fas, &sa);
			}
		}
	}

	/*
	 * Remove device MT locks and CV
	 */
	mutex_destroy(&fas->f_waitQ_mutex);
	mutex_destroy(&fas->f_mutex);
	cv_destroy(&fas->f_cv);

	/*
	 * Release miscellaneous device resources
	 */

	if (fas->f_kmem_cache) {
		kmem_cache_destroy(fas->f_kmem_cache);
	}

	if (fas->f_cmdarea != (uchar_t *)NULL) {
		(void) ddi_dma_unbind_handle(fas->f_dmahandle);
		ddi_dma_mem_free(&fas->f_cmdarea_acc_handle);
	}

	if (fas->f_dmahandle != (ddi_dma_handle_t)NULL) {
		ddi_dma_free_handle(&fas->f_dmahandle);
	}

	if (fas->f_regs_acc_handle) {
		ddi_regs_map_free(&fas->f_regs_acc_handle);
	}
	if (fas->f_dmar_acc_handle) {
		ddi_regs_map_free(&fas->f_dmar_acc_handle);
	}

	/*
	 * Remove properties created during attach()
	 */
	ddi_prop_remove_all(dip);

	/*
	 * Delete the DMA limits, transport vectors and remove the device
	 * links to the scsi_transport layer.
	 *	-- ddi_set_driver_private(dip, NULL)
	 */
	(void) scsi_hba_detach(dip);

	/*
	 * Free the scsi_transport structure for this device.
	 */
	scsi_hba_tran_free(tran);

	ddi_soft_state_free(fas_state, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}

static int
fas_quiesce_bus(struct fas *fas)
{
	mutex_enter(FAS_MUTEX(fas));
	IPRINTF("fas_quiesce: QUIESCEing\n");
	IPRINTF3("fas_quiesce: ncmds (%d) ndisc (%d) state (%d)\n",
	    fas->f_ncmds, fas->f_ndisc, fas->f_softstate);
	fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
	if (fas_check_outstanding(fas)) {
		fas->f_softstate |= FAS_SS_DRAINING;
		fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain,
		    fas, (FAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(FAS_CV(fas), FAS_MUTEX(fas)) == 0) {
			/*
			 * quiesce has been interrupted.
			 */
			IPRINTF("fas_quiesce: abort QUIESCE\n");
			fas->f_softstate &= ~FAS_SS_DRAINING;
			fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
			(void) fas_istart(fas);
			if (fas->f_quiesce_timeid != 0) {
				mutex_exit(FAS_MUTEX(fas));
#ifndef __lock_lint	/* warlock complains but there is a NOTE on this */
				(void) untimeout(fas->f_quiesce_timeid);
				fas->f_quiesce_timeid = 0;
#endif
				return (-1);
			}
			mutex_exit(FAS_MUTEX(fas));
			return (-1);
		} else {
			IPRINTF("fas_quiesce: bus is QUIESCED\n");
			ASSERT(fas->f_quiesce_timeid == 0);
			fas->f_softstate &= ~FAS_SS_DRAINING;
			fas->f_softstate |= FAS_SS_QUIESCED;
			mutex_exit(FAS_MUTEX(fas));
			return (0);
		}
	}
	IPRINTF("fas_quiesce: bus was not busy QUIESCED\n");
	mutex_exit(FAS_MUTEX(fas));
	return (0);
}

static int
fas_unquiesce_bus(struct fas *fas)
{
	mutex_enter(FAS_MUTEX(fas));
	fas->f_softstate &= ~FAS_SS_QUIESCED;
	fas_set_throttles(fas, 0, N_SLOTS, MAX_THROTTLE);
	(void) fas_istart(fas);
	IPRINTF("fas_quiesce: bus has been UNQUIESCED\n");
	mutex_exit(FAS_MUTEX(fas));

	return (0);
}

/*
 * invoked from timeout() to check the number of outstanding commands
 */
static void
fas_ncmds_checkdrain(void *arg)
{
	struct fas *fas = arg;

	mutex_enter(FAS_MUTEX(fas));
	IPRINTF3("fas_checkdrain: ncmds (%d) ndisc (%d) state (%d)\n",
	    fas->f_ncmds, fas->f_ndisc, fas->f_softstate);
	if (fas->f_softstate & FAS_SS_DRAINING) {
		fas->f_quiesce_timeid = 0;
		if (fas_check_outstanding(fas) == 0) {
			IPRINTF("fas_drain: bus has drained\n");
			cv_signal(FAS_CV(fas));
		} else {
			/*
			 * throttle may have been reset by a bus reset
			 * or fas_runpoll()
			 * XXX shouldn't be necessary
			 */
			fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
			IPRINTF("fas_drain: rescheduling timeout\n");
			fas->f_quiesce_timeid = timeout(fas_ncmds_checkdrain,
			    fas, (FAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		}
	}
	mutex_exit(FAS_MUTEX(fas));
}

static int
fas_check_outstanding(struct fas *fas)
{
	uint_t slot;
	uint_t d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
	int ncmds = 0;

	ASSERT(mutex_owned(FAS_MUTEX(fas)));

	for (slot = 0; slot < N_SLOTS; slot += d)
		ncmds += fas->f_tcmds[slot];

	return (ncmds);
}


#ifdef FASDEBUG
/*
 * fas register read/write functions with tracing
 */
static void
fas_reg_tracing(struct fas *fas, int type, int regno, uint32_t what)
{
	fas->f_reg_trace[fas->f_reg_trace_index++] = type;
	fas->f_reg_trace[fas->f_reg_trace_index++] = regno;
	fas->f_reg_trace[fas->f_reg_trace_index++] = what;
	fas->f_reg_trace[fas->f_reg_trace_index++] = gethrtime();
	fas->f_reg_trace[fas->f_reg_trace_index] = 0xff;
	if (fas->f_reg_trace_index >= REG_TRACE_BUF_SIZE) {
		fas->f_reg_trace_index = 0;
	}
}

static void
fas_reg_cmd_write(struct fas *fas, uint8_t cmd)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	int regno = (uintptr_t)&fasreg->fas_cmd - (uintptr_t)fasreg;

	fasreg->fas_cmd = cmd;
	fas->f_last_cmd = cmd;

	EPRINTF1("issuing cmd %x\n", (uchar_t)cmd);
	fas_reg_tracing(fas, 0, regno, cmd);

	fas->f_reg_cmds++;
}

static void
fas_reg_write(struct fas *fas, volatile uint8_t *p, uint8_t what)
{
	int regno = (uintptr_t)p - (uintptr_t)fas->f_reg;

	*p = what;

	EPRINTF2("writing reg%x = %x\n", regno, what);
	fas_reg_tracing(fas, 1, regno, what);

	fas->f_reg_writes++;
}

static uint8_t
fas_reg_read(struct fas *fas, volatile uint8_t *p)
{
	uint8_t what;
	int regno = (uintptr_t)p - (uintptr_t)fas->f_reg;

	what = *p;

	EPRINTF2("reading reg%x => %x\n", regno, what);
	fas_reg_tracing(fas, 2, regno, what);

	fas->f_reg_reads++;

	return (what);
}

/*
 * dma register access routines
 */
static void
fas_dma_reg_write(struct fas *fas, volatile uint32_t *p, uint32_t what)
{
	*p = what;
	fas->f_reg_dma_writes++;

#ifdef DMA_REG_TRACING
	{
		int regno = (uintptr_t)p - (uintptr_t)fas->f_dma;
		EPRINTF2("writing dma reg%x = %x\n", regno, what);
		fas_reg_tracing(fas, 3, regno, what);
	}
#endif
}

static uint32_t
fas_dma_reg_read(struct fas *fas, volatile uint32_t *p)
{
	uint32_t what = *p;
	fas->f_reg_dma_reads++;

#ifdef DMA_REG_TRACING
	{
		int regno = (uintptr_t)p - (uintptr_t)fas->f_dma;
		EPRINTF2("reading dma reg%x => %x\n", regno, what);
		fas_reg_tracing(fas, 4, regno, what);
	}
#endif
	return (what);
}
#endif

#define	FIFO_EMPTY(fas)	(fas_reg_read(fas, &fas->f_reg->fas_stat2) & \
	FAS_STAT2_EMPTY)
#define	FIFO_CNT(fas) \
	(fas_reg_read(fas, &fas->f_reg->fas_fifo_flag) & FIFO_CNT_MASK)

#ifdef FASDEBUG
static void
fas_assert_atn(struct fas *fas)
{
	fas_reg_cmd_write(fas, CMD_SET_ATN);
#ifdef FAS_TEST
	if (fas_test_stop > 1)
		debug_enter("asserted atn");
#endif
}
#else
#define	fas_assert_atn(fas)	fas_reg_cmd_write(fas, CMD_SET_ATN)
#endif

/*
 * DMA macros; we use a shadow copy of the dma_csr to save unnecessary
 * reads
 */
#define	FAS_DMA_WRITE(fas, count, base, cmd) { \
	volatile struct fasreg *fasreg = fas->f_reg; \
	volatile struct dma *dmar = fas->f_dma; \
	ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
	SET_FAS_COUNT(fasreg, count); \
	fas_reg_cmd_write(fas, cmd); \
	fas_dma_reg_write(fas, &dmar->dma_count, count); \
	fas->f_dma_csr |= \
	    DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \
	fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
	fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \
}

#define	FAS_DMA_WRITE_SETUP(fas, count, base) { \
	volatile struct fasreg *fasreg = fas->f_reg; \
	volatile struct dma *dmar = fas->f_dma; \
	ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
	SET_FAS_COUNT(fasreg, count); \
	fas_dma_reg_write(fas, &dmar->dma_count, count); \
	fas->f_dma_csr |= \
	    DMA_WRITE | DMA_ENDVMA | DMA_DSBL_DRAIN; \
	fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
}
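
/*
 * Note: FAS_DMA_WRITE_SETUP is the same sequence as FAS_DMA_WRITE above
 * except that it neither issues the fas command nor enables the dma
 * engine by writing the csr; the caller does both later.
 */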

#define	FAS_DMA_READ(fas, count, base, dmacount, cmd) { \
	volatile struct fasreg *fasreg = fas->f_reg; \
	volatile struct dma *dmar = fas->f_dma; \
	ASSERT((fas_dma_reg_read(fas, &dmar->dma_csr) & DMA_ENDVMA) == 0); \
	SET_FAS_COUNT(fasreg, count); \
	fas_reg_cmd_write(fas, cmd); \
	fas->f_dma_csr |= \
	    (fas->f_dma_csr & ~DMA_WRITE) | DMA_ENDVMA | DMA_DSBL_DRAIN; \
	fas_dma_reg_write(fas, &dmar->dma_count, dmacount); \
	fas_dma_reg_write(fas, &dmar->dma_addr, (fas->f_lastdma = base)); \
	fas_dma_reg_write(fas, &dmar->dma_csr, fas->f_dma_csr); \
}

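/*
 * FAS_FLUSH_DMA resets the dma engine, rebuilds the shadow csr with
 * interrupts enabled, two-cycle mode, and parity/drain disabled, and
 * then reprograms the hardware with ENDVMA and WRITE cleared.
 */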
static void
FAS_FLUSH_DMA(struct fas *fas)
{
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET);
	fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY|
	    DMA_DSBL_DRAIN);
	fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE);
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0);
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
	fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0);
}

/*
 * FAS_FLUSH_DMA_HARD checks on REQPEND before taking away the reset
 */
static void
FAS_FLUSH_DMA_HARD(struct fas *fas)
{
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, DMA_RESET);
	fas->f_dma_csr |= (DMA_INTEN|DMA_TWO_CYCLE|DMA_DSBL_PARITY|
	    DMA_DSBL_DRAIN);
	fas->f_dma_csr &= ~(DMA_ENDVMA | DMA_WRITE);
	while (fas_dma_reg_read(fas, &fas->f_dma->dma_csr) & DMA_REQPEND)
		;
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, 0);
	fas_dma_reg_write(fas, &fas->f_dma->dma_csr, fas->f_dma_csr);
	fas_dma_reg_write(fas, &fas->f_dma->dma_addr, 0);
}

/*
 * update period, conf3, offset reg, if necessary
 */
#define	FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target) \
{ \
	uchar_t period, offset, conf3; \
	period = fas->f_sync_period[target] & SYNC_PERIOD_MASK; \
	offset = fas->f_offset[target]; \
	conf3 = fas->f_fasconf3[target]; \
	if ((period != fas->f_period_reg_last) || \
	    (offset != fas->f_offset_reg_last) || \
	    (conf3 != fas->f_fasconf3_reg_last)) { \
		fas->f_period_reg_last = period; \
		fas->f_offset_reg_last = offset; \
		fas->f_fasconf3_reg_last = conf3; \
		fas_reg_write(fas, &fasreg->fas_sync_period, period); \
		fas_reg_write(fas, &fasreg->fas_sync_offset, offset); \
		fas_reg_write(fas, &fasreg->fas_conf3, conf3); \
	} \
}
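
/*
 * The f_period_reg_last/f_offset_reg_last/f_fasconf3_reg_last shadow
 * values compared above are set to 0xff in fas_internal_reset()
 * (FAS_RESET_SOFTC), so the first command after a reset always
 * reloads the chip registers.
 */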

/*
 * fifo read/write routines
 * always read the fifo bytes before reading the interrupt register
 */

static void
fas_read_fifo(struct fas *fas)
{
	int stat = fas->f_stat;
	volatile struct fasreg *fasreg = fas->f_reg;
	int i;

	i = fas_reg_read(fas, &fasreg->fas_fifo_flag) & FIFO_CNT_MASK;
	EPRINTF2("fas_read_fifo: fifo cnt=%x, stat=%x\n", i, stat);
	ASSERT(i <= FIFOSIZE);

	fas->f_fifolen = 0;
	while (i-- > 0) {
		fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
		    &fasreg->fas_fifo_data);
		fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
		    &fasreg->fas_fifo_data);
	}
	if (fas->f_stat2 & FAS_STAT2_ISHUTTLE) {

		/* write pad byte */
		fas_reg_write(fas, &fasreg->fas_fifo_data, 0);
		fas->f_fifo[fas->f_fifolen++] = fas_reg_read(fas,
		    &fasreg->fas_fifo_data);
		/* flush pad byte */
		fas_reg_cmd_write(fas, CMD_FLUSH);
	}
	EPRINTF2("fas_read_fifo: fifo len=%x, stat2=%x\n",
	    fas->f_fifolen, stat);
} /* fas_read_fifo */

static void
fas_write_fifo(struct fas *fas, uchar_t *buf, int length, int pad)
{
	int i;
	volatile struct fasreg *fasreg = fas->f_reg;

	EPRINTF1("writing fifo %x bytes\n", length);
	ASSERT(length <= 15);
	fas_reg_cmd_write(fas, CMD_FLUSH);
	for (i = 0; i < length; i++) {
		fas_reg_write(fas, &fasreg->fas_fifo_data, buf[i]);
		if (pad) {
			fas_reg_write(fas, &fasreg->fas_fifo_data, 0);
		}
	}
}
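
/*
 * Note (an observation from the code above, not a spec reference): the
 * fifo is handled 16 bits at a time -- fas_read_fifo() pulls two bytes
 * per count, and a nonzero pad here writes a zero byte after each data
 * byte to fill the other half of the word.
 */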

/*
 * Hardware and Software internal reset routines
 */
static int
fas_init_chip(struct fas *fas, uchar_t initiator_id)
{
	int i;
	uchar_t clock_conv;
	uchar_t initial_conf3;
	uint_t ticks;
	static char *prop_cfreq = "clock-frequency";

	/*
	 * Determine clock frequency of attached FAS chip.
	 */
	i = ddi_prop_get_int(DDI_DEV_T_ANY,
	    fas->f_dev, DDI_PROP_DONTPASS, prop_cfreq, -1);
	clock_conv = (i + FIVE_MEG - 1) / FIVE_MEG;
	if (clock_conv != CLOCK_40MHZ) {
		fas_log(fas, CE_WARN, "Bad clock frequency");
		return (-1);
	}

	fas->f_clock_conv = clock_conv;
	fas->f_clock_cycle = CLOCK_PERIOD(i);
	ticks = FAS_CLOCK_TICK(fas);
	fas->f_stval = FAS_CLOCK_TIMEOUT(ticks, fas_selection_timeout);

	DPRINTF5("%d mhz, clock_conv %d, clock_cycle %d, ticks %d, stval %d\n",
	    i, fas->f_clock_conv, fas->f_clock_cycle,
	    ticks, fas->f_stval);
	/*
	 * set up conf registers
	 */
	fas->f_fasconf |= FAS_CONF_PAREN;
	fas->f_fasconf2 = (uchar_t)(FAS_CONF2_FENABLE | FAS_CONF2_XL32);

	if (initiator_id < NTARGETS) {
		initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO;
	} else {
		initial_conf3 = FAS_CONF3_FASTCLK | FAS_CONF3_ODDBYTE_AUTO |
		    FAS_CONF3_IDBIT3;
	}

	for (i = 0; i < NTARGETS_WIDE; i++) {
		fas->f_fasconf3[i] = initial_conf3;
	}

	/*
	 * Avoid resetting the scsi bus since this causes a few seconds
	 * delay per fas in boot and also causes busy conditions in some
	 * tape devices.
	 */
	fas_internal_reset(fas, FAS_RESET_SOFTC|FAS_RESET_FAS|FAS_RESET_DMA);

	/*
	 * initialize period and offset for each target
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_SYNC) {
			fas->f_offset[i] = fas_default_offset |
			    fas->f_req_ack_delay;
		} else {
			fas->f_offset[i] = 0;
		}
		if (fas->f_target_scsi_options[i] & SCSI_OPTIONS_FAST) {
			fas->f_neg_period[i] =
			    (uchar_t)MIN_SYNC_PERIOD(fas);
		} else {
			fas->f_neg_period[i] =
			    (uchar_t)CONVERT_PERIOD(DEFAULT_SYNC_PERIOD);
		}
	}
	return (0);
}

/*
 * reset bus, chip, dma, or soft state
 */
static void
fas_internal_reset(struct fas *fas, int reset_action)
{
	volatile struct fasreg *fasreg = fas->f_reg;
	volatile struct dma *dmar = fas->f_dma;

	if (reset_action & FAS_RESET_SCSIBUS) {
		fas_reg_cmd_write(fas, CMD_RESET_SCSI);
		fas_setup_reset_delay(fas);
	}

	FAS_FLUSH_DMA_HARD(fas); /* resets and reinits the dma */

	/*
	 * NOTE: if dma is aborted while active, indefinite hangs
	 * may occur; it is preferable to stop the target first before
	 * flushing the dma
	 */
	if (reset_action & FAS_RESET_DMA) {
		int burstsizes = fas->f_dma_attr->dma_attr_burstsizes;
		if (burstsizes & BURST64) {
			IPRINTF("64 byte burstsize\n");
			fas->f_dma_csr |= DMA_BURST64;
		} else if (burstsizes & BURST32) {
			IPRINTF("32 byte burstsize\n");
			fas->f_dma_csr |= DMA_BURST32;
		} else {
			IPRINTF("16 byte burstsize\n");
		}
		if ((fas->f_hm_rev > 0x20) && (fas_enable_sbus64) &&
		    (ddi_dma_set_sbus64(fas->f_dmahandle, burstsizes) ==
		    DDI_SUCCESS)) {
			IPRINTF("enabled 64 bit sbus\n");
			fas->f_dma_csr |= DMA_WIDE_EN;
		}
	}

	if (reset_action & FAS_RESET_FAS) {
		/*
1840 * 2 NOPs with DMA are required here
1841 	 * (id_code is unreliable if we don't do this)
1842 */
1843 uchar_t idcode, fcode;
1844 int dmarev;
1845
1846 fas_reg_cmd_write(fas, CMD_RESET_FAS);
1847 fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA);
1848 fas_reg_cmd_write(fas, CMD_NOP | CMD_DMA);
1849
1850 /*
1851 * Re-load chip configurations
1852 * Only load registers which are not loaded in fas_startcmd()
1853 */
1854 fas_reg_write(fas, &fasreg->fas_clock_conv,
1855 (fas->f_clock_conv & CLOCK_MASK));
1856
1857 fas_reg_write(fas, &fasreg->fas_timeout, fas->f_stval);
1858
1859 /*
1860 * enable default configurations
1861 */
1862 fas->f_idcode = idcode =
1863 fas_reg_read(fas, &fasreg->fas_id_code);
1864 fcode = (uchar_t)(idcode & FAS_FCODE_MASK) >> (uchar_t)3;
1865 fas->f_type = FAS366;
1866 IPRINTF2("Family code %d, revision %d\n",
1867 fcode, (idcode & FAS_REV_MASK));
1868 dmarev = fas_dma_reg_read(fas, &dmar->dma_csr);
1869 dmarev = (dmarev >> 11) & 0xf;
1870 IPRINTF1("DMA channel revision %d\n", dmarev);
1871
1872 fas_reg_write(fas, &fasreg->fas_conf, fas->f_fasconf);
1873 fas_reg_write(fas, &fasreg->fas_conf2, fas->f_fasconf2);
1874
1875 fas->f_req_ack_delay = DEFAULT_REQ_ACK_DELAY;
1876
1877 /*
1878 * Just in case... clear interrupt
1879 */
1880 (void) fas_reg_read(fas, &fasreg->fas_intr);
1881 }
1882
1883 if (reset_action & FAS_RESET_SOFTC) {
1884 fas->f_wdtr_sent = fas->f_sdtr_sent = 0;
1885 fas->f_wide_known = fas->f_sync_known = 0;
1886 fas->f_wide_enabled = fas->f_sync_enabled = 0;
1887 fas->f_omsglen = 0;
1888 fas->f_cur_msgout[0] = fas->f_last_msgout =
1889 fas->f_last_msgin = INVALID_MSG;
1890 fas->f_abort_msg_sent = fas->f_reset_msg_sent = 0;
1891 fas->f_next_slot = 0;
1892 fas->f_current_sp = NULL;
1893 fas->f_fifolen = 0;
1894 fas->f_fasconf3_reg_last = fas->f_offset_reg_last =
1895 fas->f_period_reg_last = 0xff;
1896
1897 New_state(fas, STATE_FREE);
1898 }
1899 }
1900
1901
1902 #ifdef FASDEBUG
1903 /*
1904 * check if ncmds still reflects the truth
1905 * count all cmds for this driver instance and compare with ncmds
1906 */
1907 static void
1908 fas_check_ncmds(struct fas *fas)
1909 {
1910 int slot = 0;
1911 ushort_t tag, t;
1912 int n, total = 0;
1913
1914 do {
1915 if (fas->f_active[slot]) {
1916 struct fas_cmd *sp = fas->f_readyf[slot];
1917 t = fas->f_active[slot]->f_n_slots;
1918 while (sp != 0) {
1919 sp = sp->cmd_forw;
1920 total++;
1921 }
1922 for (n = tag = 0; tag < t; tag++) {
1923 if (fas->f_active[slot]->f_slot[tag] != 0) {
1924 n++;
1925 total++;
1926 }
1927 }
1928 ASSERT(n == fas->f_tcmds[slot]);
1929 }
1930 slot = NEXTSLOT(slot, fas->f_dslot);
1931 } while (slot != 0);
1932
1933 if (total != fas->f_ncmds) {
1934 IPRINTF2("fas_check_ncmds: total=%x, ncmds=%x\n",
1935 total, fas->f_ncmds);
1936 }
1937 ASSERT(fas->f_ncmds >= fas->f_ndisc);
1938 }
1939 #else
1940 #define fas_check_ncmds(fas)
1941 #endif
1942
1943 /*
1944 * SCSA Interface functions
1945 *
1946 * Visible to the external world via the transport structure.
1947 *
1948 * fas_scsi_abort: abort a current cmd or all cmds for a target
1949 */
1950 /*ARGSUSED*/
1951 static int
1952 fas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1953 {
1954 struct fas *fas = ADDR2FAS(ap);
1955 int rval;
1956
1957 IPRINTF2("fas_scsi_abort: target %d.%d\n", ap->a_target, ap->a_lun);
1958
1959 mutex_enter(FAS_MUTEX(fas));
1960 rval = fas_do_scsi_abort(ap, pkt);
1961 fas_check_waitQ_and_mutex_exit(fas);
1962 return (rval);
1963 }
1964
1965 /*
1966 * reset handling: reset bus or target
1967 */
1968 /*ARGSUSED*/
1969 static int
1970 fas_scsi_reset(struct scsi_address *ap, int level)
1971 {
1972 struct fas *fas = ADDR2FAS(ap);
1973 int rval;
1974
1975 IPRINTF3("fas_scsi_reset: target %d.%d, level %d\n",
1976 ap->a_target, ap->a_lun, level);
1977
1978 mutex_enter(FAS_MUTEX(fas));
1979 rval = fas_do_scsi_reset(ap, level);
1980 fas_check_waitQ_and_mutex_exit(fas);
1981 return (rval);
1982 }
1983
1984 /*
1985 * entry point for reset notification setup, to register or to cancel.
1986 */
1987 static int
1988 fas_scsi_reset_notify(struct scsi_address *ap, int flag,
1989 void (*callback)(caddr_t), caddr_t arg)
1990 {
1991 struct fas *fas = ADDR2FAS(ap);
1992
1993 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1994 &fas->f_mutex, &fas->f_reset_notify_listf));
1995 }
1996
1997 /*
1998 * capability interface
1999 */
2000 /*ARGSUSED*/
2001 static int
2002 fas_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
2003 {
2004 struct fas *fas = ADDR2FAS(ap);
2005 DPRINTF3("fas_scsi_getcap: tgt=%x, cap=%s, whom=%x\n",
2006 ap->a_target, cap, whom);
2007 return (fas_commoncap(ap, cap, 0, whom, 0));
2008 }
2009
2010 /*ARGSUSED*/
2011 static int
2012 fas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2013 {
2014 struct fas *fas = ADDR2FAS(ap);
2015 IPRINTF4("fas_scsi_setcap: tgt=%x, cap=%s, value=%x, whom=%x\n",
2016 ap->a_target, cap, value, whom);
2017 return (fas_commoncap(ap, cap, value, whom, 1));
2018 }
2019
2020 /*
2021 * pkt and dma allocation and deallocation
2022 */
2023 /*ARGSUSED*/
2024 static void
2025 fas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2026 {
2027 struct fas_cmd *cmd = PKT2CMD(pkt);
2028
2029 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START,
2030 "fas_scsi_dmafree_start");
2031
2032 if (cmd->cmd_flags & CFLAG_DMAVALID) {
2033 /*
2034 * Free the mapping.
2035 */
2036 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
2037 cmd->cmd_flags ^= CFLAG_DMAVALID;
2038 }
2039 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END,
2040 "fas_scsi_dmafree_end");
2041 }
2042
2043 /*ARGSUSED*/
2044 static void
2045 fas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2046 {
2047 struct fas_cmd *sp = PKT2CMD(pkt);
2048
2049 if (sp->cmd_flags & CFLAG_DMAVALID) {
2050 if (ddi_dma_sync(sp->cmd_dmahandle, 0, 0,
2051 (sp->cmd_flags & CFLAG_DMASEND) ?
2052 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
2053 DDI_SUCCESS) {
2054 fas_log(ADDR2FAS(ap), CE_WARN,
2055 "sync of pkt (%p) failed", (void *)pkt);
2056 }
2057 }
2058 }
2059
2060 /*
2061 * initialize pkt and allocate DVMA resources
2062 */
2063 static struct scsi_pkt *
2064 fas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
2065 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
2066 int flags, int (*callback)(), caddr_t arg)
2067 {
2068 int kf;
2069 int failure = 1;
2070 struct fas_cmd *cmd;
2071 struct fas *fas = ADDR2FAS(ap);
2072 struct fas_cmd *new_cmd;
2073 int rval;
2074
2075 /* #define FAS_TEST_EXTRN_ALLOC */
2076 #ifdef FAS_TEST_EXTRN_ALLOC
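	/* inflate the sizes to force the external allocation path below */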
2077 cmdlen *= 4; statuslen *= 4; tgtlen *= 4;
2078 #endif
2079 /*
2080 * if no pkt was passed then allocate a pkt first
2081 */
2082 if (pkt == NULL) {
2083 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_START,
2084 "fas_scsi_impl_pktalloc_start");
2085
2086 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
2087
2088 /*
2089 * only one size of pkt (with arq).
2090 */
2091 cmd = kmem_cache_alloc(fas->f_kmem_cache, kf);
2092
2093 if (cmd) {
2094
2095 ddi_dma_handle_t save_dma_handle;
2096
2097 save_dma_handle = cmd->cmd_dmahandle;
2098 bzero(cmd, EXTCMD_SIZE);
2099 cmd->cmd_dmahandle = save_dma_handle;
2100
2101 pkt = (struct scsi_pkt *)((uchar_t *)cmd +
2102 sizeof (struct fas_cmd));
2103 cmd->cmd_pkt = pkt;
2104 pkt->pkt_ha_private = (opaque_t)cmd;
2105 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
2106 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
2107 pkt->pkt_address = *ap;
2108
2109 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
2110 pkt->pkt_private = cmd->cmd_pkt_private;
2111
2112 cmd->cmd_cdblen = cmdlen;
2113 cmd->cmd_scblen = statuslen;
2114 cmd->cmd_privlen = tgtlen;
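			/*
			 * the slot number indexes the per-lun active and
			 * ready queues; presumably NLUNS_PER_TARGET is a
			 * power of two so the or below acts as an addition
			 */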
2115 cmd->cmd_slot =
2116 (Tgt(cmd) * NLUNS_PER_TARGET) | Lun(cmd);
2117 failure = 0;
2118 }
2119 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
2120 (tgtlen > PKT_PRIV_LEN) ||
2121 (statuslen > EXTCMDS_STATUS_SIZE)) {
2122 if (failure == 0) {
2123 /*
2124 * if extern alloc fails, all will be
2125 * deallocated, including cmd
2126 */
2127 failure = fas_pkt_alloc_extern(fas, cmd,
2128 cmdlen, tgtlen, statuslen, kf);
2129 }
2130 if (failure) {
2131 /*
2132 * nothing to deallocate so just return
2133 */
2134 TRACE_0(TR_FAC_SCSI_FAS,
2135 TR_FAS_SCSI_IMPL_PKTALLOC_END,
2136 "fas_scsi_impl_pktalloc_end");
2137 return (NULL);
2138 }
2139 }
2140
2141 new_cmd = cmd;
2142
2143 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTALLOC_END,
2144 "fas_scsi_impl_pktalloc_end");
2145 } else {
2146 cmd = PKT2CMD(pkt);
2147 new_cmd = NULL;
2148 }
2149
2150 /*
2151 * Second step of fas_scsi_init_pkt:
2152 * bind the buf to the handle
2153 */
2154 if (bp && bp->b_bcount != 0 &&
2155 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
2156
2157 int cmd_flags, dma_flags;
2158 uint_t dmacookie_count;
2159
2160 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_START,
2161 "fas_scsi_impl_dmaget_start");
2162
2163 cmd_flags = cmd->cmd_flags;
2164
2165 if (bp->b_flags & B_READ) {
2166 cmd_flags &= ~CFLAG_DMASEND;
2167 dma_flags = DDI_DMA_READ | DDI_DMA_PARTIAL;
2168 } else {
2169 cmd_flags |= CFLAG_DMASEND;
2170 dma_flags = DDI_DMA_WRITE | DDI_DMA_PARTIAL;
2171 }
2172 if (flags & PKT_CONSISTENT) {
2173 cmd_flags |= CFLAG_CMDIOPB;
2174 dma_flags |= DDI_DMA_CONSISTENT;
2175 }
2176
2177 /*
2178 * bind the handle to the buf
2179 */
2180 ASSERT(cmd->cmd_dmahandle != NULL);
2181 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
2182 dma_flags, callback, arg, &cmd->cmd_dmacookie,
2183 &dmacookie_count);
2184
2185 if (rval && rval != DDI_DMA_PARTIAL_MAP) {
2186 switch (rval) {
2187 case DDI_DMA_NORESOURCES:
2188 bioerror(bp, 0);
2189 break;
2190 case DDI_DMA_BADATTR:
2191 case DDI_DMA_NOMAPPING:
2192 bioerror(bp, EFAULT);
2193 break;
2194 case DDI_DMA_TOOBIG:
2195 default:
2196 bioerror(bp, EINVAL);
2197 break;
2198 }
2199 cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
2200 if (new_cmd) {
2201 fas_scsi_destroy_pkt(ap, pkt);
2202 }
2203 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END,
2204 "fas_scsi_impl_dmaget_end");
2205 return ((struct scsi_pkt *)NULL);
2206 }
2207 ASSERT(dmacookie_count == 1);
2208 cmd->cmd_dmacount = bp->b_bcount;
2209 cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
2210
2211 ASSERT(cmd->cmd_dmahandle != NULL);
2212 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAGET_END,
2213 "fas_scsi_impl_dmaget_end");
2214 }
2215
2216 return (pkt);
2217 }
2218
2219 /*
2220 * unbind dma resources and deallocate the pkt
2221 */
2222 static void
2223 fas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2224 {
2225 struct fas_cmd *sp = PKT2CMD(pkt);
2226 struct fas *fas = ADDR2FAS(ap);
2227
2228 /*
2229 * fas_scsi_impl_dmafree inline to speed things up
2230 */
2231 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_START,
2232 "fas_scsi_impl_dmafree_start");
2233
2234 if (sp->cmd_flags & CFLAG_DMAVALID) {
2235 /*
2236 * Free the mapping.
2237 */
2238 (void) ddi_dma_unbind_handle(sp->cmd_dmahandle);
2239 sp->cmd_flags ^= CFLAG_DMAVALID;
2240 }
2241
2242 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_DMAFREE_END,
2243 "fas_scsi_impl_dmafree_end");
2244
2245 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_START,
2246 "fas_scsi_impl_pktfree_start");
2247
2248 if ((sp->cmd_flags &
2249 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
2250 CFLAG_SCBEXTERN)) == 0) {
2251 sp->cmd_flags = CFLAG_FREE;
2252 kmem_cache_free(fas->f_kmem_cache, (void *)sp);
2253 } else {
2254 fas_pkt_destroy_extern(fas, sp);
2255 }
2256
2257 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_SCSI_IMPL_PKTFREE_END,
2258 "fas_scsi_impl_pktfree_end");
2259 }
2260
2261 /*
2262 * allocate and deallocate external pkt space (ie. not part of fas_cmd) for
2263 * non-standard length cdb, pkt_private, status areas
2264 * if allocation fails, then deallocate all external space and the pkt
2265 */
2266 /* ARGSUSED */
2267 static int
2268 fas_pkt_alloc_extern(struct fas *fas, struct fas_cmd *sp,
2269 int cmdlen, int tgtlen, int statuslen, int kf)
2270 {
2271 caddr_t cdbp, scbp, tgt;
2272 int failure = 0;
2273
2274 tgt = cdbp = scbp = NULL;
2275 if (cmdlen > sizeof (sp->cmd_cdb)) {
2276 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
2277 failure++;
2278 } else {
2279 sp->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
2280 sp->cmd_flags |= CFLAG_CDBEXTERN;
2281 }
2282 }
2283 if (tgtlen > PKT_PRIV_LEN) {
2284 if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
2285 failure++;
2286 } else {
2287 sp->cmd_flags |= CFLAG_PRIVEXTERN;
2288 sp->cmd_pkt->pkt_private = tgt;
2289 }
2290 }
2291 if (statuslen > EXTCMDS_STATUS_SIZE) {
2292 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
2293 failure++;
2294 } else {
2295 sp->cmd_flags |= CFLAG_SCBEXTERN;
2296 sp->cmd_pkt->pkt_scbp = (opaque_t)scbp;
2297 }
2298 }
2299 if (failure) {
2300 fas_pkt_destroy_extern(fas, sp);
2301 }
2302 return (failure);
2303 }
2304
2305 /*
2306 * deallocate external pkt space and deallocate the pkt
2307 */
2308 static void
2309 fas_pkt_destroy_extern(struct fas *fas, struct fas_cmd *sp)
2310 {
2311 if (sp->cmd_flags & CFLAG_FREE) {
2312 panic("fas_pkt_destroy_extern: freeing free packet");
2313 _NOTE(NOT_REACHED)
2314 /* NOTREACHED */
2315 }
2316 if (sp->cmd_flags & CFLAG_CDBEXTERN) {
2317 kmem_free((caddr_t)sp->cmd_pkt->pkt_cdbp,
2318 (size_t)sp->cmd_cdblen);
2319 }
2320 if (sp->cmd_flags & CFLAG_SCBEXTERN) {
2321 kmem_free((caddr_t)sp->cmd_pkt->pkt_scbp,
2322 (size_t)sp->cmd_scblen);
2323 }
2324 if (sp->cmd_flags & CFLAG_PRIVEXTERN) {
2325 kmem_free((caddr_t)sp->cmd_pkt->pkt_private,
2326 (size_t)sp->cmd_privlen);
2327 }
2328 sp->cmd_flags = CFLAG_FREE;
2329 kmem_cache_free(fas->f_kmem_cache, (void *)sp);
2330 }
2331
2332 /*
2333 * kmem cache constructor and destructor:
2334 * When constructing, we bzero the cmd and allocate the dma handle
2335 * When destructing, just free the dma handle
2336 */
2337 static int
2338 fas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
2339 {
2340 struct fas_cmd *cmd = buf;
2341 struct fas *fas = cdrarg;
2342 int (*callback)(caddr_t) = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP:
2343 DDI_DMA_DONTWAIT;
2344
2345 bzero(buf, EXTCMD_SIZE);
2346
2347 /*
2348 * allocate a dma handle
2349 */
2350 if ((ddi_dma_alloc_handle(fas->f_dev, fas->f_dma_attr, callback,
2351 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
2352 return (-1);
2353 }
2354 return (0);
2355 }
2356
2357 /*ARGSUSED*/
2358 static void
2359 fas_kmem_cache_destructor(void *buf, void *cdrarg)
2360 {
2361 struct fas_cmd *cmd = buf;
2362 if (cmd->cmd_dmahandle) {
2363 ddi_dma_free_handle(&cmd->cmd_dmahandle);
2364 }
2365 }
2366
2367 /*
2368 * fas_scsi_start - Accept commands for transport
2369 */
2370 static int
2371 fas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
2372 {
2373 struct fas_cmd *sp = PKT2CMD(pkt);
2374 struct fas *fas = ADDR2FAS(ap);
2375 int rval;
2376 int intr = 0;
2377
2378 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_START, "fas_scsi_start_start");
2379
2380 #ifdef FAS_TEST
2381 if (fas_transport_busy > 0) {
2382 fas_transport_busy--;
2383 return (TRAN_BUSY);
2384 }
2385 if ((fas_transport_busy_rqs > 0) &&
2386 (*(sp->cmd_pkt->pkt_cdbp) == SCMD_REQUEST_SENSE)) {
2387 fas_transport_busy_rqs--;
2388 return (TRAN_BUSY);
2389 }
2390 if (fas_transport_reject > 0) {
2391 fas_transport_reject--;
2392 return (TRAN_BADPKT);
2393 }
2394 #endif
2395 /*
2396 * prepare packet before taking the mutex
2397 */
2398 rval = fas_prepare_pkt(fas, sp);
2399 if (rval != TRAN_ACCEPT) {
2400 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_START_PREPARE_PKT_END,
2401 "fas_scsi_start_end (prepare_pkt)");
2402 return (rval);
2403 }
2404
2405 /*
2406 * fas mutex can be held for a long time; therefore, if the mutex is
2407 * held, we queue the packet in a waitQ; we now should check
2408 * the waitQ on every mutex_exit(FAS_MUTEX(fas)) but we really only
2409 * need to do this when the bus is free
2410 * don't put NOINTR cmds including proxy cmds in waitQ! These
2411 * cmds are handled by fas_runpoll()
2412 * if the waitQ is non-empty, queue the pkt anyway to preserve
2413 * order
2414 * the goal is to queue in waitQ as much as possible so at
2415 * interrupt time, we can move the packets to readyQ or start
2416 * a packet immediately. It helps to do this at interrupt
2417 * time because we can then field more interrupts
2418 */
2419 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
2420
2421 /*
2422 * if the bus is not free, we will get an interrupt shortly
2423 * so we don't want to take the fas mutex but queue up
2424 * the packet in the waitQ
2425 * also, if the waitQ is non-empty or there is an interrupt
2426 * pending then queue up the packet in the waitQ and let the
2427 * interrupt handler empty the waitQ
2428 */
2429 mutex_enter(&fas->f_waitQ_mutex);
2430
2431 if ((fas->f_state != STATE_FREE) ||
2432 fas->f_waitf || (intr = INTPENDING(fas))) {
2433 goto queue_in_waitQ;
2434 }
2435
2436 /*
2437 * we didn't queue up in the waitQ, so now try to accept
2438 * the packet. if we fail to get the fas mutex, go back to
2439 * the waitQ again
2440 * do not release the waitQ mutex yet because that
2441 * leaves a window where the interrupt handler has
2442 * emptied the waitQ but not released the fas mutex yet
2443 *
2444 * the interrupt handler gets the locks in opposite order
2445 * but because we do a tryenter, there is no deadlock
2446 *
2447 * if another thread has the fas mutex then either this
2448 * thread or the other may find the bus free and
2449 * empty the waitQ
2450 */
2451 if (mutex_tryenter(FAS_MUTEX(fas))) {
2452 mutex_exit(&fas->f_waitQ_mutex);
2453 rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
2454 } else {
2455 /*
2456 * we didn't get the fas mutex so
2457 * the packet has to go in the waitQ now
2458 */
2459 goto queue_in_waitQ;
2460 }
2461 } else {
2462 /*
2463 * for polled cmds, we have to take the mutex and
2464 * start the packet using fas_runpoll()
2465 */
2466 mutex_enter(FAS_MUTEX(fas));
2467 rval = fas_accept_pkt(fas, sp, TRAN_BUSY_OK);
2468 }
2469
2470 /*
2471 * if the bus is free then empty waitQ and release the mutex
2472 	 * (it is unlikely that the bus is still free after
2473 	 * accepting the packet; it may be the relatively unusual case
2474 * that we are throttling)
2475 */
2476 if (fas->f_state == STATE_FREE) {
2477 FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
2478 } else {
2479 mutex_exit(FAS_MUTEX(fas));
2480 }
2481
2482 done:
2483 TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
2484 "fas_scsi_start_end: fas 0x%p", fas);
2485 return (rval);
2486
2487 queue_in_waitQ:
2488 if (fas->f_waitf == NULL) {
2489 fas->f_waitb = fas->f_waitf = sp;
2490 sp->cmd_forw = NULL;
2491 } else {
2492 struct fas_cmd *dp = fas->f_waitb;
2493 dp->cmd_forw = fas->f_waitb = sp;
2494 sp->cmd_forw = NULL;
2495 }
2496
2497 /*
2498 * check again the fas mutex
2499 * if there was an interrupt then the interrupt
2500 * handler will eventually empty the waitQ
2501 */
2502 if ((intr == 0) && (fas->f_state == STATE_FREE) &&
2503 mutex_tryenter(FAS_MUTEX(fas))) {
2504 /*
2505 * double check if the bus is still free
2506 * (this actually reduced mutex contention a bit)
2507 */
2508 if (fas->f_state == STATE_FREE) {
2509 fas_empty_waitQ(fas);
2510 }
2511 mutex_exit(FAS_MUTEX(fas));
2512 }
2513 mutex_exit(&fas->f_waitQ_mutex);
2514
2515 TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_START_END,
2516 "fas_scsi_start_end: fas 0x%p", fas);
2517 return (rval);
2518 }
2519
2520 /*
2521 * prepare the pkt:
2522 * the pkt may have been resubmitted or just reused so
2523 * initialize some fields, reset the dma window, and do some checks
2524 */
2525 static int
2526 fas_prepare_pkt(struct fas *fas, struct fas_cmd *sp)
2527 {
2528 struct scsi_pkt *pkt = CMD2PKT(sp);
2529
2530 /*
2531 * Reinitialize some fields that need it; the packet may
2532 * have been resubmitted
2533 */
2534 pkt->pkt_reason = CMD_CMPLT;
2535 pkt->pkt_state = 0;
2536 pkt->pkt_statistics = 0;
2537 pkt->pkt_resid = 0;
2538 sp->cmd_age = 0;
2539 sp->cmd_pkt_flags = pkt->pkt_flags;
2540
2541 /*
2542 * Copy the cdb pointer to the pkt wrapper area as we
2543 * might modify this pointer. Zero status byte
2544 */
2545 sp->cmd_cdbp = pkt->pkt_cdbp;
2546 *(pkt->pkt_scbp) = 0;
2547
2548 if (sp->cmd_flags & CFLAG_DMAVALID) {
2549 pkt->pkt_resid = sp->cmd_dmacount;
2550
2551 /*
2552 * if the pkt was resubmitted then the
2553 * windows may be at the wrong number
2554 */
2555 if (sp->cmd_cur_win) {
2556 sp->cmd_cur_win = 0;
2557 if (fas_set_new_window(fas, sp)) {
2558 IPRINTF("cannot reset window\n");
2559 return (TRAN_BADPKT);
2560 }
2561 }
2562 sp->cmd_saved_cur_addr =
2563 sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;
2564
2565 /*
2566 		 * the common case is just one window; we worry
2567 * about multiple windows when we run out of the
2568 * current window
2569 */
2570 sp->cmd_nwin = sp->cmd_saved_win = 0;
2571 sp->cmd_data_count = sp->cmd_saved_data_count = 0;
2572
2573 /*
2574 * consistent packets need to be sync'ed first
2575 * (only for data going out)
2576 */
2577 if ((sp->cmd_flags & (CFLAG_CMDIOPB | CFLAG_DMASEND)) ==
2578 (CFLAG_CMDIOPB | CFLAG_DMASEND)) {
2579 (void) ddi_dma_sync(sp->cmd_dmahandle, 0, (uint_t)0,
2580 DDI_DMA_SYNC_FORDEV);
2581 }
2582 }
2583
2584 sp->cmd_actual_cdblen = sp->cmd_cdblen;
2585
2586 #ifdef FAS_TEST
2587 #ifndef __lock_lint
2588 if (fas_test_untagged > 0) {
2589 if (TAGGED(Tgt(sp))) {
2590 int slot = sp->cmd_slot;
2591 sp->cmd_pkt_flags &= ~FLAG_TAGMASK;
2592 sp->cmd_pkt_flags &= ~FLAG_NODISCON;
2593 sp->cmd_pkt_flags |= 0x80000000;
2594 fas_log(fas, CE_NOTE,
2595 "starting untagged cmd, target=%d,"
2596 " tcmds=%d, sp=0x%p, throttle=%d\n",
2597 Tgt(sp), fas->f_tcmds[slot], (void *)sp,
2598 fas->f_throttle[slot]);
2599 fas_test_untagged = -10;
2600 }
2601 }
2602 #endif
2603 #endif
2604
2605 #ifdef FASDEBUG
2606 if (NOTAG(Tgt(sp)) && (pkt->pkt_flags & FLAG_TAGMASK)) {
2607 IPRINTF2("tagged packet for non-tagged target %d.%d\n",
2608 Tgt(sp), Lun(sp));
2609 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
2610 "fas_prepare_pkt_end (tran_badpkt)");
2611 return (TRAN_BADPKT);
2612 }
2613
2614 /*
2615 * the SCSA spec states that it is an error to have no
2616 * completion function when FLAG_NOINTR is not set
2617 */
2618 if ((pkt->pkt_comp == NULL) &&
2619 ((pkt->pkt_flags & FLAG_NOINTR) == 0)) {
2620 IPRINTF("intr packet with pkt_comp == 0\n");
2621 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_BADPKT_END,
2622 "fas_prepare_pkt_end (tran_badpkt)");
2623 return (TRAN_BADPKT);
2624 }
2625 #endif /* FASDEBUG */
2626
2627 if ((fas->f_target_scsi_options[Tgt(sp)] & SCSI_OPTIONS_DR) == 0) {
2628 /*
2629 * no need to reset tag bits since tag queueing will
2630 * not be enabled if disconnects are disabled
2631 */
2632 sp->cmd_pkt_flags |= FLAG_NODISCON;
2633 }
2634
2635 sp->cmd_flags = (sp->cmd_flags & ~CFLAG_TRANFLAG) |
2636 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
2637
2638 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PREPARE_PKT_TRAN_ACCEPT_END,
2639 "fas_prepare_pkt_end (tran_accept)");
2640 return (TRAN_ACCEPT);
2641 }
2642
2643 /*
2644 * emptying the waitQ just before releasing FAS_MUTEX is a bit
2645 * tricky; if we release the waitQ mutex and then the FAS_MUTEX,
2646 * another thread could queue a cmd in the waitQ, just before
2647 * the FAS_MUTEX is released. This cmd is then stuck in the waitQ unless
2648 * another cmd comes in or fas_intr() or fas_watch() checks the waitQ.
2649 * Therefore, by releasing the FAS_MUTEX before releasing the waitQ mutex,
2650  * we prevent fas_scsi_start() from filling the waitQ
2651 *
2652 * By setting NO_TRAN_BUSY, we force fas_accept_pkt() to queue up
2653 * the waitQ pkts in the readyQ.
2654 * If a QFull condition occurs, the target driver may set its throttle
2655 * too high because of the requests queued up in the readyQ but this
2656 * is not a big problem. The throttle should be periodically reset anyway.
2657 */
2658 static void
2659 fas_empty_waitQ(struct fas *fas)
2660 {
2661 struct fas_cmd *sp;
2662 int rval;
2663 struct fas_cmd *waitf, *waitb;
2664
2665 ASSERT(mutex_owned(&fas->f_waitQ_mutex));
2666 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_START,
2667 "fas_empty_waitQ_start");
2668
2669 while (fas->f_waitf) {
2670
2671 /* copy waitQ, zero the waitQ and release the mutex */
2672 waitf = fas->f_waitf;
2673 waitb = fas->f_waitb;
2674 fas->f_waitf = fas->f_waitb = NULL;
2675 mutex_exit(&fas->f_waitQ_mutex);
2676
2677 do {
2678 sp = waitf;
2679 waitf = sp->cmd_forw;
2680 if (waitb == sp) {
2681 waitb = NULL;
2682 }
2683
2684 rval = fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
2685
2686 /*
2687 * If the packet was rejected for other reasons then
2688 * complete it here
2689 */
2690 if (rval != TRAN_ACCEPT) {
2691 ASSERT(rval != TRAN_BUSY);
2692 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
2693 if (sp->cmd_pkt->pkt_comp) {
2694 sp->cmd_flags |= CFLAG_FINISHED;
2695 fas_call_pkt_comp(fas, sp);
2696 }
2697 }
2698
2699 if (INTPENDING(fas)) {
2700 /*
2701 * stop processing the waitQ and put back
2702 * the remaining packets on the waitQ
2703 */
2704 mutex_enter(&fas->f_waitQ_mutex);
2705 if (waitf) {
2706 ASSERT(waitb != NULL);
2707 waitb->cmd_forw = fas->f_waitf;
2708 fas->f_waitf = waitf;
2709 if (fas->f_waitb == NULL) {
2710 fas->f_waitb = waitb;
2711 }
2712 }
2713 return;
2714 }
2715 } while (waitf);
2716
2717 mutex_enter(&fas->f_waitQ_mutex);
2718 }
2719 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_EMPTY_WAITQ_END,
2720 "fas_empty_waitQ_end");
2721 }
2722
2723 static void
2724 fas_move_waitQ_to_readyQ(struct fas *fas)
2725 {
2726 /*
2727 * this may actually start cmds but it is most likely
2728 * that if waitQ is not empty that the bus is not free
2729 */
2730 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2731 mutex_enter(&fas->f_waitQ_mutex);
2732 fas_empty_waitQ(fas);
2733 mutex_exit(&fas->f_waitQ_mutex);
2734 }
2735
2736
2737 /*
2738 * function wrapper for two frequently used macros. for the non-critical
2739 * path we use the function
2740 */
2741 static void
2742 fas_check_waitQ_and_mutex_exit(struct fas *fas)
2743 {
2744 _NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(fas->f_mutex))
2745 FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
2746 FAS_EMPTY_CALLBACKQ(fas);
2747 }
2748
2749 /*
2750 * fas_accept_pkt():
2751 * the flag argument is to force fas_accept_pkt to accept the pkt;
2752 * the caller cannot take the pkt back and it has to be queued up in
2753 * the readyQ
2754 */
2755 static int
2756 fas_accept_pkt(struct fas *fas, struct fas_cmd *sp, int flag)
2757 {
2758 short slot = sp->cmd_slot;
2759 int rval = TRAN_ACCEPT;
2760
2761 TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_START, "fas_accept_pkt_start");
2762 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2763 ASSERT(fas->f_ncmds >= 0 && fas->f_ndisc >= 0);
2764 ASSERT(fas->f_ncmds >= fas->f_ndisc);
2765 ASSERT(fas->f_tcmds[slot] >= 0);
2766
2767 /*
2768 * prepare packet for transport if this hasn't been done yet and
2769 * do some checks
2770 */
2771 if ((sp->cmd_flags & CFLAG_PREPARED) == 0) {
2772 rval = fas_prepare_pkt(fas, sp);
2773 if (rval != TRAN_ACCEPT) {
2774 IPRINTF1("prepare pkt failed, slot=%x\n", slot);
2775 sp->cmd_flags &= ~CFLAG_TRANFLAG;
2776 goto done;
2777 }
2778 }
2779
2780 if (Lun(sp)) {
2781 EPRINTF("fas_accept_pkt: switching target and lun slot scan\n");
2782 fas->f_dslot = 1;
2783
2784 if ((fas->f_active[slot] == NULL) ||
2785 ((fas->f_active[slot]->f_n_slots != NTAGS) &&
2786 TAGGED(Tgt(sp)))) {
2787 (void) fas_alloc_active_slots(fas, slot, KM_NOSLEEP);
2788 }
2789 if ((fas->f_active[slot] == NULL) ||
2790 (NOTAG(Tgt(sp)) && (sp->cmd_pkt_flags & FLAG_TAGMASK))) {
2791 IPRINTF("fatal error on non-zero lun pkt\n");
2792 return (TRAN_FATAL_ERROR);
2793 }
2794 }
2795
2796 /*
2797 * we accepted the command; increment the count
2798 * (we may still reject later if TRAN_BUSY_OK)
2799 */
2800 fas_check_ncmds(fas);
2801 fas->f_ncmds++;
2802
2803 /*
2804 * if it is a nointr packet, start it now
2805 * (NO_INTR pkts are not queued in the waitQ)
2806 */
2807 if (sp->cmd_pkt_flags & FLAG_NOINTR) {
2808 EPRINTF("starting a nointr cmd\n");
2809 fas_runpoll(fas, slot, sp);
2810 sp->cmd_flags &= ~CFLAG_TRANFLAG;
2811 goto done;
2812 }
2813
2814 /*
2815 * reset the throttle if we were draining
2816 */
2817 if ((fas->f_tcmds[slot] == 0) &&
2818 (fas->f_throttle[slot] == DRAIN_THROTTLE)) {
2819 DPRINTF("reset throttle\n");
2820 ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
2821 fas_full_throttle(fas, slot);
2822 }
2823
2824 /*
2825 * accept the command:
2826 * If no readyQ and no bus free, and throttle is OK,
2827 * run cmd immediately.
2828 */
2829 #ifdef FASDEBUG
2830 fas->f_total_cmds++;
2831 #endif
2832
2833 if ((fas->f_readyf[slot] == NULL) && (fas->f_state == STATE_FREE) &&
2834 (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
2835 ASSERT(fas->f_current_sp == 0);
2836 (void) fas_startcmd(fas, sp);
2837 goto exit;
2838 } else {
2839 /*
2840 		 * If FLAG_HEAD is set, put the cmd at the head of the readyQ
2841 		 * so it runs as soon as target and bus are available; if the
2842 		 * first cmd in the readyQ is a request sense then insert after
2843 		 * that command (there shouldn't be more than one request sense)
2844 */
2845 if (sp->cmd_pkt_flags & FLAG_HEAD) {
2846 struct fas_cmd *ssp = fas->f_readyf[slot];
2847 EPRINTF("que head\n");
2848 if (ssp &&
2849 *(ssp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
2850 fas_head_of_readyQ(fas, sp);
2851 } else if (ssp) {
2852 struct fas_cmd *dp = ssp->cmd_forw;
2853 ssp->cmd_forw = sp;
2854 sp->cmd_forw = dp;
2855 if (fas->f_readyb[slot] == ssp) {
2856 fas->f_readyb[slot] = sp;
2857 }
2858 } else {
2859 fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
2860 sp->cmd_forw = NULL;
2861 }
2862
2863 /*
2864 * for tagged targets, check for qfull condition and
2865 * return TRAN_BUSY (if permitted), if throttle has been
2866 * exceeded
2867 */
2868 } else if (TAGGED(Tgt(sp)) &&
2869 (fas->f_tcmds[slot] >= fas->f_throttle[slot]) &&
2870 (fas->f_throttle[slot] > HOLD_THROTTLE) &&
2871 (flag == TRAN_BUSY_OK)) {
2872 IPRINTF2(
2873 "transport busy, slot=%x, ncmds=%x\n",
2874 slot, fas->f_ncmds);
2875 rval = TRAN_BUSY;
2876 fas->f_ncmds--;
2877 sp->cmd_flags &=
2878 ~(CFLAG_PREPARED | CFLAG_IN_TRANSPORT);
2879 goto done;
2880 /*
2881 * append to readyQ or start a new readyQ
2882 */
2883 } else if (fas->f_readyf[slot]) {
2884 struct fas_cmd *dp = fas->f_readyb[slot];
2885 ASSERT(dp != 0);
2886 fas->f_readyb[slot] = sp;
2887 sp->cmd_forw = NULL;
2888 dp->cmd_forw = sp;
2889 } else {
2890 fas->f_readyf[slot] = fas->f_readyb[slot] = sp;
2891 sp->cmd_forw = NULL;
2892 }
2893
2894 }
2895
2896 done:
2897 /*
2898 * just in case that the bus is free and we haven't
2899 * been able to restart for some reason
2900 */
2901 if (fas->f_state == STATE_FREE) {
2902 (void) fas_istart(fas);
2903 }
2904
2905 exit:
2906 fas_check_ncmds(fas);
2907 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2908 TRACE_0(TR_FAC_SCSI_FAS, TR__FAS_START_END, "fas_accept_pkt_end");
2909 return (rval);
2910 }
2911
2912 /*
2913 * allocate a tag byte and check for tag aging
2914 */
2915 static char fas_tag_lookup[] =
2916 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
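/*
 * the lookup is indexed by (cmd_pkt_flags & FLAG_TAGMASK) >> 12; assuming
 * the usual scsi_pkt.h values (FLAG_HTAG 0x1000, FLAG_OTAG 0x2000 and
 * FLAG_STAG 0x4000), index 1 maps to MSG_HEAD_QTAG, 2 to MSG_ORDERED_QTAG
 * and 4 to MSG_SIMPLE_QTAG
 */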
2917
2918 static int
2919 fas_alloc_tag(struct fas *fas, struct fas_cmd *sp)
2920 {
2921 struct f_slots *tag_slots;
2922 int tag;
2923 short slot = sp->cmd_slot;
2924
2925 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_START, "fas_alloc_tag_start");
2926 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2927
2928 tag_slots = fas->f_active[slot];
2929 ASSERT(tag_slots->f_n_slots == NTAGS);
2930
2931 alloc_tag:
2932 tag = (fas->f_active[slot]->f_tags)++;
2933 if (fas->f_active[slot]->f_tags >= NTAGS) {
2934 /*
2935 * we reserve tag 0 for non-tagged cmds
2936 */
2937 fas->f_active[slot]->f_tags = 1;
2938 }
2939 EPRINTF1("tagged cmd, tag = %d\n", tag);
2940
2941 /* Validate tag, should never fail. */
2942 if (tag_slots->f_slot[tag] == 0) {
2943 /*
2944 * Store assigned tag and tag queue type.
2945 * Note, in case of multiple choice, default to simple queue.
2946 */
2947 ASSERT(tag < NTAGS);
2948 sp->cmd_tag[1] = (uchar_t)tag;
2949 sp->cmd_tag[0] = fas_tag_lookup[((sp->cmd_pkt_flags &
2950 FLAG_TAGMASK) >> 12)];
2951 EPRINTF1("tag= %d\n", tag);
2952 tag_slots->f_slot[tag] = sp;
2953 (fas->f_tcmds[slot])++;
2954 ASSERT(mutex_owned(FAS_MUTEX(fas)));
2955 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
2956 "fas_alloc_tag_end");
2957 return (0);
2958
2959 } else {
2960 int age, i;
2961
2962 /*
2963 * Check tag age. If timeouts enabled and
2964 * tag age greater than 1, print warning msg.
2965 * If timeouts enabled and tag age greater than
2966 		 * age limit, begin draining tag queue to check for
2967 * lost tag cmd.
2968 */
2969 age = tag_slots->f_slot[tag]->cmd_age++;
2970 if (age >= fas->f_scsi_tag_age_limit &&
2971 tag_slots->f_slot[tag]->cmd_pkt->pkt_time) {
2972 IPRINTF2("tag %d in use, age= %d\n", tag, age);
2973 DPRINTF("draining tag queue\n");
2974 if (fas->f_reset_delay[Tgt(sp)] == 0) {
2975 fas->f_throttle[slot] = DRAIN_THROTTLE;
2976 }
2977 }
2978
2979 /* If tag in use, scan until a free one is found. */
2980 for (i = 1; i < NTAGS; i++) {
2981 tag = fas->f_active[slot]->f_tags;
2982 if (!tag_slots->f_slot[tag]) {
2983 EPRINTF1("found free tag %d\n", tag);
2984 break;
2985 }
2986 if (++(fas->f_active[slot]->f_tags) >= NTAGS) {
2987 /*
2988 * we reserve tag 0 for non-tagged cmds
2989 */
2990 fas->f_active[slot]->f_tags = 1;
2991 }
2992 EPRINTF1("found in use tag %d\n", tag);
2993 }
2994
2995 /*
2996 		 * If no free tags, we're in serious trouble;
2997 		 * the target driver submitted more than 255
2998 * requests
2999 */
3000 if (tag_slots->f_slot[tag]) {
3001 IPRINTF1("slot %x: All tags in use!!!\n", slot);
3002 goto fail;
3003 }
3004 goto alloc_tag;
3005 }
3006
3007 fail:
3008 fas_head_of_readyQ(fas, sp);
3009
3010 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ALLOC_TAG_END,
3011 "fas_alloc_tag_end");
3012 return (-1);
3013 }
3014
3015 /*
3016 * Internal Search Routine.
3017 *
3018 * Search for a command to start.
3019 */
3020 static int
3021 fas_istart(struct fas *fas)
3022 {
3023 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_START,
3024 "fas_istart_start");
3025 EPRINTF("fas_istart:\n");
3026
3027 if (fas->f_state == STATE_FREE && fas->f_ncmds > fas->f_ndisc) {
3028 (void) fas_ustart(fas);
3029 }
3030 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_ISTART_END,
3031 "fas_istart_end");
3032 return (ACTION_RETURN);
3033 }
3034
3035 static int
3036 fas_ustart(struct fas *fas)
3037 {
3038 struct fas_cmd *sp;
3039 short slot = fas->f_next_slot;
3040 short start_slot = slot;
3041 short dslot = fas->f_dslot;
3042
3043 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_START, "fas_ustart_start");
3044 EPRINTF1("fas_ustart: start_slot=%x\n", fas->f_next_slot);
3045 ASSERT(fas->f_current_sp == NULL);
3046 ASSERT(dslot != 0);
3047 if (dslot == NLUNS_PER_TARGET) {
3048 ASSERT((slot % NLUNS_PER_TARGET) == 0);
3049 }
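	/*
	 * round-robin scan over the slots: start at f_next_slot, step by
	 * f_dslot and give up after one full revolution back to start_slot
	 */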
3050
3051 /*
3052 * if readyQ not empty and we are not draining, then we
3053 * can start another cmd
3054 */
3055 do {
3056 /*
3057 * If all cmds drained from tag Q, back to full throttle and
3058 * start queueing up new cmds again.
3059 */
3060 if (fas->f_throttle[slot] == DRAIN_THROTTLE &&
3061 fas->f_tcmds[slot] == 0) {
3062 fas_full_throttle(fas, slot);
3063 }
3064
3065 if (fas->f_readyf[slot] &&
3066 (fas->f_throttle[slot] > fas->f_tcmds[slot])) {
3067 sp = fas->f_readyf[slot];
3068 fas->f_readyf[slot] = sp->cmd_forw;
3069 if (sp->cmd_forw == NULL) {
3070 fas->f_readyb[slot] = NULL;
3071 }
3072 fas->f_next_slot = NEXTSLOT(slot, dslot);
3073 ASSERT((sp->cmd_pkt_flags & FLAG_NOINTR) == 0);
3074 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_END,
3075 "fas_ustart_end");
3076 return (fas_startcmd(fas, sp));
3077 } else {
3078 slot = NEXTSLOT(slot, dslot);
3079 }
3080 } while (slot != start_slot);
3081
3082 EPRINTF("fas_ustart: no cmds to start\n");
3083 fas->f_next_slot = NEXTSLOT(slot, dslot);
3084 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_USTART_NOT_FOUND_END,
3085 "fas_ustart_end (not_found)");
3086 return (FALSE);
3087 }
3088
3089 /*
3090 * Start a command off
3091 */
3092 static int
3093 fas_startcmd(struct fas *fas, struct fas_cmd *sp)
3094 {
3095 volatile struct fasreg *fasreg = fas->f_reg;
3096 ushort_t nstate;
3097 uchar_t cmd, target, lun;
3098 ushort_t tshift;
3099 volatile uchar_t *tp = fas->f_cmdarea;
3100 struct scsi_pkt *pkt = CMD2PKT(sp);
3101 int slot = sp->cmd_slot;
3102 struct f_slots *slots = fas->f_active[slot];
3103 int i, cdb_len;
3104
3105 #define LOAD_CMDP *(tp++)
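/*
 * LOAD_CMDP appends one byte to the command area; tp walks through
 * f_cmdarea, the DVMA buffer that is dma'ed out to the chip during
 * selection, and the total number of bytes loaded becomes f_lastcount
 */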
3106
3107 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_START, "fas_startcmd_start");
3108
3109 EPRINTF2("fas_startcmd: sp=0x%p flags=%x\n",
3110 (void *)sp, sp->cmd_pkt_flags);
3111 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
3112 ASSERT((sp->cmd_flags & CFLAG_COMPLETED) == 0);
3113 ASSERT(fas->f_current_sp == NULL && fas->f_state == STATE_FREE);
3114 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
3115 ASSERT(fas->f_throttle[slot] > 0);
3116 ASSERT(fas->f_reset_delay[Tgt(sp)] == 0);
3117 }
3118
3119 target = Tgt(sp);
3120 lun = Lun(sp);
3121
3122 /*
3123 * if a non-tagged cmd is submitted to an active tagged target
3124 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
3125 * to be untagged
3126 */
3127 if (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
3128 TAGGED(target) && fas->f_tcmds[slot] &&
3129 ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) &&
3130 (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
3131 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
3132 struct fas_cmd *dp;
3133
3134 IPRINTF("untagged cmd, start draining\n");
3135
3136 if (fas->f_reset_delay[Tgt(sp)] == 0) {
3137 fas->f_throttle[slot] = DRAIN_THROTTLE;
3138 }
3139 dp = fas->f_readyf[slot];
3140 fas->f_readyf[slot] = sp;
3141 sp->cmd_forw = dp;
3142 if (fas->f_readyb[slot] == NULL) {
3143 fas->f_readyb[slot] = sp;
3144 }
3145 }
3146 return (FALSE);
3147 }
3148
3149 /*
3150 * allocate a tag; if no tag available then put request back
3151 * on the ready queue and return; eventually a cmd returns and we
3152 * get going again or we timeout
3153 */
3154 if (TAGGED(target) && (sp->cmd_pkt_flags & FLAG_TAGMASK)) {
3155 if (fas_alloc_tag(fas, sp)) {
3156 return (FALSE);
3157 }
3158 } else {
3159 /*
3160 * tag slot 0 is reserved for non-tagged cmds
3161 * and should be empty because we have drained
3162 */
3163 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
3164 ASSERT(fas->f_active[slot]->f_slot[0] == NULL);
3165 fas->f_active[slot]->f_slot[0] = sp;
3166 sp->cmd_tag[1] = 0;
3167 if (*(sp->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
3168 ASSERT(fas->f_tcmds[slot] == 0);
3169 /*
3170 * don't start any other cmd until this
3171 * one is finished. The throttle is reset
3172 * later in fas_watch()
3173 */
3174 fas->f_throttle[slot] = 1;
3175 }
3176 (fas->f_tcmds[slot])++;
3177
3178 }
3179 }
3180
3181 fas->f_current_sp = sp;
3182 fas->f_omsglen = 0;
3183 tshift = 1<<target;
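	/* per-target bit, tested against the wide/sync negotiation masks */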
3184 fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
3185 cdb_len = sp->cmd_actual_cdblen;
3186
3187 if (sp->cmd_pkt_flags & FLAG_RENEGOTIATE_WIDE_SYNC) {
3188 fas_force_renegotiation(fas, Tgt(sp));
3189 }
3190
3191 /*
3192 * first send identify message, with or without disconnect priv.
3193 */
3194 if (sp->cmd_pkt_flags & FLAG_NODISCON) {
3195 LOAD_CMDP = fas->f_last_msgout = MSG_IDENTIFY | lun;
3196 ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
3197 } else {
3198 LOAD_CMDP = fas->f_last_msgout = MSG_DR_IDENTIFY | lun;
3199 }
3200
3201 /*
3202 * normal case, tagQ and we have negotiated wide and sync
3203 * or we don't need to renegotiate because wide and sync
3204 * have been disabled
3205 * (proxy msg's don't have tag flag set)
3206 */
3207 if ((sp->cmd_pkt_flags & FLAG_TAGMASK) &&
3208 ((fas->f_wide_known | fas->f_nowide) &
3209 (fas->f_sync_known | fas->f_nosync) & tshift)) {
3210
3211 EPRINTF("tag cmd\n");
3212 ASSERT((sp->cmd_pkt_flags & FLAG_NODISCON) == 0);
3213
3214 fas->f_last_msgout = LOAD_CMDP = sp->cmd_tag[0];
3215 LOAD_CMDP = sp->cmd_tag[1];
3216
3217 nstate = STATE_SELECT_NORMAL;
3218 cmd = CMD_SEL_ATN3 | CMD_DMA;
3219
3220 /*
3221 * is this a proxy message
3222 */
3223 } else if (sp->cmd_flags & CFLAG_CMDPROXY) {
3224
3225 IPRINTF2("proxy cmd, len=%x, msg=%x\n",
3226 sp->cmd_cdb[FAS_PROXY_DATA],
3227 sp->cmd_cdb[FAS_PROXY_DATA+1]);
3228 /*
3229 * This is a proxy command. It will have
3230 * a message to send as part of post-selection
3231 * (e.g, MSG_ABORT or MSG_DEVICE_RESET)
3232 */
3233 fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
3234 for (i = 0; i < (uint_t)fas->f_omsglen; i++) {
3235 fas->f_cur_msgout[i] =
3236 sp->cmd_cdb[FAS_PROXY_DATA+1+i];
3237 }
3238 sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
3239 cdb_len = 0;
3240 cmd = CMD_SEL_STOP | CMD_DMA;
3241 nstate = STATE_SELECT_N_SENDMSG;
3242
3243 /*
3244 * always negotiate wide first and sync after wide
3245 */
3246 } else if (((fas->f_wide_known | fas->f_nowide) & tshift) == 0) {
3247 int i = 0;
3248
3249 /* First the tag message bytes */
3250 if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
3251 fas->f_cur_msgout[i++] = sp->cmd_tag[0];
3252 fas->f_cur_msgout[i++] = sp->cmd_tag[1];
3253 }
3254
3255 /*
3256 * Set up to send wide negotiating message. This is getting
3257 * a bit tricky as we dma out the identify message and
3258 * send the other messages via the fifo buffer.
3259 */
3260 EPRINTF1("cmd with wdtr msg, tag=%x\n", sp->cmd_tag[1]);
3261
3262 fas_make_wdtr(fas, i, target, FAS_XFER_WIDTH);
3263
3264 cdb_len = 0;
3265 nstate = STATE_SELECT_N_SENDMSG;
3266 cmd = CMD_SEL_STOP | CMD_DMA;
3267
3268 /*
3269 * negotiate sync xfer rate
3270 */
3271 } else if (((fas->f_sync_known | fas->f_nosync) & tshift) == 0) {
3272 int i = 0;
3273 /*
3274 * Set up to send sync negotiating message. This is getting
3275 * a bit tricky as we dma out the identify message and
3276 * send the other messages via the fifo buffer.
3277 */
3278 if (sp->cmd_pkt_flags & FLAG_TAGMASK) {
3279 fas->f_cur_msgout[i++] = sp->cmd_tag[0];
3280 fas->f_cur_msgout[i++] = sp->cmd_tag[1];
3281 }
3282
3283 fas_make_sdtr(fas, i, target);
3284
3285 cdb_len = 0;
3286 cmd = CMD_SEL_STOP | CMD_DMA;
3287 nstate = STATE_SELECT_N_SENDMSG;
3288
3289 /*
3290 * normal cmds, no negotiations and not a proxy and no TQ
3291 */
3292 } else {
3293
3294 ASSERT((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0);
3295 EPRINTF("std. cmd\n");
3296
3297 nstate = STATE_SELECT_NORMAL;
3298 cmd = CMD_SEL_ATN | CMD_DMA;
3299 }
3300
3301 /*
3302 * Now load cdb (if any)
3303 */
3304 for (i = 0; i < cdb_len; i++) {
3305 LOAD_CMDP = sp->cmd_cdbp[i];
3306 }
3307
3308 /*
3309 * calculate total dma amount:
3310 */
3311 fas->f_lastcount = (uintptr_t)tp - (uintptr_t)fas->f_cmdarea;
3312
3313 /*
3314 * load target id and enable bus id encoding and 32 bit counter
3315 */
3316 fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
3317 (target & 0xf) | FAS_BUSID_ENCODID | FAS_BUSID_32BIT_COUNTER);
3318
3319 FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);
3320
3321 fas_reg_cmd_write(fas, CMD_FLUSH);
3322
3323 FAS_DMA_READ(fas, fas->f_lastcount,
3324 fas->f_dmacookie.dmac_address, 16, cmd);
3325
3326 New_state(fas, (int)nstate);
3327
3328 #ifdef FASDEBUG
3329 if (DDEBUGGING) {
3330 fas_dump_cmd(fas, sp);
3331 }
3332 #endif /* FASDEBUG */
3333
3334 /*
3335 * if timeout == 0, then it has no effect on the timeout
3336 * handling; we deal with this when an actual timeout occurs.
3337 */
3338 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
3339 ASSERT(fas->f_tcmds[slot] >= 1);
3340 }
3341 i = pkt->pkt_time - slots->f_timebase;
3342
3343 if (i == 0) {
3344 EPRINTF("dup timeout\n");
3345 (slots->f_dups)++;
3346 slots->f_timeout = slots->f_timebase;
3347 } else if (i > 0) {
3348 EPRINTF("new timeout\n");
3349 slots->f_timeout = slots->f_timebase = pkt->pkt_time;
3350 slots->f_dups = 1;
3351 }
3352
3353 fas_check_ncmds(fas);
3354
3355 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_STARTCMD_END, "fas_startcmd_end");
3356
3357 return (TRUE);
3358 }
3359
3360 /*
3361 * Interrupt Entry Point.
3362 * Poll interrupts until they go away
3363 */
3364 static uint_t
3365 fas_intr(caddr_t arg)
3366 {
3367 struct fas *fas = (struct fas *)arg;
3368 int rval = DDI_INTR_UNCLAIMED;
3369 int kstat_updated = 0;
3370
3371 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_START, "fas_intr_start");
3372
3373 do {
3374 mutex_enter(FAS_MUTEX(fas));
3375
3376 do {
3377 if (fas_intr_svc(fas)) {
3378 /*
3379 * do not return immediately here because
3380 * we have to guarantee to always empty
3381 * the waitQ and callbackQ in the interrupt
3382 * handler
3383 */
3384 if (fas->f_polled_intr) {
3385 rval = DDI_INTR_CLAIMED;
3386 fas->f_polled_intr = 0;
3387 }
3388 } else {
3389 rval = DDI_INTR_CLAIMED;
3390 }
3391 } while (INTPENDING(fas));
3392
3393 if (!kstat_updated && fas->f_intr_kstat &&
3394 rval == DDI_INTR_CLAIMED) {
3395 FAS_KSTAT_INTR(fas);
3396 kstat_updated++;
3397 }
3398
3399 /*
3400 * check and empty the waitQ and the callbackQ
3401 */
3402 FAS_CHECK_WAITQ_AND_FAS_MUTEX_EXIT(fas);
3403 FAS_EMPTY_CALLBACKQ(fas);
3404
3405 } while (INTPENDING(fas));
3406
3407 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_POLL_END, "fas_intr_end");
3408
3409 return (rval);
3410 }
3411
3412 /*
3413 * General interrupt service routine.
3414 */
3415 static char *dma_bits = DMA_BITS;
3416
3417 static int
3418 fas_intr_svc(struct fas *fas)
3419 {
3420 static int (*evec[])(struct fas *fas) = {
3421 fas_finish_select,
3422 fas_reconnect,
3423 fas_phasemanage,
3424 fas_finish,
3425 fas_reset_recovery,
3426 fas_istart,
3427 fas_abort_curcmd,
3428 fas_reset_bus,
3429 fas_reset_bus,
3430 fas_handle_selection
3431 };
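	/*
	 * evec[] is indexed by the ACTION_* code that the handlers return;
	 * note the two adjacent fas_reset_bus entries: two distinct action
	 * codes apparently share the same bus reset routine
	 */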
3432 int action;
3433 uchar_t intr, stat;
3434 volatile struct fasreg *fasreg = fas->f_reg;
3435 int i = 0;
3436
3437 TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_START, "fas_intr_svc_start");
3438
3439 /*
3440 * A read of FAS interrupt register clears interrupt,
3441 * so any other volatile information needs to be latched
3442 * up prior to reading the interrupt register.
3443 */
3444 fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
3445
3446 EPRINTF2("fas_intr_svc: state=%x stat=%x\n", fas->f_state,
3447 fas->f_stat);
3448
3449 /*
3450 * this wasn't our interrupt?
3451 */
3452 if ((fas->f_stat & FAS_STAT_IPEND) == 0) {
3453 if (fas_check_dma_error(fas)) {
3454 action = ACTION_RESET;
3455 goto start_action;
3456 }
3457 return (-1);
3458 }
3459
3460 /*
3461 * if we are reset state, handle this first
3462 */
3463 if (fas->f_state == ACTS_RESET) {
3464 action = ACTION_FINRST;
3465 goto start_action;
3466 }
3467
3468 /*
3469 * check for gross error. fas366 hardware seems to register
3470 * the gross error bit when a parity error is found. Make sure
3471 * to ignore the gross error bit when a parity error is detected.
3472 */
3473 if ((fas->f_stat & FAS_STAT_GERR) &&
3474 (fas->f_stat & FAS_STAT_PERR) == 0) {
3475 action = fas_handle_gross_err(fas);
3476 goto start_action;
3477 }
3478
3479 /*
3480 * now it is finally safe to read the interrupt register
3481 * if we haven't done so yet
3482 * Note: we don't read step register here but only in
3483 * fas_finish_select(). It is not entirely safe but saves
3484 * redundant PIOs or extra code in this critical path
3485 */
3486 fas->f_intr =
3487 intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);
3488
3489 /*
3490 * read the fifo if there is something there or still in the
3491 * input shuttle
3492 */
3493 stat = fas->f_stat & FAS_PHASE_MASK;
3494
3495 if ((intr & FAS_INT_RESEL) ||
3496 ((stat != FAS_PHASE_DATA_IN) && (stat != FAS_PHASE_DATA_OUT) &&
3497 ((fas->f_state & STATE_SELECTING) == 0) &&
3498 (fas->f_state != ACTS_DATA_DONE) &&
3499 (fas->f_state != ACTS_C_CMPLT))) {
3500
3501 fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);
3502
3503 if (((fas->f_stat2 & FAS_STAT2_EMPTY) == 0) ||
3504 (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
3505 fas_read_fifo(fas);
3506 }
3507 }
3508
3509 EPRINTF2("fas_intr_svc: intr=%x, stat=%x\n", fas->f_intr, fas->f_stat);
3510 EPRINTF2("dmacsr=%b\n", fas->f_dma->dma_csr, dma_bits);
3511
3512 /*
3513 * Based upon the current state of the host adapter driver
3514 * we should be able to figure out what to do with an interrupt.
3515 *
3516 * The FAS asserts an interrupt with one or more of 8 possible
3517 * bits set in its interrupt register. These conditions are
3518 * SCSI bus reset detected, an illegal command fed to the FAS,
3519 * one of DISCONNECT, BUS SERVICE, FUNCTION COMPLETE conditions
3520 * for the FAS, a Reselection interrupt, or one of Selection
3521 * or Selection with Attention.
3522 *
3523 * Of these possible interrupts, we can deal with some right
3524 * here and now, irrespective of the current state of the driver.
3525 *
3526 * take care of the most likely interrupts first and call the action
3527 * immediately
3528 */
3529 if ((intr & (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN|
3530 FAS_INT_RESEL)) == 0) {
3531 /*
3532 * The rest of the reasons for an interrupt can
3533 * be handled based purely on the state that the driver
3534 * is currently in now.
3535 */
3536 if (fas->f_state & STATE_SELECTING) {
3537 action = fas_finish_select(fas);
3538
3539 } else if (fas->f_state & STATE_ITPHASES) {
3540 action = fas_phasemanage(fas);
3541
3542 } else {
3543 fas_log(fas, CE_WARN, "spurious interrupt");
3544 action = ACTION_RETURN;
3545 }
3546
3547 } else if ((intr & FAS_INT_RESEL) && ((intr &
3548 (FAS_INT_RESET|FAS_INT_ILLEGAL|FAS_INT_SEL|FAS_INT_SELATN)) == 0)) {
3549
3550 if ((fas->f_state & STATE_SELECTING) == 0) {
3551 ASSERT(fas->f_state == STATE_FREE);
3552 action = fas_reconnect(fas);
3553 } else {
3554 action = fas_reselect_preempt(fas);
3555 }
3556
3557 } else if (intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
3558 action = fas_illegal_cmd_or_bus_reset(fas);
3559
3560 } else if (intr & (FAS_INT_SEL|FAS_INT_SELATN)) {
3561 action = ACTION_SELECT;
3562 }
3563
3564 start_action:
3565 while (action != ACTION_RETURN) {
3566 ASSERT((action >= 0) && (action <= ACTION_SELECT));
3567 TRACE_3(TR_FAC_SCSI_FAS, TR_FASSVC_ACTION_CALL,
3568 "fas_intr_svc call: fas 0x%p, action %d (%d)",
3569 fas, action, i);
3570 i++;
3571 action = (*evec[action])(fas);
3572 }
3573 exit:
3574 TRACE_0(TR_FAC_SCSI_FAS, TR_FASSVC_END, "fas_intr_svc_end");
3575
3576 return (0);
3577 }
3578
3579 /*
3580 * Manage phase transitions.
3581 */
3582 static int
3583 fas_phasemanage(struct fas *fas)
3584 {
3585 ushort_t state;
3586 int action;
3587 static int (*pvecs[])(struct fas *fas) = {
3588 fas_handle_cmd_start,
3589 fas_handle_cmd_done,
3590 fas_handle_msg_out_start,
3591 fas_handle_msg_out_done,
3592 fas_handle_msg_in_start,
3593 fas_handle_more_msgin,
3594 fas_handle_msg_in_done,
3595 fas_handle_clearing,
3596 fas_handle_data_start,
3597 fas_handle_data_done,
3598 fas_handle_c_cmplt,
3599 fas_reconnect,
3600 fas_handle_unknown,
3601 fas_reset_recovery
3602 };
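	/*
	 * pvecs[] is indexed by (f_state - 1), so the ACTS_* phase states
	 * are presumably contiguous starting at 1, with one handler per
	 * interconnect phase and fas_reset_recovery as the final entry
	 */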
3603 int i = 0;
3604
3605 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_START,
3606 "fas_phasemanage_start");
3607
3608 do {
3609 EPRINTF1("fas_phasemanage: %s\n",
3610 fas_state_name(fas->f_state & STATE_ITPHASES));
3611
3612 TRACE_2(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_CALL,
3613 "fas_phasemanage_call: fas 0x%p (%d)", fas, i++);
3614
3615 state = fas->f_state;
3616
3617 if (!(state == STATE_FREE || state > ACTS_ENDVEC)) {
3618 ASSERT(pvecs[state-1] != NULL);
3619 action = (*pvecs[state-1]) (fas);
3620 } else {
3621 fas_log(fas, CE_WARN, "lost state in phasemanage");
3622 action = ACTION_ABORT_ALLCMDS;
3623 }
3624
3625 } while (action == ACTION_PHASEMANAGE);
3626
3627 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_PHASEMANAGE_END,
3628 "fas_phasemanage_end");
3629 return (action);
3630 }
3631
3632 /*
3633 * remove a cmd from active list and if timeout flag is set, then
3634  * adjust timeouts; if the same cmd will be resubmitted soon, don't
3635 * bother to adjust timeouts (ie. don't set this flag)
3636 */
3637 static void
3638 fas_remove_cmd(struct fas *fas, struct fas_cmd *sp, int new_timeout_flag)
3639 {
3640 int tag = sp->cmd_tag[1];
3641 int slot = sp->cmd_slot;
3642 struct f_slots *tag_slots = fas->f_active[slot];
3643
3644 ASSERT(sp != NULL);
3645 EPRINTF4("remove tag %d slot %d for target %d.%d\n",
3646 tag, slot, Tgt(sp), Lun(sp));
3647
3648 if (sp == tag_slots->f_slot[tag]) {
3649 tag_slots->f_slot[tag] = NULL;
3650 fas->f_tcmds[slot]--;
3651 }
3652 if (fas->f_current_sp == sp) {
3653 fas->f_current_sp = NULL;
3654 }
3655
3656 ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
3657
3658 if (new_timeout_flag != NEW_TIMEOUT) {
3659 return;
3660 }
3661
3662 /*
3663 * Figure out what to set tag Q timeout for...
3664 *
3665 * Optimize: If we have duplicate's of same timeout
3666 * we're using, then we'll use it again until we run
3667 * out of duplicates. This should be the normal case
3668 * for block and raw I/O.
3669 * If no duplicates, we have to scan through tag que and
3670 * find the longest timeout value and use it. This is
3671 * going to take a while...
3672 */
3673 if (sp->cmd_pkt->pkt_time == tag_slots->f_timebase) {
3674 if (--(tag_slots->f_dups) <= 0) {
3675 if (fas->f_tcmds[slot]) {
3676 struct fas_cmd *ssp;
3677 uint_t n = 0;
3678 ushort_t t = tag_slots->f_n_slots;
3679 ushort_t i;
3680 /*
3681 * This crude check assumes we don't do
3682 * this too often which seems reasonable
3683 * for block and raw I/O.
3684 */
3685 for (i = 0; i < t; i++) {
3686 ssp = tag_slots->f_slot[i];
3687 if (ssp &&
3688 (ssp->cmd_pkt->pkt_time > n)) {
3689 n = ssp->cmd_pkt->pkt_time;
3690 tag_slots->f_dups = 1;
3691 } else if (ssp &&
3692 (ssp->cmd_pkt->pkt_time == n)) {
3693 tag_slots->f_dups++;
3694 }
3695 }
3696 tag_slots->f_timebase = n;
3697 EPRINTF1("searching, new_timeout= %d\n", n);
3698 } else {
3699 tag_slots->f_dups = 0;
3700 tag_slots->f_timebase = 0;
3701 }
3702 }
3703 }
3704 tag_slots->f_timeout = tag_slots->f_timebase;
3705
3706 ASSERT(fas->f_ncmds >= fas->f_ndisc);
3707 }
3708
3709 /*
3710 * decrement f_ncmds and f_ndisc for this cmd before completing
3711 */
3712 static void
3713 fas_decrement_ncmds(struct fas *fas, struct fas_cmd *sp)
3714 {
3715 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
3716 if ((sp->cmd_flags & CFLAG_FINISHED) == 0) {
3717 fas->f_ncmds--;
3718 if (sp->cmd_flags & CFLAG_CMDDISC) {
3719 fas->f_ndisc--;
3720 }
3721 sp->cmd_flags |= CFLAG_FINISHED;
3722 sp->cmd_flags &= ~CFLAG_CMDDISC;
3723 }
3724 ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
3725 ASSERT(fas->f_ncmds >= fas->f_ndisc);
3726 }
3727
3728 /*
3729 * Most commonly called phase handlers:
3730 *
3731 * Finish routines
3732 */
3733 static int
3734 fas_finish(struct fas *fas)
3735 {
3736 struct fas_cmd *sp = fas->f_current_sp;
3737 struct scsi_pkt *pkt = CMD2PKT(sp);
3738 int action = ACTION_SEARCH;
3739 struct scsi_status *status =
3740 (struct scsi_status *)sp->cmd_pkt->pkt_scbp;
3741
3742 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_START,
3743 "fas_finish_start");
3744 EPRINTF("fas_finish\n");
3745
3746 #ifdef FAS_TEST
3747 if (fas_test_stop && (sp->cmd_pkt_flags & 0x80000000)) {
3748 debug_enter("untagged cmd completed");
3749 }
3750 #endif
3751
3752 /*
3753 * immediately enable reselects
3754 */
3755 fas_reg_cmd_write(fas, CMD_EN_RESEL);
3756 if (status->sts_chk) {
3757 /*
3758 * In the case that we are getting a check condition
3759 * clear our knowledge of synchronous capabilities.
3760 * This will unambiguously force a renegotiation
3761 * prior to any possible data transfer (we hope),
3762 * including the data transfer for a UNIT ATTENTION
3763 * condition generated by somebody powering on and
3764 * off a target.
3765 */
3766 fas_force_renegotiation(fas, Tgt(sp));
3767 }
3768
3769 /*
3770 * backoff sync/wide if there were parity errors
3771 */
3772 if (sp->cmd_pkt->pkt_statistics & STAT_PERR) {
3773 fas_sync_wide_backoff(fas, sp, sp->cmd_slot);
3774 #ifdef FAS_TEST
3775 if (fas_test_stop) {
3776 debug_enter("parity error");
3777 }
3778 #endif
3779 }
3780
3781 /*
3782 * Free from active list and update counts
3783 * We need to clean up this cmd now, just in case fas_ustart()
3784 * hits a reset or other fatal transport error
3785 */
3786 fas_check_ncmds(fas);
3787 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
3788 fas_decrement_ncmds(fas, sp);
3789 fas_check_ncmds(fas);
3790
3791 /*
3792 * go to state free and try to start a new cmd now
3793 */
3794 New_state(fas, STATE_FREE);
3795
3796 if ((fas->f_ncmds > fas->f_ndisc) && (*((char *)status) == 0) &&
3797 (INTPENDING(fas) == 0)) {
3798 if (fas_ustart(fas)) {
3799 action = ACTION_RETURN;
3800 }
3801 }
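/*
 * Note that the next command is only started here when the
 * completing command returned a clean (zero) status byte and no
 * interrupt is pending; a non-zero status (check condition, queue
 * full) is dealt with further down.
 */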
3802
3803 /*
3804 * if there was a data xfer then calculate residue and
3805 * sync data for consistent memory xfers
3806 */
3807 if (pkt->pkt_state & STATE_XFERRED_DATA) {
3808 pkt->pkt_resid = sp->cmd_dmacount - sp->cmd_data_count;
3809 if (sp->cmd_flags & CFLAG_CMDIOPB) {
3810 (void) ddi_dma_sync(sp->cmd_dmahandle, 0, (uint_t)0,
3811 DDI_DMA_SYNC_FORCPU);
3812 }
3813 if (pkt->pkt_resid) {
3814 IPRINTF3("%d.%d finishes with %ld resid\n",
3815 Tgt(sp), Lun(sp), pkt->pkt_resid);
3816 }
3817 }
3818
3819 if (sp->cmd_pkt_flags & FLAG_NOINTR) {
3820 fas_call_pkt_comp(fas, sp);
3821 action = ACTION_RETURN;
3822 } else {
3823 /*
3824 * start an autorequest sense if there was a check condition.
3825 * if arq has not been enabled, fas_handle_sts_chk will
3826 * do the callback
3827 */
3828 if (status->sts_chk) {
3829 if (fas_handle_sts_chk(fas, sp)) {
3830 /*
3831 * we can't start an arq because one is
3832 * already in progress. the target is
3833 * probably confused
3834 */
3835 action = ACTION_ABORT_CURCMD;
3836 }
3837 } else if ((*((char *)status) & STATUS_MASK) ==
3838 STATUS_QFULL) {
3839 fas_handle_qfull(fas, sp);
3840 } else {
3841 #ifdef FAS_TEST
3842 if (fas_arqs_failure && (status->sts_chk == 0)) {
3843 struct scsi_arq_status *arqstat;
3844 status->sts_chk = 1;
3845 arqstat = (struct scsi_arq_status *)
3846 (sp->cmd_pkt->pkt_scbp);
3847 arqstat->sts_rqpkt_reason = CMD_TRAN_ERR;
3848 sp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
3849 fas_arqs_failure = 0;
3850 }
3851 if (fas_tran_err) {
3852 sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
3853 fas_tran_err = 0;
3854 }
3855 #endif
3856 fas_call_pkt_comp(fas, sp);
3857 }
3858 }
3859
3860 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_END, "fas_finish_end");
3861 return (action);
3862 }
3863
3864 /*
3865 * Complete the process of selecting a target
3866 */
3867 static int
3868 fas_finish_select(struct fas *fas)
3869 {
3870 volatile struct dma *dmar = fas->f_dma;
3871 struct fas_cmd *sp = fas->f_current_sp;
3872 uchar_t intr = fas->f_intr;
3873 uchar_t step;
3874
3875 step = fas_reg_read(fas, &fas->f_reg->fas_step) & FAS_STEP_MASK;
3876
3877 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_START,
3878 "fas_finish_select_start");
3879 EPRINTF("fas_finish_select:\n");
3880 ASSERT(sp != 0);
3881
3882 /*
3883 * Check for DMA gate array errors
3884 */
3885 if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr))
3886 & DMA_ERRPEND) {
3887 /*
3888 * It would be desirable to set the ATN* line and attempt to
3889 * do the whole schmear of INITIATOR DETECTED ERROR here,
3890 * but that is too hard to do at present.
3891 */
3892 fas_log(fas, CE_WARN,
3893 "Unrecoverable DMA error during selection");
3894 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
3895
3896 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET1_END,
3897 "fas_finish_select_end (ACTION_RESET1)");
3898 return (ACTION_RESET);
3899 }
3900
3901 /*
3902 * Shut off DMA gate array
3903 */
3904 FAS_FLUSH_DMA(fas);
3905
3906 /*
3907 * Did something respond to selection?
3908 */
3909 if (intr == (FAS_INT_BUS|FAS_INT_FCMP)) {
3910 /*
3911 * We successfully selected a target (we think).
3912 * Now we figure out how botched things are
3913 * based upon the kind of selection we were
3914 * doing and the state of the step register.
3915 */
3916 switch (step) {
3917 case FAS_STEP_ARBSEL:
3918 /*
3919 * In this case, we selected the target, but went
3920 * neither into MESSAGE OUT nor COMMAND phase.
3921 * However, this isn't a fatal error, so we just
3922 * drive on.
3923 *
3924 * This might be a good point to note that we have
3925 * a target that appears to not accommodate
3926 * disconnecting,
3927 * but it really isn't worth the effort to distinguish
3928 * such targets especially from others.
3929 */
3930 /* FALLTHROUGH */
3931
3932 case FAS_STEP_SENTID:
3933 /*
3934 * In this case, we selected the target and sent
3935 * message byte and have stopped with ATN* still on.
3936 * This case should only occur if we use the SELECT
3937 * AND STOP command.
3938 */
3939 /* FALLTHROUGH */
3940
3941 case FAS_STEP_NOTCMD:
3942 /*
3943 * In this case, we either didn't transition to command
3944 * phase, or,
3945 * if we were using the SELECT WITH ATN3 command,
3946 * we possibly didn't send all message bytes.
3947 */
3948 break;
3949
3950 case FAS_STEP_PCMD:
3951 /*
3952 * In this case, not all command bytes transferred.
3953 */
3954 /* FALLTHROUGH */
3955
3956 case FAS_STEP_DONE:
3957 /*
3958 * This is the usual 'good' completion point.
3959 * If we sent message byte(s), we subtract
3960 * off the number of message bytes that were
3961 * ahead of the command.
3962 */
3963 sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
3964 break;
3965
3966 default:
3967 fas_log(fas, CE_WARN,
3968 "bad sequence step (0x%x) in selection", step);
3969 TRACE_0(TR_FAC_SCSI_FAS,
3970 TR_FAS_FINISH_SELECT_RESET3_END,
3971 "fas_finish_select_end (ACTION_RESET3)");
3972 return (ACTION_RESET);
3973 }
3974
3975 /*
3976 * OR in common state...
3977 */
3978 sp->cmd_pkt->pkt_state |= (STATE_GOT_BUS|STATE_GOT_TARGET);
3979
3980 /*
3981 * data pointer initialization has already been done
3982 */
3983 New_state(fas, ACTS_UNKNOWN);
3984 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_ACTION3_END,
3985 "fas_finish_select_end (action3)");
3986 return (fas_handle_unknown(fas));
3987
3988 } else if (intr == FAS_INT_DISCON) {
3989 /*
3990 * make sure we negotiate when this target comes
3991 * on line later on
3992 */
3993 fas_force_renegotiation(fas, Tgt(sp));
3994
3995 fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
3996 sp->cmd_pkt->pkt_state |= STATE_GOT_BUS;
3997
3998 /*
3999 * Set the throttle to DRAIN_THROTTLE to make
4000 * sure any disconnected commands will get timed out
4001 * in case the drive dies
4002 */
4003
4004 if (fas->f_reset_delay[Tgt(sp)] == 0) {
4005 fas->f_throttle[sp->cmd_slot] = DRAIN_THROTTLE;
4006 }
4007
4008 fas_set_pkt_reason(fas, sp, CMD_INCOMPLETE, 0);
4009
4010 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_FINISH_END,
4011 "fas_finish_select_end (ACTION_FINISH)");
4012 return (ACTION_FINISH);
4013 } else {
4014 fas_printstate(fas, "undetermined selection failure");
4015 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_FINISH_SELECT_RESET2_END,
4016 "fas_finish_select_end (ACTION_RESET2)");
4017 return (ACTION_RESET);
4018 }
4019 _NOTE(NOT_REACHED)
4020 /* NOTREACHED */
4021 }
4022
4023 /*
4024 * a selection got preempted by a reselection; shut down dma
4025 * and put back cmd in the ready queue unless NOINTR
4026 */
4027 static int
4028 fas_reselect_preempt(struct fas *fas)
4029 {
4030 int rval;
4031
4032 /*
4033 * A reselection attempt glotzed our selection attempt.
4034 * We put the request back in the ready queue.
4035 */
4036 struct fas_cmd *sp = fas->f_current_sp;
4037
4038 /*
4039 * Shut off DMA gate array
4040 */
4041 FAS_FLUSH_DMA(fas);
4042
4043 /*
4044 * service the reconnect now and clean up later
4045 */
4046 New_state(fas, STATE_FREE);
4047 rval = fas_reconnect(fas);
4048
4049 /*
4050 * If selection for a non-tagged command is preempted, the
4051 * command could be stuck because throttle was set to DRAIN,
4052 * and a disconnected command timeout follows.
4053 */
4054 if ((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0)
4055 fas->f_throttle[sp->cmd_slot] = 1;
4056
4057 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4058 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
4059 }
4060
4061 /*
4062 * if we attempted to renegotiate on this cmd, undo this now
4063 */
4064 if (fas->f_wdtr_sent) {
4065 fas->f_wide_known &= ~(1<<Tgt(sp));
4066 fas->f_wdtr_sent = 0;
4067 }
4068 if (fas->f_sdtr_sent) {
4069 fas->f_sync_known &= ~(1<<Tgt(sp));
4070 fas->f_sdtr_sent = 0;
4071 }
4072
4073 fas_head_of_readyQ(fas, sp);
4074
4075 return (rval);
4076 }
4077
4078 /*
4079 * Handle the reconnection of a target
4080 */
4081 static int
4082 fas_reconnect(struct fas *fas)
4083 {
4084 volatile struct fasreg *fasreg = fas->f_reg;
4085 struct fas_cmd *sp = NULL;
4086 uchar_t target, lun;
4087 uchar_t tmp;
4088 uchar_t slot;
4089 char *bad_reselect = NULL;
4090
4091 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_START,
4092 "fas_reconnect_start");
4093 EPRINTF("fas_reconnect:\n");
4094
4095 fas_check_ncmds(fas);
4096
4097 switch (fas->f_state) {
4098 default:
4099 /*
4100 * Pick up target id from fifo
4101 *
4102 * There should only be the reselecting target's id
4103 * and an identify message in the fifo.
4104 */
4105 target = fas->f_fifo[0];
4106
4107 /*
4108 * we know the target so update period, conf3,
4109 * offset reg, if necessary, and accept the msg
4110 */
4111 FAS_SET_PERIOD_OFFSET_CONF3_REGS(fas, target);
4112
4113 /*
4114 * now we can accept the message. an untagged
4115 * target will go immediately into data phase so
4116 * the period/offset/conf3 registers need to be
4117 * updated before accepting the message
4118 */
4119 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
4120
4121 if (fas->f_fifolen != 2) {
4122 bad_reselect = "bad reselect bytes";
4123 break;
4124 }
4125
4126 /*
4127 * normal initial reconnect; we get another interrupt later
4128 * for the tag
4129 */
4130 New_state(fas, ACTS_RESEL);
4131
4132 if (fas->f_stat & FAS_STAT_PERR) {
4133 break;
4134 }
4135
4136 /*
4137 * Check sanity of message.
4138 */
4139 tmp = fas->f_fifo[1];
4140 fas->f_last_msgin = tmp;
4141
4142 if (!(IS_IDENTIFY_MSG(tmp)) || (tmp & INI_CAN_DISCON)) {
4143 bad_reselect = "bad identify msg";
4144 break;
4145 }
4146
4147 lun = tmp & (NLUNS_PER_TARGET-1);
4148
4149 EPRINTF2("fas_reconnect: target=%x, idmsg=%x\n",
4150 target, tmp);
4151
4152 fas->f_resel_slot = slot = (target * NLUNS_PER_TARGET) | lun;
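/*
 * Since NLUNS_PER_TARGET is a power of two (see the lun mask above),
 * the or here is an add: e.g. target 2, lun 1 yields slot
 * 2 * NLUNS_PER_TARGET + 1, the index of this LUN's tag queue in
 * f_active[].
 */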
4153
4154 fas_reg_write(fas, (uchar_t *)&fasreg->fas_busid,
4155 (target & 0xf) | FAS_BUSID_ENCODID |
4156 FAS_BUSID_32BIT_COUNTER);
4157
4158 /*
4159 * If tag queueing in use, DMA in tag.
4160 * Otherwise, we're ready to go.
4161 * if tag 0 slot is non-empty, a non-tagged cmd is
4162 * reconnecting
4163 */
4164 if (TAGGED(target) && fas->f_tcmds[slot] &&
4165 (fas->f_active[slot]->f_slot[0] == NULL)) {
4166 volatile uchar_t *c =
4167 (uchar_t *)fas->f_cmdarea;
4168
4169 /*
4170 * If we've been doing tagged queueing and this
4171 * request doesn't do it,
4172 * maybe it was disabled for this one. This is rather
4173 * dangerous as it blows all pending tagged cmds away.
4174 * But if target is confused, then we'll blow up
4175 * shortly.
4176 */
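/*
 * Pre-fill both message bytes with the INVALID_MSG sentinel so
 * that, after the DMA transfer of the tag message, we can tell
 * whether the bytes actually landed in memory (see the
 * *c == INVALID_MSG poll further down).
 */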
4177 *c++ = INVALID_MSG;
4178 *c = INVALID_MSG;
4179
4180 FAS_DMA_WRITE_SETUP(fas, 2,
4181 fas->f_dmacookie.dmac_address);
4182
4183 /*
4184 * For tagged queuing, we should still be in msgin
4185 * phase.
4186 * If not, then either we aren't running tagged
4187 * queueing like we thought or the target died.
4188 */
4189 if (INTPENDING(fas) == 0) {
4190 EPRINTF1("slow reconnect, slot=%x\n", slot);
4191 TRACE_0(TR_FAC_SCSI_FAS,
4192 TR_FAS_RECONNECT_RETURN1_END,
4193 "fas_reconnect_end (_RETURN1)");
4194 return (ACTION_RETURN);
4195 }
4196
4197 fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
4198 fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
4199 if (fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET)) {
4200 return (fas_illegal_cmd_or_bus_reset(fas));
4201 }
4202
4203 if ((fas->f_stat & FAS_PHASE_MASK) !=
4204 FAS_PHASE_MSG_IN) {
4205 bad_reselect = "not in msgin phase";
4206 break;
4207 }
4208
4209 if (fas->f_intr & FAS_INT_DISCON) {
4210 bad_reselect = "unexpected bus free";
4211 break;
4212 }
4213 } else {
4214 fas->f_current_sp = sp = fas->f_active[slot]->f_slot[0];
4215 break;
4216 }
4217 /*FALLTHROUGH*/
4218
4219 case ACTS_RESEL:
4220 {
4221 volatile uchar_t *c =
4222 (uchar_t *)fas->f_cmdarea;
4223 struct f_slots *tag_slots;
4224 int id, tag;
4225 uint_t i;
4226
4227 slot = fas->f_resel_slot;
4228 target = slot/NLUNS_PER_TARGET;
4229
4230 if ((fas->f_stat & FAS_PHASE_MASK) !=
4231 FAS_PHASE_MSG_IN) {
4232 IPRINTF1("no tag for slot %x\n", slot);
4233 if (fas->f_intr & ~(FAS_INT_BUS |
4234 FAS_INT_FCMP)) {
4235 New_state(fas, ACTS_UNKNOWN);
4236 TRACE_0(TR_FAC_SCSI_FAS,
4237 TR_FAS_RECONNECT_PHASEMANAGE_END,
4238 "fas_reconnect_end (_PHASEMANAGE)");
4239 return (ACTION_PHASEMANAGE);
4240 } else {
4241 bad_reselect = "not in msgin phase";
4242 break;
4243 }
4244 }
4245 fas_reg_cmd_write(fas, CMD_TRAN_INFO|CMD_DMA);
4246 fas_dma_reg_write(fas, &fas->f_dma->dma_csr,
4247 fas->f_dma_csr);
4248
4249 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
4250
4251 for (i = 0; i < (uint_t)RECONNECT_TAG_RCV_TIMEOUT;
4252 i++) {
4253 /*
4254 * timeout is not very accurate but this
4255 * should take no time at all
4256 */
4257 if (INTPENDING(fas)) {
4258 fas->f_stat = fas_reg_read(fas,
4259 (uchar_t *)&fas->f_reg->fas_stat);
4260 fas->f_intr = fas_reg_read(fas,
4261 (uchar_t *)&fas->f_reg->fas_intr);
4262 if (fas->f_intr & (FAS_INT_RESET |
4263 FAS_INT_ILLEGAL)) {
4264 return (
4265 fas_illegal_cmd_or_bus_reset
4266 (fas));
4267 }
4268 if (fas->f_intr & FAS_INT_FCMP) {
4269 break;
4270 }
4271 }
4272 }
4273
4274 if (i == (uint_t)RECONNECT_TAG_RCV_TIMEOUT) {
4275 bad_reselect = "timeout on receiving tag msg";
4276 break;
4277 }
4278
4279 FAS_FLUSH_DMA(fas);
4280
4281 /*
4282 * we should really do a sync here but that
4283 * hurts performance too much; we'll just hang
4284 * around till the tag byte flips.
4285 * This is necessary on any system with an
4286 * XBox
4287 */
4288 if (*c == INVALID_MSG) {
4289 EPRINTF(
4290 "fas_reconnect: invalid msg, polling\n");
4291 for (i = 0; i < 1000000; i++) {
4292 if (*c != INVALID_MSG)
4293 break;
4294 }
4295 }
4296
4297 if (fas->f_stat & FAS_STAT_PERR) {
4298 break;
4299 }
4300
4301 if ((fas->f_stat & FAS_STAT_XZERO) == 0 ||
4302 (id = *c++) < MSG_SIMPLE_QTAG ||
4303 id > MSG_ORDERED_QTAG) {
4304 /*
4305 * Target agreed to do tagged queueing
4306 * and lied!
4307 * This problem implies the drive firmware is
4308 * broken.
4309 */
4310 bad_reselect = "botched tag";
4311 break;
4312 }
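/*
 * The tag message is two bytes: a queue tag message code (simple,
 * ordered, or head-of-queue) followed by the tag value itself,
 * which indexes the f_slot[] array below.
 */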
4313 tag = *c;
4314
4315 /* Set ptr to reconnecting scsi pkt */
4316 tag_slots = fas->f_active[slot];
4317 if (tag_slots != NULL) {
4318 sp = tag_slots->f_slot[tag];
4319 } else {
4320 bad_reselect = "Invalid tag";
4321 break;
4322 }
4323
4324 fas->f_current_sp = sp;
4325 }
4326 }
4327
4328 if (fas->f_stat & FAS_STAT_PERR) {
4329 sp = NULL;
4330 bad_reselect = "Parity error in reconnect msg's";
4331 }
4332
4333 if ((sp == NULL ||
4334 #ifdef FAS_TEST
4335 (fas_atest_reconn & (1<<Tgt(sp))) ||
4336 #endif
4337 (sp->cmd_flags & (CFLAG_CMDDISC|CFLAG_CMDPROXY)) == 0)) {
4338 /*
4339 * this shouldn't really happen, so it is better
4340 * to reset the bus; some disks accept the abort
4341 * and then still reconnect
4342 */
4343 if (bad_reselect == NULL) {
4344 bad_reselect = "no command";
4345 }
4346 #ifdef FAS_TEST
4347 if (sp && !(fas_atest_reconn & (1<<Tgt(sp))) &&
4348 fas_test_stop) {
4349 debug_enter("bad reconnect");
4350 } else {
4351 fas_atest_reconn = 0;
4352 }
4353 #endif
4354 goto bad;
4355
4356 /*
4357 * XXX remove this case or make it an ASSERT
4358 */
4359 } else if (sp->cmd_flags & CFLAG_CMDPROXY) {
4360 /*
4361 * If we got here, we were already attempting to
4362 * run a polled proxy command for this target.
4363 * Set ATN, copy in the message, and drive
4364 * on (ignoring any parity error on the identify).
4365 */
4366 IPRINTF1("fas_reconnect: fielding proxy cmd for %d\n",
4367 target);
4368 fas_assert_atn(fas);
4369 fas->f_omsglen = sp->cmd_cdb[FAS_PROXY_DATA];
4370 tmp = 0;
4371 while (tmp < fas->f_omsglen) {
4372 fas->f_cur_msgout[tmp] =
4373 sp->cmd_cdb[FAS_PROXY_DATA+1+tmp];
4374 tmp++;
4375 }
4376 sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
4377
4378 /*
4379 * pretend that the disconnected cmd is still disconnected
4380 * (this prevents ndisc from going negative)
4381 */
4382 fas->f_ndisc++;
4383 ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
4384 ASSERT(fas->f_ncmds >= fas->f_ndisc);
4385 }
4386
4387 ASSERT(fas->f_resel_slot == slot);
4388 ASSERT(fas->f_ndisc > 0);
4389 fas->f_ndisc--;
4390 sp->cmd_flags &= ~CFLAG_CMDDISC;
4391 New_state(fas, ACTS_UNKNOWN);
4392
4393 /*
4394 * A reconnect may imply a restore pointers operation
4395 * Note that some older disks (Micropolis in Pbox) do not
4396 * send a save data ptr on disconnect if all data has been
4397 * xferred. So, we cannot restore ptrs yet here.
4398 */
4399 if ((sp->cmd_flags & CFLAG_DMAVALID) &&
4400 (sp->cmd_data_count != sp->cmd_saved_data_count)) {
4401 sp->cmd_flags |= CFLAG_RESTORE_PTRS;
4402 }
4403
4404 /*
4405 * Return to await the FUNCTION COMPLETE interrupt we
4406 * should get out of accepting the IDENTIFY message.
4407 */
4408 EPRINTF2("Reconnecting %d.%d\n", target, slot % NLUNS_PER_TARGET);
4409 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RETURN2_END,
4410 "fas_reconnect_end (_RETURN2)");
4411 return (ACTION_RETURN);
4412
4413 bad:
4414 if (sp && (fas->f_stat & FAS_STAT_PERR)) {
4415 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
4416 }
4417 fas_log(fas, CE_WARN, "target %x: failed reselection (%s)",
4418 target, bad_reselect);
4419
4420 #ifdef FASDEBUG
4421 fas_printstate(fas, "failed reselection");
4422 #endif
4423 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RECONNECT_RESET5_END,
4424 "fas_reconnect_end (_RESET5)");
4425 return (ACTION_RESET);
4426 }
4427
4428 /*
4429 * handle unknown bus phase
4430 * we don't know what to expect so check status register for current
4431 * phase
4432 */
4433 int
4434 fas_handle_unknown(struct fas *fas)
4435 {
4436 TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_START,
4437 "fas_handle_unknown_start: fas 0x%p", fas);
4438 EPRINTF("fas_handle_unknown:\n");
4439
4440 if ((fas->f_intr & FAS_INT_DISCON) == 0) {
4441 /*
4442 * we call actions here rather than returning to phasemanage
4443 * (this is the most frequently called action)
4444 */
4445 switch (fas->f_stat & FAS_PHASE_MASK) {
4446 case FAS_PHASE_DATA_IN:
4447 case FAS_PHASE_DATA_OUT:
4448 New_state(fas, ACTS_DATA);
4449 TRACE_0(TR_FAC_SCSI_FAS,
4450 TR_FAS_HANDLE_UNKNOWN_PHASE_DATA_END,
4451 "fas_handle_unknown_end (phase_data)");
4452 return (fas_handle_data_start(fas));
4453
4454 case FAS_PHASE_MSG_OUT:
4455 New_state(fas, ACTS_MSG_OUT);
4456 TRACE_0(TR_FAC_SCSI_FAS,
4457 TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_OUT_END,
4458 "fas_handle_unknown_end (phase_msg_out)");
4459 return (fas_handle_msg_out_start(fas));
4460
4461 case FAS_PHASE_MSG_IN:
4462 New_state(fas, ACTS_MSG_IN);
4463 TRACE_0(TR_FAC_SCSI_FAS,
4464 TR_FAS_HANDLE_UNKNOWN_PHASE_MSG_IN_END,
4465 "fas_handle_unknown_end (phase_msg_in)");
4466 return (fas_handle_msg_in_start(fas));
4467
4468 case FAS_PHASE_STATUS:
4469 fas_reg_cmd_write(fas, CMD_FLUSH);
4470 #ifdef FAS_TEST
4471 if (fas_ptest_status & (1<<Tgt(fas->f_current_sp))) {
4472 fas_assert_atn(fas);
4473 }
4474 #endif /* FAS_TEST */
4475
4476 fas_reg_cmd_write(fas, CMD_COMP_SEQ);
4477 New_state(fas, ACTS_C_CMPLT);
4478
4479 TRACE_0(TR_FAC_SCSI_FAS,
4480 TR_FAS_HANDLE_UNKNOWN_PHASE_STATUS_END,
4481 "fas_handle_unknown_end (phase_status)");
4482 return (fas_handle_c_cmplt(fas));
4483
4484 case FAS_PHASE_COMMAND:
4485 New_state(fas, ACTS_CMD_START);
4486 TRACE_0(TR_FAC_SCSI_FAS,
4487 TR_FAS_HANDLE_UNKNOWN_PHASE_CMD_END,
4488 "fas_handle_unknown_end (phase_cmd)");
4489 return (fas_handle_cmd_start(fas));
4490 }
4491
4492 fas_printstate(fas, "Unknown bus phase");
4493 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_RESET_END,
4494 "fas_handle_unknown_end (reset)");
4495 return (ACTION_RESET);
4496
4497 } else {
4498 /*
4499 * Okay. What to do now? Let's try (for the time being)
4500 * assuming that the target went south and dropped busy,
4501 * as a disconnect implies that either we received
4502 * a completion or a disconnect message, or that we
4503 * had sent an ABORT OPERATION or BUS DEVICE RESET
4504 * message. In either case, we expected the disconnect
4505 * and should have fielded it elsewhere.
4506 *
4507 * If we see a chip disconnect here, this is an unexpected
4508 * loss of BSY*. Clean up the state of the chip and return.
4509 *
4510 */
4511 int msgout = fas->f_cur_msgout[0];
4512 struct fas_cmd *sp = fas->f_current_sp;
4513 int target = Tgt(sp);
4514
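/*
 * If the message out began with a two-byte queue tag message, the
 * message of interest (e.g. ABORT TAG) is the byte that followed
 * it, i.e. f_cur_msgout[2].
 */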
4515 if (msgout == MSG_HEAD_QTAG || msgout == MSG_SIMPLE_QTAG) {
4516 msgout = fas->f_cur_msgout[2];
4517 }
4518 EPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
4519 fas->f_cur_msgout[0], fas->f_cur_msgout[1],
4520 fas->f_cur_msgout[2], fas->f_last_msgout);
4521
4522 if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG ||
4523 msgout == MSG_DEVICE_RESET) {
4524 IPRINTF2("Successful %s message to target %d\n",
4525 scsi_mname(msgout), Tgt(sp));
4526 if (sp->cmd_flags & CFLAG_CMDPROXY) {
4527 sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE;
4528 }
4529 if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) {
4530 fas->f_abort_msg_sent++;
4531 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4532 fas_set_pkt_reason(fas, sp,
4533 CMD_ABORTED, STAT_ABORTED);
4534 }
4535 } else if (msgout == MSG_DEVICE_RESET) {
4536 fas->f_reset_msg_sent++;
4537 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4538 fas_set_pkt_reason(fas, sp,
4539 CMD_RESET, STAT_DEV_RESET);
4540 }
4541 fas_force_renegotiation(fas, target);
4542 }
4543 } else {
4544 if ((fas->f_last_msgout == MSG_EXTENDED) &&
4545 (fas->f_last_msgin == MSG_REJECT)) {
4546 /*
4547 * the target rejected the negotiations,
4548 * so resubmit again (no_sync/no_wide
4549 * is now set)
4550 */
4551 New_state(fas, STATE_FREE);
4552 fas_reg_cmd_write(fas, CMD_EN_RESEL);
4553 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
4554 fas_decrement_ncmds(fas, sp);
4555 fas_check_ncmds(fas);
4556 sp->cmd_flags &= ~CFLAG_TRANFLAG;
4557 (void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
4558 fas_check_ncmds(fas);
4559 TRACE_0(TR_FAC_SCSI_FAS,
4560 TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END,
4561 "fas_handle_unknown_end (int_discon)");
4562 return (ACTION_SEARCH);
4563
4564 } else if (fas->f_last_msgout == MSG_EXTENDED) {
4565 /*
4566 * target dropped off the bus during
4567 * negotiations
4568 */
4569 fas_reset_sync_wide(fas);
4570 fas->f_sdtr_sent = fas->f_wdtr_sent = 0;
4571 }
4572
4573 fas_set_pkt_reason(fas, sp, CMD_UNX_BUS_FREE, 0);
4574 #ifdef FASDEBUG
4575 fas_printstate(fas, "unexpected bus free");
4576 #endif
4577 }
4578 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_UNKNOWN_INT_DISCON_END,
4579 "fas_handle_unknown_end (int_discon)");
4580 return (ACTION_FINISH);
4581 }
4582 _NOTE(NOT_REACHED)
4583 /* NOTREACHED */
4584 }
4585
4586 /*
4587 * handle target disconnecting
4588 */
4589 static int
4590 fas_handle_clearing(struct fas *fas)
4591 {
4592 struct fas_cmd *sp = fas->f_current_sp;
4593
4594 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_START,
4595 "fas_handle_clearing_start");
4596 EPRINTF("fas_handle_clearing:\n");
4597
4598 if (fas->f_laststate == ACTS_C_CMPLT ||
4599 fas->f_laststate == ACTS_MSG_IN_DONE) {
4600 if (INTPENDING(fas)) {
4601 volatile struct fasreg *fasreg = fas->f_reg;
4602
4603 fas->f_stat = fas_reg_read(fas,
4604 (uchar_t *)&fasreg->fas_stat);
4605 fas->f_intr = fas_reg_read(fas,
4606 (uchar_t *)&fasreg->fas_intr);
4607 if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
4608 return (fas_illegal_cmd_or_bus_reset(fas));
4609 }
4610 } else {
4611 /*
4612 * change f_laststate for the next time around
4613 */
4614 fas->f_laststate = ACTS_CLEARING;
4615 TRACE_0(TR_FAC_SCSI_FAS,
4616 TR_FAS_HANDLE_CLEARING_RETURN1_END,
4617 "fas_handle_clearing_end (ACTION_RETURN1)");
4618 return (ACTION_RETURN);
4619 }
4620 }
4621
4622 if (fas->f_intr == FAS_INT_DISCON) {
4623 /*
4624 * At this point the FAS chip has disconnected. The bus should
4625 * be either quiet or someone may be attempting a reselection
4626 * of us (or somebody else). Call the routine that sets the
4627 * chip back to a correct and known state.
4628 * If the last message in was a disconnect, search
4629 * for new work to do, else return to call fas_finish()
4630 */
4631 fas->f_last_msgout = 0xff;
4632 fas->f_omsglen = 0;
4633 if (fas->f_last_msgin == MSG_DISCONNECT) {
4634
4635 fas_reg_cmd_write(fas, CMD_EN_RESEL);
4636
4637 New_state(fas, STATE_FREE);
4638
4639 ASSERT(fas->f_current_sp != NULL);
4640 EPRINTF2("disconnecting %d.%d\n", Tgt(sp), Lun(sp));
4641
4642 sp->cmd_pkt->pkt_statistics |= STAT_DISCON;
4643 sp->cmd_flags |= CFLAG_CMDDISC;
4644 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
4645 fas->f_ndisc++;
4646 }
4647 ASSERT((fas->f_ncmds >= 0) && (fas->f_ndisc >= 0));
4648 ASSERT(fas->f_ncmds >= fas->f_ndisc);
4649
4650 fas->f_current_sp = NULL;
4651
4652 /*
4653 * start a cmd here to save time
4654 */
4655 if ((fas->f_ncmds > fas->f_ndisc) && fas_ustart(fas)) {
4656 TRACE_0(TR_FAC_SCSI_FAS,
4657 TR_FAS_HANDLE_CLEARING_RETURN2_END,
4658 "fas_handle_clearing_end (ACTION_RETURN2)");
4659 return (ACTION_RETURN);
4660 }
4661
4662
4663 TRACE_0(TR_FAC_SCSI_FAS,
4664 TR_FAS_HANDLE_CLEARING_RETURN3_END,
4665 "fas_handle_clearing_end (ACTION_RETURN3)");
4666 return (ACTION_RETURN);
4667 } else {
4668 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_END,
4669 "fas_handle_clearing_end");
4670 return (fas_finish(fas));
4671 }
4672 } else {
4673 /*
4674 * If the target didn't disconnect from the
4675 * bus, that is a gross fatal error.
4676 * XXX this can be caused by asserting ATN
4677 * XXX check bus phase and if msgout, send a message
4678 */
4679 fas_log(fas, CE_WARN,
4680 "Target %d didn't disconnect after sending %s",
4681 Tgt(sp), scsi_mname(fas->f_last_msgin));
4682
4683 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4684
4685 #ifdef FASDEBUG
4686 IPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
4687 fas->f_cur_msgout[0], fas->f_cur_msgout[1],
4688 fas->f_cur_msgout[2], fas->f_last_msgout);
4689 IPRINTF1("last msgin=%x\n", fas->f_last_msgin);
4690 #endif
4691 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CLEARING_ABORT_END,
4692 "fas_handle_clearing_end (ACTION_ABORT_CURCMD)");
4693 return (ACTION_ABORT_ALLCMDS);
4694 }
4695 }
4696
4697 /*
4698 * handle data phase start
4699 */
4700 static int
4701 fas_handle_data_start(struct fas *fas)
4702 {
4703 uint64_t end;
4704 uint32_t amt;
4705 struct fas_cmd *sp = fas->f_current_sp;
4706 int sending, phase;
4707
4708 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_START,
4709 "fas_handle_data_start");
4710 EPRINTF("fas_handle_data_start:\n");
4711
4712 if ((sp->cmd_flags & CFLAG_DMAVALID) == 0) {
4713 fas_printstate(fas, "unexpected data phase");
4714 bad:
4715 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4716
4717 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT1_END,
4718 "fas_handle_data_end (ACTION_ABORT_CURCMD1)");
4719 return (ACTION_ABORT_CURCMD);
4720 } else {
4721 sending = (sp->cmd_flags & CFLAG_DMASEND)? 1 : 0;
4722 }
4723
4724 if (sp->cmd_flags & CFLAG_RESTORE_PTRS) {
4725 if (fas_restore_pointers(fas, sp)) {
4726 return (ACTION_ABORT_CURCMD);
4727 }
4728 sp->cmd_flags &= ~CFLAG_RESTORE_PTRS;
4729 }
4730
4731 /*
4732 * And make sure our DMA pointers are in good shape.
4733 *
4734 * Because SCSI is SCSI, the current DMA pointer has got to be
4735 * greater than or equal to our DMA base address. All other cases
4736 * that might have affected this always set curaddr to be >=
4737 * to the DMA base address.
4738 */
4739 ASSERT(sp->cmd_cur_addr >= sp->cmd_dmacookie.dmac_address);
4740 end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4741 (uint64_t)sp->cmd_dmacookie.dmac_size;
4742
4743 DPRINTF5(
4744 "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%"
4745 PRIx64 ", nwin=%x\n",
4746 sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
4747 sp->cmd_nwin);
4748 DPRINTF2("dmac_address = %x, dmac_size=%lx\n",
4749 sp->cmd_dmacookie.dmac_address, sp->cmd_dmacookie.dmac_size);
4750
4751 if (sp->cmd_cur_addr >= end) {
4752 if (fas_next_window(fas, sp, end)) {
4753 goto bad;
4754 }
4755 end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4756 (uint64_t)sp->cmd_dmacookie.dmac_size;
4757 DPRINTF2("dmac_address=%x, dmac_size=%lx\n",
4758 sp->cmd_dmacookie.dmac_address,
4759 sp->cmd_dmacookie.dmac_size);
4760 }
4761
4762 amt = end - sp->cmd_cur_addr;
4763 if (fas->f_dma_attr->dma_attr_count_max < amt) {
4764 amt = fas->f_dma_attr->dma_attr_count_max;
4765 }
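/*
 * amt is now the size of this transfer leg: the remainder of the
 * current DMA window, clamped to the largest single count the DMA
 * engine can move (dma_attr_count_max).
 */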
4766 DPRINTF3("amt=%x, end=%lx, cur_addr=%x\n", amt, end, sp->cmd_cur_addr);
4767
4768 #ifdef FASDEBUG
4769 /*
4770 * Make sure that we don't cross a boundary we can't handle
4771 */
4772 end = (uint64_t)sp->cmd_cur_addr + (uint64_t)amt - 1;
4773 if ((end & ~fas->f_dma_attr->dma_attr_seg) !=
4774 (sp->cmd_cur_addr & ~fas->f_dma_attr->dma_attr_seg)) {
4775 EPRINTF3("curaddr %x curaddr+amt %" PRIx64
4776 " cntr_max %" PRIx64 "\n",
4777 sp->cmd_cur_addr, end, fas->f_dma_attr->dma_attr_seg);
4778 amt = (end & ~fas->f_dma_attr->dma_attr_seg) - sp->cmd_cur_addr;
4779 if (amt == 0 || amt > fas->f_dma_attr->dma_attr_count_max) {
4780 fas_log(fas, CE_WARN, "illegal dma boundary? %x", amt);
4781 goto bad;
4782 }
4783 }
4784 #endif
4785
4786 end = (uint64_t)sp->cmd_dmacookie.dmac_address +
4787 (uint64_t)sp->cmd_dmacookie.dmac_size -
4788 (uint64_t)sp->cmd_cur_addr;
4789 if (amt > end) {
4790 EPRINTF4("ovflow amt %x s.b. %" PRIx64 " curaddr %x count %x\n",
4791 amt, end, sp->cmd_cur_addr, sp->cmd_dmacount);
4792 amt = (uint32_t)end;
4793 }
4794
4795 fas->f_lastcount = amt;
4796
4797 EPRINTF4("%d.%d cmd 0x%x to xfer %x\n", Tgt(sp), Lun(sp),
4798 sp->cmd_pkt->pkt_cdbp[0], amt);
4799
4800 phase = fas->f_stat & FAS_PHASE_MASK;
4801
4802 if ((phase == FAS_PHASE_DATA_IN) && !sending) {
4803 FAS_DMA_WRITE(fas, amt, sp->cmd_cur_addr,
4804 CMD_TRAN_INFO|CMD_DMA);
4805 } else if ((phase == FAS_PHASE_DATA_OUT) && sending) {
4806 FAS_DMA_READ(fas, amt, sp->cmd_cur_addr, amt,
4807 CMD_TRAN_INFO|CMD_DMA);
4808 } else {
4809 fas_log(fas, CE_WARN,
4810 "unwanted data xfer direction for Target %d", Tgt(sp));
4811 fas_set_pkt_reason(fas, sp, CMD_DMA_DERR, 0);
4812 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_ABORT2_END,
4813 "fas_handle_data_end (ACTION_ABORT_CURCMD2)");
4814 return (ACTION_ABORT_CURCMD);
4815 }
4816
4817 #ifdef FAS_TEST
4818 if (!sending && (fas_ptest_data_in & (1<<Tgt(sp)))) {
4819 fas_assert_atn(fas);
4820 }
4821 #endif /* FAS_TEST */
4822
4823 New_state(fas, ACTS_DATA_DONE);
4824
4825 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_END,
4826 "fas_handle_data_end (ACTION_RETURN)");
4827 return (ACTION_RETURN);
4828 }
4829
4830 static int
4831 fas_handle_data_done(struct fas *fas)
4832 {
4833 volatile struct fasreg *fasreg = fas->f_reg;
4834 volatile struct dma *dmar = fas->f_dma;
4835 struct fas_cmd *sp = fas->f_current_sp;
4836 uint32_t xfer_amt;
4837 char was_sending;
4838 uchar_t stat, fifoamt, tgt;
4839
4840 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_START,
4841 "fas_handle_data_done_start");
4842 EPRINTF("fas_handle_data_done\n");
4843
4844 tgt = Tgt(sp);
4845 stat = fas->f_stat;
4846 was_sending = (sp->cmd_flags & CFLAG_DMASEND) ? 1 : 0;
4847
4848 /*
4849 * Check for DMA errors (parity or memory fault)
4850 */
4851 if ((fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr)) &
4852 DMA_ERRPEND) {
4853 /*
4854 * It would be desirable to set the ATN* line and attempt to
4855 * do the whole schmear of INITIATOR DETECTED ERROR here,
4856 * but that is too hard to do at present.
4857 */
4858 fas_log(fas, CE_WARN, "Unrecoverable DMA error on dma %s",
4859 (was_sending) ? "send" : "receive");
4860 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4861 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
4862 "fas_handle_data_done_end (ACTION_RESET)");
4863 return (ACTION_RESET);
4864 }
4865
4866 /*
4867 * Data Receive conditions:
4868 *
4869 * Check for parity errors. If we have a parity error upon
4870 * receive, the FAS chip has asserted ATN* for us already.
4871 */
4872 if (!was_sending) {
4873 #ifdef FAS_TEST
4874 if (fas_ptest_data_in & (1<<tgt)) {
4875 fas_ptest_data_in = 0;
4876 stat |= FAS_STAT_PERR;
4877 if (fas_test_stop > 1) {
4878 debug_enter("ptest_data_in");
4879 }
4880 }
4881 #endif /* FAS_TEST */
4882 if (stat & FAS_STAT_PERR) {
4883 fas_log(fas, CE_WARN,
4884 "SCSI bus DATA IN phase parity error");
4885 fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
4886 fas->f_omsglen = 1;
4887 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
4888 sp->cmd_pkt->pkt_reason = CMD_TRAN_ERR;
4889 }
4890 }
4891
4892 FAS_FLUSH_DMA(fas);
4893
4894 /*
4895 * Check to make sure we're still connected to the target.
4896 * If the target dropped the bus, that is a fatal error.
4897 * We don't even attempt to count what we were transferring
4898 * here. Let fas_handle_unknown clean up for us.
4899 */
4900 if (fas->f_intr != FAS_INT_BUS) {
4901 New_state(fas, ACTS_UNKNOWN);
4902 TRACE_0(TR_FAC_SCSI_FAS,
4903 TR_FAS_HANDLE_DATA_DONE_PHASEMANAGE_END,
4904 "fas_handle_data_done_end (ACTION_PHASEMANAGE)");
4905 return (ACTION_PHASEMANAGE);
4906 }
4907
4908 /*
4909 * Figure out how far we got.
4910 * Latch up fifo amount first and double if wide has been enabled
4911 */
4912 fifoamt = FIFO_CNT(fas);
4913 if (fas->f_wide_enabled & (1<<tgt)) {
4914 fifoamt = fifoamt << 1;
4915 }
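/*
 * On a wide (16-bit) bus each fifo entry holds two bytes, so the
 * residual byte count is twice the raw fifo count.
 */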
4916
4917 if (stat & FAS_STAT_XZERO) {
4918 xfer_amt = fas->f_lastcount;
4919 } else {
4920 GET_FAS_COUNT(fasreg, xfer_amt);
4921 xfer_amt = fas->f_lastcount - xfer_amt;
4922 }
4923 DPRINTF4("fifoamt=%x, xfer_amt=%x, lastcount=%x, stat=%x\n",
4924 fifoamt, xfer_amt, fas->f_lastcount, stat);
4925
4926
4927 /*
4928 * Unconditionally knock off by the amount left
4929 * in the fifo if we were sending out the SCSI bus.
4930 *
4931 * If we were receiving from the SCSI bus, believe
4932 * what the chip told us (either XZERO or by the
4933 * value calculated from the counter register).
4934 * The reason we don't look at the fifo for
4935 * incoming data is that in synchronous mode
4936 * the fifo may have further data bytes, and
4937 * for async mode we assume that all data in
4938 * the fifo will have been transferred before
4939 * the fas asserts an interrupt.
4940 */
4941 if (was_sending) {
4942 xfer_amt -= fifoamt;
4943 }
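/*
 * Example: for a programmed 4096-byte send (f_lastcount) with the
 * counter showing 16 bytes untransferred and 4 bytes still in the
 * fifo, the target actually took 4096 - 16 - 4 = 4076 bytes.
 */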
4944
4945 #ifdef FASDEBUG
4946 {
4947 int phase = stat & FAS_PHASE_MASK;
4948 fas->f_stat2 = fas_reg_read(fas,
4949 (uchar_t *)&fasreg->fas_stat2);
4950
4951 if (((fas->f_stat & FAS_STAT_XZERO) == 0) &&
4952 (phase != FAS_PHASE_DATA_IN) &&
4953 (phase != FAS_PHASE_DATA_OUT) &&
4954 (fas->f_stat2 & FAS_STAT2_ISHUTTLE)) {
4955 fas_log(fas, CE_WARN,
4956 "input shuttle not empty at end of data phase");
4957 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
4958 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_RESET_END,
4959 "fas_handle_data_done_end (ACTION_RESET)");
4960 return (ACTION_RESET);
4961 }
4962 }
4963 #endif /* FASDEBUG */
4964
4965 /*
4966 * If this was a synchronous transfer, flag it.
4967 * Also check for the errata condition of long
4968 * last REQ/ pulse for some synchronous targets
4969 */
4970 if (fas->f_offset[tgt]) {
4971 /*
4972 * flag that a synchronous data xfer took place
4973 */
4974 sp->cmd_pkt->pkt_statistics |= STAT_SYNC;
4975
4976 if (was_sending)
4977 fas_reg_cmd_write(fas, CMD_FLUSH);
4978 } else {
4979 /*
4980 * If we aren't doing Synchronous Data Transfers,
4981 * definitely offload the fifo.
4982 */
4983 fas_reg_cmd_write(fas, CMD_FLUSH);
4984 }
4985
4986 /*
4987 * adjust pointers...
4988 */
4989 DPRINTF3("before:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
4990 sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);
4991 sp->cmd_data_count += xfer_amt;
4992 sp->cmd_cur_addr += xfer_amt;
4993 sp->cmd_pkt->pkt_state |= STATE_XFERRED_DATA;
4994 New_state(fas, ACTS_UNKNOWN);
4995 DPRINTF3("after:cmd_data_count=%x, cmd_cur_addr=%x, xfer_amt=%x\n",
4996 sp->cmd_data_count, sp->cmd_cur_addr, xfer_amt);
4997
4998 stat &= FAS_PHASE_MASK;
4999 if (stat == FAS_PHASE_DATA_IN || stat == FAS_PHASE_DATA_OUT) {
5000 fas->f_state = ACTS_DATA;
5001 TRACE_0(TR_FAC_SCSI_FAS,
5002 TR_FAS_HANDLE_DATA_DONE_ACTION1_END,
5003 "fas_handle_data_done_end (action1)");
5004 return (fas_handle_data_start(fas));
5005 }
5006
5007 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_DATA_DONE_ACTION2_END,
5008 "fas_handle_data_done_end (action2)");
5009 return (fas_handle_unknown(fas));
5010 }
5011
5012 static char msginperr[] = "SCSI bus MESSAGE IN phase parity error";
5013
5014 static int
5015 fas_handle_c_cmplt(struct fas *fas)
5016 {
5017 struct fas_cmd *sp = fas->f_current_sp;
5018 volatile struct fasreg *fasreg = fas->f_reg;
5019 uchar_t sts, msg, intr, perr;
5020
5021 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_START,
5022 "fas_handle_c_cmplt_start");
5023 EPRINTF("fas_handle_c_cmplt:\n");
5024
5025
5026 /*
5027 * if target is fast, we can get cmd completion by the time we get
5028 * here. Otherwise, we'll have to take an interrupt.
5029 */
5030 if (fas->f_laststate == ACTS_UNKNOWN) {
5031 if (INTPENDING(fas)) {
5032 fas->f_stat = fas_reg_read(fas,
5033 (uchar_t *)&fasreg->fas_stat);
5034 intr = fas_reg_read(fas, (uchar_t *)&fasreg->fas_intr);
5035 fas->f_intr = intr;
5036 if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
5037 return (fas_illegal_cmd_or_bus_reset(fas));
5038 }
5039 } else {
5040 /*
5041 * change f_laststate for the next time around
5042 */
5043 fas->f_laststate = ACTS_C_CMPLT;
5044 TRACE_0(TR_FAC_SCSI_FAS,
5045 TR_FAS_HANDLE_C_CMPLT_RETURN1_END,
5046 "fas_handle_c_cmplt_end (ACTION_RETURN1)");
5047 return (ACTION_RETURN);
5048 }
5049 } else {
5050 intr = fas->f_intr;
5051 }
5052
5053 #ifdef FAS_TEST
5054 if (fas_ptest_status & (1<<Tgt(sp))) {
5055 fas_ptest_status = 0;
5056 fas->f_stat |= FAS_STAT_PERR;
5057 if (fas_test_stop > 1) {
5058 debug_enter("ptest_status");
5059 }
5060 } else if ((fas_ptest_msgin & (1<<Tgt(sp))) && fas_ptest_msg == 0) {
5061 fas_ptest_msgin = 0;
5062 fas_ptest_msg = -1;
5063 fas->f_stat |= FAS_STAT_PERR;
5064 if (fas_test_stop > 1) {
5065 debug_enter("ptest_completion");
5066 }
5067 }
5068 #endif /* FAS_TEST */
5069
5070 if (intr == FAS_INT_DISCON) {
5071 New_state(fas, ACTS_UNKNOWN);
5072 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION1_END,
5073 "fas_handle_c_cmplt_end (action1)");
5074 return (fas_handle_unknown(fas));
5075 }
5076
5077 if ((perr = (fas->f_stat & FAS_STAT_PERR)) != 0) {
5078 fas_assert_atn(fas);
5079 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5080 }
5081
5082 /*
5083 * do a msg accept now and read the fifo data
5084 */
5085 if (intr & FAS_INT_FCMP) {
5086 /*
5087 * The FAS manuals state that this sequence completes
5088 * with a BUS SERVICE interrupt if just the status
5089 * byte was received, else a FUNCTION COMPLETE interrupt
5090 * if both status and a message was received.
5091 *
5092 * if we give the MSG_ACPT before reading the msg byte
5093 * we get the status byte again and if the status is zero
5094 * then we won't detect a failure
5095 */
5096 *(sp->cmd_pkt->pkt_scbp) =
5097 sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5098 fas->f_last_msgin = fas->f_imsgarea[0] =
5099 msg = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5100
5101 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
5102 sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;
5103
5104 /*
5105 * The manuals also state that ATN* is asserted if
5106 * bad parity is detected.
5107 *
5108 * The one case that we cannot handle is where we detect
5109 * bad parity for the status byte, but the target refuses
5110 * to go to MESSAGE OUT phase right away. This means that
5111 * if that happens, we will misconstrue the parity error
5112 * to be for the completion message, not the status byte.
5113 */
5114 if (perr) {
5115 fas_log(fas, CE_WARN, msginperr);
5116 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5117
5118 fas->f_cur_msgout[0] = MSG_MSG_PARITY;
5119 fas->f_omsglen = 1;
5120 New_state(fas, ACTS_UNKNOWN);
5121 TRACE_0(TR_FAC_SCSI_FAS,
5122 TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
5123 "fas_handle_c_cmplt_end (action5)");
5124 return (ACTION_RETURN);
5125 }
5126
5127 } else if (intr == FAS_INT_BUS) {
5128 /*
5129 * We only got the status byte.
5130 */
5131 sts = fas_reg_read(fas, (uchar_t *)&fasreg->fas_fifo_data);
5132 sp->cmd_pkt->pkt_state |= STATE_GOT_STATUS;
5133 *(sp->cmd_pkt->pkt_scbp) = sts;
5134 msg = INVALID_MSG;
5135
5136 IPRINTF1("fas_handle_cmd_cmplt: sts=%x, no msg byte\n", sts);
5137
5138 if (perr) {
5139 /*
5140 * If we get a parity error on a status byte
5141 * assume that it was a CHECK CONDITION
5142 */
5143 sts = STATUS_CHECK;
5144 fas_log(fas, CE_WARN,
5145 "SCSI bus STATUS phase parity error");
5146 fas->f_cur_msgout[0] = MSG_INITIATOR_ERROR;
5147 fas->f_omsglen = 1;
5148 New_state(fas, ACTS_UNKNOWN);
5149 TRACE_0(TR_FAC_SCSI_FAS,
5150 TR_FAS_HANDLE_C_CMPLT_ACTION5_END,
5151 "fas_handle_c_cmplt_end (action5)");
5152 return (fas_handle_unknown(fas));
5153 }
5154
5155 } else {
5156 msg = sts = INVALID_MSG;
5157 IPRINTF("fas_handle_cmd_cmplt: unexpected intr\n");
5158 New_state(fas, ACTS_UNKNOWN);
5159 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_C_CMPLT_ACTION2_END,
5160 "fas_handle_c_cmplt_end (action2)");
5161 return (fas_handle_unknown(fas));
5162 }
5163
5164 EPRINTF2("fas_handle_c_cmplt: status=%x, msg=%x\n", sts, msg);
5165
5166 EPRINTF1("Completion Message=%s\n", scsi_mname(msg));
5167 if (msg == MSG_COMMAND_COMPLETE) {
5168 /*
5169 * Actually, if the message was a 'linked command
5170 * complete' message, the target isn't going to be
5171 * clearing the bus.
5172 */
5173 New_state(fas, ACTS_CLEARING);
5174 TRACE_0(TR_FAC_SCSI_FAS,
5175 TR_FAS_HANDLE_C_CMPLT_ACTION4_END,
5176 "fas_handle_c_cmplt_end (action4)");
5177 return (fas_handle_clearing(fas));
5178 } else {
5179 fas->f_imsglen = 1;
5180 fas->f_imsgindex = 1;
5181 New_state(fas, ACTS_MSG_IN_DONE);
5182 TRACE_0(TR_FAC_SCSI_FAS,
5183 TR_FAS_HANDLE_C_CMPLT_ACTION3_END,
5184 "fas_handle_c_cmplt_end (action3)");
5185 return (fas_handle_msg_in_done(fas));
5186 }
5187 }
5188
5189 /*
5190 * prepare for accepting a message byte from the fifo
5191 */
5192 static int
5193 fas_handle_msg_in_start(struct fas *fas)
5194 {
5195 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_START,
5196 "fas_handle_msg_in_start");
5197 EPRINTF("fas_handle_msg_in_start\n");
5198
5199 /*
5200 * Pick up a message byte.
5201 * Clear the FIFO so we
5202 * don't get confused.
5203 */
5204 if (!FIFO_EMPTY(fas)) {
5205 fas_reg_cmd_write(fas, CMD_FLUSH);
5206 }
5207 fas_reg_cmd_write(fas, CMD_TRAN_INFO);
5208 fas->f_imsglen = 1;
5209 fas->f_imsgindex = 0;
5210 New_state(fas, ACTS_MSG_IN_DONE);
5211
5212 /*
5213 * give a little extra time by returning to phasemanage
5214 */
5215 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_END,
5216 "fas_handle_msg_in_end (ACTION_PHASEMANAGE)");
5217 return (ACTION_PHASEMANAGE);
5218 }
5219
5220 /*
5221 * We come here after issuing a MSG_ACCEPT
5222 * command and are expecting more message bytes.
5223 * The FAS should be asserting a BUS SERVICE
5224 * interrupt status, but may have asserted
5225 * a different interrupt in the case that
5226 * the target disconnected and dropped BSY*.
5227 *
5228 * In the case that we are eating up message
5229 * bytes (and throwing them away unread) because
5230 * we have ATN* asserted (we are trying to send
5231 * a message), we do not consider it an error
5232 * if the phase has changed out of MESSAGE IN.
5233 */
5234 static int
5235 fas_handle_more_msgin(struct fas *fas)
5236 {
5237 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_START,
5238 "fas_handle_more_msgin_start");
5239 EPRINTF("fas_handle_more_msgin\n");
5240
5241 if (fas->f_intr & FAS_INT_BUS) {
5242 if ((fas->f_stat & FAS_PHASE_MASK) == FAS_PHASE_MSG_IN) {
5243 /*
5244 * Fetch another byte of a message in.
5245 */
5246 fas_reg_cmd_write(fas, CMD_TRAN_INFO);
5247 New_state(fas, ACTS_MSG_IN_DONE);
5248 TRACE_0(TR_FAC_SCSI_FAS,
5249 TR_FAS_HANDLE_MORE_MSGIN_RETURN1_END,
5250 "fas_handle_more_msgin_end (ACTION_RETURN)");
5251 return (ACTION_RETURN);
5252 }
5253
5254 /*
5255 * If we were gobbling up a message and we have
5256 * changed phases, handle this silently, else
5257 * complain. In either case, we return to let
5258 * fas_phasemanage() handle things.
5259 *
5260 * If it wasn't a BUS SERVICE interrupt,
5261 * let fas_phasemanage() find out if the
5262 * chip disconnected.
5263 */
5264 if (fas->f_imsglen != 0) {
5265 fas_log(fas, CE_WARN,
5266 "Premature end of extended message");
5267 }
5268 }
5269 New_state(fas, ACTS_UNKNOWN);
5270 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MORE_MSGIN_RETURN2_END,
5271 "fas_handle_more_msgin_end (action)");
5272 return (fas_handle_unknown(fas));
5273 }
5274
5275 static int
5276 fas_handle_msg_in_done(struct fas *fas)
5277 {
5278 struct fas_cmd *sp = fas->f_current_sp;
5279 volatile struct fasreg *fasreg = fas->f_reg;
5280 int sndmsg = 0;
5281 uchar_t msgin;
5282
5283 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_START,
5284 "fas_handle_msg_in_done_start");
5285 EPRINTF("fas_handle_msg_in_done:\n");
5286 if (fas->f_laststate == ACTS_MSG_IN) {
5287 if (INTPENDING(fas)) {
5288 fas->f_stat = fas_reg_read(fas,
5289 (uchar_t *)&fasreg->fas_stat);
5290 fas->f_stat2 = fas_reg_read(fas,
5291 (uchar_t *)&fasreg->fas_stat2);
5292
5293 fas_read_fifo(fas);
5294
5295 fas->f_intr = fas_reg_read(fas,
5296 (uchar_t *)&fasreg->fas_intr);
5297 if (fas->f_intr & (FAS_INT_RESET | FAS_INT_ILLEGAL)) {
5298 return (fas_illegal_cmd_or_bus_reset(fas));
5299 }
5300 } else {
5301 /*
5302 * change f_laststate for the next time around
5303 */
5304 fas->f_laststate = ACTS_MSG_IN_DONE;
5305 TRACE_0(TR_FAC_SCSI_FAS,
5306 TR_FAS_HANDLE_MSG_IN_DONE_RETURN1_END,
5307 "fas_handle_msg_in_done_end (ACTION_RETURN1)");
5308 return (ACTION_RETURN);
5309 }
5310 }
5311
5312 /*
5313 * the most common case is a disconnect message. we do
5314 * a fast path for this condition and if it fails then
5315 * we go for the detailed error handling
5316 */
5317 #ifndef FAS_TEST
5318 if (((fas->f_laststate == ACTS_MSG_IN) ||
5319 (fas->f_laststate == ACTS_MSG_IN_DONE)) &&
5320 ((fas->f_intr & FAS_INT_DISCON) == 0) &&
5321 ((fas->f_stat & FAS_STAT_PERR) == 0) &&
5322 ((sp->cmd_pkt_flags & FLAG_NODISCON) == 0)) {
5323
5324 if ((fas->f_fifolen == 1) &&
5325 (fas->f_imsglen == 1) &&
5326 (fas->f_fifo[0] == MSG_DISCONNECT)) {
5327
5328 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
5329 fas->f_imsgarea[fas->f_imsgindex++] = fas->f_fifo[0];
5330 fas->f_last_msgin = MSG_DISCONNECT;
5331 New_state(fas, ACTS_CLEARING);
5332
5333 TRACE_0(TR_FAC_SCSI_FAS,
5334 TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
5335 "fas_handle_msg_in_done_end (action)");
5336
5337 return (fas_handle_clearing(fas));
5338 }
5339 }
5340 #endif /* not FAS_TEST */
5341
5342 /*
5343 * We can be called here for both the case where
5344 * we had requested the FAS chip to fetch a message
5345 * byte from the target (at the target's request).
5346 * We can also be called in the case where we had
5347 * been using the CMD_COMP_SEQ command to pick up
5348 * both a status byte and a completion message from
5349 * a target, but where the message wasn't one of
5350 * COMMAND COMPLETE, LINKED COMMAND COMPLETE, or
5351 * LINKED COMMAND COMPLETE (with flag). This is a
5352 * legal (albeit extremely unusual) SCSI bus
5353 * transition, so we have to handle it.
5354 */
5355 if (fas->f_laststate != ACTS_C_CMPLT) {
5356 #ifdef FAS_TEST
5357 reloop:
5358 #endif /* FAS_TEST */
5359
5360 if (fas->f_intr & FAS_INT_DISCON) {
5361 fas_log(fas, CE_WARN,
5362 "premature end of input message");
5363 New_state(fas, ACTS_UNKNOWN);
5364 TRACE_0(TR_FAC_SCSI_FAS,
5365 TR_FAS_HANDLE_MSG_IN_DONE_PHASEMANAGE_END,
5366 "fas_handle_msg_in_done_end (ACTION_PHASEMANAGE)");
5367 return (ACTION_PHASEMANAGE);
5368 }
5369
5370 /*
5371 * Note that if f_imsglen is zero, then we are skipping
5372 * input message bytes, so there is no reason to look for
5373 * parity errors.
5374 */
5375 if (fas->f_imsglen != 0 && (fas->f_stat & FAS_STAT_PERR)) {
5376 fas_log(fas, CE_WARN, msginperr);
5377 sndmsg = MSG_MSG_PARITY;
5378 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5379 fas_reg_cmd_write(fas, CMD_FLUSH);
5380
5381 } else if ((msgin = fas->f_fifolen) != 1) {
5382
5383 /*
5384 * If we have got more than one byte or zero bytes in the fifo,
5385 * that is a gross screwup, and we should let the
5386 * target know that we have completely fouled up.
5387 */
5388 fas_printf(fas, "fifocount=%x", msgin);
5389 fas_printstate(fas, "input message botch");
5390 sndmsg = MSG_INITIATOR_ERROR;
5391 fas_reg_cmd_write(fas, CMD_FLUSH);
5392 fas_log(fas, CE_WARN, "input message botch");
5393
5394 } else if (fas->f_imsglen == 0) {
5395 /*
5396 * If we are in the middle of gobbling up and throwing
5397 * away a message (due to a previous message input
5398 * error), drive on.
5399 */
5400 msgin = fas_reg_read(fas,
5401 (uchar_t *)&fasreg->fas_fifo_data);
5402 New_state(fas, ACTS_MSG_IN_MORE);
5403
5404 } else {
5405 msgin = fas->f_fifo[0];
5406 fas->f_imsgarea[fas->f_imsgindex++] = msgin;
5407 }
5408
5409 } else {
5410 /*
5411 * In this case, we have been called (from
5412 * fas_handle_c_cmplt()) with the message
5413 * already stored in the message array.
5414 */
5415 msgin = fas->f_imsgarea[0];
5416 }
5417
5418 /*
5419 * Process this message byte (but not if we are
5420 * going to be trying to send back some error
5421 * anyway)
5422 */
5423 if (sndmsg == 0 && fas->f_imsglen != 0) {
5424
5425 if (fas->f_imsgindex < fas->f_imsglen) {
5426
5427 EPRINTF2("message byte %d: 0x%x\n",
5428 fas->f_imsgindex-1,
5429 fas->f_imsgarea[fas->f_imsgindex-1]);
5430
5431 New_state(fas, ACTS_MSG_IN_MORE);
5432
5433 } else if (fas->f_imsglen == 1) {
5434
5435 #ifdef FAS_TEST
5436 if ((fas_ptest_msgin & (1<<Tgt(sp))) &&
5437 fas_ptest_msg == msgin) {
5438 fas_ptest_msgin = 0;
5439 fas_ptest_msg = -1;
5440 fas_assert_atn(fas);
5441 fas->f_stat |= FAS_STAT_PERR;
5442 fas->f_imsgindex -= 1;
5443 if (fas_test_stop > 1) {
5444 debug_enter("ptest msgin");
5445 }
5446 goto reloop;
5447 }
5448 #endif /* FAS_TEST */
5449
5450 sndmsg = fas_onebyte_msg(fas);
5451
5452 } else if (fas->f_imsglen == 2) {
5453 #ifdef FAS_TEST
5454 if (fas_ptest_emsgin & (1<<Tgt(sp))) {
5455 fas_ptest_emsgin = 0;
5456 fas_assert_atn(fas);
5457 fas->f_stat |= FAS_STAT_PERR;
5458 fas->f_imsgindex -= 1;
5459 if (fas_test_stop > 1) {
5460 debug_enter("ptest emsgin");
5461 }
5462 goto reloop;
5463 }
5464 #endif /* FAS_TEST */
5465
5466 if (fas->f_imsgarea[0] == MSG_EXTENDED) {
5467 static char *tool =
5468 "Extended message 0x%x is too long";
5469
5470 /*
5471 * Is the incoming message too long
5472 * to be stored in our local array?
5473 */
5474 if ((int)(msgin+2) > IMSGSIZE) {
5475 fas_log(fas, CE_WARN,
5476 tool, fas->f_imsgarea[0]);
5477 sndmsg = MSG_REJECT;
5478 } else {
5479 fas->f_imsglen = msgin + 2;
5480 New_state(fas, ACTS_MSG_IN_MORE);
5481 }
5482 } else {
5483 sndmsg = fas_twobyte_msg(fas);
5484 }
5485
5486 } else {
5487 sndmsg = fas_multibyte_msg(fas);
5488 }
5489 }
5490
5491 if (sndmsg < 0) {
5492 /*
5493 * If sndmsg is less than zero, one of the subsidiary
5494 * routines needs to return some other state than
5495 * ACTION_RETURN.
5496 */
5497 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_SNDMSG_END,
5498 "fas_handle_msg_in_done_end (-sndmsg)");
5499 return (-sndmsg);
5500
5501 } else if (sndmsg > 0) {
5502 if (IS_1BYTE_MSG(sndmsg)) {
5503 fas->f_omsglen = 1;
5504 }
5505 fas->f_cur_msgout[0] = (uchar_t)sndmsg;
5506
5507 /*
5508 * The target is not guaranteed to go to message out
5509 * phase, period. Moreover, until the entire incoming
5510 * message is transferred, the target may (and likely
5511 * will) continue to transfer message bytes (which
5512 * we will have to ignore).
5513 *
5514 * In order to do this, we'll go to 'infinite'
5515 * message in handling by setting the current input
5516 * message length to a sentinel of zero.
5517 *
5518 * This works regardless of the message we are trying
5519 * to send out. At the point in time which we want
5520 * to send a message in response to an incoming message
5521 * we do not care any more about the incoming message.
5522 *
5523 * If we are sending a message in response to detecting
5524 * a parity error on input, the FAS chip has already
5525 * set ATN* for us, but it doesn't hurt to set it here
5526 * again anyhow.
5527 */
5528 fas_assert_atn(fas);
5529 New_state(fas, ACTS_MSG_IN_MORE);
5530 fas->f_imsglen = 0;
5531 }
5532
5533 fas_reg_cmd_write(fas, CMD_FLUSH);
5534
5535 fas_reg_cmd_write(fas, CMD_MSG_ACPT);
5536
5537 if ((fas->f_laststate == ACTS_MSG_IN_DONE) &&
5538 (fas->f_state == ACTS_CLEARING)) {
5539 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_ACTION_END,
5540 "fas_handle_msg_in_done_end (action)");
5541 return (fas_handle_clearing(fas));
5542 }
5543 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_IN_DONE_RETURN2_END,
5544 "fas_handle_msg_in_done_end (ACTION_RETURN2)");
5545 return (ACTION_RETURN);
5546 }
5547
5548 static int
5549 fas_onebyte_msg(struct fas *fas)
5550 {
5551 struct fas_cmd *sp = fas->f_current_sp;
5552 int msgout = 0;
5553 uchar_t msgin = fas->f_last_msgin = fas->f_imsgarea[0];
5554 int tgt = Tgt(sp);
5555
5556 EPRINTF("fas_onebyte_msg\n");
5557
5558 if (msgin & MSG_IDENTIFY) {
5559 /*
5560 * How did we get here? We should only see identify
5561 * messages on a reconnection, but we'll handle this
5562 * fine here (just in case we get this) as long as
5563 * we believe that this is a valid identify message.
5564 *
5565 * For this to be a valid incoming message,
5566 * bits 6-4 must be zero. Also, the
5567 * bit that says that I'm an initiator and
5568 * can support disconnection cannot possibly
5569 * be set here.
5570 */
5571
5572 char garbled = ((msgin & (BAD_IDENTIFY|INI_CAN_DISCON)) != 0);
5573
5574 fas_log(fas, CE_WARN, "%s message 0x%x from Target %d",
5575 garbled ? "Garbled" : "Identify", msgin, tgt);
5576
5577 if (garbled) {
5578 /*
5579 * If it's a garbled message,
5580 * try and tell the target...
5581 */
5582 msgout = MSG_INITIATOR_ERROR;
5583 } else {
5584 New_state(fas, ACTS_UNKNOWN);
5585 }
5586 return (msgout);
5587
5588 } else if (IS_2BYTE_MSG(msgin) || IS_EXTENDED_MSG(msgin)) {
5589 fas->f_imsglen = 2;
5590 New_state(fas, ACTS_MSG_IN_MORE);
5591 return (0);
5592 }
5593
5594 New_state(fas, ACTS_UNKNOWN);
5595
5596 switch (msgin) {
5597 case MSG_DISCONNECT:
5598 /*
5599 * If we 'cannot' disconnect- reject this message.
5600 * Note that we only key off of the pkt_flags here-
5601 * the FLAG_NODISCON was set in fas_accept_pkt() if
5602 * no disconnect was enabled in scsi_options
5603 */
5604 if (sp->cmd_pkt_flags & FLAG_NODISCON) {
5605 msgout = MSG_REJECT;
5606 break;
5607 }
5608 /* FALLTHROUGH */
5609 case MSG_COMMAND_COMPLETE:
5610 fas->f_state = ACTS_CLEARING;
5611 break;
5612
5613 case MSG_NOP:
5614 break;
5615
5616 /* XXX Make it a MSG_REJECT handler */
5617 case MSG_REJECT:
5618 {
5619 uchar_t reason = 0;
5620 uchar_t lastmsg = fas->f_last_msgout;
5621 /*
5622 * The target is rejecting the last message we sent.
5623 *
5624 * If the last message we attempted to send out was an
5625 * extended message, we were trying to negotiate sync
5626 * xfers- and we're okay.
5627 *
5628 * Otherwise, a target has rejected a message that
5629 * it should have handled. We will abort the operation
5630 * in progress and set the pkt_reason value here to
5631 * show why we have completed. The process of aborting
5632 * may be via a message or may be via a bus reset (as
5633 * a last resort).
5634 */
5635 msgout = (TAGGED(tgt)? MSG_ABORT_TAG : MSG_ABORT);
5636
5637 switch (lastmsg) {
5638 case MSG_EXTENDED:
5639 if (fas->f_wdtr_sent) {
5640 /*
5641 * Disable wide; the target rejected
5642 * our WDTR message
5643 */
5644 fas_set_wide_conf3(fas, tgt, 0);
5645 fas->f_nowide |= (1<<tgt);
5646 fas->f_wdtr_sent = 0;
5647 /*
5648 * we still want to negotiate sync
5649 */
5650 if ((fas->f_nosync & (1<<tgt)) == 0) {
5651 fas_assert_atn(fas);
5652 fas_make_sdtr(fas, 0, tgt);
5653 }
5654 } else if (fas->f_sdtr_sent) {
5655 fas_reg_cmd_write(fas, CMD_CLR_ATN);
5656 fas_revert_to_async(fas, tgt);
5657 fas->f_nosync |= (1<<tgt);
5658 fas->f_sdtr_sent = 0;
5659 }
5660 msgout = 0;
5661 break;
5662 case MSG_NOP:
5663 reason = CMD_NOP_FAIL;
5664 break;
5665 case MSG_INITIATOR_ERROR:
5666 reason = CMD_IDE_FAIL;
5667 break;
5668 case MSG_MSG_PARITY:
5669 reason = CMD_PER_FAIL;
5670 break;
5671 case MSG_REJECT:
5672 reason = CMD_REJECT_FAIL;
5673 break;
5674 /* XXX - abort not good, queue full handling or drain (?) */
5675 case MSG_SIMPLE_QTAG:
5676 case MSG_ORDERED_QTAG:
5677 case MSG_HEAD_QTAG:
5678 msgout = MSG_ABORT;
5679 reason = CMD_TAG_REJECT;
5680 break;
5681 case MSG_DEVICE_RESET:
5682 reason = CMD_BDR_FAIL;
5683 msgout = -ACTION_ABORT_CURCMD;
5684 break;
5685 case MSG_ABORT:
5686 case MSG_ABORT_TAG:
5687 /*
5688 * If a RESET/ABORT OPERATION message is rejected
5689 * it is time to yank the chain on the bus...
5690 */
5691 reason = CMD_ABORT_FAIL;
5692 msgout = -ACTION_ABORT_CURCMD;
5693 break;
5694 default:
5695 if (IS_IDENTIFY_MSG(lastmsg)) {
5696 if (TAGGED(tgt)) {
5697 /*
5698 * this often happens when the
5699 * target rejected our tag
5700 */
5701 reason = CMD_TAG_REJECT;
5702 } else {
5703 reason = CMD_ID_FAIL;
5704 }
5705 } else {
5706 reason = CMD_TRAN_ERR;
5707 msgout = -ACTION_ABORT_CURCMD;
5708 }
5709
5710 break;
5711 }
5712
5713 if (msgout) {
5714 fas_log(fas, CE_WARN,
5715 "Target %d rejects our message '%s'",
5716 tgt, scsi_mname(lastmsg));
5717 fas_set_pkt_reason(fas, sp, reason, 0);
5718 }
5719
5720 break;
5721 }
5722 case MSG_RESTORE_PTRS:
5723 sp->cmd_cdbp = sp->cmd_pkt->pkt_cdbp;
5724 if (sp->cmd_data_count != sp->cmd_saved_data_count) {
5725 if (fas_restore_pointers(fas, sp)) {
5726 msgout = -ACTION_ABORT_CURCMD;
5727 } else if ((sp->cmd_pkt->pkt_reason & CMD_TRAN_ERR) &&
5728 (sp->cmd_pkt->pkt_statistics & STAT_PERR) &&
5729 (sp->cmd_cur_win == 0) &&
5730 (sp->cmd_data_count == 0)) {
5731 sp->cmd_pkt->pkt_reason &= ~CMD_TRAN_ERR;
5732 }
5733 }
5734 break;
5735
5736 case MSG_SAVE_DATA_PTR:
5737 sp->cmd_saved_data_count = sp->cmd_data_count;
5738 sp->cmd_saved_win = sp->cmd_cur_win;
5739 sp->cmd_saved_cur_addr = sp->cmd_cur_addr;
5740 break;
5741
5742 /* These don't make sense for us, and */
5743 /* will be rejected */
5744 /* case MSG_INITIATOR_ERROR */
5745 /* case MSG_ABORT */
5746 /* case MSG_MSG_PARITY */
5747 /* case MSG_DEVICE_RESET */
5748 default:
5749 msgout = MSG_REJECT;
5750 fas_log(fas, CE_WARN,
5751 "Rejecting message '%s' from Target %d",
5752 scsi_mname(msgin), tgt);
5753 break;
5754 }
5755
5756 EPRINTF1("Message in: %s\n", scsi_mname(msgin));
5757
5758 return (msgout);
5759 }
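#ifdef FAS_EXAMPLE_SKETCH	/* hypothetical guard; sketches are not compiled */
/*
 * A minimal sketch of the IDENTIFY validity test above, mirroring the
 * comment in fas_onebyte_msg(): bit 7 set marks an IDENTIFY message,
 * and for an incoming (reselection) IDENTIFY bits 6-4 must be clear,
 * leaving the low bits as the LUN.  The 0x70 mask is assumed to cover
 * the same bits as BAD_IDENTIFY|INI_CAN_DISCON.
 */
static int
example_identify_ok(uchar_t msgin)
{
	if ((msgin & 0x80) == 0)
		return (0);		/* not an IDENTIFY at all */
	if (msgin & 0x70)
		return (0);		/* garbled: bits 6-4 must be zero */
	return (1);			/* valid; LUN is in the low bits */
}
#endif	/* FAS_EXAMPLE_SKETCH */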
5760
5761 /*
5762 * phase handlers that are rarely used
5763 */
5764 static int
5765 fas_handle_cmd_start(struct fas *fas)
5766 {
5767 struct fas_cmd *sp = fas->f_current_sp;
5768 volatile uchar_t *tp = fas->f_cmdarea;
5769 int i;
5770 int amt = sp->cmd_cdblen;
5771
5772 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_START,
5773 "fas_handle_cmd_start_start");
5774 EPRINTF("fas_handle_cmd: send cmd\n");
5775
5776 for (i = 0; i < amt; i++) {
5777 *tp++ = sp->cmd_cdbp[i];
5778 }
5779 fas_reg_cmd_write(fas, CMD_FLUSH);
5780
5781 FAS_DMA_READ(fas, amt, fas->f_dmacookie.dmac_address, amt,
5782 CMD_TRAN_INFO|CMD_DMA);
5783 fas->f_lastcount = amt;
5784
5785 New_state(fas, ACTS_CMD_DONE);
5786
5787 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_START_END,
5788 "fas_handle_cmd_start_end");
5789 return (ACTION_RETURN);
5790 }
5791
5792 static int
5793 fas_handle_cmd_done(struct fas *fas)
5794 {
5795 struct fas_cmd *sp = fas->f_current_sp;
5796 uchar_t intr = fas->f_intr;
5797 volatile struct dma *dmar = fas->f_dma;
5798
5799 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_START,
5800 "fas_handle_cmd_done_start");
5801 EPRINTF("fas_handle_cmd_done\n");
5802
5803 /*
5804 * We should have gotten a BUS SERVICE interrupt.
5805 * If it isn't that, and it isn't a DISCONNECT
5806 * interrupt, we have a "cannot happen" situation.
5807 */
5808 if ((intr & FAS_INT_BUS) == 0) {
5809 if ((intr & FAS_INT_DISCON) == 0) {
5810 fas_printstate(fas, "cmd transmission error");
5811 TRACE_0(TR_FAC_SCSI_FAS,
5812 TR_FAS_HANDLE_CMD_DONE_ABORT1_END,
5813 "fas_handle_cmd_done_end (abort1)");
5814 return (ACTION_ABORT_CURCMD);
5815 }
5816 } else {
5817 sp->cmd_pkt->pkt_state |= STATE_SENT_CMD;
5818 }
5819
5820 fas->f_dma_csr = fas_dma_reg_read(fas, &dmar->dma_csr);
5821 FAS_FLUSH_DMA(fas);
5822
5823 New_state(fas, ACTS_UNKNOWN);
5824 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_CMD_DONE_END,
5825 "fas_handle_cmd_done_end");
5826 return (fas_handle_unknown(fas));
5827 }
5828
5829 /*
5830 * Begin to send a message out
5831 */
5832 static int
5833 fas_handle_msg_out_start(struct fas *fas)
5834 {
5835 struct fas_cmd *sp = fas->f_current_sp;
5836 uchar_t *msgout = fas->f_cur_msgout;
5837 uchar_t amt = fas->f_omsglen;
5838
5839 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_START,
5840 "fas_handle_msg_out_start");
5841 EPRINTF("fas_handle_msg_out_start\n");
5842
5843 /*
5844 * Check to make *sure* that we are really
5845 * in MESSAGE OUT phase. If the last state
5846 * was ACTS_MSG_OUT_DONE, then we are trying
5847 * to resend a message that the target stated
5848 * had a parity error in it.
5849 *
5850 * If this is the case, mark the completion reason as CMD_NOMSGOUT.
5851 * XXX: Right now, we just *drive* on. Should we abort the command?
5852 */
5853 if ((fas->f_stat & FAS_PHASE_MASK) != FAS_PHASE_MSG_OUT &&
5854 fas->f_laststate == ACTS_MSG_OUT_DONE) {
5855 fas_log(fas, CE_WARN,
5856 "Target %d refused message resend", Tgt(sp));
5857 fas_set_pkt_reason(fas, sp, CMD_NOMSGOUT, 0);
5858 New_state(fas, ACTS_UNKNOWN);
5859 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_PHASEMANAGE_END,
5860 "fas_handle_msg_out_end (ACTION_PHASEMANAGE)");
5861 return (ACTION_PHASEMANAGE);
5862 }
5863
5864 /*
5865 * Clean the fifo.
5866 */
5867 fas_reg_cmd_write(fas, CMD_FLUSH);
5868
5869 if (amt == 0) {
5870 /*
5871 * no msg to send
5872 */
5873 *msgout = MSG_NOP;
5874 amt = fas->f_omsglen = 1;
5875 }
5876
5877 /*
5878 * If msg only 1 byte, just dump it in the fifo and go. For
5879 * multi-byte msgs, dma would save time (but see the fas366
5880 * workaround below). If no msg to send in msg out phase, send a NOP.
5881 */
5882 fas->f_last_msgout = *msgout;
5883
5884 /*
5885 * There is a bug in the fas366 that occasionally
5886 * deasserts the ATN signal prematurely when we send
5887 * the sync/wide negotiation bytes out using DMA. The
5888 * workaround here is to send the negotiation bytes out
5889 * using PIO
5890 */
5891 fas_write_fifo(fas, msgout, fas->f_omsglen, 1);
5892 fas_reg_cmd_write(fas, CMD_TRAN_INFO);
5893
5894 EPRINTF2("amt=%x, last_msgout=%x\n", amt, fas->f_last_msgout);
5895
5896 New_state(fas, ACTS_MSG_OUT_DONE);
5897 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_END,
5898 "fas_handle_msg_out_end");
5899 return (ACTION_RETURN);
5900 }
5901
5902 static int
5903 fas_handle_msg_out_done(struct fas *fas)
5904 {
5905 struct fas_cmd *sp = fas->f_current_sp;
5906 uchar_t msgout, phase;
5907 int target = Tgt(sp);
5908 int amt = fas->f_omsglen;
5909 int action;
5910
5911 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_START,
5912 "fas_handle_msg_out_done_start");
5913 msgout = fas->f_cur_msgout[0];
5914 if ((msgout == MSG_HEAD_QTAG) || (msgout == MSG_SIMPLE_QTAG)) {
5915 msgout = fas->f_cur_msgout[2];
5916 }
5917 EPRINTF4("msgout: %x %x %x, last_msgout=%x\n",
5918 fas->f_cur_msgout[0], fas->f_cur_msgout[1],
5919 fas->f_cur_msgout[2], fas->f_last_msgout);
5920
5921 EPRINTF1("fas_handle_msgout_done: msgout=%x\n", msgout);
5922
5923 /*
5924 * flush fifo, just in case some bytes were not sent
5925 */
5926 fas_reg_cmd_write(fas, CMD_FLUSH);
5927
5928 /*
5929 * If the FAS disconnected, then the message we sent caused
5930 * the target to decide to drop BSY* and clear the bus.
5931 */
5932 if (fas->f_intr == FAS_INT_DISCON) {
5933 if (msgout == MSG_DEVICE_RESET || msgout == MSG_ABORT ||
5934 msgout == MSG_ABORT_TAG) {
5935 /*
5936 * If we sent a device reset msg, then we need to do
5937 * a synch negotiate again unless we have already
5938 * inhibited synch.
5939 */
5940 if (msgout == MSG_ABORT || msgout == MSG_ABORT_TAG) {
5941 fas->f_abort_msg_sent++;
5942 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
5943 fas_set_pkt_reason(fas, sp,
5944 CMD_ABORTED, STAT_ABORTED);
5945 }
5946 } else if (msgout == MSG_DEVICE_RESET) {
5947 fas->f_reset_msg_sent++;
5948 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
5949 fas_set_pkt_reason(fas, sp,
5950 CMD_RESET, STAT_DEV_RESET);
5951 }
5952 fas_force_renegotiation(fas, Tgt(sp));
5953 }
5954 EPRINTF2("Successful %s message to target %d\n",
5955 scsi_mname(msgout), target);
5956
5957 if (sp->cmd_flags & CFLAG_CMDPROXY) {
5958 sp->cmd_cdb[FAS_PROXY_RESULT] = TRUE;
5959 }
5960 TRACE_0(TR_FAC_SCSI_FAS,
5961 TR_FAS_HANDLE_MSG_OUT_DONE_FINISH_END,
5962 "fas_handle_msg_out_done_end (ACTION_FINISH)");
5963 return (ACTION_FINISH);
5964 }
5965 /*
5966 * If the target dropped busy on any other message, it
5967 * wasn't expected. We will let the code in fas_phasemanage()
5968 * handle this unexpected bus free event.
5969 */
5970 goto out;
5971 }
5972
5973 /*
5974 * What phase have we transitioned to?
5975 */
5976 phase = fas->f_stat & FAS_PHASE_MASK;
5977
5978 /*
5979 * If we finish sending a message out, and we are
5980 * still in message out phase, then the target has
5981 * detected one or more parity errors in the message
5982 * we just sent and it is asking us to resend the
5983 * previous message.
5984 */
5985 if ((fas->f_intr & FAS_INT_BUS) && phase == FAS_PHASE_MSG_OUT) {
5986 /*
5987 * As per SCSI-2 specification, if the message to
5988 * be re-sent is greater than one byte, then we
5989 * have to set ATN*.
5990 */
5991 if (amt > 1) {
5992 fas_assert_atn(fas);
5993 }
5994 fas_log(fas, CE_WARN,
5995 "SCSI bus MESSAGE OUT phase parity error");
5996 sp->cmd_pkt->pkt_statistics |= STAT_PERR;
5997 New_state(fas, ACTS_MSG_OUT);
5998 TRACE_0(TR_FAC_SCSI_FAS,
5999 TR_FAS_HANDLE_MSG_OUT_DONE_PHASEMANAGE_END,
6000 "fas_handle_msg_out_done_end (ACTION_PHASEMANAGE)");
6001 return (ACTION_PHASEMANAGE);
6002 }
6003
6004
6005 out:
6006 fas->f_last_msgout = msgout;
6007 fas->f_omsglen = 0;
6008 New_state(fas, ACTS_UNKNOWN);
6009 action = fas_handle_unknown(fas);
6010 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_HANDLE_MSG_OUT_DONE_END,
6011 "fas_handle_msg_out_done_end");
6012 return (action);
6013 }
6014
6015 static int
6016 fas_twobyte_msg(struct fas *fas)
6017 {
6018 struct fas_cmd *sp = fas->f_current_sp;
6019
6020 if ((fas->f_imsgarea[0] == MSG_IGNORE_WIDE_RESID) &&
6021 (fas->f_imsgarea[1] == 1)) {
6022 int xfer_amt;
6023
6024 /*
6025 * Knock off one byte if there was
6026 * a last transfer and it was an even number of bytes
6027 */
6028 xfer_amt = sp->cmd_data_count - sp->cmd_saved_data_count;
6029 if (xfer_amt && (!(xfer_amt & 1))) {
6030 ASSERT(sp->cmd_data_count > 0);
6031 sp->cmd_data_count--;
6032 sp->cmd_cur_addr--;
6033 }
6034 IPRINTF1("ignore wide resid %d\n", fas->f_imsgarea[1]);
6035 New_state(fas, ACTS_UNKNOWN);
6036 return (0);
6037 }
6038
6039 fas_log(fas, CE_WARN,
6040 "Two byte message '%s' 0x%x rejected",
6041 scsi_mname(fas->f_imsgarea[0]), fas->f_imsgarea[1]);
6042 return (MSG_REJECT);
6043 }
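#ifdef FAS_EXAMPLE_SKETCH	/* hypothetical guard; sketches are not compiled */
/*
 * Sketch of the IGNORE WIDE RESIDUE fixup above: on a 16-bit bus the
 * chip moves an even number of bytes, so when the target reports one
 * residual byte and the transfer since the last save point came out
 * even, the last byte was a pad and the counters back up by one.
 * The struct is an illustrative stand-in for the fas_cmd fields used.
 */
struct example_counts {
	uint_t	data_count;		/* bytes moved so far */
	uint_t	saved_data_count;	/* count at last SAVE DATA POINTER */
	uint_t	cur_addr;		/* current transfer address */
};

static void
example_ignore_wide_residue(struct example_counts *c)
{
	uint_t xfer_amt = c->data_count - c->saved_data_count;

	if (xfer_amt && ((xfer_amt & 1) == 0)) {
		c->data_count--;	/* discard the pad byte */
		c->cur_addr--;
	}
}
#endif	/* FAS_EXAMPLE_SKETCH */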
6044
6045 /*
6046 * handle receiving extended messages
6047 */
6048 static int
6049 fas_multibyte_msg(struct fas *fas)
6050 {
6051 #ifdef FASDEBUG
6052 static char *mbs =
6053 "Target %d now Synchronous at %d.%d MB/s max transmit rate\n";
6054 static char *mbs1 =
6055 "Target %d now Synchronous at %d.0%d MB/s max transmit rate\n";
6056 static char *mbs2 =
6057 "Target %d now Synchronous at %d.00%d MB/s max transmit rate\n";
6058 #endif
6059 struct fas_cmd *sp = fas->f_current_sp;
6060 volatile struct fasreg *fasreg = fas->f_reg;
6061 uchar_t emsg = fas->f_imsgarea[2];
6062 int tgt = Tgt(sp);
6063 int msgout = 0;
6064
6065 EPRINTF("fas_multibyte_msg:\n");
6066
6067 if (emsg == MSG_SYNCHRONOUS) {
6068 uint_t period, offset, regval;
6069 uint_t minsync, maxsync, clockval;
6070 uint_t xfer_freq, xfer_div, xfer_mod, xfer_rate;
6071
6072 period = fas->f_imsgarea[3] & 0xff;
6073 offset = fas->f_imsgarea[4] & 0xff;
6074 minsync = MIN_SYNC_PERIOD(fas);
6075 maxsync = MAX_SYNC_PERIOD(fas);
6076 DPRINTF5("sync msg received: %x %x %x %x %x\n",
6077 fas->f_imsgarea[0], fas->f_imsgarea[1],
6078 fas->f_imsgarea[2], fas->f_imsgarea[3],
6079 fas->f_imsgarea[4]);
6080 DPRINTF3("received period %d offset %d from tgt %d\n",
6081 period, offset, tgt);
6082 DPRINTF3("calculated minsync %d, maxsync %d for tgt %d\n",
6083 minsync, maxsync, tgt);
6084 DPRINTF2("sync period %d, neg period %d\n",
6085 fas->f_sync_period[tgt], fas->f_neg_period[tgt]);
6086
6087 if ((++(fas->f_sdtr_sent)) & 1) {
6088 /*
6089 * In cases where the target negotiates synchronous
6090 * mode before we do, and we either have sync mode
6091 * disabled, or this target is known to be a weak
6092 * signal target, we send back a message indicating
6093 * a desire to stay in asynchronous mode (the SCSI-2
6094 * spec states that if we have synchronous capability
6095 * then we cannot reject a SYNCHRONOUS DATA TRANSFER
6096 * REQUEST message).
6097 */
6098 IPRINTF1("SYNC negotiation initiated by target %d\n",
6099 tgt);
6100
6101 msgout = MSG_EXTENDED;
6102
6103 period =
6104 period ? max(period, MIN_SYNC_PERIOD(fas)) : 0;
6105
6106 if (fas->f_backoff & (1<<tgt)) {
6107 period = period ?
6108 max(period, fas->f_neg_period[tgt]) : 0;
6109 }
6110 offset = min(offset, fas_default_offset);
6111 }
6112 xfer_freq = regval = 0;
6113
6114 /*
6115 * If the target's offset is bigger than ours,
6116 * the target has violated the scsi protocol.
6117 */
6118 if (offset > fas_default_offset) {
6119 period = offset = 0;
6120 msgout = MSG_REJECT;
6121 }
6122
6123 if (offset && (period > maxsync)) {
6124 /*
6125 * We cannot transmit data in synchronous
6126 * mode this slow, so convert to asynchronous
6127 * mode.
6128 */
6129 msgout = MSG_EXTENDED;
6130 period = offset = 0;
6131
6132 } else if (offset && (period < minsync)) {
6133 /*
6134 * If the target's period is less than ours,
6135 * the target has violated the scsi protocol.
6136 */
6137 period = offset = 0;
6138 msgout = MSG_REJECT;
6139
6140 } else if (offset) {
6141 /*
6142 * Conversion method for received PERIOD value
6143 * to the number of input clock ticks to the FAS.
6144 *
6145 * We adjust the input period value such that
6146 * we always will transmit data *not* faster
6147 * than the period value received.
6148 */
6149
6150 clockval = fas->f_clock_cycle / 1000;
6151 regval = (((period << 2) + clockval - 1) / clockval);
6152
6153 /*
6154 * correction if xfer rate <= 5MB/sec
6155 * XXX do we need this?
6156 */
6157 if (regval && (period >= FASTSCSI_THRESHOLD)) {
6158 regval--;
6159 }
6160 }
6161
6162 fas->f_offset[tgt] = offset;
6163 fas->f_neg_period[tgt] = period;
6164
6165 /*
6166 * It is now safe to produce a response to a target-
6167 * initiated sdtr; period and offset have been checked.
6168 */
6169 if (msgout == MSG_EXTENDED) {
6170 fas_make_sdtr(fas, 0, tgt);
6171 period = fas->f_neg_period[tgt];
6172 offset = (fas->f_offset[tgt] & 0xf);
6173 }
6174
6175 if (offset) {
6176 fas->f_sync_period[tgt] = regval & SYNC_PERIOD_MASK;
6177 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period,
6178 fas->f_sync_period[tgt]);
6179
6180 fas->f_offset[tgt] = offset | fas->f_req_ack_delay;
6181 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset,
6182 fas->f_offset[tgt]);
6183
6184 /*
6185 * if transferring > 5 MB/sec then enable
6186 * fastscsi in conf3
6187 */
6188 if (period < FASTSCSI_THRESHOLD) {
6189 fas->f_fasconf3[tgt] |= FAS_CONF3_FASTSCSI;
6190 } else {
6191 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
6192 }
6193
6194 fas_reg_write(fas, (uchar_t *)&fasreg->fas_conf3,
6195 fas->f_fasconf3[tgt]);
6196
6197 DPRINTF4("period %d (%d), offset %d to tgt %d\n",
6198 period,
6199 fas->f_sync_period[tgt] & SYNC_PERIOD_MASK,
6200 fas->f_offset[tgt] & 0xf, tgt);
6201 DPRINTF1("req/ack delay = %x\n", fas->f_req_ack_delay);
6202 DPRINTF1("conf3 = %x\n", fas->f_fasconf3[tgt]);
6203 #ifdef FASDEBUG
6204 /*
6205 * Convert input clock cycle per
6206 * byte to nanoseconds per byte.
6207 * (ns/b), and convert that to
6208 * k-bytes/second.
6209 */
6210 xfer_freq = FAS_SYNC_KBPS((regval *
6211 fas->f_clock_cycle) / 1000);
6212 xfer_rate = ((fas->f_nowide & (1<<tgt))? 1 : 2) *
6213 xfer_freq;
6214 xfer_div = xfer_rate / 1000;
6215 xfer_mod = xfer_rate % 1000;
6216
6217
6218 if (xfer_mod > 99) {
6219 IPRINTF3(mbs, tgt, xfer_div, xfer_mod);
6220 } else if (xfer_mod > 9) {
6221 IPRINTF3(mbs1, tgt, xfer_div, xfer_mod);
6222 } else {
6223 IPRINTF3(mbs2, tgt, xfer_div, xfer_mod);
6224 }
6225 #endif
6226 fas->f_sync_enabled |= (1<<tgt);
6227
6228 } else {
6229 /*
6230 * We are converting back to async mode.
6231 */
6232 fas_revert_to_async(fas, tgt);
6233 }
6234
6235 /*
6236 * If this target violated the scsi spec, reject the
6237 * sdtr msg and don't negotiate sdtr again.
6238 */
6239 if (msgout == MSG_REJECT) {
6240 fas->f_nosync |= (1<<tgt);
6241 }
6242
6243 fas->f_props_update |= (1<<tgt);
6244
6245 } else if (emsg == MSG_WIDE_DATA_XFER) {
6246 uchar_t width = fas->f_imsgarea[3] & 0xff;
6247
6248 DPRINTF4("wide msg received: %x %x %x %x\n",
6249 fas->f_imsgarea[0], fas->f_imsgarea[1],
6250 fas->f_imsgarea[2], fas->f_imsgarea[3]);
6251
6252 /* always renegotiate sync after wide */
6253 msgout = MSG_EXTENDED;
6254
6255 if ((++(fas->f_wdtr_sent)) & 1) {
6256 IPRINTF1("Wide negotiation initiated by target %d\n",
6257 tgt);
6258 /*
6259 * allow wide neg even if the target driver hasn't
6260 * enabled wide yet.
6261 */
6262 fas->f_nowide &= ~(1<<tgt);
6263 fas_make_wdtr(fas, 0, tgt, width);
6264 IPRINTF1("sending wide sync %d back\n", width);
6265 /*
6266 * Let us go back to async mode(SCSI spec)
6267 * and depend on target to do sync
6268 * after wide negotiations.
6269 * If target does not do a sync neg and enters
6270 * async mode we will negotiate sync on next command
6271 */
6272 fas_revert_to_async(fas, tgt);
6273 fas->f_sync_known &= ~(1<<tgt);
6274 } else {
6275 /*
6276 * renegotiate sync after wide
6277 */
6278 fas_set_wide_conf3(fas, tgt, width);
6279 ASSERT(width <= 1);
6280 fas->f_wdtr_sent = 0;
6281 if ((fas->f_nosync & (1<<tgt)) == 0) {
6282 fas_make_sdtr(fas, 0, tgt);
6283 } else {
6284 msgout = 0;
6285 }
6286 }
6287
6288 fas->f_props_update |= (1<<tgt);
6289
6290 } else if (emsg == MSG_MODIFY_DATA_PTR) {
6291 msgout = MSG_REJECT;
6292 } else {
6293 fas_log(fas, CE_WARN,
6294 "Rejecting message %s 0x%x from Target %d",
6295 scsi_mname(MSG_EXTENDED), emsg, tgt);
6296 msgout = MSG_REJECT;
6297 }
6298 out:
6299 New_state(fas, ACTS_UNKNOWN);
6300 return (msgout);
6301 }
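#ifdef FAS_EXAMPLE_SKETCH	/* hypothetical guard; sketches are not compiled */
/*
 * Sketch of the SDTR period conversion above: the period byte is in
 * units of 4ns and is rounded *up* to whole input-clock ticks so we
 * never transmit faster than the target requested.  clock_cycle_ps is
 * assumed to be the chip input clock period in picoseconds, matching
 * the "/ 1000" scaling in fas_multibyte_msg(); the additional
 * correction for rates at or below 5MB/s is left out here.
 *
 * E.g. a period byte of 25 (100ns) on a 40MHz clock (25000ps) gives
 * clockval = 25 and (100 + 24) / 25 = 4 ticks.
 */
static uint_t
example_sdtr_to_ticks(uint_t period_4ns, uint_t clock_cycle_ps)
{
	uint_t clockval = clock_cycle_ps / 1000;	/* ns per tick */
	uint_t period_ns = period_4ns << 2;		/* 4ns units -> ns */

	return ((period_ns + clockval - 1) / clockval);	/* round up */
}
#endif	/* FAS_EXAMPLE_SKETCH */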
6302
6303 /*
6304 * Back off sync negotiation
6305 * and go to async mode
6306 */
6307 static void
6308 fas_revert_to_async(struct fas *fas, int tgt)
6309 {
6310 volatile struct fasreg *fasreg = fas->f_reg;
6311
6312 fas->f_sync_period[tgt] = 0;
6313 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_period, 0);
6314 fas->f_offset[tgt] = 0;
6315 fas_reg_write(fas, (uchar_t *)&fasreg->fas_sync_offset, 0);
6316 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
6317 fas_reg_write(fas, &fasreg->fas_conf3, fas->f_fasconf3[tgt]);
6318 fas->f_sync_enabled &= ~(1<<tgt);
6319 }
6320
6321 /*
6322 * handle an unexpected selection attempt
6323 * XXX look for better way: msg reject, drop off the bus
6324 */
6325 static int
6326 fas_handle_selection(struct fas *fas)
6327 {
6328 fas_reg_cmd_write(fas, CMD_DISCONNECT);
6329 fas_reg_cmd_write(fas, CMD_FLUSH);
6330 fas_reg_cmd_write(fas, CMD_EN_RESEL);
6331 return (ACTION_RETURN);
6332 }
6333
6334 /*
6335 * dma window handling
6336 */
6337 static int
6338 fas_restore_pointers(struct fas *fas, struct fas_cmd *sp)
6339 {
6340 if (sp->cmd_data_count != sp->cmd_saved_data_count) {
6341 sp->cmd_data_count = sp->cmd_saved_data_count;
6342 sp->cmd_cur_addr = sp->cmd_saved_cur_addr;
6343
6344 if (sp->cmd_cur_win != sp->cmd_saved_win) {
6345 sp->cmd_cur_win = sp->cmd_saved_win;
6346 if (fas_set_new_window(fas, sp)) {
6347 return (-1);
6348 }
6349 }
6350 DPRINTF1("curaddr=%x\n", sp->cmd_cur_addr);
6351 }
6352 return (0);
6353 }
6354
6355 static int
6356 fas_set_new_window(struct fas *fas, struct fas_cmd *sp)
6357 {
6358 off_t offset;
6359 size_t len;
6360 uint_t count;
6361
6362 if (ddi_dma_getwin(sp->cmd_dmahandle, sp->cmd_cur_win,
6363 &offset, &len, &sp->cmd_dmacookie, &count) != DDI_SUCCESS) {
6364 return (-1);
6365 }
6366
6367 DPRINTF4("new window %x: off=%lx, len=%lx, count=%x\n",
6368 sp->cmd_cur_win, offset, len, count);
6369
6370 ASSERT(count == 1);
6371 return (0);
6372 }
6373
6374 static int
6375 fas_next_window(struct fas *fas, struct fas_cmd *sp, uint64_t end)
6376 {
6377
6378 /* are there more windows? */
6379 if (sp->cmd_nwin == 0) {
6380 uint_t nwin = 0;
6381 (void) ddi_dma_numwin(sp->cmd_dmahandle, &nwin);
6382 sp->cmd_nwin = (uchar_t)nwin;
6383 }
6384
6385 DPRINTF5(
6386 "cmd_data_count=%x, dmacount=%x, curaddr=%x, end=%lx, nwin=%x\n",
6387 sp->cmd_data_count, sp->cmd_dmacount, sp->cmd_cur_addr, end,
6388 sp->cmd_nwin);
6389
6390 if (sp->cmd_cur_win < sp->cmd_nwin) {
6391 sp->cmd_cur_win++;
6392 if (fas_set_new_window(fas, sp)) {
6393 fas_printstate(fas, "cannot set new window");
6394 sp->cmd_cur_win--;
6395 return (-1);
6396 }
6397 /*
6398 * if there are no more windows, we have a data overrun condition
6399 */
6400 } else {
6401 int slot = sp->cmd_slot;
6402
6403 fas_printstate(fas, "data transfer overrun");
6404 fas_set_pkt_reason(fas, sp, CMD_DATA_OVR, 0);
6405
6406 /*
6407 * if we get data transfer overruns, assume we have
6408 * a weak scsi bus. Note that this won't catch consistent
6409 * underruns or other noise related syndromes.
6410 */
6411 fas_sync_wide_backoff(fas, sp, slot);
6412 return (-1);
6413 }
6414 sp->cmd_cur_addr = sp->cmd_dmacookie.dmac_address;
6415 DPRINTF1("cur_addr=%x\n", sp->cmd_cur_addr);
6416 return (0);
6417 }
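#ifdef FAS_EXAMPLE_SKETCH	/* hypothetical guard; sketches are not compiled */
/*
 * Sketch of the partial-DMA window walk used by the two routines above:
 * when a request does not fit in one mapping, the DDI splits it into
 * ddi_dma_numwin() windows and ddi_dma_getwin() activates one at a
 * time.  The one-cookie-per-window assumption matches the
 * ASSERT(count == 1) in fas_set_new_window(); error handling is elided.
 */
static int
example_walk_windows(ddi_dma_handle_t h)
{
	uint_t nwin, win, ccount;
	off_t off;
	size_t len;
	ddi_dma_cookie_t cookie;

	(void) ddi_dma_numwin(h, &nwin);
	for (win = 0; win < nwin; win++) {
		if (ddi_dma_getwin(h, win, &off, &len, &cookie,
		    &ccount) != DDI_SUCCESS) {
			return (-1);
		}
		/* program the HBA with cookie.dmac_address for len bytes */
	}
	return (0);
}
#endif	/* FAS_EXAMPLE_SKETCH */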
6418
6419 /*
6420 * dma error handler
6421 */
6422 static int
6423 fas_check_dma_error(struct fas *fas)
6424 {
6425 /*
6426 * was there a dma error that caused fas_intr_svc() to be called?
6427 */
6428 if (fas->f_dma->dma_csr & DMA_ERRPEND) {
6429 /*
6430 * It would be desirable to set the ATN* line and attempt to
6431 * do the whole schmear of INITIATOR DETECTED ERROR here,
6432 * but that is too hard to do at present.
6433 */
6434 fas_log(fas, CE_WARN, "Unrecoverable DMA error");
6435 fas_printstate(fas, "dma error");
6436 fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0);
6437 return (-1);
6438 }
6439 return (0);
6440 }
6441
6442 /*
6443 * check for gross error or spurious interrupt
6444 */
6445 static int
6446 fas_handle_gross_err(struct fas *fas)
6447 {
6448 volatile struct fasreg *fasreg = fas->f_reg;
6449
6450 fas_log(fas, CE_WARN,
6451 "gross error in fas status (%x)", fas->f_stat);
6452
6453 IPRINTF5("fas_cmd=%x, stat=%x, intr=%x, step=%x, fifoflag=%x\n",
6454 fasreg->fas_cmd, fas->f_stat, fas->f_intr, fasreg->fas_step,
6455 fasreg->fas_fifo_flag);
6456
6457 fas_set_pkt_reason(fas, fas->f_current_sp, CMD_TRAN_ERR, 0);
6458
6459 fas_internal_reset(fas, FAS_RESET_FAS);
6460 return (ACTION_RESET);
6461 }
6462
6463
6464 /*
6465 * handle illegal cmd interrupt or (external) bus reset cleanup
6466 */
6467 static int
6468 fas_illegal_cmd_or_bus_reset(struct fas *fas)
6469 {
6470 /*
6471 * If we detect a SCSI reset, we blow away the current
6472 * command (if there is one) and all disconnected commands
6473 * because we now don't know the state of them at all.
6474 */
6475 ASSERT(fas->f_intr & (FAS_INT_ILLEGAL | FAS_INT_RESET));
6476
6477 if (fas->f_intr & FAS_INT_RESET) {
6478 return (ACTION_FINRST);
6479 }
6480
6481 /*
6482 * Illegal cmd to fas:
6483 * This should not happen. The one situation where
6484 * we can get an ILLEGAL COMMAND interrupt is due to
6485 * a bug in the FAS366 during reselection which we
6486 * should be handling in fas_reconnect().
6487 */
6488 if (fas->f_intr & FAS_INT_ILLEGAL) {
6489 IPRINTF1("lastcmd=%x\n", fas->f_reg->fas_cmd);
6490 fas_printstate(fas, "ILLEGAL bit set");
6491 return (ACTION_RESET);
6492 }
6493 /*NOTREACHED*/
6494 return (ACTION_RETURN);
6495 }
6496
6497 /*
6498 * set throttles for all luns of this target
6499 */
6500 static void
6501 fas_set_throttles(struct fas *fas, int slot, int n, int what)
6502 {
6503 int i;
6504
6505 /*
6506 * if the bus is draining/quiesced, no changes to the throttles
6507 * are allowed. Not allowing change of throttles during draining
6508 * limits error recovery but will reduce draining time
6509 *
6510 * all throttles should have been set to HOLD_THROTTLE
6511 */
6512 if (fas->f_softstate & (FAS_SS_QUIESCED | FAS_SS_DRAINING)) {
6513 return;
6514 }
6515
6516 ASSERT((n == 1) || (n == N_SLOTS) || (n == NLUNS_PER_TARGET));
6517 ASSERT((slot + n) <= N_SLOTS);
6518 if (n == NLUNS_PER_TARGET) {
6519 slot &= ~(NLUNS_PER_TARGET - 1);
6520 }
6521
6522 for (i = slot; i < (slot + n); i++) {
6523 if (what == HOLD_THROTTLE) {
6524 fas->f_throttle[i] = HOLD_THROTTLE;
6525 } else if ((fas->f_reset_delay[i/NLUNS_PER_TARGET]) == 0) {
6526 if (what == MAX_THROTTLE) {
6527 int tshift = 1 << (i/NLUNS_PER_TARGET);
6528 fas->f_throttle[i] = (short)
6529 ((fas->f_notag & tshift)? 1 : what);
6530 } else {
6531 fas->f_throttle[i] = what;
6532 }
6533 }
6534 }
6535 }
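#ifdef FAS_EXAMPLE_SKETCH	/* hypothetical guard; sketches are not compiled */
/*
 * Sketch of the slot arithmetic used throughout: a slot encodes
 * (target, lun) as target * NLUNS_PER_TARGET + lun.  Because
 * NLUNS_PER_TARGET is a power of two, masking the low bits (as
 * fas_set_throttles() does when n == NLUNS_PER_TARGET) rounds a slot
 * down to LUN 0 of the same target.
 */
static int
example_slot_to_lun0(int slot, int nluns_per_target)
{
	/* target = slot / nluns_per_target, lun = slot % nluns_per_target */
	return (slot & ~(nluns_per_target - 1));
}
#endif	/* FAS_EXAMPLE_SKETCH */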
6536
6537 static void
6538 fas_set_all_lun_throttles(struct fas *fas, int slot, int what)
6539 {
6540 /*
6541 * fas_set_throttle will adjust slot to starting at LUN 0
6542 */
6543 fas_set_throttles(fas, slot, NLUNS_PER_TARGET, what);
6544 }
6545
6546 static void
6547 fas_full_throttle(struct fas *fas, int slot)
6548 {
6549 fas_set_throttles(fas, slot, 1, MAX_THROTTLE);
6550 }
6551
6552 /*
6553 * run a polled cmd
6554 */
6555 static void
6556 fas_runpoll(struct fas *fas, short slot, struct fas_cmd *sp)
6557 {
6558 int limit, i, n;
6559 int timeout = 0;
6560
6561 DPRINTF4("runpoll: slot=%x, cmd=%x, current_sp=0x%p, tcmds=%x\n",
6562 slot, *((uchar_t *)sp->cmd_pkt->pkt_cdbp),
6563 (void *)fas->f_current_sp, fas->f_tcmds[slot]);
6564
6565 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_START, "fas_runpoll_start");
6566
6567 /*
6568 * wait for cmd to complete
6569 * don't start new cmds so set throttles to HOLD_THROTTLE
6570 */
6571 while ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {
6572 if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
6573 fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE);
6574 }
6575 if ((fas->f_state != STATE_FREE) || INTPENDING(fas)) {
6576 if (fas_dopoll(fas, POLL_TIMEOUT) <= 0) {
6577 IPRINTF("runpoll: timeout on draining\n");
6578 goto bad;
6579 }
6580 }
6581
6582 ASSERT(fas->f_state == STATE_FREE);
6583 ASSERT(fas->f_current_sp == NULL);
6584
6585 /*
6586 * if this is not a proxy cmd, don't start the cmd
6587 * without draining the active cmd(s)
6588 * for proxy cmds, we zap the active cmd and assume
6589 * that the caller will take care of this
6590 * For tagged cmds, wait with submitting a non-tagged
6591 * cmd until the queue has been drained
6592 * If the cmd is a request sense, then draining won't
6593 * help since we are in a contingent allegiance condition
6594 */
6595 if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
6596 uchar_t *cmdp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
6597
6598 if ((fas->f_tcmds[slot]) &&
6599 (NOTAG(Tgt(sp)) ||
6600 (((sp->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
6601 (*cmdp != SCMD_REQUEST_SENSE)))) {
6602 if (timeout < POLL_TIMEOUT) {
6603 timeout += 100;
6604 drv_usecwait(100);
6605 continue;
6606 } else {
6607 fas_log(fas, CE_WARN,
6608 "polled cmd failed (target busy)");
6609 goto cleanup;
6610 }
6611 }
6612 }
6613
6614 /*
6615 * If the draining of active commands killed the
6616 * current polled command, we're done.
6617 */
6618 if (sp->cmd_flags & CFLAG_COMPLETED) {
6619 break;
6620 }
6621
6622 /*
6623 * ensure we are not accessing a target too quickly
6624 * after a reset. the throttles get set back later
6625 * by the reset delay watch; hopefully, we don't go
6626 * thru this loop more than once
6627 */
6628 if (fas->f_reset_delay[slot/NLUNS_PER_TARGET]) {
6629 IPRINTF1("reset delay set for slot %x\n", slot);
6630 drv_usecwait(fas->f_scsi_reset_delay * 1000);
6631 for (i = 0; i < NTARGETS_WIDE; i++) {
6632 if (fas->f_reset_delay[i]) {
6633 int s = i * NLUNS_PER_TARGET;
6634 int e = s + NLUNS_PER_TARGET;
6635 fas->f_reset_delay[i] = 0;
6636 for (; s < e; s++) {
6637 fas_full_throttle(fas, s);
6638 }
6639 }
6640 }
6641 }
6642
6643 /*
6644 * fas_startcmd() will return false if preempted
6645 * or draining
6646 */
6647 if (fas_startcmd(fas, sp) != TRUE) {
6648 IPRINTF("runpoll: cannot start new cmds\n");
6649 ASSERT(fas->f_current_sp != sp);
6650 continue;
6651 }
6652
6653 /*
6654 * We're now 'running' this command.
6655 *
6656 * fas_dopoll will always return when
6657 * fas->f_state is STATE_FREE.
6658 */
6659 limit = sp->cmd_pkt->pkt_time * 1000000;
6660 if (limit == 0) {
6661 limit = POLL_TIMEOUT;
6662 }
6663
6664 /*
6665 * if the cmd disconnected, the first call to fas_dopoll
6666 * will return with bus free; we go thru the loop one more
6667 * time and wait limit usec for the target to reconnect
6668 */
6669 for (i = 0; i <= POLL_TIMEOUT; i += 100) {
6670
6671 if ((n = fas_dopoll(fas, limit)) <= 0) {
6672 IPRINTF("runpoll: timeout on polling\n");
6673 goto bad;
6674 }
6675
6676 /*
6677 * If a preemption occurred that caused this
6678 * command to actually not start, go around
6679 * the loop again. If CFLAG_COMPLETED is set, the
6680 * command completed
6681 */
6682 if ((sp->cmd_flags & CFLAG_COMPLETED) ||
6683 (sp->cmd_pkt->pkt_state == 0)) {
6684 break;
6685 }
6686
6687 /*
6688 * the bus may have gone free because the target
6689 * disconnected; go thru the loop again
6690 */
6691 ASSERT(fas->f_state == STATE_FREE);
6692 if (n == 0) {
6693 /*
6694 * bump i, we have waited limit usecs in
6695 * fas_dopoll
6696 */
6697 i += limit - 100;
6698 }
6699 }
6700
6701 if ((sp->cmd_flags & CFLAG_COMPLETED) == 0) {
6702
6703 if (i > POLL_TIMEOUT) {
6704 IPRINTF("polled timeout on disc. cmd\n");
6705 goto bad;
6706 }
6707
6708 if (sp->cmd_pkt->pkt_state) {
6709 /*
6710 * don't go thru the loop again; the cmd
6711 * was already started
6712 */
6713 IPRINTF("fas_runpoll: cmd started??\n");
6714 goto bad;
6715 }
6716 }
6717 }
6718
6719 /*
6720 * blindly restore the throttles, which is preferable to
6721 * leaving a throttle hanging at 0 with no one to clear it
6722 */
6723 if (!(sp->cmd_flags & CFLAG_CMDPROXY)) {
6724 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
6725 }
6726
6727 /*
6728 * ensure that the cmd is completely removed
6729 */
6730 fas_remove_cmd(fas, sp, 0);
6731
6732 /*
6733 * If we stored up commands to do, start them off now.
6734 */
6735 if ((fas->f_state == STATE_FREE) &&
6736 (!(sp->cmd_flags & CFLAG_CMDPROXY))) {
6737 (void) fas_ustart(fas);
6738 }
6739 exit:
6740 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_RUNPOLL_END, "fas_runpoll_end");
6741 return;
6742
6743 bad:
6744 fas_log(fas, CE_WARN, "Polled cmd failed");
6745 #ifdef FASDEBUG
6746 fas_printstate(fas, "fas_runpoll: polled cmd failed");
6747 #endif /* FASDEBUG */
6748
6749 cleanup:
6750 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
6751
6752 /*
6753 * clean up all traces of this sp because fas_runpoll will return
6754 * before fas_reset_recovery() cleans up
6755 */
6756 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
6757 fas_decrement_ncmds(fas, sp);
6758 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
6759
6760 if ((sp->cmd_flags & CFLAG_CMDPROXY) == 0) {
6761 (void) fas_reset_bus(fas);
6762 }
6763 goto exit;
6764 }
6765
6766 /*
6767 * Poll for command completion (i.e., no interrupts)
6768 * limit is in usec (and will not be very accurate)
6769 *
6770 * the assumption is that we only run polled cmds in interrupt context
6771 * as scsi_transport will filter out FLAG_NOINTR
6772 */
6773 static int
6774 fas_dopoll(struct fas *fas, int limit)
6775 {
6776 int i, n;
6777
6778 /*
6779 * timeout is not very accurate since we don't know how
6780 * long the poll takes
6781 * also if the packet gets started fairly late, we may
6782 * timeout prematurely
6783 * fas_dopoll always returns if e_state transitions to STATE_FREE
6784 */
6785 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_START, "fas_dopoll_start");
6786
6787 if (limit == 0) {
6788 limit = POLL_TIMEOUT;
6789 }
6790
6791 for (n = i = 0; i < limit; i += 100) {
6792 if (INTPENDING(fas)) {
6793 fas->f_polled_intr = 1;
6794 n++;
6795 (void) fas_intr_svc(fas);
6796 if (fas->f_state == STATE_FREE)
6797 break;
6798 }
6799 drv_usecwait(100);
6800 }
6801
6802 if (i >= limit && fas->f_state != STATE_FREE) {
6803 fas_printstate(fas, "polled command timeout");
6804 n = -1;
6805 }
6806 TRACE_1(TR_FAC_SCSI_FAS, TR_FAS_DOPOLL_END,
6807 "fas_dopoll_end: rval %x", n);
6808 return (n);
6809 }
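#ifdef FAS_EXAMPLE_SKETCH	/* hypothetical guard; sketches are not compiled */
/*
 * The polling pattern of fas_dopoll() in miniature: busy-wait in fixed
 * 100us quanta until the condition fires or the budget is spent.
 * Because the quantum is coarse and servicing work takes an unknown
 * amount of time, the effective timeout is only approximate, which is
 * why the comment above hedges about accuracy.
 */
static int
example_poll(int (*done)(void *), void *arg, int limit_usec)
{
	int waited;

	for (waited = 0; waited < limit_usec; waited += 100) {
		if (done(arg)) {
			return (waited);	/* done; usecs waited */
		}
		drv_usecwait(100);
	}
	return (-1);				/* timed out */
}
#endif	/* FAS_EXAMPLE_SKETCH */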
6810
6811 /*
6812 * prepare a sync negotiation message
6813 */
6814 static void
6815 fas_make_sdtr(struct fas *fas, int msgout_offset, int target)
6816 {
6817 uchar_t *p = fas->f_cur_msgout + msgout_offset;
6818 ushort_t tshift = 1<<target;
6819 uchar_t period = MIN_SYNC_PERIOD(fas);
6820 uchar_t offset = fas_default_offset;
6821
6822 /*
6823 * If this target experienced a sync backoff use the
6824 * target's sync speed that was adjusted in
6825 * fas_sync_wide_backoff. For second sync backoff,
6826 * offset will be adjusted below in sanity checks.
6827 */
6828 if (fas->f_backoff & tshift) {
6829 period = fas->f_neg_period[target];
6830 }
6831
6832 /*
6833 * If this is a response to a target-initiated sdtr,
6834 * use the agreed upon values.
6835 */
6836 if (fas->f_sdtr_sent & 1) {
6837 period = fas->f_neg_period[target];
6838 offset = fas->f_offset[target];
6839 }
6840
6841 /*
6842 * If the target driver disabled
6843 * sync then make offset = 0
6844 */
6845 if (fas->f_force_async & tshift) {
6846 offset = 0;
6847 }
6848
6849 /*
6850 * sanity check of period and offset
6851 */
6852 if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_FAST) {
6853 if (period < (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4)) {
6854 period = (uchar_t)(DEFAULT_FASTSYNC_PERIOD/4);
6855 }
6856 } else if (fas->f_target_scsi_options[target] & SCSI_OPTIONS_SYNC) {
6857 if (period < (uchar_t)(DEFAULT_SYNC_PERIOD/4)) {
6858 period = (uchar_t)(DEFAULT_SYNC_PERIOD/4);
6859 }
6860 } else {
6861 fas->f_nosync |= tshift;
6862 }
6863
6864 if (fas->f_nosync & tshift) {
6865 offset = 0;
6866 }
6867
6868 if ((uchar_t)(offset & 0xf) > fas_default_offset) {
6869 offset = fas_default_offset | fas->f_req_ack_delay;
6870 }
6871
6872 fas->f_neg_period[target] = (uchar_t)period;
6873 fas->f_offset[target] = (uchar_t)offset;
6874
6875 *p++ = (uchar_t)MSG_EXTENDED;
6876 *p++ = (uchar_t)3;
6877 *p++ = (uchar_t)MSG_SYNCHRONOUS;
6878 *p++ = period;
6879 *p++ = offset & 0xf;
6880 fas->f_omsglen = 5 + msgout_offset;
6881
6882 IPRINTF2("fas_make_sdtr: period = %x, offset = %x\n",
6883 period, offset);
6884 /*
6885 * increment sdtr flag, odd value indicates that we initiated
6886 * the negotiation
6887 */
6888 fas->f_sdtr_sent++;
6889
6890 /*
6891 * the target may reject the optional sync message so
6892 * to avoid negotiating on every cmd, set sync known here.
6893 * we should not negotiate wide after sync again
6894 */
6895 fas->f_sync_known |= 1<<target;
6896 fas->f_wide_known |= 1<<target;
6897 }
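#ifdef FAS_EXAMPLE_SKETCH	/* hypothetical guard; sketches are not compiled */
/*
 * The 5-byte extended SDTR message assembled above, per SCSI-2:
 *
 *	byte 0	EXTENDED MESSAGE	(0x01)
 *	byte 1	length			(3)
 *	byte 2	SDTR			(0x01)
 *	byte 3	transfer period		(units of 4ns)
 *	byte 4	REQ/ACK offset		(0 = stay asynchronous)
 *
 * A minimal builder under those assumptions:
 */
static int
example_build_sdtr(uchar_t *p, uchar_t period, uchar_t offset)
{
	p[0] = 0x01;		/* EXTENDED MESSAGE */
	p[1] = 3;		/* extended message length */
	p[2] = 0x01;		/* SDTR */
	p[3] = period;
	p[4] = offset & 0xf;
	return (5);		/* bytes to send */
}
#endif	/* FAS_EXAMPLE_SKETCH */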
6898
6899 /*
6900 * prepare a wide negotiation message
6901 */
6902 static void
6903 fas_make_wdtr(struct fas *fas, int msgout_offset, int target, int width)
6904 {
6905 uchar_t *p = fas->f_cur_msgout + msgout_offset;
6906
6907 if (((fas->f_target_scsi_options[target] & SCSI_OPTIONS_WIDE) == 0) ||
6908 (fas->f_nowide & (1<<target))) {
6909 fas->f_nowide |= 1<<target;
6910 width = 0;
6911 }
6912 if (fas->f_force_narrow & (1<<target)) {
6913 width = 0;
6914 }
6915 width = min(FAS_XFER_WIDTH, width);
6916
6917 *p++ = (uchar_t)MSG_EXTENDED;
6918 *p++ = (uchar_t)2;
6919 *p++ = (uchar_t)MSG_WIDE_DATA_XFER;
6920 *p++ = (uchar_t)width;
6921 fas->f_omsglen = 4 + msgout_offset;
6922 IPRINTF1("fas_make_wdtr: width=%x\n", width);
6923
6924 /*
6925 * increment wdtr flag, odd value indicates that we initiated
6926 * the negotiation
6927 */
6928 fas->f_wdtr_sent++;
6929
6930 /*
6931 * the target may reject the optional wide message so
6932 * to avoid negotiating on every cmd, set wide known here
6933 */
6934 fas->f_wide_known |= 1<<target;
6935
6936 fas_set_wide_conf3(fas, target, width);
6937 }
6938
6939 /*
6940 * auto request sense support
6941 * create or destroy an auto request sense packet
6942 */
6943 static int
6944 fas_create_arq_pkt(struct fas *fas, struct scsi_address *ap)
6945 {
6946 /*
6947 * Allocate a request sense packet using get_pktiopb
6948 */
6949 struct fas_cmd *rqpktp;
6950 uchar_t slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
6951 struct buf *bp;
6952 struct arq_private_data *arq_data;
6953
6954 /*
6955 * if one exists, don't create another
6956 */
6957 if (fas->f_arq_pkt[slot] != 0) {
6958 return (0);
6959 }
6960
6961 /*
6962 * it would be nicer if we could allow the target driver
6963 * to specify the size, but this is easier and SENSE_LENGTH
6964 * is OK for most drivers.
6965 * Allocate a request sense packet.
6966 */
6967 bp = scsi_alloc_consistent_buf(ap, (struct buf *)NULL,
6968 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
6969 rqpktp = PKT2CMD(scsi_init_pkt(ap,
6970 NULL, bp, CDB_GROUP0, 1, PKT_PRIV_LEN,
6971 PKT_CONSISTENT, SLEEP_FUNC, NULL));
6972 arq_data =
6973 (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
6974 arq_data->arq_save_bp = bp;
6975
6976 RQ_MAKECOM_G0((CMD2PKT(rqpktp)),
6977 FLAG_SENSING | FLAG_HEAD | FLAG_NODISCON,
6978 (char)SCMD_REQUEST_SENSE, 0, (char)SENSE_LENGTH);
6979 rqpktp->cmd_flags |= CFLAG_CMDARQ;
6980 rqpktp->cmd_slot = slot;
6981 rqpktp->cmd_pkt->pkt_ha_private = rqpktp;
6982 fas->f_arq_pkt[slot] = rqpktp;
6983
6984 /*
6985 * we need a function ptr here so abort/reset can
6986 * defer callbacks; fas_call_pkt_comp() calls
6987 * fas_complete_arq_pkt() directly without releasing the lock
6988 * However, since we are not calling back directly thru
6989 * pkt_comp, don't check this with warlock
6990 */
6991 #ifndef __lock_lint
6992 rqpktp->cmd_pkt->pkt_comp =
6993 (void (*)(struct scsi_pkt *))fas_complete_arq_pkt;
6994 #endif
6995 return (0);
6996 }
6997
6998 static int
6999 fas_delete_arq_pkt(struct fas *fas, struct scsi_address *ap)
7000 {
7001 struct fas_cmd *rqpktp;
7002 int slot = ap->a_target * NLUNS_PER_TARGET | ap->a_lun;
7003
7004 /*
7005 * if there is still a pkt saved or no rqpkt
7006 * then we cannot deallocate or there is nothing to do
7007 */
7008 if ((rqpktp = fas->f_arq_pkt[slot]) != NULL) {
7009 struct arq_private_data *arq_data =
7010 (struct arq_private_data *)(rqpktp->cmd_pkt->pkt_private);
7011 struct buf *bp = arq_data->arq_save_bp;
7012 /*
7013 * is arq pkt in use?
7014 */
7015 if (arq_data->arq_save_sp) {
7016 return (-1);
7017 }
7018
7019 scsi_destroy_pkt(CMD2PKT(rqpktp));
7020 scsi_free_consistent_buf(bp);
7021 fas->f_arq_pkt[slot] = 0;
7022 }
7023 return (0);
7024 }
7025
7026 /*
7027 * complete an arq packet by copying over transport info and the actual
7028 * request sense data; called with mutex held from fas_call_pkt_comp()
7029 */
7030 void
7031 fas_complete_arq_pkt(struct scsi_pkt *pkt)
7032 {
7033 struct fas *fas = ADDR2FAS(&pkt->pkt_address);
7034 struct fas_cmd *sp = pkt->pkt_ha_private;
7035 struct scsi_arq_status *arqstat;
7036 struct arq_private_data *arq_data =
7037 (struct arq_private_data *)sp->cmd_pkt->pkt_private;
7038 struct fas_cmd *ssp = arq_data->arq_save_sp;
7039 struct buf *bp = arq_data->arq_save_bp;
7040 int slot = sp->cmd_slot;
7041
7042 DPRINTF1("completing arq pkt sp=0x%p\n", (void *)sp);
7043 ASSERT(sp == fas->f_arq_pkt[slot]);
7044 ASSERT(arq_data->arq_save_sp != NULL);
7045 ASSERT(ssp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
7046
7047 arqstat = (struct scsi_arq_status *)(ssp->cmd_pkt->pkt_scbp);
7048 arqstat->sts_rqpkt_status = *((struct scsi_status *)
7049 (sp->cmd_pkt->pkt_scbp));
7050 arqstat->sts_rqpkt_reason = sp->cmd_pkt->pkt_reason;
7051 arqstat->sts_rqpkt_state = sp->cmd_pkt->pkt_state;
7052 arqstat->sts_rqpkt_statistics = sp->cmd_pkt->pkt_statistics;
7053 arqstat->sts_rqpkt_resid = sp->cmd_pkt->pkt_resid;
7054 arqstat->sts_sensedata =
7055 *((struct scsi_extended_sense *)bp->b_un.b_addr);
7056 ssp->cmd_pkt->pkt_state |= STATE_ARQ_DONE;
7057 arq_data->arq_save_sp = NULL;
7058
7059 /*
7060 * ASC=0x47 is parity error
7061 */
7062 if (arqstat->sts_sensedata.es_key == KEY_ABORTED_COMMAND &&
7063 arqstat->sts_sensedata.es_add_code == 0x47) {
7064 fas_sync_wide_backoff(fas, sp, slot);
7065 }
7066
7067 fas_call_pkt_comp(fas, ssp);
7068 }
7069
7070 /*
7071 * handle check condition and start an arq packet
7072 */
7073 static int
7074 fas_handle_sts_chk(struct fas *fas, struct fas_cmd *sp)
7075 {
7076 struct fas_cmd *arqsp = fas->f_arq_pkt[sp->cmd_slot];
7077 struct arq_private_data *arq_data;
7078 struct buf *bp;
7079
7080 if ((arqsp == NULL) || (arqsp == sp) ||
7081 (sp->cmd_scblen < sizeof (struct scsi_arq_status))) {
7082 IPRINTF("no arq packet or cannot arq on arq pkt\n");
7083 fas_call_pkt_comp(fas, sp);
7084 return (0);
7085 }
7086
7087 arq_data = (struct arq_private_data *)arqsp->cmd_pkt->pkt_private;
7088 bp = arq_data->arq_save_bp;
7089
7090 ASSERT(sp->cmd_flags & CFLAG_FINISHED);
7091 ASSERT(sp != fas->f_active[sp->cmd_slot]->f_slot[sp->cmd_tag[1]]);
7092 DPRINTF3("start arq for slot=%x, arqsp=0x%p, rqpkt=0x%p\n",
7093 sp->cmd_slot, (void *)arqsp, (void *)fas->f_arq_pkt[sp->cmd_slot]);
7094 if (arq_data->arq_save_sp != NULL) {
7095 IPRINTF("auto request sense already in progress\n");
7096 goto fail;
7097 }
7098
7099 arq_data->arq_save_sp = sp;
7100
7101 bzero(bp->b_un.b_addr, sizeof (struct scsi_extended_sense));
7102
7103 /*
7104 * copy the timeout from the original packet for lack of a better
7105 * value
7106 * we could take the residue of the timeout but that could perhaps
7107 * cause premature timeouts
7108 */
7109 arqsp->cmd_pkt->pkt_time = sp->cmd_pkt->pkt_time;
7110 arqsp->cmd_flags &= ~CFLAG_TRANFLAG;
7111 ASSERT(arqsp->cmd_pkt->pkt_comp != NULL);
7112
7113 /*
7114 * make sure that auto request sense always goes out
7115 * after queue full and after throttle was set to draining
7116 */
7117 fas_full_throttle(fas, sp->cmd_slot);
7118 (void) fas_accept_pkt(fas, arqsp, NO_TRAN_BUSY);
7119 return (0);
7120
7121 fail:
7122 fas_set_pkt_reason(fas, sp, CMD_TRAN_ERR, 0);
7123 fas_log(fas, CE_WARN, "auto request sense failed\n");
7124 fas_dump_cmd(fas, sp);
7125 fas_call_pkt_comp(fas, sp);
7126 return (-1);
7127 }
7128
7129
7130 /*
7131 * handle qfull condition
7132 */
7133 static void
7134 fas_handle_qfull(struct fas *fas, struct fas_cmd *sp)
7135 {
7136 int slot = sp->cmd_slot;
7137
7138 if ((++sp->cmd_qfull_retries > fas->f_qfull_retries[Tgt(sp)]) ||
7139 (fas->f_qfull_retries[Tgt(sp)] == 0)) {
7140 /*
7141 * We have exhausted the retries on QFULL, or,
7142 * the target driver has indicated that it
7143 * wants to handle QFULL itself by setting
7144 * qfull-retries capability to 0. In either case
7145 * we want the target driver's QFULL handling
7146 * to kick in. We do this by having pkt_reason
7147 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
7148 */
7149 IPRINTF2("%d.%d: status queue full, retries over\n",
7150 Tgt(sp), Lun(sp));
7151 fas_set_all_lun_throttles(fas, slot, DRAIN_THROTTLE);
7152 fas_call_pkt_comp(fas, sp);
7153 } else {
7154 if (fas->f_reset_delay[Tgt(sp)] == 0) {
7155 fas->f_throttle[slot] =
7156 max((fas->f_tcmds[slot] - 2), 0);
7157 }
7158 IPRINTF3("%d.%d: status queue full, new throttle = %d, "
7159 "retrying\n", Tgt(sp), Lun(sp), fas->f_throttle[slot]);
7160 sp->cmd_pkt->pkt_flags |= FLAG_HEAD;
7161 sp->cmd_flags &= ~CFLAG_TRANFLAG;
7162 (void) fas_accept_pkt(fas, sp, NO_TRAN_BUSY);
7163
7164 /*
7165 * when target gives queue full status with no commands
7166 * outstanding (f_tcmds[] == 0), throttle is set to 0
7167 * (HOLD_THROTTLE), and the queue full handling starts
7168 * (see psarc/1994/313); if there are commands outstanding,
7169 * the throttle is set to (f_tcmds[] - 2)
7170 */
7171 if (fas->f_throttle[slot] == HOLD_THROTTLE) {
7172 /*
7173 * By setting throttle to QFULL_THROTTLE, we
7174 * avoid submitting new commands and in
7175 * fas_restart_cmd find out slots which need
7176 * their throttles to be cleared.
7177 */
7178 fas_set_all_lun_throttles(fas, slot, QFULL_THROTTLE);
7179 if (fas->f_restart_cmd_timeid == 0) {
7180 fas->f_restart_cmd_timeid =
7181 timeout(fas_restart_cmd, fas,
7182 fas->f_qfull_retry_interval[Tgt(sp)]);
7183 }
7184 }
7185 }
7186 }
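#ifdef FAS_EXAMPLE_SKETCH	/* hypothetical guard; sketches are not compiled */
/*
 * The queue-full throttle rule above in isolation (see psarc/1994/313):
 * with commands still outstanding, clamp the throttle to two below the
 * depth the target proved it could hold; with nothing outstanding, hold
 * the queue entirely and let the restart timer reopen it.  0 stands in
 * for HOLD_THROTTLE here.
 */
static int
example_qfull_throttle(int tcmds_outstanding)
{
	if (tcmds_outstanding == 0) {
		return (0);	/* hold; fas_restart_cmd() reopens later */
	}
	return (tcmds_outstanding > 2 ? tcmds_outstanding - 2 : 0);
}
#endif	/* FAS_EXAMPLE_SKETCH */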
7187
7188 /*
7189 * invoked from timeout() to restart qfull cmds with throttle == 0
7190 */
7191 static void
7192 fas_restart_cmd(void *fas_arg)
7193 {
7194 struct fas *fas = fas_arg;
7195 int i;
7196
7197 IPRINTF("fas_restart_cmd:\n");
7198
7199 mutex_enter(FAS_MUTEX(fas));
7200 fas->f_restart_cmd_timeid = 0;
7201
7202 for (i = 0; i < N_SLOTS; i += NLUNS_PER_TARGET) {
7203 if (fas->f_reset_delay[i/NLUNS_PER_TARGET] == 0) {
7204 if (fas->f_throttle[i] == QFULL_THROTTLE) {
7205 fas_set_all_lun_throttles(fas,
7206 i, MAX_THROTTLE);
7207 }
7208 }
7209 }
7210
7211 (void) fas_ustart(fas);
7212 mutex_exit(FAS_MUTEX(fas));
7213 }
7214
7215 /*
7216 * Timeout handling:
7217 * Command watchdog routines
7218 */
7219
7220 /*ARGSUSED*/
7221 static void
7222 fas_watch(void *arg)
7223 {
7224 struct fas *fas;
7225 ushort_t props_update = 0;
7226
7227 rw_enter(&fas_global_rwlock, RW_READER);
7228
7229 for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {
7230
7231 mutex_enter(FAS_MUTEX(fas));
7232 IPRINTF2("ncmds=%x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
7233
7234 #ifdef FAS_PIO_COUNTS
7235 if (fas->f_total_cmds) {
7236 int n = fas->f_total_cmds;
7237
7238 fas_log(fas, CE_NOTE,
7239 "total=%d, cmds=%d fas-rd=%d, fas-wrt=%d, dma-rd=%d, dma-wrt=%d\n",
7240 fas->f_total_cmds,
7241 fas->f_reg_cmds/n,
7242 fas->f_reg_reads/n, fas->f_reg_writes/n,
7243 fas->f_reg_dma_reads/n, fas->f_reg_dma_writes/n);
7244
7245 fas->f_reg_reads = fas->f_reg_writes =
7246 fas->f_reg_dma_reads = fas->f_reg_dma_writes =
7247 fas->f_reg_cmds = fas->f_total_cmds = 0;
7248 }
7249 #endif
7250 if (fas->f_ncmds) {
7251 int i;
7252 fas_watchsubr(fas);
7253
7254 /*
7255 * reset throttle. the throttle may have been
7256 * too low if queue full was caused by
7257 * another initiator
7258 * Only reset throttle if no cmd active in slot 0
7259 * (untagged cmd)
7260 */
7261 #ifdef FAS_TEST
7262 if (fas_enable_untagged) {
7263 fas_test_untagged++;
7264 }
7265 #endif
7266 for (i = 0; i < N_SLOTS; i++) {
7267 if ((fas->f_throttle[i] > HOLD_THROTTLE) &&
7268 (fas->f_active[i] &&
7269 (fas->f_active[i]->f_slot[0] == NULL))) {
7270 fas_full_throttle(fas, i);
7271 }
7272 }
7273 }
7274
7275 if (fas->f_props_update) {
7276 int i;
7277 /*
7278 * f_mutex will be released and reentered in
7279 * fas_props_update().
7280 * Hence we save fas->f_props_update now and
7281 * set it to 0, indicating that the properties have been
7282 * updated. This avoids a race condition with
7283 * any thread that runs in interrupt context and
7284 * attempts to set f_props_update to a non-zero value
7285 */
7286 props_update = fas->f_props_update;
7287 fas->f_props_update = 0;
7288 for (i = 0; i < NTARGETS_WIDE; i++) {
7289 if (props_update & (1<<i)) {
7290 fas_update_props(fas, i);
7291 }
7292 }
7293 }
7294 fas_check_waitQ_and_mutex_exit(fas);
7295
7296 }
7297 rw_exit(&fas_global_rwlock);
7298
7299 again:
7300 mutex_enter(&fas_global_mutex);
7301 if (fas_timeout_initted && fas_timeout_id) {
7302 fas_timeout_id = timeout(fas_watch, NULL, fas_tick);
7303 }
7304 mutex_exit(&fas_global_mutex);
7305 TRACE_0(TR_FAC_SCSI_FAS, TR_FAS_WATCH_END, "fas_watch_end");
7306 }
7307
7308 static void
7309 fas_watchsubr(struct fas *fas)
7310 {
7311 short slot;
7312 int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
7313 struct f_slots *tag_slots;
7314
7315 for (slot = 0; slot < N_SLOTS; slot += d) {
7316
7317 #ifdef FAS_TEST
7318 if (fas_btest) {
7319 fas_btest = 0;
7320 (void) fas_reset_bus(fas);
7321 return;
7322 }
7323 if (fas_force_timeout && fas->f_tcmds[slot]) {
7324 fas_cmd_timeout(fas, slot);
7325 fas_force_timeout = 0;
7326 return;
7327 }
7328 fas_test_reset(fas, slot);
7329 fas_test_abort(fas, slot);
7330 #endif /* FAS_TEST */
7331
7332 /*
7333 * check tagged cmds first
7334 */
7335 tag_slots = fas->f_active[slot];
7336 DPRINTF3(
7337 "fas_watchsubr: slot %x: tcmds=%x, timeout=%x\n",
7338 slot, fas->f_tcmds[slot], tag_slots->f_timeout);
7339
7340 if ((fas->f_tcmds[slot] > 0) && (tag_slots->f_timebase)) {
7341
7342 if (tag_slots->f_timebase <=
7343 fas_scsi_watchdog_tick) {
7344 tag_slots->f_timebase +=
7345 fas_scsi_watchdog_tick;
7346 continue;
7347 }
7348
7349 tag_slots->f_timeout -= fas_scsi_watchdog_tick;
7350
7351 if (tag_slots->f_timeout < 0) {
7352 fas_cmd_timeout(fas, slot);
7353 return;
7354 }
7355 if ((tag_slots->f_timeout) <=
7356 fas_scsi_watchdog_tick) {
7357 IPRINTF1("pending timeout on slot=%x\n",
7358 slot);
7359 IPRINTF("draining all queues\n");
7360 fas_set_throttles(fas, 0, N_SLOTS,
7361 DRAIN_THROTTLE);
7362 }
7363 }
7364 }
7365 }
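#ifdef FAS_EXAMPLE_SKETCH	/* hypothetical guard; sketches are not compiled */
/*
 * The per-slot watchdog bookkeeping above, reduced to a skeleton:
 * each tick either lets a freshly (re)armed slot warm up its timebase,
 * or burns one tick of the remaining budget.  Crossing zero triggers
 * timeout recovery; coming within one tick of zero starts draining
 * early.  Return codes are illustrative.
 */
static int	/* 0 = ok, 1 = drain queues, 2 = timed out */
example_watch_tick(long *timebase, long *budget, long tick)
{
	if (*timebase <= tick) {
		*timebase += tick;	/* just armed; warm up first */
		return (0);
	}
	*budget -= tick;
	if (*budget < 0) {
		return (2);
	}
	return ((*budget <= tick) ? 1 : 0);
}
#endif	/* FAS_EXAMPLE_SKETCH */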
7366
7367 /*
7368 * timeout recovery
7369 */
7370 static void
7371 fas_cmd_timeout(struct fas *fas, int slot)
7372 {
7373 int d = ((fas->f_dslot == 0)? 1 : fas->f_dslot);
7374 int target, lun, i, n, tag, ncmds;
7375 struct fas_cmd *sp = NULL;
7376 struct fas_cmd *ssp;
7377
7378 ASSERT(fas->f_tcmds[slot]);
7379
7380 #ifdef FAS_TEST
7381 if (fas_test_stop) {
7382 debug_enter("timeout");
7383 }
7384 #endif
7385
7386 /*
7387 * set throttle back; no more draining necessary
7388 */
7389 for (i = 0; i < N_SLOTS; i += d) {
7390 if (fas->f_throttle[i] == DRAIN_THROTTLE) {
7391 fas_full_throttle(fas, i);
7392 }
7393 }
7394
7395 if (NOTAG(slot/NLUNS_PER_TARGET)) {
7396 sp = fas->f_active[slot]->f_slot[0];
7397 }
7398
7399 /*
7400 * if no interrupt pending for next second then the current
7401 * cmd must be stuck; switch slot and sp to current slot and cmd
7402 */
7403 if (fas->f_current_sp && fas->f_state != STATE_FREE) {
7404 for (i = 0; (i < 10000) && (INTPENDING(fas) == 0); i++) {
7405 drv_usecwait(100);
7406 }
7407 if (INTPENDING(fas) == 0) {
7408 slot = fas->f_current_sp->cmd_slot;
7409 sp = fas->f_current_sp;
7410 }
7411 }
7412
7413 target = slot / NLUNS_PER_TARGET;
7414 lun = slot % NLUNS_PER_TARGET;
7415
7416 /*
7417 * update all outstanding pkts for this slot
7418 */
7419 n = fas->f_active[slot]->f_n_slots;
7420 for (ncmds = tag = 0; tag < n; tag++) {
7421 ssp = fas->f_active[slot]->f_slot[tag];
7422 if (ssp && ssp->cmd_pkt->pkt_time) {
7423 fas_set_pkt_reason(fas, ssp, CMD_TIMEOUT,
7424 STAT_TIMEOUT | STAT_ABORTED);
7425 fas_short_dump_cmd(fas, ssp);
7426 ncmds++;
7427 }
7428 }
7429
7430 /*
7431 * no timed-out cmds here?
7432 */
7433 if (ncmds == 0) {
7434 return;
7435 }
7436
7437 /*
7438 * dump all we know about this timeout
7439 */
7440 if (sp) {
7441 if (sp->cmd_flags & CFLAG_CMDDISC) {
7442 fas_log(fas, CE_WARN,
7443 "Disconnected command timeout for Target %d.%d",
7444 target, lun);
7445 } else {
7446 ASSERT(sp == fas->f_current_sp);
7447 fas_log(fas, CE_WARN,
7448 "Connected command timeout for Target %d.%d",
7449 target, lun);
7450 /*
7451 * Current command timeout appears to relate often
7452 * to noisy SCSI in synchronous mode.
7453 */
7454 if (fas->f_state == ACTS_DATA_DONE) {
7455 fas_sync_wide_backoff(fas, sp, slot);
7456 }
7457 }
7458 #ifdef FASDEBUG
7459 fas_printstate(fas, "timeout");
7460 #endif
7461 } else {
7462 fas_log(fas, CE_WARN,
7463 "Disconnected tagged cmd(s) (%d) timeout for Target %d.%d",
7464 fas->f_tcmds[slot], target, lun);
7465 }
7466
7467 if (fas_abort_cmd(fas, sp, slot) == ACTION_SEARCH) {
7468 (void) fas_istart(fas);
7469 }
7470 }
7471
7472 /*
7473 * fas_sync_wide_backoff() increases sync period and enables slow
7474 * cable mode.
7475 * the second time, we revert back to narrow/async
7476 * we count on a bus reset to disable wide in the target and will
7477 * never renegotiate wide again
7478 */
7479 static void
7480 fas_sync_wide_backoff(struct fas *fas, struct fas_cmd *sp,
7481 int slot)
7482 {
7483 char phase;
7484 ushort_t state = fas->f_state;
7485 uchar_t tgt = slot / NLUNS_PER_TARGET;
7486 uint_t tshift = 1 << tgt;
7487
7488 phase = fas_reg_read(fas, &fas->f_reg->fas_stat);
7489 phase &= FAS_PHASE_MASK;
7490
7491 IPRINTF4(
7492 "fas_sync_wide_backoff: target %d: state=%x, phase=%x, sp=0x%p\n",
7493 tgt, state, phase, (void *)sp);
7494
7495 #ifdef FASDEBUG
7496 if (fas_no_sync_wide_backoff) {
7497 return;
7498 }
7499 #endif
7500
7501 /*
7502 * if this is not the first time, or sync is disabled
7503 * thru scsi_options, then disable wide
7504 */
7505 if ((fas->f_backoff & tshift) ||
7506 (fas->f_nosync & tshift)) {
7507 /*
7508 * disable wide for just this target
7509 */
7510 if ((fas->f_nowide & tshift) == 0) {
7511 fas_log(fas, CE_WARN,
7512 "Target %d disabled wide SCSI mode", tgt);
7513 }
7514 /*
7515 * do not reset the bit in f_nowide because that
7516 * would not force a renegotiation of wide
7517 * and do not change any register value yet because
7518 * we may have reconnects before the renegotiations
7519 */
7520 fas->f_target_scsi_options[tgt] &= ~SCSI_OPTIONS_WIDE;
7521 }
7522
7523 /*
7524 * reduce xfer rate. if this is the first time, reduce by
7525 * 100%. second time, disable sync and wide.
7526 */
7527 if (fas->f_offset[tgt] != 0) {
7528 /*
7529 * do not reset the bit in f_nosync because that
7530 * would not force a renegotiation of sync
7531 */
7532 if (fas->f_backoff & tshift) {
7533 if ((fas->f_nosync & tshift) == 0) {
7534 fas_log(fas, CE_WARN,
7535 "Target %d reverting to async. mode",
7536 tgt);
7537 }
7538 fas->f_target_scsi_options[tgt] &=
7539 ~(SCSI_OPTIONS_SYNC | SCSI_OPTIONS_FAST);
7540 } else {
7541 /* increase period by 100% */
7542 fas->f_neg_period[tgt] *= 2;
7543
7544 fas_log(fas, CE_WARN,
7545 "Target %d reducing sync. transfer rate", tgt);
7546 }
7547 }
7548 fas->f_backoff |= tshift;
7549
7550 /*
7551 * always enable slow cable mode, if not already enabled
7552 */
7553 if ((fas->f_fasconf & FAS_CONF_SLOWMODE) == 0) {
7554 fas->f_fasconf |= FAS_CONF_SLOWMODE;
7555 fas_reg_write(fas, &fas->f_reg->fas_conf, fas->f_fasconf);
7556 IPRINTF("Reverting to slow SCSI cable mode\n");
7557 }
7558
7559 /*
7560 * Force sync renegotiation and update properties
7561 */
7562 fas_force_renegotiation(fas, tgt);
7563 fas->f_props_update |= (1<<tgt);
7564 }
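#ifdef FAS_EXAMPLE_SKETCH	/* hypothetical guard; sketches are not compiled */
/*
 * The two-stage backoff policy above in miniature: the first offense
 * doubles the negotiated period (halving the sync rate); a repeat
 * offense, or a target that already has sync disabled, falls all the
 * way back to narrow/async.  The flags word stands in for f_backoff.
 */
static void
example_backoff(ushort_t *backoff, int tgt, uchar_t *neg_period,
    int *use_sync, int *use_wide)
{
	ushort_t tshift = 1 << tgt;

	if (*backoff & tshift) {
		*use_sync = 0;		/* second time: revert to async... */
		*use_wide = 0;		/* ...and narrow */
	} else {
		*neg_period *= 2;	/* first time: halve the rate */
	}
	*backoff |= tshift;
}
#endif	/* FAS_EXAMPLE_SKETCH */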
7565
7566 /*
7567 * handle failed negotiations (either reject or bus free condition)
7568 */
7569 static void
7570 fas_reset_sync_wide(struct fas *fas)
7571 {
7572 struct fas_cmd *sp = fas->f_current_sp;
7573 int tgt = Tgt(sp);
7574
7575 if (fas->f_wdtr_sent) {
7576 IPRINTF("wide neg message rejected or bus free\n");
7577 fas->f_nowide |= (1<<tgt);
7578 fas->f_fasconf3[tgt] &= ~FAS_CONF3_WIDE;
7579 fas_reg_write(fas, &fas->f_reg->fas_conf3,
7580 fas->f_fasconf3[tgt]);
7581 /*
7582 * clear offset just in case it goes to
7583 * data phase
7584 */
7585 fas_reg_write(fas,
7586 (uchar_t *)&fas->f_reg->fas_sync_offset, 0);
7587 } else if (fas->f_sdtr_sent) {
7588 volatile struct fasreg *fasreg =
7589 fas->f_reg;
7590 IPRINTF("sync neg message rejected or bus free\n");
7591 fas->f_nosync |= (1<<tgt);
7592 fas->f_offset[tgt] = 0;
7593 fas->f_sync_period[tgt] = 0;
7594 fas_reg_write(fas,
7595 (uchar_t *)&fasreg->fas_sync_period, 0);
7596 fas_reg_write(fas,
7597 (uchar_t *)&fasreg->fas_sync_offset, 0);
7599 fas->f_fasconf3[tgt] &= ~FAS_CONF3_FASTSCSI;
7600 fas_reg_write(fas, &fasreg->fas_conf3,
7601 fas->f_fasconf3[tgt]);
7602 }
7603
7604 fas_force_renegotiation(fas, tgt);
7605 }
7606
7607 /*
7608 * force wide and sync renegotiation
7609 */
7610 static void
7611 fas_force_renegotiation(struct fas *fas, int target)
7612 {
7613 ushort_t tshift = 1<<target;
7614 fas->f_sync_known &= ~tshift;
7615 fas->f_sync_enabled &= ~tshift;
7616 fas->f_wide_known &= ~tshift;
7617 fas->f_wide_enabled &= ~tshift;
7618 }
7619
7620 /*
7621 * update conf3 register for wide negotiation
7622 */
7623 static void
7624 fas_set_wide_conf3(struct fas *fas, int target, int width)
7625 {
7626 ASSERT(width <= 1);
7627 switch (width) {
7628 case 0:
7629 fas->f_fasconf3[target] &= ~FAS_CONF3_WIDE;
7630 break;
7631 case 1:
7632 fas->f_fasconf3[target] |= FAS_CONF3_WIDE;
7633 fas->f_wide_enabled |= (1<<target);
7634 break;
7635 }
7636
7637 fas_reg_write(fas, &fas->f_reg->fas_conf3, fas->f_fasconf3[target]);
7638 fas->f_fasconf3_reg_last = fas->f_fasconf3[target];
7639 }
7640
7641 /*
7642 * Abort command handling
7643 *
7644 * abort current cmd, either by device reset or immediately with bus reset
7645 * (usually an abort msg doesn't completely solve the problem, therefore
7646 * a device or bus reset is recommended)
7647 */
7648 static int
7649 fas_abort_curcmd(struct fas *fas)
7650 {
7651 if (fas->f_current_sp) {
7652 return (fas_abort_cmd(fas, fas->f_current_sp,
7653 fas->f_current_sp->cmd_slot));
7654 } else {
7655 return (fas_reset_bus(fas));
7656 }
7657 }
7658
7659 static int
7660 fas_abort_cmd(struct fas *fas, struct fas_cmd *sp, int slot)
7661 {
7662 struct scsi_address ap;
7663
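/*
 * recover target and lun from the slot encoding
 * (slot = target * NLUNS_PER_TARGET + lun); e.g. if
 * NLUNS_PER_TARGET is 8, slot 19 maps to target 2, lun 3
 */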
7664 ap.a_hba_tran = fas->f_tran;
7665 ap.a_target = slot / NLUNS_PER_TARGET;
7666 ap.a_lun = slot % NLUNS_PER_TARGET;
7667
7668 IPRINTF1("abort cmd 0x%p\n", (void *)sp);
7669
7670 /*
7671 * attempting to abort a connected cmd is usually fruitless, so
7672 * only try disconnected cmds;
7673 * a reset is preferable to an abort (see 1161701)
7674 */
7675 if ((fas->f_current_sp && (fas->f_current_sp->cmd_slot != slot)) ||
7676 (fas->f_state == STATE_FREE)) {
7677 IPRINTF2("attempting to reset target %d.%d\n",
7678 ap.a_target, ap.a_lun);
7679 if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
7680 return (ACTION_SEARCH);
7681 }
7682 }
7683
7684 /*
7685 * if the target won't listen, then a retry is useless;
7686 * there is also the possibility that the cmd still completed while
7687 * we were trying to reset, and the target driver may have done a
7688 * device reset which has blown away this sp.
7689 * well, we've tried, now pull the chain
7690 */
7691 IPRINTF("aborting all cmds by bus reset\n");
7692 return (fas_reset_bus(fas));
7693 }
7694
7695 /*
7696 * fas_do_scsi_abort() assumes that we already have the mutex.
7697 * during the abort, we hold the mutex and prevent callbacks by setting
7698 * the completion pointer to NULL; this also prevents a target driver
7699 * from attempting a scsi_abort/reset while we are aborting.
7700 * because the completion pointer is NULL we can still update the
7701 * packet after completion.
7702 * the throttle for this slot is cleared either by fas_abort_connected_cmd
7703 * or fas_runpoll, which prevents new cmds from starting while aborting
7704 */
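/*
 * the order tried below: removal from the ready Q, abort of a
 * connected cmd (data phase only), then abort of disconnected
 * cmd(s) via a proxy message
 */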
7705 static int
7706 fas_do_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
7707 {
7708 struct fas *fas = ADDR2FAS(ap);
7709 struct fas_cmd *sp;
7710 int rval = FALSE;
7711 short slot;
7712 struct fas_cmd *cur_sp = fas->f_current_sp;
7713 void (*cur_savec)(), (*sp_savec)();
7714 int sp_tagged_flag, abort_msg;
7715
7716 if (pkt) {
7717 sp = PKT2CMD(pkt);
7718 slot = sp->cmd_slot;
7719 ASSERT(slot == ((ap->a_target * NLUNS_PER_TARGET) | ap->a_lun));
7720 } else {
7721 sp = NULL;
7722 slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;
7723 }
7724
7725 fas_move_waitQ_to_readyQ(fas);
7726
7727 /*
7728 * if no specific command was passed, all cmds for this slot
7729 * will be aborted; if a specific command was passed as an
7730 * argument, only that command will be aborted
7731 */
7732 ASSERT(mutex_owned(FAS_MUTEX(fas)));
7733 IPRINTF4("fas_scsi_abort for slot %x, "
7734 "sp=0x%p, pkt_flags=%x, cur_sp=0x%p\n",
7735 slot, (void *)sp, (sp? sp->cmd_pkt_flags : 0), (void *)cur_sp);
7736
7737 /*
7738 * first check if the cmd is in the ready queue or
7739 * in the active queue
7740 */
7741 if (sp) {
7742 IPRINTF3("aborting one command 0x%p for %d.%d\n",
7743 (void *)sp, ap->a_target, ap->a_lun);
7744 rval = fas_remove_from_readyQ(fas, sp, slot);
7745 if (rval) {
7746 IPRINTF("aborted one ready cmd\n");
7747 fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
7748 fas_decrement_ncmds(fas, sp);
7749 fas_call_pkt_comp(fas, sp);
7750 goto exit;
7751
7752 } else if ((sp !=
7753 fas->f_active[slot]->f_slot[sp->cmd_tag[1]])) {
7754 IPRINTF("cmd doesn't exist here\n");
7755 rval = TRUE;
7756 goto exit;
7757 }
7758 }
7759
7760 /*
7761 * hold off any new commands while attempting to abort
7762 * an active cmd
7763 */
7764 fas_set_throttles(fas, slot, 1, HOLD_THROTTLE);
7765
7766 if (cur_sp) {
7767 /*
7768 * prevent completion on current cmd
7769 */
7770 cur_savec = cur_sp->cmd_pkt->pkt_comp;
7771 cur_sp->cmd_pkt->pkt_comp = NULL;
7772 }
7773
7774 if (sp) {
7775 /*
7776 * the cmd exists here. is it connected or disconnected?
7777 * if connected but still selecting, we can't abort now.
7778 * prevent completion on this cmd
7779 */
7780 sp_tagged_flag = (sp->cmd_pkt_flags & FLAG_TAGMASK);
7781 abort_msg = (sp_tagged_flag? MSG_ABORT_TAG : MSG_ABORT);
7782 sp_savec = sp->cmd_pkt->pkt_comp;
7783 sp->cmd_pkt->pkt_comp = NULL;
7784
7785 /* connected but not selecting? */
7786 if ((sp == cur_sp) && (fas->f_state != STATE_FREE) &&
7787 (sp->cmd_pkt->pkt_state)) {
7788 rval = fas_abort_connected_cmd(fas, sp, abort_msg);
7789 }
7790
7791 /* if abort connected cmd failed, try abort disconnected */
7792 if ((rval == 0) &&
7793 (sp->cmd_flags & CFLAG_CMDDISC) &&
7794 ((sp->cmd_flags & CFLAG_COMPLETED) == 0)) {
7795 rval = fas_abort_disconnected_cmd(fas, ap, sp,
7796 abort_msg, slot);
7797 }
7798
7799 if (rval) {
7800 sp->cmd_flags |= CFLAG_COMPLETED;
7801 fas_set_pkt_reason(fas, sp, CMD_ABORTED, STAT_ABORTED);
7802 }
7803
7804 sp->cmd_pkt->pkt_comp = sp_savec;
7805
7806 } else {
7807 IPRINTF2("aborting all commands for %d.%d\n",
7808 ap->a_target, ap->a_lun);
7809 abort_msg = MSG_ABORT;
7810
7811 /* active and not selecting ? */
7812 if (cur_sp && (fas->f_state != STATE_FREE) &&
7813 (cur_sp->cmd_slot == slot) &&
7814 cur_sp->cmd_pkt->pkt_state) {
7815 rval = fas_abort_connected_cmd(fas, cur_sp,
7816 abort_msg);
7817 }
7818 if (rval == 0) {
7819 rval = fas_abort_disconnected_cmd(fas, ap,
7820 NULL, abort_msg, slot);
7821 }
7822 }
7823
7824 done:
7825 /* complete the current sp */
7826 if (cur_sp) {
7827 cur_sp->cmd_pkt->pkt_comp = cur_savec;
7828 if (cur_sp->cmd_flags & CFLAG_COMPLETED) {
7829 fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT);
7830 cur_sp->cmd_flags &= ~CFLAG_COMPLETED;
7831 fas_decrement_ncmds(fas, cur_sp);
7832 fas_call_pkt_comp(fas, cur_sp);
7833 }
7834 }
7835
7836 /* complete the sp passed as 2nd arg */
7837 if (sp && (sp != cur_sp) && (sp->cmd_flags & CFLAG_COMPLETED)) {
7838 sp->cmd_flags &= ~CFLAG_COMPLETED;
7839 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
7840 fas_decrement_ncmds(fas, sp);
7841 fas_call_pkt_comp(fas, sp);
7842 }
7843
7844 /* clean up all cmds for this slot */
7845 if (rval && (abort_msg == MSG_ABORT)) {
7846 /*
7847 * mark all commands here as aborted
7848 * abort msg has been accepted, now cleanup queues;
7849 */
7850 fas_mark_packets(fas, slot, CMD_ABORTED, STAT_ABORTED);
7851 fas_flush_tagQ(fas, slot);
7852 fas_flush_readyQ(fas, slot);
7853 }
7854 fas_set_throttles(fas, slot, 1, MAX_THROTTLE);
7855
7856 exit:
7857 if (fas->f_state == STATE_FREE) {
7858 (void) fas_ustart(fas);
7859 }
7860
7861 ASSERT(mutex_owned(FAS_MUTEX(fas)));
7862
7863 #ifdef FASDEBUG
7864 if (rval && fas_test_stop) {
7865 debug_enter("abort succeeded");
7866 }
7867 #endif
7868 return (rval);
7869 }
7870
7871 /*
7872 * mark all packets with new reason and update statistics
7873 */
7874 static void
7875 fas_mark_packets(struct fas *fas, int slot, uchar_t reason, uint_t stat)
7876 {
7877 struct fas_cmd *sp = fas->f_readyf[slot];
7878
7879 while (sp != 0) {
7880 fas_set_pkt_reason(fas, sp, reason, STAT_ABORTED);
7881 sp = sp->cmd_forw;
7882 }
7883 if (fas->f_tcmds[slot]) {
7884 int n = 0;
7885 ushort_t tag;
7886
7887 for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
7888 if ((sp = fas->f_active[slot]->f_slot[tag]) != 0) {
7889 fas_set_pkt_reason(fas, sp, reason, stat);
7890 n++;
7891 }
7892 }
7893 ASSERT(fas->f_tcmds[slot] == n);
7894 }
7895 }
7896
7897 /*
7898 * set pkt_reason and OR in pkt_statistics flag
7899 */
7900 static void
7901 fas_set_pkt_reason(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
7902 uint_t stat)
7903 {
7904 if (sp) {
7905 if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
7906 sp->cmd_pkt->pkt_reason = reason;
7907 }
7908 sp->cmd_pkt->pkt_statistics |= stat;
7909 IPRINTF3("sp=0x%p, pkt_reason=%x, pkt_stat=%x\n",
7910 (void *)sp, reason, sp->cmd_pkt->pkt_statistics);
7911 }
7912 }
7913
7914 /*
7915 * delete specified cmd from the ready queue
7916 */
7917 static int
7918 fas_remove_from_readyQ(struct fas *fas, struct fas_cmd *sp, int slot)
7919 {
7920 struct fas_cmd *ssp, *psp;
7921
7922 /*
7923 * command has not been started yet and is still in the ready queue
7924 */
7925 if (sp) {
7926 ASSERT(fas->f_ncmds > 0);
7927 /*
7928 * find packet on the ready queue and remove it
7929 */
7930 for (psp = NULL, ssp = fas->f_readyf[slot]; ssp != NULL;
7931 psp = ssp, ssp = ssp->cmd_forw) {
7932 if (ssp == sp) {
7933 if (fas->f_readyf[slot] == sp) {
7934 fas->f_readyf[slot] = sp->cmd_forw;
7935 } else {
7936 psp->cmd_forw = sp->cmd_forw;
7937 }
7938 if (fas->f_readyb[slot] == sp) {
7939 fas->f_readyb[slot] = psp;
7940 }
7941 return (TRUE);
7942 }
7943 }
7944 }
7945 return (FALSE);
7946 }
7947
7948 /*
7949 * add cmd to the head of the readyQ;
7950 * due to tag allocation failure or preemption we have to return
7951 * this cmd to the readyQ
7952 */
7953 static void
7954 fas_head_of_readyQ(struct fas *fas, struct fas_cmd *sp)
7955 {
7956 /*
7957 * never return a NOINTR pkt to the readyQ
7958 * (fas_runpoll will resubmit)
7959 */
7960 if ((sp->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7961 struct fas_cmd *dp;
7962 int slot = sp->cmd_slot;
7963
7964 dp = fas->f_readyf[slot];
7965 fas->f_readyf[slot] = sp;
7966 sp->cmd_forw = dp;
7967 if (fas->f_readyb[slot] == NULL) {
7968 fas->f_readyb[slot] = sp;
7969 }
7970 }
7971 }
7972
7973 /*
7974 * flush cmds in ready queue
7975 */
7976 static void
7977 fas_flush_readyQ(struct fas *fas, int slot)
7978 {
7979 if (fas->f_readyf[slot]) {
7980 struct fas_cmd *sp, *nsp;
7981
7982 IPRINTF1("flushing ready queue, slot=%x\n", slot);
7983 ASSERT(fas->f_ncmds > 0);
7984
7985 sp = fas->f_readyf[slot];
7986 fas->f_readyf[slot] = fas->f_readyb[slot] = NULL;
7987
7988 while (sp != 0) {
7989 /*
7990 * save the forward pointer before calling
7991 * the completion routine
7992 */
7993 nsp = sp->cmd_forw;
7994 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
7995 ASSERT(Tgt(sp) == slot/NLUNS_PER_TARGET);
7996 fas_decrement_ncmds(fas, sp);
7997 fas_call_pkt_comp(fas, sp);
7998 sp = nsp;
7999 }
8000 fas_check_ncmds(fas);
8001 }
8002 }
8003
8004 /*
8005 * clean up the tag queue,
8006 * preserving some order by starting with the oldest tag
8007 */
8008 static void
8009 fas_flush_tagQ(struct fas *fas, int slot)
8010 {
8011 ushort_t tag, starttag;
8012 struct fas_cmd *sp;
8013 struct f_slots *tagque = fas->f_active[slot];
8014
8015 if (tagque == NULL) {
8016 return;
8017 }
8018
8019 DPRINTF2("flushing entire tag queue, slot=%x, tcmds=%x\n",
8020 slot, fas->f_tcmds[slot]);
8021
8022 #ifdef FASDEBUG
8023 {
8024 int n = 0;
8025 for (tag = 0; tag < fas->f_active[slot]->f_n_slots; tag++) {
8026 if ((sp = tagque->f_slot[tag]) != 0) {
8027 n++;
8028 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
8029 if (sp->cmd_pkt->pkt_reason == CMD_CMPLT) {
8030 if ((sp->cmd_flags & CFLAG_FINISHED) ==
8031 0) {
8032 debug_enter("fas_flush_tagQ");
8033 }
8034 }
8035 }
8036 }
8037 ASSERT(fas->f_tcmds[slot] == n);
8038 }
8039 #endif
8040 tag = starttag = fas->f_active[slot]->f_tags;
8041
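/* walk the circular tag space exactly once, oldest tag first */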
8042 do {
8043 if ((sp = tagque->f_slot[tag]) != 0) {
8044 fas_flush_cmd(fas, sp, 0, 0);
8045 }
8046 tag = ((ushort_t)(tag + 1)) %
8047 (ushort_t)fas->f_active[slot]->f_n_slots;
8048 } while (tag != starttag);
8049
8050 ASSERT(fas->f_tcmds[slot] == 0);
8051 EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
8052 fas_check_ncmds(fas);
8053 }
8054
8055 /*
8056 * cleanup one active command
8057 */
8058 static void
8059 fas_flush_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t reason,
8060 uint_t stat)
8061 {
8062 short slot = sp->cmd_slot;
8063
8064 ASSERT(fas->f_ncmds > 0);
8065 ASSERT((sp->cmd_flags & CFLAG_FREE) == 0);
8066 ASSERT(sp == fas->f_active[slot]->f_slot[sp->cmd_tag[1]]);
8067
8068 fas_remove_cmd(fas, sp, NEW_TIMEOUT);
8069 fas_decrement_ncmds(fas, sp);
8070 fas_set_pkt_reason(fas, sp, reason, stat);
8071 fas_call_pkt_comp(fas, sp);
8072
8073 EPRINTF2("ncmds = %x, ndisc=%x\n", fas->f_ncmds, fas->f_ndisc);
8074 fas_check_ncmds(fas);
8075 }
8076
8077 /*
8078 * prepare a proxy cmd (a cmd sent on behalf of the target driver,
8079 * usually for error recovery or abort/reset)
8080 */
8081 static void
8082 fas_makeproxy_cmd(struct fas_cmd *sp, struct scsi_address *ap,
8083 struct scsi_pkt *pkt, int nmsgs, ...)
8084 {
8085 va_list vap;
8086 int i;
8087
8088 ASSERT(nmsgs <= (CDB_GROUP5 - CDB_GROUP0 - 3));
8089
8090 bzero(sp, sizeof (*sp));
8091 bzero(pkt, scsi_pkt_size());
8092
8093 pkt->pkt_address = *ap;
8094 pkt->pkt_cdbp = (opaque_t)&sp->cmd_cdb[0];
8095 pkt->pkt_scbp = (opaque_t)&sp->cmd_scb;
8096 pkt->pkt_ha_private = (opaque_t)sp;
8097 sp->cmd_pkt = pkt;
8098 sp->cmd_scblen = 1;
8099 sp->cmd_pkt_flags = pkt->pkt_flags = FLAG_NOINTR;
8100 sp->cmd_flags = CFLAG_CMDPROXY;
8101 sp->cmd_cdb[FAS_PROXY_TYPE] = FAS_PROXY_SNDMSG;
8102 sp->cmd_cdb[FAS_PROXY_RESULT] = FALSE;
8103 sp->cmd_cdb[FAS_PROXY_DATA] = (char)nmsgs;
8104
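/*
 * the proxy msg bytes ride in the tail of cmd_cdb:
 * [ FAS_PROXY_TYPE, FAS_PROXY_RESULT, nmsgs, msg0, msg1, ... ];
 * the ASSERT above guarantees they fit
 */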
8105 va_start(vap, nmsgs);
8106 for (i = 0; i < nmsgs; i++) {
8107 sp->cmd_cdb[FAS_PROXY_DATA + 1 + i] = (uchar_t)va_arg(vap, int);
8108 }
8109 va_end(vap);
8110 }
8111
8112 /*
8113 * send a proxy cmd and check the result
8114 */
8115 static int
8116 fas_do_proxy_cmd(struct fas *fas, struct fas_cmd *sp,
8117 struct scsi_address *ap, char *what)
8118 {
8119 int rval;
8120
8121 IPRINTF3("Sending proxy %s message to %d.%d\n", what,
8122 ap->a_target, ap->a_lun);
8123 if (fas_accept_pkt(fas, sp, TRAN_BUSY_OK) == TRAN_ACCEPT &&
8124 sp->cmd_pkt->pkt_reason == CMD_CMPLT &&
8125 sp->cmd_cdb[FAS_PROXY_RESULT] == TRUE) {
8126 IPRINTF3("Proxy %s succeeded for %d.%d\n", what,
8127 ap->a_target, ap->a_lun);
8128 ASSERT(fas->f_current_sp != sp);
8129 rval = TRUE;
8130 } else {
8131 IPRINTF5(
8132 "Proxy %s failed for %d.%d, result=%x, reason=%x\n", what,
8133 ap->a_target, ap->a_lun, sp->cmd_cdb[FAS_PROXY_RESULT],
8134 sp->cmd_pkt->pkt_reason);
8135 ASSERT(fas->f_current_sp != sp);
8136 rval = FALSE;
8137 }
8138 return (rval);
8139 }
8140
8141 /*
8142 * abort a connected command by sending an abort msg; hold off on
8143 * starting new cmds by setting throttles to HOLD_THROTTLE
8144 */
8145 static int
8146 fas_abort_connected_cmd(struct fas *fas, struct fas_cmd *sp, uchar_t msg)
8147 {
8148 int rval = FALSE;
8149 int flags = sp->cmd_pkt_flags;
8150
8151 /*
8152 * if reset delay active we cannot access the target.
8153 */
8154 if (fas->f_reset_delay[Tgt(sp)]) {
8155 return (rval);
8156 }
8157
8158 /*
8159 * only abort while in data phase; otherwise we mess up msg phase
8160 */
8161 if (!((fas->f_state == ACTS_DATA) ||
8162 (fas->f_state == ACTS_DATA_DONE))) {
8163 return (rval);
8164 }
8165
8166
8167 IPRINTF3("Sending abort message %s to connected %d.%d\n",
8168 scsi_mname(msg), Tgt(sp), Lun(sp));
8169
8170
8171 fas->f_abort_msg_sent = 0;
8172 fas->f_omsglen = 1;
8173 fas->f_cur_msgout[0] = msg;
8174 sp->cmd_pkt_flags |= FLAG_NOINTR;
8175 fas_assert_atn(fas);
8176
8177 (void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
8178
8179 /*
8180 * now check if the msg was taken;
8181 * f_abort_msg_sent is set in fas_handle_msg_out_done when the abort
8182 * msg has actually gone out (ie. a msg out phase occurred)
8183 */
8184 if (fas->f_abort_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
8185 IPRINTF2("target %d.%d aborted\n",
8186 Tgt(sp), Lun(sp));
8187 rval = TRUE;
8188 } else {
8189 IPRINTF2("target %d.%d did not abort\n",
8190 Tgt(sp), Lun(sp));
8191 }
8192 sp->cmd_pkt_flags = flags;
8193 fas->f_omsglen = 0;
8194 return (rval);
8195 }
8196
8197 /*
8198 * abort a disconnected command; if it is a tagged command, we need
8199 * to include the tag
8200 */
8201 static int
8202 fas_abort_disconnected_cmd(struct fas *fas, struct scsi_address *ap,
8203 struct fas_cmd *sp, uchar_t msg, int slot)
8204 {
8205 auto struct fas_cmd local;
8206 struct fas_cmd *proxy_cmdp = &local;
8207 struct scsi_pkt *pkt;
8208 int rval;
8209 int target = ap->a_target;
8210
8211 /*
8212 * if reset delay is active, we cannot start a selection
8213 * and there shouldn't be a cmd outstanding
8214 */
8215 if (fas->f_reset_delay[target] != 0) {
8216 return (FALSE);
8217 }
8218
8219 if (sp)
8220 ASSERT(sp->cmd_slot == slot);
8221
8222 IPRINTF1("aborting disconnected tagged cmd(s) with %s\n",
8223 scsi_mname(msg));
8224 pkt = kmem_alloc(scsi_pkt_size(), KM_SLEEP);
8225 if (sp && (TAGGED(target) && (msg == MSG_ABORT_TAG))) {
8226 int tag = sp->cmd_tag[1];
8227 ASSERT(sp == fas->f_active[slot]->f_slot[tag]);
8228 fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 3,
8229 MSG_SIMPLE_QTAG, tag, msg);
8230 } else {
8231 fas_makeproxy_cmd(proxy_cmdp, ap, pkt, 1, msg);
8232 }
8233
8234 rval = fas_do_proxy_cmd(fas, proxy_cmdp, ap, scsi_mname(msg));
8235 kmem_free(pkt, scsi_pkt_size());
8236 return (rval);
8237 }
8238
8239 /*
8240 * reset handling:
8241 * fas_do_scsi_reset assumes that we have already entered the mutex
8242 */
8243 static int
8244 fas_do_scsi_reset(struct scsi_address *ap, int level)
8245 {
8246 int rval = FALSE;
8247 struct fas *fas = ADDR2FAS(ap);
8248 short slot = (ap->a_target * NLUNS_PER_TARGET) | ap->a_lun;
8249
8250 ASSERT(mutex_owned(FAS_MUTEX(fas)));
8251 IPRINTF3("fas_scsi_reset for slot %x, level=%x, tcmds=%x\n",
8252 slot, level, fas->f_tcmds[slot]);
8253
8254 fas_move_waitQ_to_readyQ(fas);
8255
8256 if (level == RESET_ALL) {
8257 /*
8258 * We know that fas_reset_bus() returns ACTION_RETURN.
8259 */
8260 (void) fas_reset_bus(fas);
8261
8262 /*
8263 * Now call fas_dopoll() to field the reset interrupt
8264 * which will then call fas_reset_recovery which will
8265 * call the completion function for all commands.
8266 */
8267 if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) {
8268 /*
8269 * reset fas
8270 */
8271 fas_internal_reset(fas, FAS_RESET_FAS);
8272 (void) fas_reset_bus(fas);
8273 if (fas_dopoll(fas, SHORT_POLL_TIMEOUT) <= 0) {
8274 fas_log(fas,
8275 CE_WARN, "reset scsi bus failed");
8276 New_state(fas, STATE_FREE);
8277 } else {
8278 rval = TRUE;
8279 }
8280 } else {
8281 rval = TRUE;
8282 }
8283
8284 } else {
8285 struct fas_cmd *cur_sp = fas->f_current_sp;
8286 void (*savec)() = NULL;
8287
8288 /*
8289 * prevent new commands from starting
8290 */
8291 fas_set_all_lun_throttles(fas, slot, HOLD_THROTTLE);
8292
8293 /*
8294 * zero pkt_comp so it won't complete during the reset and
8295 * we can still update the packet after the reset.
8296 */
8297 if (cur_sp) {
8298 savec = cur_sp->cmd_pkt->pkt_comp;
8299 cur_sp->cmd_pkt->pkt_comp = NULL;
8300 }
8301
8302 /*
8303 * is this a connected cmd but not selecting?
8304 */
8305 if (cur_sp && (fas->f_state != STATE_FREE) &&
8306 (cur_sp->cmd_pkt->pkt_state != 0) &&
8307 (ap->a_target == (Tgt(cur_sp)))) {
8308 rval = fas_reset_connected_cmd(fas, ap);
8309 }
8310
8311 /*
8312 * if not connected or fas_reset_connected_cmd() failed,
8313 * attempt a reset_disconnected_cmd
8314 */
8315 if (rval == FALSE) {
8316 rval = fas_reset_disconnected_cmd(fas, ap);
8317 }
8318
8319 /*
8320 * cleanup if reset was successful
8321 * complete the current sp first.
8322 */
8323 if (cur_sp) {
8324 cur_sp->cmd_pkt->pkt_comp = savec;
8325 if (cur_sp->cmd_flags & CFLAG_COMPLETED) {
8326 if (ap->a_target == (Tgt(cur_sp))) {
8327 fas_set_pkt_reason(fas, cur_sp,
8328 CMD_RESET, STAT_DEV_RESET);
8329 }
8330 fas_remove_cmd(fas, cur_sp, NEW_TIMEOUT);
8331 cur_sp->cmd_flags &= ~CFLAG_COMPLETED;
8332 fas_decrement_ncmds(fas, cur_sp);
8333 fas_call_pkt_comp(fas, cur_sp);
8334 }
8335 }
8336
8337 if (rval == TRUE) {
8338 fas_reset_cleanup(fas, slot);
8339 } else {
8340 IPRINTF1("fas_scsi_reset failed for slot %x\n", slot);
8341
8342 /*
8343 * restore throttles to max throttle, regardless
8344 * of what they were (fas_set_throttles() will deal
8345 * with an active reset delay);
8346 * restoring the old throttle is not
8347 * such a good idea
8348 */
8349 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
8350
8351 }
8352
8353 if (fas->f_state == STATE_FREE) {
8354 (void) fas_ustart(fas);
8355 }
8356 }
8357 exit:
8358 ASSERT(mutex_owned(FAS_MUTEX(fas)));
8359 ASSERT(fas->f_ncmds >= fas->f_ndisc);
8360
8361 #ifdef FASDEBUG
8362 if (rval && fas_test_stop) {
8363 debug_enter("reset succeeded");
8364 }
8365 #endif
8366 return (rval);
8367 }
8368
8369 /*
8370 * reset delay is handled by a separate watchdog; this ensures that
8371 * regardless of fas_scsi_watchdog_tick, the reset delay will not change
8372 */
8373 static void
8374 fas_start_watch_reset_delay(struct fas *fas)
8375 {
8376 mutex_enter(&fas_global_mutex);
8377 if ((fas_reset_watch == 0) && FAS_CAN_SCHED) {
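/*
 * FAS_WATCH_RESET_DELAY_TICK is in msecs; scale to usecs for
 * drv_usectohz()
 */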
8378 fas_reset_watch = timeout(fas_watch_reset_delay, NULL,
8379 drv_usectohz((clock_t)FAS_WATCH_RESET_DELAY_TICK * 1000));
8380 }
8381 ASSERT((fas_reset_watch != 0) || (fas->f_flags & FAS_FLG_NOTIMEOUTS));
8382 mutex_exit(&fas_global_mutex);
8383 }
8384
8385 /*
8386 * set throttles to HOLD and set reset_delay for all target/luns
8387 */
8388 static void
8389 fas_setup_reset_delay(struct fas *fas)
8390 {
8391 if (!ddi_in_panic()) {
8392 int i;
8393
8394 fas_set_throttles(fas, 0, N_SLOTS, HOLD_THROTTLE);
8395 for (i = 0; i < NTARGETS_WIDE; i++) {
8396 fas->f_reset_delay[i] = fas->f_scsi_reset_delay;
8397 }
8398 fas_start_watch_reset_delay(fas);
8399 } else {
8400 drv_usecwait(fas->f_scsi_reset_delay * 1000);
8401 }
8402 }
8403
8404 /*
8405 * fas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8406 * fas instance for active reset delays
8407 */
8408 /*ARGSUSED*/
8409 static void
8410 fas_watch_reset_delay(void *arg)
8411 {
8412 struct fas *fas;
8413 struct fas *lfas; /* last not_done fas */
8414 int not_done = 0;
8415
8416 mutex_enter(&fas_global_mutex);
8417 fas_reset_watch = 0;
8418 mutex_exit(&fas_global_mutex);
8419
8420 rw_enter(&fas_global_rwlock, RW_READER);
8421 for (fas = fas_head; fas != (struct fas *)NULL; fas = fas->f_next) {
8422 if (fas->f_tran == 0) {
8423 continue;
8424 }
8425 mutex_enter(FAS_MUTEX(fas));
8426 not_done += fas_watch_reset_delay_subr(fas);
8427 lfas = fas;
8428 fas_check_waitQ_and_mutex_exit(fas);
8429 }
8430 rw_exit(&fas_global_rwlock);
8431 if (not_done) {
8432 ASSERT(lfas != NULL);
8433 fas_start_watch_reset_delay(lfas);
8434 }
8435 }
8436
8437 static int
8438 fas_watch_reset_delay_subr(struct fas *fas)
8439 {
8440 short slot, s;
8441 int start_slot = -1;
8442 int done = 0;
8443
8444 for (slot = 0; slot < N_SLOTS; slot += NLUNS_PER_TARGET) {
8445
8446 /*
8447 * check if a reset delay is active; once it expires, go back to
8448 * full throttle, which will unleash the cmds in the ready Q
8449 */
8450 s = slot/NLUNS_PER_TARGET;
8451 if (fas->f_reset_delay[s] != 0) {
8452 EPRINTF2("target%d: reset delay=%d\n", s,
8453 fas->f_reset_delay[s]);
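/* f_reset_delay counts down in FAS_WATCH_RESET_DELAY_TICK steps */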
8454 fas->f_reset_delay[s] -= FAS_WATCH_RESET_DELAY_TICK;
8455 if (fas->f_reset_delay[s] <= 0) {
8456 /*
8457 * clear throttle for all luns on this target
8458 */
8459 fas->f_reset_delay[s] = 0;
8460 fas_set_all_lun_throttles(fas,
8461 slot, MAX_THROTTLE);
8462 IPRINTF1("reset delay completed, slot=%x\n",
8463 slot);
8464 if (start_slot == -1) {
8465 start_slot = slot;
8466 }
8467 } else {
8468 done = -1;
8469 }
8470 }
8471 }
8472
8473 /*
8474 * start a cmd if a reset delay expired
8475 */
8476 if (start_slot != -1 && fas->f_state == STATE_FREE) {
8477 (void) fas_ustart(fas);
8478 }
8479 return (done);
8480 }
8481
8482 /*
8483 * cleanup after a device reset. this affects all target's luns
8484 */
8485 static void
8486 fas_reset_cleanup(struct fas *fas, int slot)
8487 {
8488 /*
8489 * reset msg has been accepted, now cleanup queues;
8490 * for all luns of this target
8491 */
8492 int i, start, end;
8493 int target = slot/NLUNS_PER_TARGET;
8494
8495 start = slot & ~(NLUNS_PER_TARGET-1);
8496 end = start + NLUNS_PER_TARGET;
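/*
 * the mask above assumes NLUNS_PER_TARGET is a power of two;
 * e.g. with NLUNS_PER_TARGET == 8, slot 19 yields start = 16
 * and end = 24
 */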
8497 IPRINTF4("fas_reset_cleanup: slot %x, start=%x, end=%x, tcmds=%x\n",
8498 slot, start, end, fas->f_tcmds[slot]);
8499
8500 ASSERT(!(fas->f_current_sp &&
8501 (fas->f_current_sp->cmd_slot == slot) &&
8502 (fas->f_state & STATE_SELECTING)));
8503
8504 /*
8505 * if we are not in panic set up a reset delay for this target,
8506 * a zero throttle forces all new requests into the ready Q
8507 */
8508 if (!ddi_in_panic()) {
8509 fas_set_all_lun_throttles(fas, start, HOLD_THROTTLE);
8510 fas->f_reset_delay[target] = fas->f_scsi_reset_delay;
8511 fas_start_watch_reset_delay(fas);
8512 } else {
8513 drv_usecwait(fas->f_scsi_reset_delay * 1000);
8514 }
8515
8516 for (i = start; i < end; i++) {
8517 fas_mark_packets(fas, i, CMD_RESET, STAT_DEV_RESET);
8518 fas_flush_tagQ(fas, i);
8519 fas_flush_readyQ(fas, i);
8520 if (fas->f_arq_pkt[i]) {
8521 struct fas_cmd *sp = fas->f_arq_pkt[i];
8522 struct arq_private_data *arq_data =
8523 (struct arq_private_data *)
8524 (sp->cmd_pkt->pkt_private);
8525 if (sp->cmd_pkt->pkt_comp) {
8526 ASSERT(arq_data->arq_save_sp == NULL);
8527 }
8528 }
8529 ASSERT(fas->f_tcmds[i] == 0);
8530 }
8531 ASSERT(fas->f_ncmds >= fas->f_ndisc);
8532
8533 fas_force_renegotiation(fas, target);
8534 }
8535
8536 /*
8537 * reset a currently disconnected target
8538 */
8539 static int
8540 fas_reset_disconnected_cmd(struct fas *fas, struct scsi_address *ap)
8541 {
8542 auto struct fas_cmd local;
8543 struct fas_cmd *sp = &local;
8544 struct scsi_pkt *pkt;
8545 int rval;
8546
8547 pkt = kmem_alloc(scsi_pkt_size(), KM_SLEEP);
8548 fas_makeproxy_cmd(sp, ap, pkt, 1, MSG_DEVICE_RESET);
8549 rval = fas_do_proxy_cmd(fas, sp, ap, scsi_mname(MSG_DEVICE_RESET));
8550 kmem_free(pkt, scsi_pkt_size());
8551 return (rval);
8552 }
8553
8554 /*
8555 * reset a target with a currently connected command:
8556 * assert ATN and send MSG_DEVICE_RESET; zero throttles temporarily
8557 * to prevent new cmds from starting, regardless of the outcome
8558 */
8559 static int
8560 fas_reset_connected_cmd(struct fas *fas, struct scsi_address *ap)
8561 {
8562 int rval = FALSE;
8563 struct fas_cmd *sp = fas->f_current_sp;
8564 int flags = sp->cmd_pkt_flags;
8565
8566 /*
8567 * only attempt to reset in data phase; during other phases
8568 * asserting ATN may just cause confusion
8569 */
8570 if (!((fas->f_state == ACTS_DATA) ||
8571 (fas->f_state == ACTS_DATA_DONE))) {
8572 return (rval);
8573 }
8574
8575 IPRINTF2("Sending reset message to connected %d.%d\n",
8576 ap->a_target, ap->a_lun);
8577 fas->f_reset_msg_sent = 0;
8578 fas->f_omsglen = 1;
8579 fas->f_cur_msgout[0] = MSG_DEVICE_RESET;
8580 sp->cmd_pkt_flags |= FLAG_NOINTR;
8581
8582 fas_assert_atn(fas);
8583
8584 /*
8585 * poll for interrupts until bus free
8586 */
8587 (void) fas_dopoll(fas, SHORT_POLL_TIMEOUT);
8588
8589 /*
8590 * now check if the msg was taken;
8591 * f_reset_msg_sent is set in fas_handle_msg_out_done when the
8592 * msg has actually gone out (ie. a msg out phase occurred)
8593 */
8594 if (fas->f_reset_msg_sent && (sp->cmd_flags & CFLAG_COMPLETED)) {
8595 IPRINTF2("target %d.%d reset\n", ap->a_target, ap->a_lun);
8596 rval = TRUE;
8597 } else {
8598 IPRINTF2("target %d.%d did not reset\n",
8599 ap->a_target, ap->a_lun);
8600 }
8601 sp->cmd_pkt_flags = flags;
8602 fas->f_omsglen = 0;
8603
8604 return (rval);
8605 }
8606
8607 /*
8608 * reset the scsi bus to blow all commands away
8609 */
8610 static int
8611 fas_reset_bus(struct fas *fas)
8612 {
8613 IPRINTF("fas_reset_bus:\n");
8614 New_state(fas, ACTS_RESET);
8615
8616 fas_internal_reset(fas, FAS_RESET_SCSIBUS);
8617
8618 /*
8619 * Now that we've reset the SCSI bus, we'll take a SCSI RESET
8620 * interrupt and use that to clean up the state of things.
8621 */
8622 return (ACTION_RETURN);
8623 }
8624
8625 /*
8626 * fas_reset_recovery is called on the reset interrupt and cleans
8627 * up all cmds (active or waiting)
8628 */
8629 static int
8630 fas_reset_recovery(struct fas *fas)
8631 {
8632 short slot, start_slot;
8633 int i;
8634 int rval = ACTION_SEARCH;
8635 int max_loop = 0;
8636
8637 IPRINTF("fas_reset_recovery:\n");
8638 fas_check_ncmds(fas);
8639
8640 /*
8641 * renegotiate wide and sync for all targets
8642 */
8643 fas->f_sync_known = fas->f_wide_known = 0;
8644
8645 /*
8646 * reset dma engine
8647 */
8648 FAS_FLUSH_DMA_HARD(fas);
8649
8650 /*
8651 * set throttles and reset delay
8652 */
8653 fas_setup_reset_delay(fas);
8654
8655 /*
8656 * clear interrupts until they go away
8657 */
8658 while (INTPENDING(fas) && (max_loop < FAS_RESET_SPIN_MAX_LOOP)) {
8659 volatile struct fasreg *fasreg = fas->f_reg;
8660 fas->f_stat = fas_reg_read(fas, &fasreg->fas_stat);
8661 fas->f_stat2 = fas_reg_read(fas, &fasreg->fas_stat2);
8662 fas->f_step = fas_reg_read(fas, &fasreg->fas_step);
8663 fas->f_intr = fas_reg_read(fas, &fasreg->fas_intr);
8664 drv_usecwait(FAS_RESET_SPIN_DELAY_USEC);
8665 max_loop++;
8666 }
8667
8668 if (max_loop >= FAS_RESET_SPIN_MAX_LOOP) {
8669 fas_log(fas, CE_WARN, "Resetting SCSI bus failed");
8670 }
8671
8672 fas_reg_cmd_write(fas, CMD_FLUSH);
8673
8674 /*
8675 * reset the chip; this shouldn't be necessary, but sometimes
8676 * we get a hang in the next data-in phase
8677 */
8678 fas_internal_reset(fas, FAS_RESET_FAS);
8679
8680 /*
8681 * was the reset expected? if not, it must be an external bus reset
8682 */
8683 if (fas->f_state != ACTS_RESET) {
8684 if (fas->f_ncmds) {
8685 fas_log(fas, CE_WARN, "external SCSI bus reset");
8686 }
8687 }
8688
8689 if (fas->f_ncmds == 0) {
8690 rval = ACTION_RETURN;
8691 goto done;
8692 }
8693
8694 /*
8695 * completely reset the state of the softc data.
8696 */
8697 fas_internal_reset(fas, FAS_RESET_SOFTC);
8698
8699 /*
8700 * Hold the state of the host adapter open
8701 */
8702 New_state(fas, ACTS_FROZEN);
8703
8704 /*
8705 * for right now just claim that all
8706 * commands have been destroyed by a SCSI reset
8707 * and let already set reason fields or callers
8708 * decide otherwise for specific commands.
8709 */
8710 start_slot = fas->f_next_slot;
8711 slot = start_slot;
8712 do {
8713 fas_check_ncmds(fas);
8714 fas_mark_packets(fas, slot, CMD_RESET, STAT_BUS_RESET);
8715 fas_flush_tagQ(fas, slot);
8716 fas_flush_readyQ(fas, slot);
8717 if (fas->f_arq_pkt[slot]) {
8718 struct fas_cmd *sp = fas->f_arq_pkt[slot];
8719 struct arq_private_data *arq_data =
8720 (struct arq_private_data *)
8721 (sp->cmd_pkt->pkt_private);
8722 if (sp->cmd_pkt->pkt_comp) {
8723 ASSERT(arq_data->arq_save_sp == NULL);
8724 }
8725 }
8726 slot = NEXTSLOT(slot, fas->f_dslot);
8727 } while (slot != start_slot);
8728
8729 fas_check_ncmds(fas);
8730
8731 /*
8732 * reset timeouts
8733 */
8734 for (i = 0; i < N_SLOTS; i++) {
8735 if (fas->f_active[i]) {
8736 fas->f_active[i]->f_timebase = 0;
8737 fas->f_active[i]->f_timeout = 0;
8738 fas->f_active[i]->f_dups = 0;
8739 }
8740 }
8741
8742 done:
8743 /*
8744 * Move the state back to free...
8745 */
8746 New_state(fas, STATE_FREE);
8747 ASSERT(fas->f_ncmds >= fas->f_ndisc);
8748
8749 /*
8750 * perform the reset notification callbacks that are registered.
8751 */
8752 (void) scsi_hba_reset_notify_callback(&fas->f_mutex,
8753 &fas->f_reset_notify_listf);
8754
8755 /*
8756 * if reset delay is still active a search is meaningless
8757 * but do it anyway
8758 */
8759 return (rval);
8760 }
8761
8762 /*
8763 * hba_tran ops for quiesce and unquiesce
8764 */
8765 static int
8766 fas_scsi_quiesce(dev_info_t *dip)
8767 {
8768 struct fas *fas;
8769 scsi_hba_tran_t *tran;
8770
8771 tran = ddi_get_driver_private(dip);
8772 if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) {
8773 return (-1);
8774 }
8775
8776 return (fas_quiesce_bus(fas));
8777 }
8778
8779 static int
8780 fas_scsi_unquiesce(dev_info_t *dip)
8781 {
8782 struct fas *fas;
8783 scsi_hba_tran_t *tran;
8784
8785 tran = ddi_get_driver_private(dip);
8786 if ((tran == NULL) || ((fas = TRAN2FAS(tran)) == NULL)) {
8787 return (-1);
8788 }
8789
8790 return (fas_unquiesce_bus(fas));
8791 }
8792
8793 #ifdef FAS_TEST
8794 /*
8795 * torture test functions
8796 */
8797 static void
8798 fas_test_reset(struct fas *fas, int slot)
8799 {
8800 struct scsi_address ap;
8801 char target = slot/NLUNS_PER_TARGET;
8802
8803 if (fas_rtest & (1 << target)) {
8804 ap.a_hba_tran = fas->f_tran;
8805 ap.a_target = target;
8806 ap.a_lun = 0;
8807 if ((fas_rtest_type == 1) &&
8808 (fas->f_state == ACTS_DATA_DONE)) {
8809 if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
8810 fas_rtest = 0;
8811 }
8812 } else if ((fas_rtest_type == 2) &&
8813 (fas->f_state == ACTS_DATA_DONE)) {
8814 if (fas_do_scsi_reset(&ap, RESET_ALL)) {
8815 fas_rtest = 0;
8816 }
8817 } else {
8818 if (fas_do_scsi_reset(&ap, RESET_TARGET)) {
8819 fas_rtest = 0;
8820 }
8821 }
8822 }
8823 }
8824
8825 static void
8826 fas_test_abort(struct fas *fas, int slot)
8827 {
8828 struct fas_cmd *sp = fas->f_current_sp;
8829 struct scsi_address ap;
8830 char target = slot/NLUNS_PER_TARGET;
8831 struct scsi_pkt *pkt = NULL;
8832
8833 if (fas_atest & (1 << target)) {
8834 ap.a_hba_tran = fas->f_tran;
8835 ap.a_target = target;
8836 ap.a_lun = 0;
8837
8838 if ((fas_atest_disc == 0) && sp &&
8839 (sp->cmd_slot == slot) &&
8840 ((sp->cmd_flags & CFLAG_CMDDISC) == 0)) {
8841 pkt = sp->cmd_pkt;
8842 } else if ((fas_atest_disc == 1) && NOTAG(target)) {
8843 sp = fas->f_active[slot]->f_slot[0];
8844 if (sp && (sp->cmd_flags & CFLAG_CMDDISC)) {
8845 pkt = sp->cmd_pkt;
8846 }
8847 } else if ((fas_atest_disc == 1) && (sp == 0) &&
8848 TAGGED(target) &&
8849 (fas->f_tcmds[slot] != 0)) {
8850 int tag;
8851 /*
8852 * find the oldest tag
8853 */
8854 for (tag = NTAGS-1; tag >= 0; tag--) {
8855 if ((sp = fas->f_active[slot]->f_slot[tag])
8856 != 0)
8857 break;
8858 }
8859 if (sp) {
8860 pkt = sp->cmd_pkt;
8861 ASSERT(sp->cmd_slot == slot);
8862 } else {
8863 return;
8864 }
8865 } else if (fas_atest_disc == 2 && (sp == 0) &&
8866 (fas->f_tcmds[slot] != 0)) {
8867 pkt = NULL;
8868 } else if (fas_atest_disc == 2 && NOTAG(target)) {
8869 pkt = NULL;
8870 } else if (fas_atest_disc == 3 && fas->f_readyf[slot]) {
8871 pkt = fas->f_readyf[slot]->cmd_pkt;
8872 } else if (fas_atest_disc == 4 &&
8873 fas->f_readyf[slot] && fas->f_readyf[slot]->cmd_forw) {
8874 pkt = fas->f_readyf[slot]->cmd_forw->cmd_pkt;
8875 } else if (fas_atest_disc == 5 && fas->f_readyb[slot]) {
8876 pkt = fas->f_readyb[slot]->cmd_pkt;
8877 } else if ((fas_atest_disc == 6) && sp &&
8878 (sp->cmd_slot == slot) &&
8879 (fas->f_state == ACTS_DATA_DONE)) {
8880 pkt = sp->cmd_pkt;
8881 } else if (fas_atest_disc == 7) {
8882 if (fas_do_scsi_abort(&ap, NULL)) {
8883 if (fas_do_scsi_abort(&ap, NULL)) {
8884 if (fas_do_scsi_reset(&ap,
8885 RESET_TARGET)) {
8886 fas_atest = 0;
8887 }
8888 }
8889 }
8890 return;
8891 } else {
8892 return;
8893 }
8894
8895 fas_log(fas, CE_NOTE, "aborting pkt=0x%p state=%x\n",
8896 (void *)pkt, (pkt != NULL? pkt->pkt_state : 0));
8897 if (fas_do_scsi_abort(&ap, pkt)) {
8898 fas_atest = 0;
8899 }
8900 }
8901 }
8902 #endif /* FAS_TEST */
8903
8904 /*
8905 * capability interface
8906 */
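/*
 * a target driver reaches this code through the DDI capability
 * routines; a minimal sketch of such a caller:
 *
 *	if (scsi_ifgetcap(ap, "tagged-qing", 1) == 1)
 *		(void) scsi_ifsetcap(ap, "tagged-qing", 1, 1);
 */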
8907 static int
8908 fas_commoncap(struct scsi_address *ap, char *cap, int val,
8909 int tgtonly, int doset)
8910 {
8911 struct fas *fas = ADDR2FAS(ap);
8912 int cidx;
8913 int target = ap->a_target;
8914 ushort_t tshift = (1<<target);
8915 ushort_t ntshift = ~tshift;
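/* tshift/ntshift: this target's bit and its complement */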
8916 int rval = FALSE;
8917
8918 mutex_enter(FAS_MUTEX(fas));
8919
8920 if (cap == (char *)0) {
8921 goto exit;
8922 }
8923
8924 cidx = scsi_hba_lookup_capstr(cap);
8925 if (cidx == -1) {
8926 rval = UNDEFINED;
8927 } else if (doset) {
8928 /*
8929 * we only allow setting capabilities on a per-target
8930 * basis, not for all targets at once
8931 */
8932 if (!tgtonly) {
8933 goto exit;
8934 }
8935 switch (cidx) {
8936 case SCSI_CAP_DMA_MAX:
8937 case SCSI_CAP_MSG_OUT:
8938 case SCSI_CAP_PARITY:
8939 case SCSI_CAP_INITIATOR_ID:
8940 case SCSI_CAP_LINKED_CMDS:
8941 case SCSI_CAP_UNTAGGED_QING:
8942 case SCSI_CAP_RESET_NOTIFICATION:
8943 /*
8944 * None of these are settable via
8945 * the capability interface.
8946 */
8947 break;
8948
8949 case SCSI_CAP_DISCONNECT:
8950 if (val)
8951 fas->f_target_scsi_options[ap->a_target] |=
8952 SCSI_OPTIONS_DR;
8953 else
8954 fas->f_target_scsi_options[ap->a_target] &=
8955 ~SCSI_OPTIONS_DR;
8956
8957 break;
8958
8959 case SCSI_CAP_SYNCHRONOUS:
8960 if (val) {
8961 fas->f_force_async &= ~tshift;
8962 } else {
8963 fas->f_force_async |= tshift;
8964 }
8965 fas_force_renegotiation(fas, target);
8966 rval = TRUE;
8967 break;
8968
8969 case SCSI_CAP_TAGGED_QING:
8970 {
8971 int slot = target * NLUNS_PER_TARGET | ap->a_lun;
8972 ushort_t old_notag = fas->f_notag;
8973
8974 /* do not allow with active tgt */
8975 if (fas->f_tcmds[slot]) {
8976 break;
8977 }
8978
8981 if (val) {
8982 if (fas->f_target_scsi_options[target] &
8983 SCSI_OPTIONS_TAG) {
8984 IPRINTF1("target %d: TQ enabled\n",
8985 target);
8986 fas->f_notag &= ntshift;
8987 } else {
8988 break;
8989 }
8990 } else {
8991 IPRINTF1("target %d: TQ disabled\n",
8992 target);
8993 fas->f_notag |= tshift;
8994 }
8995
8996 if (val && fas_alloc_active_slots(fas, slot,
8997 KM_NOSLEEP)) {
8998 fas->f_notag = old_notag;
8999 break;
9000 }
9001
9002 fas_set_all_lun_throttles(fas, slot, MAX_THROTTLE);
9003
9004 fas_update_props(fas, target);
9005 rval = TRUE;
9006 break;
9007 }
9008
9009 case SCSI_CAP_WIDE_XFER:
9010 if (val) {
9011 if (fas->f_target_scsi_options[target] &
9012 SCSI_OPTIONS_WIDE) {
9013 fas->f_nowide &= ntshift;
9014 fas->f_force_narrow &= ~tshift;
9015 } else {
9016 break;
9017 }
9018 } else {
9019 fas->f_force_narrow |= tshift;
9020 }
9021 fas_force_renegotiation(fas, target);
9022 rval = TRUE;
9023 break;
9024
9025 case SCSI_CAP_ARQ:
9026 if (val) {
9027 if (fas_create_arq_pkt(fas, ap)) {
9028 break;
9029 }
9030 } else {
9031 if (fas_delete_arq_pkt(fas, ap)) {
9032 break;
9033 }
9034 }
9035 rval = TRUE;
9036 break;
9037
9038 case SCSI_CAP_QFULL_RETRIES:
9039 fas->f_qfull_retries[target] = (uchar_t)val;
9040 rval = TRUE;
9041 break;
9042
9043 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9044 fas->f_qfull_retry_interval[target] =
9045 drv_usectohz(val * 1000);
9046 rval = TRUE;
9047 break;
9048
9049 default:
9050 rval = UNDEFINED;
9051 break;
9052 }
9053
9054 } else if (doset == 0) {
9055 int slot = target * NLUNS_PER_TARGET | ap->a_lun;
9056
9057 switch (cidx) {
9058 case SCSI_CAP_DMA_MAX:
9059 /* very high limit because of multiple dma windows */
9060 rval = 1<<30;
9061 break;
9062 case SCSI_CAP_MSG_OUT:
9063 rval = TRUE;
9064 break;
9065 case SCSI_CAP_DISCONNECT:
9066 if (tgtonly &&
9067 (fas->f_target_scsi_options[target] &
9068 SCSI_OPTIONS_DR)) {
9069 rval = TRUE;
9070 }
9071 break;
9072 case SCSI_CAP_SYNCHRONOUS:
9073 if (tgtonly && fas->f_offset[target]) {
9074 rval = TRUE;
9075 }
9076 break;
9077 case SCSI_CAP_PARITY:
9078 rval = TRUE;
9079 break;
9080 case SCSI_CAP_INITIATOR_ID:
9081 rval = MY_ID(fas);
9082 break;
9083 case SCSI_CAP_TAGGED_QING:
9084 if (tgtonly && ((fas->f_notag & tshift) == 0)) {
9085 rval = TRUE;
9086 }
9087 break;
9088 case SCSI_CAP_WIDE_XFER:
9089 if ((tgtonly && (fas->f_nowide & tshift) == 0)) {
9090 rval = TRUE;
9091 }
9092 break;
9093 case SCSI_CAP_UNTAGGED_QING:
9094 rval = TRUE;
9095 break;
9096 case SCSI_CAP_ARQ:
9097 if (tgtonly && fas->f_arq_pkt[slot]) {
9098 rval = TRUE;
9099 }
9100 break;
9101 case SCSI_CAP_LINKED_CMDS:
9102 break;
9103 case SCSI_CAP_RESET_NOTIFICATION:
9104 rval = TRUE;
9105 break;
9106 case SCSI_CAP_QFULL_RETRIES:
9107 rval = fas->f_qfull_retries[target];
9108 break;
9109 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9110 rval = drv_hztousec(
9111 fas->f_qfull_retry_interval[target]) /
9112 1000;
9113 break;
9114
9115 default:
9116 rval = UNDEFINED;
9117 break;
9118 }
9119 }
9120 exit:
9121 if (val && tgtonly) {
9122 fas_update_props(fas, target);
9123 }
9124 fas_check_waitQ_and_mutex_exit(fas);
9125
9126 if (doset) {
9127 IPRINTF6(
9128 "fas_commoncap:tgt=%x,cap=%s,tgtonly=%x,doset=%x,val=%x,rval=%x\n",
9129 target, cap, tgtonly, doset, val, rval);
9130 }
9131 return (rval);
9132 }
9133
9134 /*
9135 * property management
9136 * fas_update_props:
9137 * create/update sync/wide/TQ/scsi-options properties for this target
9138 */
9139 static void
9140 fas_update_props(struct fas *fas, int tgt)
9141 {
9142 char property[32];
9143 uint_t xfer_speed = 0;
9144 uint_t xfer_rate = 0;
9145 int wide_enabled, tq_enabled;
9146 uint_t regval = fas->f_sync_period[tgt];
9147 int offset = fas->f_offset[tgt];
9148
9149 wide_enabled = ((fas->f_nowide & (1<<tgt)) == 0);
9150 if (offset && regval) {
9151 xfer_speed =
9152 FAS_SYNC_KBPS((regval * fas->f_clock_cycle) / 1000);
9153 xfer_rate = ((wide_enabled)? 2 : 1) * xfer_speed;
9154 }
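/*
 * xfer_speed is in KB/s (per FAS_SYNC_KBPS); a wide target moves
 * two bytes per transfer, so the reported rate doubles
 */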
9155 (void) sprintf(property, "target%x-sync-speed", tgt);
9156 fas_update_this_prop(fas, property, xfer_rate);
9157
9158 (void) sprintf(property, "target%x-wide", tgt);
9159 fas_update_this_prop(fas, property, wide_enabled);
9160
9161 (void) sprintf(property, "target%x-TQ", tgt);
9162 tq_enabled = ((fas->f_notag & (1<<tgt))? 0 : 1);
9163 fas_update_this_prop(fas, property, tq_enabled);
9164
9165 }
9166
9167 static void
9168 fas_update_this_prop(struct fas *fas, char *property, int value)
9169 {
9170 dev_info_t *dip = fas->f_dev;
9171
9172 IPRINTF2("update prop: %s value=%x\n", property, value);
9173 ASSERT(mutex_owned(FAS_MUTEX(fas)));
9174 /*
9175 * We cannot hold any mutex at this point because the call to
9176 * ddi_prop_update_int() may block.
9177 */
9178 mutex_exit(FAS_MUTEX(fas));
9179 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
9180 property, value) != DDI_PROP_SUCCESS) {
9181 IPRINTF1("cannot modify/create %s property\n", property);
9182 }
9183 mutex_enter(FAS_MUTEX(fas));
9184 }
9185
9186 /*
9187 * allocate the active slots array; its size depends on whether tagQ is enabled
9188 */
9189 static int
9190 fas_alloc_active_slots(struct fas *fas, int slot, int flag)
9191 {
9192 int target = slot / NLUNS_PER_TARGET;
9193 struct f_slots *old_active = fas->f_active[slot];
9194 struct f_slots *new_active;
9195 ushort_t size;
9196 int rval = -1;
9197
9198 if (fas->f_tcmds[slot]) {
9199 IPRINTF("cannot change size of active slots array\n");
9200 return (rval);
9201 }
9202
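/*
 * untagged targets get a one-entry array; tagged targets get room
 * for all NTAGS outstanding tags
 */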
9203 size = ((NOTAG(target)) ? FAS_F_SLOT_SIZE : FAS_F_SLOTS_SIZE_TQ);
9204 EPRINTF4(
9205 "fas_alloc_active_slots: target=%x size=%x, old=0x%p, oldsize=%x\n",
9206 target, size, (void *)old_active,
9207 ((old_active == NULL) ? -1 : old_active->f_size));
9208
9209 new_active = kmem_zalloc(size, flag);
9210 if (new_active == NULL) {
9211 IPRINTF("new active alloc failed\n");
9212 } else {
9213 fas->f_active[slot] = new_active;
9214 fas->f_active[slot]->f_n_slots = (NOTAG(target) ? 1 : NTAGS);
9215 fas->f_active[slot]->f_size = size;
9216 /*
9217 * reserve tag 0 for non-tagged cmds to tagged targets
9218 */
9219 if (TAGGED(target)) {
9220 fas->f_active[slot]->f_tags = 1;
9221 }
9222 if (old_active) {
9223 kmem_free((caddr_t)old_active, old_active->f_size);
9224 }
9225 rval = 0;
9226 }
9227 return (rval);
9228 }
9229
9230 /*
9231 * Error logging, printing, and debug print routines
9232 */
9233 static char *fas_label = "fas";
9234
9235 /*PRINTFLIKE3*/
9236 static void
9237 fas_log(struct fas *fas, int level, const char *fmt, ...)
9238 {
9239 dev_info_t *dev;
9240 va_list ap;
9241
9242 if (fas) {
9243 dev = fas->f_dev;
9244 } else {
9245 dev = 0;
9246 }
9247
9248 mutex_enter(&fas_log_mutex);
9249
9250 va_start(ap, fmt);
9251 (void) vsprintf(fas_log_buf, fmt, ap);
9252 va_end(ap);
9253
9254 if (level == CE_CONT) {
9255 scsi_log(dev, fas_label, level, "%s\n", fas_log_buf);
9256 } else {
9257 scsi_log(dev, fas_label, level, "%s", fas_log_buf);
9258 }
9259
9260 mutex_exit(&fas_log_mutex);
9261 }
9262
9263 /*PRINTFLIKE2*/
9264 static void
9265 fas_printf(struct fas *fas, const char *fmt, ...)
9266 {
9267 dev_info_t *dev = 0;
9268 va_list ap;
9269 int level = CE_CONT;
9270
9271 mutex_enter(&fas_log_mutex);
9272
9273 va_start(ap, fmt);
9274 (void) vsprintf(fas_log_buf, fmt, ap);
9275 va_end(ap);
9276
9277 if (fas) {
9278 dev = fas->f_dev;
9279 level = CE_NOTE;
9280 scsi_log(dev, fas_label, level, "%s", fas_log_buf);
9281 } else {
9282 scsi_log(dev, fas_label, level, "%s\n", fas_log_buf);
9283 }
9284
9285 mutex_exit(&fas_log_mutex);
9286 }
9287
9288 #ifdef FASDEBUG
9289 /*PRINTFLIKE2*/
9290 void
9291 fas_dprintf(struct fas *fas, const char *fmt, ...)
9292 {
9293 dev_info_t *dev = 0;
9294 va_list ap;
9295
9296 if (fas) {
9297 dev = fas->f_dev;
9298 }
9299
9300 mutex_enter(&fas_log_mutex);
9301
9302 va_start(ap, fmt);
9303 (void) vsprintf(fas_log_buf, fmt, ap);
9304 va_end(ap);
9305
9306 scsi_log(dev, fas_label, SCSI_DEBUG, "%s", fas_log_buf);
9307
9308 mutex_exit(&fas_log_mutex);
9309 }
9310 #endif
9311
9312
9313 static void
9314 fas_printstate(struct fas *fas, char *msg)
9315 {
9316 volatile struct fasreg *fasreg = fas->f_reg;
9317 volatile struct dma *dmar = fas->f_dma;
9318 uint_t csr = fas_dma_reg_read(fas, &dmar->dma_csr);
9319 uint_t count = fas_dma_reg_read(fas, &dmar->dma_count);
9320 uint_t addr = fas_dma_reg_read(fas, &dmar->dma_addr);
9321 uint_t test = fas_dma_reg_read(fas, &dmar->dma_test);
9322 uint_t fas_cnt;
9323
9324 fas_log(fas, CE_WARN, "%s: current fas state:", msg);
9325 fas_printf(NULL, "Latched stat=0x%b intr=0x%b",
9326 fas->f_stat, FAS_STAT_BITS, fas->f_intr, FAS_INT_BITS);
9327 fas_printf(NULL, "last msgout: %s, last msgin: %s",
9328 scsi_mname(fas->f_last_msgout), scsi_mname(fas->f_last_msgin));
9329 fas_printf(NULL, "DMA csr=0x%b", csr, dma_bits);
9330 fas_printf(NULL,
9331 "addr=%x dmacnt=%x test=%x last=%x last_cnt=%x",
9332 addr, count, test, fas->f_lastdma, fas->f_lastcount);
9333
9334 GET_FAS_COUNT(fasreg, fas_cnt);
9335 fas_printf(NULL, "fas state:");
9336 fas_printf(NULL, "\tcount(32)=%x cmd=%x stat=%x stat2=%x intr=%x",
9337 fas_cnt, fasreg->fas_cmd, fasreg->fas_stat, fasreg->fas_stat2,
9338 fasreg->fas_intr);
9339 fas_printf(NULL,
9340 "\tstep=%x fifoflag=%x conf=%x test=%x conf2=%x conf3=%x",
9341 fasreg->fas_step, fasreg->fas_fifo_flag, fasreg->fas_conf,
9342 fasreg->fas_test, fasreg->fas_conf2, fasreg->fas_conf3);
9343
9344 if (fas->f_current_sp) {
9345 fas_dump_cmd(fas, fas->f_current_sp);
9346 }
9347 }
9348
9349 /*
9350 * dump all we know about a cmd
9351 */
9352 static void
9353 fas_dump_cmd(struct fas *fas, struct fas_cmd *sp)
9354 {
9355 int i;
9356 uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
9357 auto char buf[128];
9358
9359 buf[0] = '\0';
9360 fas_printf(NULL, "Cmd dump for Target %d Lun %d:",
9361 Tgt(sp), Lun(sp));
9362 (void) sprintf(&buf[0], " cdb=[");
9363 for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) {
9364 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9365 }
9366 (void) sprintf(&buf[strlen(buf)], " ]");
9367 fas_printf(NULL, buf);
9368 fas_printf(NULL, "State=%s Last State=%s",
9369 fas_state_name(fas->f_state), fas_state_name(fas->f_laststate));
9370 fas_printf(NULL,
9371 "pkt_state=0x%b pkt_flags=0x%x pkt_statistics=0x%x",
9372 sp->cmd_pkt->pkt_state, scsi_state_bits, sp->cmd_pkt_flags,
9373 sp->cmd_pkt->pkt_statistics);
9374 if (sp->cmd_pkt->pkt_state & STATE_GOT_STATUS) {
9375 fas_printf(NULL, "Status=0x%x\n", sp->cmd_pkt->pkt_scbp[0]);
9376 }
9377 }
9378
9379 /*ARGSUSED*/
9380 static void
9381 fas_short_dump_cmd(struct fas *fas, struct fas_cmd *sp)
9382 {
9383 int i;
9384 uchar_t *cp = (uchar_t *)sp->cmd_pkt->pkt_cdbp;
9385 auto char buf[128];
9386
9387 buf[0] = '\0';
9388 (void) sprintf(&buf[0], "?%d.%d: cdb=[", Tgt(sp), Lun(sp));
9389 for (i = 0; i < (int)sp->cmd_actual_cdblen; i++) {
9390 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9391 }
9392 (void) sprintf(&buf[strlen(buf)], " ]");
9393 fas_printf(NULL, buf);
9394 }
9395
9396 /*
9397 * state decoding for error messages
9398 */
9399 static char *
9400 fas_state_name(ushort_t state)
9401 {
9402 if (state == STATE_FREE) {
9403 return ("FREE");
9404 } else if (state & STATE_SELECTING) {
9405 if (state == STATE_SELECT_NORMAL)
9406 return ("SELECT");
9407 else if (state == STATE_SELECT_N_STOP)
9408 return ("SEL&STOP");
9409 else if (state == STATE_SELECT_N_SENDMSG)
9410 return ("SELECT_SNDMSG");
9411 else
9412 return ("SEL_NO_ATN");
9413 } else {
9414 static struct {
9415 char *sname;
9416 char state;
9417 } names[] = {
9418 "CMD_START", ACTS_CMD_START,
9419 "CMD_DONE", ACTS_CMD_DONE,
9420 "MSG_OUT", ACTS_MSG_OUT,
9421 "MSG_OUT_DONE", ACTS_MSG_OUT_DONE,
9422 "MSG_IN", ACTS_MSG_IN,
9423 "MSG_IN_MORE", ACTS_MSG_IN_MORE,
9424 "MSG_IN_DONE", ACTS_MSG_IN_DONE,
9425 "CLEARING", ACTS_CLEARING,
9426 "DATA", ACTS_DATA,
9427 "DATA_DONE", ACTS_DATA_DONE,
9428 "CMD_CMPLT", ACTS_C_CMPLT,
9429 "UNKNOWN", ACTS_UNKNOWN,
9430 "RESEL", ACTS_RESEL,
9431 "ENDVEC", ACTS_ENDVEC,
9432 "RESET", ACTS_RESET,
9433 "ABORTING", ACTS_ABORTING,
9434 "FROZEN", ACTS_FROZEN,
9435 0
9436 };
9437 int i;
9438 for (i = 0; names[i].sname; i++) {
9439 if (names[i].state == state)
9440 return (names[i].sname);
9441 }
9442 }
9443 return ("<BAD>");
9444 }
9445