/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/note.h>

#include "ghd.h"

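/*
 * Wait queue utility routines.
 *
 * As implemented below, GHD maintains a two-level wait queue: each
 * target/lun pair has a per-device queue (gdev_t) throttled by its
 * "maxactive" value, and requests are promoted from there onto a single
 * per-HBA queue (ccc_t) before being handed to the HBA driver's
 * ccc_hba_start() routine.  Each request's cmd_waitq_level records how
 * far up the queues it has progressed so the active counters can be
 * unwound when it is deleted.
 */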
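
/*
 * ghd_target_init()
 *
 *	Allocate and initialize the per-instance (gtgt_t) structure for
 *	the specified target/lun, creating the shared per-device (gdev_t)
 *	structure the first time the device is attached.  Typically called
 *	from an HBA driver's tran_tgt_init(9E) entry point.
 *
 *	A minimal caller sketch (the "softp" soft-state pointer and
 *	my_tgt_t private structure are hypothetical):
 *
 *		gtgtp = ghd_target_init(hba_dip, tgt_dip, &softp->sc_ccc,
 *		    sizeof (my_tgt_t), softp, sd->sd_address.a_target,
 *		    sd->sd_address.a_lun);
 */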
/*ARGSUSED*/
gtgt_t *
ghd_target_init(dev_info_t	*hba_dip,
		dev_info_t	*tgt_dip,
		ccc_t		*cccp,
		size_t		 tgt_private_size,
		void		*hba_private,
		ushort_t	 target,
		uchar_t		 lun)
{
	_NOTE(ARGUNUSED(hba_dip))
	gtgt_t	*gtgtp;
	size_t	 size = sizeof (*gtgtp) + tgt_private_size;
	gdev_t	*gdevp;
	ulong_t	 maxactive;

	gtgtp = kmem_zalloc(size, KM_SLEEP);

	/*
	 * initialize the per-instance structure
	 */

	gtgtp->gt_tgt_private = (void *)(gtgtp + 1);
	gtgtp->gt_size = size;
	gtgtp->gt_hba_private = hba_private;
	gtgtp->gt_target = target;
	gtgtp->gt_lun = lun;
	gtgtp->gt_ccc = cccp;

	/*
	 * Set the queue's maxactive to 1 if the "ghd-maxactive" property
	 * is not specified on the target or HBA devinfo node.
	 */
	maxactive = ddi_getprop(DDI_DEV_T_ANY, tgt_dip, 0, "ghd-maxactive", 1);
	gtgtp->gt_maxactive = maxactive;

	/* initialize the linked list pointers */
	GTGT_INIT(gtgtp);

	/*
	 * Grab both mutexes so the queue structures
	 * stay stable while adding this instance to the linked lists.
	 */
	mutex_enter(&cccp->ccc_hba_mutex);
	mutex_enter(&cccp->ccc_waitq_mutex);

	/*
	 * Search the HBA's linked list of device structures.
	 *
	 * If this device is already attached then link this instance
	 * to the existing per-device structure on the ccc_devs list.
	 */
	gdevp = CCCP2GDEVP(cccp);
	while (gdevp != NULL) {
		if (gdevp->gd_target == target && gdevp->gd_lun == lun) {
			GDBG_WAITQ(("ghd_target_init(%d,%d) found gdevp 0x%p"
			    " gtgtp 0x%p max %lu\n", target, lun,
			    (void *)gdevp, (void *)gtgtp, maxactive));

			goto foundit;
		}
		gdevp = GDEV_NEXTP(gdevp);
	}

	/*
	 * Not found.  This is the first instance for this device.
	 */

	/* allocate the per-device structure */
	gdevp = kmem_zalloc(sizeof (*gdevp), KM_SLEEP);
	gdevp->gd_target = target;
	gdevp->gd_lun = lun;

	/*
	 * Link this second-level queue to the HBA's first-level queue.
	 */
	GDEV_QATTACH(gdevp, cccp, maxactive);

	GDBG_WAITQ(("ghd_target_init(%d,%d) new gdevp 0x%p gtgtp 0x%p"
	    " max %lu\n", target, lun, (void *)gdevp, (void *)gtgtp,
	    maxactive));

foundit:

	/* save the ptr to the per-device structure */
	gtgtp->gt_gdevp = gdevp;

	/* add the per-instance structure to the per-device list */
	GTGT_ATTACH(gtgtp, gdevp);

	ghd_waitq_process_and_mutex_exit(cccp);

	return (gtgtp);
}

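/*
 * ghd_target_free()
 *
 *	Undo ghd_target_init(): unlink and free this instance's gtgt_t
 *	and, once no instances remain, detach and free the shared gdev_t
 *	as well.  Typically called from an HBA driver's tran_tgt_free(9E)
 *	entry point.
 */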
/*ARGSUSED*/
void
ghd_target_free(dev_info_t	*hba_dip,
		dev_info_t	*tgt_dip,
		ccc_t		*cccp,
		gtgt_t		*gtgtp)
{
	_NOTE(ARGUNUSED(hba_dip, tgt_dip))

	gdev_t	*gdevp = gtgtp->gt_gdevp;

	GDBG_WAITQ(("ghd_target_free(%d,%d) gdevp 0x%p gtgtp 0x%p\n",
	    gtgtp->gt_target, gtgtp->gt_lun, (void *)gdevp, (void *)gtgtp));

	/*
	 * Grab both mutexes so the queue structures
	 * stay stable while deleting this instance.
	 */
	mutex_enter(&cccp->ccc_hba_mutex);
	mutex_enter(&cccp->ccc_waitq_mutex);

	ASSERT(gdevp->gd_ninstances > 0);

	/*
	 * Remove this per-instance structure from the device list and
	 * free the memory.
	 */
	GTGT_DEATTACH(gtgtp, gdevp);
	kmem_free((caddr_t)gtgtp, gtgtp->gt_size);

	if (gdevp->gd_ninstances == 1) {
		GDBG_WAITQ(("ghd_target_free: N=1 gdevp 0x%p\n",
		    (void *)gdevp));
		/*
		 * There's now just one instance left attached to this
		 * device, so reset the queue's maxactive value
		 * from that instance's saved value.
		 */
		gtgtp = GDEVP2GTGTP(gdevp);
		GDEV_MAXACTIVE(gdevp) = gtgtp->gt_maxactive;

	} else if (gdevp->gd_ninstances == 0) {
		/* no instances left */
		GDBG_WAITQ(("ghd_target_free: N=0 gdevp 0x%p\n",
		    (void *)gdevp));

		/* detach this per-device structure from the HBA's dev list */
		GDEV_QDETACH(gdevp, cccp);
		kmem_free(gdevp, sizeof (*gdevp));

	}
#if defined(GHD_DEBUG) || defined(__lint)
	else {
		/* leave maxactive set to 1 */
		GDBG_WAITQ(("ghd_target_free: N>1 gdevp 0x%p\n",
		    (void *)gdevp));
	}
#endif

	ghd_waitq_process_and_mutex_exit(cccp);
}

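/*
 * ghd_waitq_shuffle_up()
 *
 *	Promote requests from the per-device wait queue to the HBA wait
 *	queue until the device's maxactive throttle is reached, or, when
 *	multiple instances share the device, until a single request is
 *	active.  Caller must hold the waitq mutex.
 */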
void
ghd_waitq_shuffle_up(ccc_t *cccp, gdev_t *gdevp)
{
	gcmd_t	*gcmdp;

	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	GDBG_WAITQ(("ghd_waitq_shuffle_up: cccp 0x%p gdevp 0x%p N %ld "
	    "max %ld\n", (void *)cccp, (void *)gdevp, GDEV_NACTIVE(gdevp),
	    GDEV_MAXACTIVE(gdevp)));
	for (;;) {
		/*
		 * Check the device wait queue throttle to see if we can
		 * shuffle up a request to the HBA wait queue.
		 */
		if (GDEV_NACTIVE(gdevp) >= GDEV_MAXACTIVE(gdevp)) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: N>MAX gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}

		/*
		 * Single-thread requests while multiple instances are
		 * attached, because the different target drivers might
		 * have conflicting maxactive throttles.
		 */
		if (gdevp->gd_ninstances > 1 && GDEV_NACTIVE(gdevp) > 0) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: multi gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}

		/*
		 * Promote the topmost request from the device queue to
		 * the HBA queue.
		 */
		if ((gcmdp = L2_remove_head(&GDEV_QHEAD(gdevp))) == NULL) {
			/* the device queue is empty so we're done */
			GDBG_WAITQ(("ghd_waitq_shuffle_up: MT gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}
		L2_add(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
		GDEV_NACTIVE(gdevp)++;
		gcmdp->cmd_waitq_level++;
		GDBG_WAITQ(("ghd_waitq_shuffle_up: gdevp 0x%p gcmdp 0x%p\n",
		    (void *)gdevp, (void *)gcmdp));
	}
}

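/*
 * ghd_waitq_delete()
 *
 *	Remove a request from whichever wait queue it's currently on and
 *	unwind the device and HBA active counters; cmd_waitq_level records
 *	how far up the queues the request had progressed.  Caller must
 *	hold the HBA mutex.
 */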
void
ghd_waitq_delete(ccc_t *cccp, gcmd_t *gcmdp)
{
	gtgt_t	*gtgtp = GCMDP2GTGTP(gcmdp);
	gdev_t	*gdevp = gtgtp->gt_gdevp;
#if defined(GHD_DEBUG) || defined(__lint)
	Q_t	*qp = &gdevp->gd_waitq;
#endif

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	mutex_enter(&cccp->ccc_waitq_mutex);

	/*
	 * Adjust all queue counters.  If this request is being aborted
	 * it might only have made it to the target queue.  Otherwise,
	 * both the target and HBA queues have to be adjusted when a
	 * request is completed normally.  The cmd_waitq_level value
	 * indicates which queue counters need to be adjusted.  It's
	 * incremented as the request progresses up the queues.
	 */
	switch (gcmdp->cmd_waitq_level) {
	case 0:
		break;
	case 1:
		/*
		 * If this is an early timeout or early abort, the request
		 * is still linked onto a waitq.  Remove it now.  If it's
		 * an active request and no longer on the waitq then calling
		 * L2_delete a second time does no harm.
		 */
		L2_delete(&gcmdp->cmd_q);
		break;

	case 2:
		L2_delete(&gcmdp->cmd_q);
#if defined(GHD_DEBUG) || defined(__lint)
		if (GDEV_NACTIVE(gdevp) == 0)
			debug_enter("\n\nGHD WAITQ DELETE\n\n");
#endif
		GDEV_NACTIVE(gdevp)--;
		break;

	case 3:
		/* it's an active or completed command */
#if defined(GHD_DEBUG) || defined(__lint)
		if (GDEV_NACTIVE(gdevp) == 0 || GHBA_NACTIVE(cccp) == 0)
			debug_enter("\n\nGHD WAITQ DELETE\n\n");
#endif
		GDEV_NACTIVE(gdevp)--;
		GHBA_NACTIVE(cccp)--;
		break;

	default:
		/* this shouldn't happen */
#if defined(GHD_DEBUG) || defined(__lint)
		debug_enter("\n\nGHD WAITQ LEVEL > 3\n\n");
#endif
		break;
	}

	GDBG_WAITQ(("ghd_waitq_delete: gcmdp 0x%p qp 0x%p level %ld\n",
	    (void *)gcmdp, (void *)qp, gcmdp->cmd_waitq_level));

	/*
	 * There's probably now more room in the HBA queue.  Move
	 * up as many requests as possible.
	 */
	ghd_waitq_shuffle_up(cccp, gdevp);

	mutex_exit(&cccp->ccc_waitq_mutex);
}

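/*
 * ghd_waitq_process_and_mutex_hold()
 *
 *	Drain the HBA wait queue, starting as many requests as the HBA's
 *	maxactive throttle (and any hold/freeze state) allows via the
 *	ccc_hba_start() vector.  Returns TRUE if at least one request was
 *	started.  Called, and returns, with both mutexes held.
 */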
int
ghd_waitq_process_and_mutex_hold(ccc_t *cccp)
{
	gcmd_t	*gcmdp;
	int	 rc = FALSE;

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	for (;;) {
		if (L2_EMPTY(&GHBA_QHEAD(cccp))) {
			/* return if the list is empty */
			GDBG_WAITQ(("ghd_waitq_proc: MT cccp 0x%p qp 0x%p\n",
			    (void *)cccp, (void *)&cccp->ccc_waitq));
			break;
		}
		if (GHBA_NACTIVE(cccp) >= GHBA_MAXACTIVE(cccp)) {
			/* return if the HBA is too active */
			GDBG_WAITQ(("ghd_waitq_proc: N>M cccp 0x%p qp 0x%p"
			    " N %ld max %ld\n", (void *)cccp,
			    (void *)&cccp->ccc_waitq,
			    GHBA_NACTIVE(cccp),
			    GHBA_MAXACTIVE(cccp)));
			break;
		}

		/*
		 * bail out if the wait queue has been
		 * "held" by the HBA driver
		 */
		if (cccp->ccc_waitq_held) {
			GDBG_WAITQ(("ghd_waitq_proc: held"));
			return (rc);
		}

		if (cccp->ccc_waitq_frozen) {

			clock_t lbolt, delay_in_hz, time_to_wait;

			delay_in_hz =
			    drv_usectohz(cccp->ccc_waitq_freezedelay * 1000);

			lbolt = ddi_get_lbolt();
			time_to_wait = delay_in_hz -
			    (lbolt - cccp->ccc_waitq_freezetime);

			if (time_to_wait > 0) {
				/*
				 * stay frozen; we'll be called again
				 * by ghd_timeout_softintr()
				 */
				GDBG_WAITQ(("ghd_waitq_proc: frozen"));
				return (rc);
			} else {
				/* unfreeze and continue */
				GDBG_WAITQ(("ghd_waitq_proc: unfreezing"));
				cccp->ccc_waitq_freezetime = 0;
				cccp->ccc_waitq_freezedelay = 0;
				cccp->ccc_waitq_frozen = 0;
			}
		}

		gcmdp = (gcmd_t *)L2_remove_head(&GHBA_QHEAD(cccp));
		ASSERT(gcmdp != NULL);
		GHBA_NACTIVE(cccp)++;
		gcmdp->cmd_waitq_level++;
		mutex_exit(&cccp->ccc_waitq_mutex);

		/*
		 * Start up the next I/O request.
		 */
		gcmdp->cmd_state = GCMD_STATE_ACTIVE;
		if (!(*cccp->ccc_hba_start)(cccp->ccc_hba_handle, gcmdp)) {
			/* if the HBA rejected the request, requeue it */
			gcmdp->cmd_state = GCMD_STATE_WAITQ;
			mutex_enter(&cccp->ccc_waitq_mutex);
			GHBA_NACTIVE(cccp)--;
			gcmdp->cmd_waitq_level--;
			L2_add_head(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
			GDBG_WAITQ(("ghd_waitq_proc: busy cccp 0x%p gcmdp 0x%p"
			    " handle 0x%p\n", (void *)cccp, (void *)gcmdp,
			    cccp->ccc_hba_handle));
			break;
		}
		rc = TRUE;
		mutex_enter(&cccp->ccc_waitq_mutex);
		GDBG_WAITQ(("ghd_waitq_proc: ++ cccp 0x%p gcmdp 0x%p N %ld\n",
		    (void *)cccp, (void *)gcmdp, GHBA_NACTIVE(cccp)));
	}
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));
	return (rc);
}

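/*
 * ghd_waitq_process_and_mutex_exit()
 *
 *	Process the HBA wait queue, then release both mutexes; the HBA
 *	mutex is dropped first (see the comment below on ordering).
 */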
void
ghd_waitq_process_and_mutex_exit(ccc_t *cccp)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	GDBG_WAITQ(("ghd_waitq_process_and_mutex_exit: cccp 0x%p\n",
	    (void *)cccp));

	(void) ghd_waitq_process_and_mutex_hold(cccp);

	/*
	 * Release the HBA mutex before the wait queue mutex (i.e. in
	 * acquisition order rather than the conventional reverse order)
	 * to prevent requests queued by ghd_transport() from getting
	 * hung up in the wait queue.
	 */
	mutex_exit(&cccp->ccc_hba_mutex);
	mutex_exit(&cccp->ccc_waitq_mutex);
}